diff --git a/.gitattributes b/.gitattributes
index c7d9f3332a950355d5a77d85000f05e6f45435ea..4014ed35c7437eb1776c22733c126df987a9966a 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -32,3 +32,42 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+voice_bridge/fbgemm.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/hdf5.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/libcrypto-1_1.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/libiomp5md.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/libopenblas.3hbpcjb5bpqgkwvzavebxnnj2q2g3tup.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/llvmlite/binding/llvmlite.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/numpy/.libs/libopenblas.4SP5SUA7CBGXUEOC35YP2ASOICYYEQZZ.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/numpy/core/_multiarray_umath.pyd filter=lfs diff=lfs merge=lfs -text
+voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/sys.dic filter=lfs diff=lfs merge=lfs -text
+voice_bridge/pyopenjtalk/openjtalk.pyd filter=lfs diff=lfs merge=lfs -text
+voice_bridge/python38.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/run.exe filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/lib_arpack-.7LJHSWCJNV2L27UVBY6IEIQMV7LQJHJM.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libbispeu.7AH3PCQ2E2NGLC3AQD7FFAH73KGJTZCJ.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libd_odr.SJ2UBBXLPDDMD64MKGXB66R2CWD5IC45.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libdfft.NU4EIZBEDIVVXBWR26HLW3PTNEKKIRCU.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libdfitpack.LMAPXDO5462XTHNWXJBZFJU252ZVABKI.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libdgamln.WP5Q52HGLVXILXN6MQ6JUKPFUZEHPO3N.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libopenblas.3HBPCJB5BPQGKWVZAVEBXNNJ2Q2G3TUP.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libspecfun.LQCTHMCYNULEOOGKIO6AGREE6D6V37RU.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/.libs/libvode-f2p.CPK3WLWI3UO7R5A2TENGVYGYTZJJVIU5.gfortran-win_amd64.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/linalg/_flapack.pyd filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/misc/face.dat filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/optimize/_highs/_highs_wrapper.pyd filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/sparse/_sparsetools.pyd filter=lfs diff=lfs merge=lfs -text
+voice_bridge/scipy/special/cython_special.pyd filter=lfs diff=lfs merge=lfs -text
+voice_bridge/sentencepiece/_sentencepiece.pyd filter=lfs diff=lfs merge=lfs -text
+voice_bridge/sqlite3.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/tcl86t.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/tk86t.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/torch_cpu.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/torch_python.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/torch/bin/protoc.exe filter=lfs diff=lfs merge=lfs -text
+voice_bridge/torch/lib/fbgemm.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/torch/lib/libiomp5md.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/torch/lib/torch_cpu.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/torch/lib/torch_python.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/ucrtbase.dll filter=lfs diff=lfs merge=lfs -text
+voice_bridge/unicodedata.pyd filter=lfs diff=lfs merge=lfs -text
diff --git a/voice_bridge/Grammar3.8.10.final.0.pickle b/voice_bridge/Grammar3.8.10.final.0.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..56f2ba639beb4cb82f6dd8df2cc70667429c4f06
--- /dev/null
+++ b/voice_bridge/Grammar3.8.10.final.0.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3131a0354ebd272fdb9f419a5045a4e814b8d28b4482f7ba2874eb6a1d4fc228
+size 15309
diff --git a/voice_bridge/PatternGrammar3.8.10.final.0.pickle b/voice_bridge/PatternGrammar3.8.10.final.0.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..e9de5e3aa351535c40c4d95139afac6eb97e675d
--- /dev/null
+++ b/voice_bridge/PatternGrammar3.8.10.final.0.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36ee934395b9209737b13893ddaff05fad8e239c2fdfac29d401d3fceeb30768
+size 1225
diff --git a/voice_bridge/_asyncio.pyd b/voice_bridge/_asyncio.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..63a2e950e57d2910a812e0ba59bac7cb1981262b
Binary files /dev/null and b/voice_bridge/_asyncio.pyd differ
diff --git a/voice_bridge/_bz2.pyd b/voice_bridge/_bz2.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..df8e00ae44b42cc671ed6a93c9da107a54a15684
Binary files /dev/null and b/voice_bridge/_bz2.pyd differ
diff --git a/voice_bridge/_cffi_backend.pyd b/voice_bridge/_cffi_backend.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..f9821ab4393475046d1bedf79bb527e74d8fd94a
Binary files /dev/null and b/voice_bridge/_cffi_backend.pyd differ
diff --git a/voice_bridge/_ctypes.pyd b/voice_bridge/_ctypes.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..fba68288be6cbbd07c08913dafcc0fb2659b2136
Binary files /dev/null and b/voice_bridge/_ctypes.pyd differ
diff --git a/voice_bridge/_decimal.pyd b/voice_bridge/_decimal.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..daab285bc87407d4a3ec6c7b91d7ac225b3c677a
Binary files /dev/null and b/voice_bridge/_decimal.pyd differ
diff --git a/voice_bridge/_elementtree.pyd b/voice_bridge/_elementtree.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..020c55d23f315757015fcd274910c259cfc5cf15
Binary files /dev/null and b/voice_bridge/_elementtree.pyd differ
diff --git a/voice_bridge/_hashlib.pyd b/voice_bridge/_hashlib.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..54f4d0a99b16c8987ee2691d50b1479e557db857
Binary files /dev/null and b/voice_bridge/_hashlib.pyd differ
diff --git a/voice_bridge/_lzma.pyd b/voice_bridge/_lzma.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..6a96c4e781b3e42ba520c569a24f0f1955771915
Binary files /dev/null and b/voice_bridge/_lzma.pyd differ
diff --git a/voice_bridge/_multiprocessing.pyd b/voice_bridge/_multiprocessing.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..9db8f427281870875d3e9bf939767f534dba5c5d
Binary files /dev/null and b/voice_bridge/_multiprocessing.pyd differ
diff --git a/voice_bridge/_overlapped.pyd b/voice_bridge/_overlapped.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..be7fdc37d5a3dd7bdabfb04efd1949adbe2656d5
Binary files /dev/null and b/voice_bridge/_overlapped.pyd differ
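The Grammar3.8.10 and PatternGrammar3.8.10 pickle hunks earlier in this commit are Git LFS pointer files: the three added lines are the pointer, not the pickle payload itself. As a minimal sketch of what those fields encode (the `lfs_pointer` helper below is hypothetical and uses only the standard library; real pointers are written by git-lfs itself, per the spec URL shown in the hunks):

import hashlib
from pathlib import Path

def lfs_pointer(path: str) -> str:
    # An LFS pointer records the SHA-256 of the payload (the "oid") and the
    # payload size in bytes; the actual blob lives in LFS storage.
    data = Path(path).read_bytes()
    return (
        "version https://git-lfs.github.com/spec/v1\n"
        f"oid sha256:{hashlib.sha256(data).hexdigest()}\n"
        f"size {len(data)}\n"
    )

print(lfs_pointer("voice_bridge/Grammar3.8.10.final.0.pickle"))

Run against the real pickle, this should reproduce the oid and size values shown in the corresponding hunk.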
diff --git a/voice_bridge/api-ms-win-core-console-l1-1-0.dll b/voice_bridge/api-ms-win-core-console-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..726b97532aed389cd32f423c79fe854b94e44b1d
Binary files /dev/null and b/voice_bridge/api-ms-win-core-console-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-datetime-l1-1-0.dll b/voice_bridge/api-ms-win-core-datetime-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..f2ecfa7abe6267621bdc89abe4abd1162ba41338
Binary files /dev/null and b/voice_bridge/api-ms-win-core-datetime-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-debug-l1-1-0.dll b/voice_bridge/api-ms-win-core-debug-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..7bd075bcd030dfdbfc8d4a241dd78153b240921e
Binary files /dev/null and b/voice_bridge/api-ms-win-core-debug-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-delayload-l1-1-0.dll b/voice_bridge/api-ms-win-core-delayload-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..2d369e50014461a5c4ecc0bd657ca404e2084bb1
Binary files /dev/null and b/voice_bridge/api-ms-win-core-delayload-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-errorhandling-l1-1-0.dll b/voice_bridge/api-ms-win-core-errorhandling-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..3bafba91c172aeab14dd7d0dab83b5652b545712
Binary files /dev/null and b/voice_bridge/api-ms-win-core-errorhandling-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-fibers-l1-1-0.dll b/voice_bridge/api-ms-win-core-fibers-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..651ffe133322c03b850997c71518ec621cf8f235
Binary files /dev/null and b/voice_bridge/api-ms-win-core-fibers-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-file-l1-1-0.dll b/voice_bridge/api-ms-win-core-file-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..12bf0b6c060378dd39091699525aa6abf6b4c7db
Binary files /dev/null and b/voice_bridge/api-ms-win-core-file-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-file-l1-2-0.dll b/voice_bridge/api-ms-win-core-file-l1-2-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..0b48d5a14b19704e7fdf5dd8b24e445e43e43910
Binary files /dev/null and b/voice_bridge/api-ms-win-core-file-l1-2-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-handle-l1-1-0.dll b/voice_bridge/api-ms-win-core-handle-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..c96e31d98fb8325be858ebcade2639a79ba12fb7
Binary files /dev/null and b/voice_bridge/api-ms-win-core-handle-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-heap-l1-1-0.dll b/voice_bridge/api-ms-win-core-heap-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..baa932fd5c65fe52dbd4f6691278528b6e1afc3e
Binary files /dev/null and b/voice_bridge/api-ms-win-core-heap-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-interlocked-l1-1-0.dll b/voice_bridge/api-ms-win-core-interlocked-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..7aa063977029575d4cc08a787e92dde45d959eb9
Binary files /dev/null and b/voice_bridge/api-ms-win-core-interlocked-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-io-l1-1-0.dll b/voice_bridge/api-ms-win-core-io-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..a6df318472df3c243fbc71e65c7cb23d0ca63c78
Binary files /dev/null and b/voice_bridge/api-ms-win-core-io-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-libraryloader-l1-1-0.dll b/voice_bridge/api-ms-win-core-libraryloader-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..ddd5e276e06900aa1ee504f965d558bee60597e3
Binary files /dev/null and b/voice_bridge/api-ms-win-core-libraryloader-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-localization-l1-2-0.dll b/voice_bridge/api-ms-win-core-localization-l1-2-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..dabe28e9bf2cdd020e96292bbf6f243ed7c63fa4
Binary files /dev/null and b/voice_bridge/api-ms-win-core-localization-l1-2-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-memory-l1-1-0.dll b/voice_bridge/api-ms-win-core-memory-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..63e54f31b72830c2ac509a7942eb12bb9155b3fe
Binary files /dev/null and b/voice_bridge/api-ms-win-core-memory-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-namedpipe-l1-1-0.dll b/voice_bridge/api-ms-win-core-namedpipe-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..37e956eca194fc883db0d5f0fc8152327f68589b
Binary files /dev/null and b/voice_bridge/api-ms-win-core-namedpipe-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-processenvironment-l1-1-0.dll b/voice_bridge/api-ms-win-core-processenvironment-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..a2f36050aca8f8f8a16376a1dc3236cd2c812b69
Binary files /dev/null and b/voice_bridge/api-ms-win-core-processenvironment-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-processthreads-l1-1-0.dll b/voice_bridge/api-ms-win-core-processthreads-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..f4d3a03399681ce00cdbccf25831392dd6c88cb4
Binary files /dev/null and b/voice_bridge/api-ms-win-core-processthreads-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-processthreads-l1-1-1.dll b/voice_bridge/api-ms-win-core-processthreads-l1-1-1.dll
new file mode 100644
index 0000000000000000000000000000000000000000..245cf98d3c9d3785d9a7185c8f0f9e6291451237
Binary files /dev/null and b/voice_bridge/api-ms-win-core-processthreads-l1-1-1.dll differ
diff --git a/voice_bridge/api-ms-win-core-profile-l1-1-0.dll b/voice_bridge/api-ms-win-core-profile-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..da2b687a1448253f7c7a9afab3826644bb6781d0
Binary files /dev/null and b/voice_bridge/api-ms-win-core-profile-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-rtlsupport-l1-1-0.dll b/voice_bridge/api-ms-win-core-rtlsupport-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..ae6dce55e66a5cf38f928677b53191827799ac00
Binary files /dev/null and b/voice_bridge/api-ms-win-core-rtlsupport-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-string-l1-1-0.dll b/voice_bridge/api-ms-win-core-string-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..32b52be787dd06dfe4a2f0176fc3753026ba0296
Binary files /dev/null and b/voice_bridge/api-ms-win-core-string-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-synch-l1-1-0.dll b/voice_bridge/api-ms-win-core-synch-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..b88f76af0fbb281c898d8f6284dca682fb1f39c0
Binary files /dev/null and b/voice_bridge/api-ms-win-core-synch-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-synch-l1-2-0.dll b/voice_bridge/api-ms-win-core-synch-l1-2-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..f1126e424cddaddc440426aa53e8a712565fc995
Binary files /dev/null and b/voice_bridge/api-ms-win-core-synch-l1-2-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-sysinfo-l1-1-0.dll b/voice_bridge/api-ms-win-core-sysinfo-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..527d1a12cc2db5b6af104d7fdacb789a78474fbb
Binary files /dev/null and b/voice_bridge/api-ms-win-core-sysinfo-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-timezone-l1-1-0.dll b/voice_bridge/api-ms-win-core-timezone-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..23ad51bec87496a2c01920404c255756a7025d5c
Binary files /dev/null and b/voice_bridge/api-ms-win-core-timezone-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-core-util-l1-1-0.dll b/voice_bridge/api-ms-win-core-util-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..080a9c959211f45538359c19bd451dba0ff199b5
Binary files /dev/null and b/voice_bridge/api-ms-win-core-util-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-crt-conio-l1-1-0.dll b/voice_bridge/api-ms-win-crt-conio-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..2355a627ed6b545a081ac106d184d6eb5da68f93
Binary files /dev/null and b/voice_bridge/api-ms-win-crt-conio-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-crt-locale-l1-1-0.dll b/voice_bridge/api-ms-win-crt-locale-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..571eaf068cc3799c6195064becafd76c96dafe6c
Binary files /dev/null and b/voice_bridge/api-ms-win-crt-locale-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-crt-process-l1-1-0.dll b/voice_bridge/api-ms-win-crt-process-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..bc346dc355c6a76d5bb7f1aae99d60860e044ed7
Binary files /dev/null and b/voice_bridge/api-ms-win-crt-process-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-crt-runtime-l1-1-0.dll b/voice_bridge/api-ms-win-crt-runtime-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..8cec0e933cdc8d65ec163a4a5f6bf12bd2bc8daa
Binary files /dev/null and b/voice_bridge/api-ms-win-crt-runtime-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-crt-string-l1-1-0.dll b/voice_bridge/api-ms-win-crt-string-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..45a77d5bfa07876d868405d37bc6cd96343cb97d
Binary files /dev/null and b/voice_bridge/api-ms-win-crt-string-l1-1-0.dll differ
diff --git a/voice_bridge/api-ms-win-crt-time-l1-1-0.dll b/voice_bridge/api-ms-win-crt-time-l1-1-0.dll
new file mode 100644
index 0000000000000000000000000000000000000000..1d1bdd7a9b8ee6f0ad05f8ab384604a7eb8c7735
Binary files /dev/null and b/voice_bridge/api-ms-win-crt-time-l1-1-0.dll differ
diff --git a/voice_bridge/asmjit.dll b/voice_bridge/asmjit.dll
new file mode 100644
index 0000000000000000000000000000000000000000..cd8a4c51d4213a2f4fb48da1084b3d477748db48
Binary files /dev/null and b/voice_bridge/asmjit.dll differ
diff --git a/voice_bridge/bridge_config.yaml b/voice_bridge/bridge_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf456b1efddf4e53b1c4e617b2935ad65ecf05a0
--- /dev/null
+++ b/voice_bridge/bridge_config.yaml
@@ -0,0 +1,113 @@
+global_style_setting: &global_style_setting
+  sampling_rate: 44100
+  g2p: pyopenjtalk_accent_with_pause
+
+global_tts_inference_init_args: &global_tts_inference_init_args
+  speed_control_alpha: 1.0
+  noise_scale: 0.333
+  noise_scale_dur: 0.333
+
+global_token_id_converter_init_args: &global_token_id_converter_init_args
+  token_list:
+    - <blank>
+    - <unk>
+    - '1'
+    - '2'
+    - '0'
+    - '3'
+    - '4'
+    - '-1'
+    - '5'
+    - a
+    - o
+    - '-2'
+    - i
+    - '-3'
+    - u
+    - e
+    - k
+    - n
+    - t
+    - '6'
+    - r
+    - '-4'
+    - s
+    - N
+    - m
+    - pau
+    - '7'
+    - sh
+    - d
+    - g
+    - w
+    - '8'
+    - U
+    - '-5'
+    - I
+    - cl
+    - h
+    - y
+    - b
+    - '9'
+    - j
+    - ts
+    - ch
+    - '-6'
+    - z
+    - p
+    - '-7'
+    - f
+    - ky
+    - ry
+    - '-8'
+    - gy
+    - '-9'
+    - hy
+    - ny
+    - '-10'
+    - by
+    - my
+    - '-11'
+    - '-12'
+    - '-13'
+    - py
+    - '-14'
+    - '-15'
+    - v
+    - '10'
+    - '-16'
+    - '-17'
+    - '11'
+    - '-21'
+    - '-20'
+    - '12'
+    - '-19'
+    - '13'
+    - '-18'
+    - '14'
+    - dy
+    - '15'
+    - ty
+    - '-22'
+    - '16'
+    - '18'
+    - '19'
+    - '17'
+    - <sos/eos>
+
+
+host: '127.0.0.1'
+speakers:
+  - name: Suga
+    speaker_uuid: aa33c99b-a43b-49b0-a2c8-6a81922f8213
+    version: 0.0.1
+    styles:
+      - name: γƒŽγƒΌγƒžγƒ«
+        id: 0
+        <<: *global_style_setting
+        tts_inference_init_args:
+          train_config: model/config.yaml
+          model_file: model/100epoch.pth
+          <<: *global_tts_inference_init_args
+        token_id_converter_init_args:
+          <<: *global_token_id_converter_init_args
\ No newline at end of file
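bridge_config.yaml leans on YAML anchors (&...) and merge keys (<<: *...) so that each per-style entry inherits the shared defaults. A minimal sketch of how the merged result looks once loaded; it assumes only PyYAML, whose safe_load resolves YAML 1.1 merge keys at parse time:

import yaml

with open("voice_bridge/bridge_config.yaml", encoding="utf-8") as f:
    config = yaml.safe_load(f)

# Merge keys are expanded during parsing, so the style mapping carries the
# shared defaults alongside its own fields.
style = config["speakers"][0]["styles"][0]
print(style["sampling_rate"], style["g2p"])  # 44100 pyopenjtalk_accent_with_pause

tts_args = style["tts_inference_init_args"]
print(tts_args["model_file"], tts_args["noise_scale"])  # model/100epoch.pth 0.333

tokens = style["token_id_converter_init_args"]["token_list"]
print(len(tokens))  # 85 phoneme/accent tokens

The same pattern keeps the three global_* blocks as the single source of truth if more speakers or styles are added later.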
diff --git a/voice_bridge/c10.dll b/voice_bridge/c10.dll
new file mode 100644
index 0000000000000000000000000000000000000000..c38ed136253f2ac755eda8b0c24710399ae926e4
Binary files /dev/null and b/voice_bridge/c10.dll differ
diff --git a/voice_bridge/certifi/cacert.pem b/voice_bridge/certifi/cacert.pem
new file mode 100644
index 0000000000000000000000000000000000000000..df9e4e3c75560ac0381a036a20771674e8099e91
--- /dev/null
+++ b/voice_bridge/certifi/cacert.pem
@@ -0,0 +1,4527 @@
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
+# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
+# Label: "Security Communication Root CA"
+# Serial: 0
+# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
+# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
+# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services) +# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny" +# Serial: 80544274841616 +# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88 +# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91 +# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98 +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post +# Label: "Hongkong Post Root CA 1" +# Serial: 1000 +# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca +# SHA1 Fingerprint: 
d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58 +# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2 +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- + +# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc. +# Label: "SecureSign RootCA11" +# Serial: 1 +# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26 +# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3 +# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12 +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr +MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG +A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0 +MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp +Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD +QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz +i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8 +h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV +MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9 +UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni +8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC +h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB +AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm +KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ +X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr +QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5 +pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN +QSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. +# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
+# Label: "Microsec e-Szigno Root CA 2009" +# Serial: 14014712776195784473 +# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1 +# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e +# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78 +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx +FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + 
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 6047274297262753887 +# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3 +# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa +# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +# Issuer: CN=Izenpe.com O=IZENPE S.A. +# Subject: CN=Izenpe.com O=IZENPE S.A. 
+# Label: "Izenpe.com" +# Serial: 917563065490389241595536686991402621 +# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73 +# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19 +# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j +b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Services Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2 +# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f +# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5 +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking 
O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ 
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Network CA" +# Serial: 279744 +# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78 +# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e +# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA 
OU=Root CA +# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA +# Label: "TWCA Root Certification Authority" +# Serial: 1 +# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79 +# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48 +# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44 +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D +lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2 +# Subject: O=SECOM Trust Systems CO.,LTD. 
OU=Security Communication RootCA2 +# Label: "Security Communication RootCA2" +# Serial: 0 +# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43 +# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74 +# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967 +# Label: "Actalis Authentication Root CA" +# Serial: 6271844772424770508 +# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6 +# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac +# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66 +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w +MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 2 Root CA" +# Serial: 2 +# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29 +# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99 +# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48 +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- + +# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327 +# Label: "Buypass Class 3 Root CA" +# Serial: 2 +# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec +# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57 +# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d +-----BEGIN CERTIFICATE----- 
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c +/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- + +# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 3" +# Serial: 1 +# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef +# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1 +# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ 
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 2009" +# Serial: 623603 +# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f +# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0 +# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH +# Label: "D-TRUST Root Class 3 CA 2 EV 2009" +# Serial: 623604 +# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6 +# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83 +# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81 +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw 
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt +dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +# Issuer: CN=CA Disig Root R2 O=Disig a.s. +# Subject: CN=CA Disig Root R2 O=Disig a.s. +# Label: "CA Disig Root R2" +# Serial: 10572350602393338211 +# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03 +# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71 +# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03 +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV +# Label: "ACCVRAIZ1" +# Serial: 6828503384748696800 +# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02 +# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17 +# SHA256 Fingerprint: 
9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13 +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE +AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw +CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ +BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND +VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb +qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY +HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo +G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA +lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr +IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/ +0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH +k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47 +4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO +m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa +cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl +uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI +KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls +ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG +AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT +VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG +CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA +cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA +QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA +7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA +cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA +QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA +czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu +aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt +aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud +DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF +BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp +D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU +JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m +AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD +vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms +tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH +7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA +h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF +d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H +pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA +# Label: "TWCA Global Root CA" +# Serial: 3262 +# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96 +# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65 +# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT 
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- + +# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera +# Label: "TeliaSonera Root CA v1" +# Serial: 199041966741090107964904287217786801558 +# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c +# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37 +# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89 +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx 
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx +SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi +# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi +# Label: "E-Tugra Certification Authority" +# Serial: 7667447206703254355 +# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49 +# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39 +# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 +Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +# Issuer: 
CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center +# Label: "T-TeleSec GlobalRoot Class 2" +# Serial: 1 +# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a +# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9 +# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- + +# Issuer: CN=Atos TrustedRoot 2011 O=Atos +# Subject: CN=Atos TrustedRoot 2011 O=Atos +# Label: "Atos TrustedRoot 2011" +# Serial: 6643877497813316402 +# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56 +# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21 +# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74 +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv 
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 1 G3" +# Serial: 687049649626669250736271037606554624078720034195 +# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab +# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67 +# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 2 G3" +# Serial: 390156079458959257446133169266079962026824725800 +# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06 +# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36 +# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym 
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg +KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited +# Label: "QuoVadis Root CA 3 G3" +# Serial: 268090761170461462463995952157327242137089239581 +# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7 +# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d +# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46 +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD +ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl 
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc 
OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 
55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z 
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze 
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust +# Label: "IdenTrust Commercial Root CA 1" +# Serial: 13298821034946342390520003877796839426 +# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7 +# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25 +# SHA256 
Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust +# Label: "IdenTrust Public Sector Root CA 1" +# Serial: 13298821034946342390521976156843933698 +# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba +# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd +# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ 
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority +# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority +# Label: "CFCA EV ROOT" +# Serial: 407555286 +# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30 +# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83 +# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd +BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy 
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. +# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. 
+# Label: "SZAFIR ROOT CA2" +# Serial: 357043034767186914217277344587386743377558296292 +# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99 +# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de +# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL +BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6 +ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw +NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg +Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN +QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT +3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw +3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6 +3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5 +BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN +XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF +AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw +8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG +nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP +oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy +d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg +LvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. 
OU=Certum Certification Authority +# Label: "Certum Trusted Network CA 2" +# Serial: 44979900017204383099463764357512596969 +# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2 +# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92 +# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04 +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS +adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce +# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6 +# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36 +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix +DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k +IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT +N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v +dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG +A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh +ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx +QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA +4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0 +AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10 +4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C +ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV +9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD +gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6 +Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq +NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko +LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd +ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I +XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI +M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot +9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V +Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea +j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh +X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ +l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf +bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4 +pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK +e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0 +vm9qp/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority +# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. 
Authority +# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015" +# Serial: 0 +# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef +# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66 +# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33 +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN +BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl +bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv +b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ +BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj +YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5 +MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0 +dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg +QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa +jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi +C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep +lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof +TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +# Issuer: CN=ISRG Root X1 O=Internet Security Research Group +# Subject: CN=ISRG Root X1 O=Internet Security Research Group +# Label: "ISRG Root X1" +# Serial: 172886928669790476064670243504169061120 +# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e +# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8 +# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6 +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc 
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM +# Label: "AC RAIZ FNMT-RCM" +# Serial: 485876308206448804701554682760554759 +# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d +# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20 +# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx +CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ +WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ +BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG +Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/ +yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf +BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz +WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF +tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z +374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC +IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL +mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7 +wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS +MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2 +ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet +UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H +YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3 +LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1 +RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM +LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf +77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N +JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm +fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp +6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp +1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B +9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok +RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv +uu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 1 O=Amazon +# Subject: CN=Amazon Root CA 1 O=Amazon +# Label: "Amazon Root CA 1" +# Serial: 143266978916655856878034712317230054538369994 +# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6 +# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16 +# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 2 O=Amazon +# Subject: CN=Amazon Root CA 2 O=Amazon +# Label: "Amazon Root CA 2" +# Serial: 143266982885963551818349160658925006970653239 +# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66 +# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a +# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4 +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE +76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 3 O=Amazon +# Subject: CN=Amazon Root CA 3 O=Amazon +# Label: "Amazon Root CA 3" +# Serial: 143266986699090766294700635381230934788665930 +# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87 +# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e +# SHA256 Fingerprint: 
18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4 +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +# Issuer: CN=Amazon Root CA 4 O=Amazon +# Subject: CN=Amazon Root CA 4 O=Amazon +# Label: "Amazon Root CA 4" +# Serial: 143266989758080763974105200630763877849284878 +# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd +# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be +# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92 +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM +# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1" +# Serial: 1 +# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49 +# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca +# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16 +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx +GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp +bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w +KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0 +BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy +dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG +EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll +IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU +QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT +TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg +LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7 +a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr +LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr 
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X +YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/ +iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f +AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH +V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf +IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4 +lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c +8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf +lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: 
b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm ++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud 
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq +M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC 
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE 
WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +# Issuer: CN=UCA Global G2 Root O=UniTrust +# Subject: CN=UCA Global G2 Root O=UniTrust +# Label: "UCA Global G2 Root" +# Serial: 124779693093741543919145257850076631279 +# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8 +# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a +# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9 +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH +bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x +CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds +b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr +b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9 +kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm +VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R +VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc +C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj +tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY +D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv +j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl +NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6 +iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP +O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV +ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj +L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl +1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU +b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV +PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj +y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb +EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg +DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI ++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy 
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX +UB+K+wb1whnw0A== +-----END CERTIFICATE----- + +# Issuer: CN=UCA Extended Validation Root O=UniTrust +# Subject: CN=UCA Extended Validation Root O=UniTrust +# Label: "UCA Extended Validation Root" +# Serial: 106100277556486529736699587978573607008 +# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2 +# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a +# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH +MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF +eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx +MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV +BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog +D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS +sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop +O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk +sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi +c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj +VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz +KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/ +TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G +sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs +1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD +fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN +l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ +VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5 +c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp +4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s +t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj +2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO +vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C +xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx +cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM +fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax +-----END CERTIFICATE----- + +# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036 +# Label: "Certigna Root CA" +# Serial: 269714418870597844693661054334862075617 +# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77 +# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43 +# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68 +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw +WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw +MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x +MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD +VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX +BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO 
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M +CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu +I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm +TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh +C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf +ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz +IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT +Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k +JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5 +hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB +GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov +L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo +dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr +aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq +hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L +6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG +HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6 +0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB +lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi +o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1 +gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v +faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63 +Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh +jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw +3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign Root CA - G1" +# Serial: 235931866688319308814040 +# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac +# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c +# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67 +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD +VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU +ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH +MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO +MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv +Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz +f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO +8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq +d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM +tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt +Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB +o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x +PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM +wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d +GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH +6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby 
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI +# Label: "emSign ECC Root CA - G3" +# Serial: 287880440101571086945156 +# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40 +# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1 +# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG +EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo +bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ +TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s +b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw +djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0 +WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS +fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB +zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq +hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB +CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD ++JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI +# Label: "emSign Root CA - C1" +# Serial: 825510296613316004955058 +# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68 +# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01 +# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG +A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg +SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v +dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ +BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ +HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH +3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH +GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c +xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1 +aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq +TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87 +/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4 +kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG +YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT ++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo +WXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI +# Label: "emSign ECC Root CA - C3" +# Serial: 582948710642506000014504 +# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5 +# SHA1 Fingerprint: 
b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66 +# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3 +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG +EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx +IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw +MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln +biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND +IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci +MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti +sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O +BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c +3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J +0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post +# Label: "Hongkong Post Root CA 3" +# Serial: 46170865288971385588281144162979347873371282084 +# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0 +# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02 +# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6 +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ +SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n +a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5 +NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT +CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u +Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO +dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI +VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV +9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY +2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY +vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt +bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb +x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+ +l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK +TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj +Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw +DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG +7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk +MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr +gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk +GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS +3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm +Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+ +l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c +JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP +L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa 
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG +mpv0 +-----END CERTIFICATE----- + +# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only +# Label: "Entrust Root Certification Authority - G4" +# Serial: 289383649854506086828220374796556676440 +# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88 +# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01 +# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88 +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw +gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL +Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg +MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw +BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0 +MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1 +c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ +bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ +2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E +T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j +5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM +C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T +DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX +wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A +2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm +nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8 +dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl +N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj +c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS +5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS +Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr +hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/ +B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI +AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw +H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+ +b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk +2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol +IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk +5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY +n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw== +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft ECC Root Certificate Authority 2017" +# Serial: 136839042543790627607696632466672567020 +# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67 +# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5 +# SHA256 Fingerprint: 
35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02 +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD +VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw +MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV +UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy +b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR +ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb +hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3 +FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV +L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB +iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation +# Label: "Microsoft RSA Root Certificate Authority 2017" +# Serial: 40975477897264996090493496164228220339 +# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47 +# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74 +# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0 +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl +MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N +aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ +Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0 +ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1 +HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm +gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ +jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc +aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG +YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6 +W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K +UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH ++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q +W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC +LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC +gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6 +tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh +SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2 +TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3 +pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR +xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp +GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9 +dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN +AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB 
+RA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd. +# Label: "e-Szigno Root CA 2017" +# Serial: 411379200276854331539784714 +# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98 +# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1 +# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99 +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV +BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk +LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv +b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ +BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg +THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v +IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv +xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H +Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB +eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo +jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ ++efcMQ== +-----END CERTIFICATE----- + +# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2 +# Label: "certSIGN Root CA G2" +# Serial: 313609486401300475190 +# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7 +# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32 +# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV +BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g +Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ +BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ +R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF +dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw +vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ +uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp +n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs +cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW +xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P +rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF +DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx +DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy +LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C +eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ +d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq +kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl +qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0 +OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c +NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk +ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO +pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj +03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk 
+PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE +1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX +QRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global Certification Authority" +# Serial: 1846098327275375458322922162 +# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e +# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5 +# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8 +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw +CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x +ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1 +c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx +OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI +SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn +swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu +7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8 +1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW +80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP +JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l +RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw +hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10 +coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc +BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n +twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud +DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W +0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe +uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q +lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB +aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE +sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT +MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe +qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh +VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8 +h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9 +EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK +yeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc. 
+# Label: "Trustwave Global ECC P256 Certification Authority" +# Serial: 4151900041497450638097112925 +# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54 +# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf +# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4 +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG +SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN +FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w +DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw +CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh +DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc. +# Label: "Trustwave Global ECC P384 Certification Authority" +# Serial: 2704997926503831671788816187 +# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6 +# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2 +# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97 +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf +BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3 +YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G +A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0 +d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF +Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB +BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ +j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF +1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G +A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3 +AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC +MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu +Sw== +-----END CERTIFICATE----- + +# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. +# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp. 
+# Label: "NAVER Global Root Certification Authority" +# Serial: 9013692873798656336226253319739695165984492813 +# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b +# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1 +# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65 +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM +BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG +T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx +CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD +b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA +iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH +38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE +HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz +kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP +szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq +vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf +nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG +YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo +0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a +CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K +AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I +36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN +qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj +cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm ++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL +hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe +lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7 +p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8 +piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR +LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX +5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO +dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul +9XXeifdy +-----END CERTIFICATE----- + +# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres +# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS" +# Serial: 131542671362353147877283741781055151509 +# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb +# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a +# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw +CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw +FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S +Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5 +MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL +DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS +QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH 
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK +Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu +SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC +MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy +v+c= +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa +# Label: "GlobalSign Root R46" +# Serial: 1552617688466950547958867513931858518042577 +# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef +# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90 +# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9 +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA +MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD +VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy +MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt +c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ +OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG +vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud +316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo +0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE +y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF +zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE ++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN +I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs +x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa +ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC +4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4 +7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti +2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk +pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF +FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt +rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk +ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5 +u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP +4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6 +N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3 +vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6 +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa +# Label: "GlobalSign Root E46" +# Serial: 1552617690338932563915843282459653771421763 +# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f +# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84 +# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58 +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx +CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD +ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw +MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex 
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq +R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd +yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ +7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8 ++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH +# Label: "GLOBALTRUST 2020" +# Serial: 109160994242082918454945253 +# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8 +# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2 +# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG +A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw +FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx +MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u +aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b +RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z +YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3 +QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw +yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+ +BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ +SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH +r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0 +4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me +dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw +q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2 +nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu +H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC +XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd +6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf ++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi +kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7 +wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB +TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C +MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn +4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I +aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy +qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz +# Label: "ANF Secure Server Root CA" +# Serial: 996390341000653745 +# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96 +# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74 +# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99 +-----BEGIN CERTIFICATE----- 
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV +BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk +YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV +BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN +MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF +UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD +VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj +cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q +yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH +2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX +H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL +zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR +p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz +W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/ +SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn +LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3 +n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B +u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L +9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej +rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK +pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0 +vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq +OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ +/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9 +2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI ++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2 +MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo +tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. 
OU=Certum Certification Authority +# Label: "Certum EC-384 CA" +# Serial: 160250656287871593594747141429395092468 +# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1 +# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed +# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6 +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw +CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw +JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT +EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0 +WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT +LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX +BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE +KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm +Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8 +EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J +UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn +nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority +# Label: "Certum Trusted Root CA" +# Serial: 40870380103424195783807378461123655149 +# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29 +# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5 +# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6 +MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu +MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV +BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw +MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg +U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ +n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q +p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq +NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF +8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3 +HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa +mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi +7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF +ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P +qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ +v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6 +Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD +ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4 +WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo +zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR +5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ +GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf 
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq +0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D +P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM +qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP +0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf +E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique +# Label: "TunTrust Root CA" +# Serial: 108534058042236574382096126452369648152337120275 +# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4 +# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb +# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41 +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL +BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg +Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv +b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG +EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u +IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ +n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd +2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF +VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ +GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF +li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU +r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2 +eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb +MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg +jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB +7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW +5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE +ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z +xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu +QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4 +FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH +22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP +xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn +dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5 +Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b +nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ +CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH +u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj +d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS RSA Root CA 2021" +# Serial: 76817823531813593706434026085292783742 +# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91 +# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d +# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d +-----BEGIN 
CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- + +# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA +# Label: "HARICA TLS ECC Root CA 2021" +# Serial: 137515985548005187474074462014555733966 +# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0 +# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48 +# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Subject: 
CN=Autoridad de Certificacion Firmaprofesional CIF A62634068 +# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068" +# Serial: 1977337328857672817 +# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3 +# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe +# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1 +MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg +Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc +tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd +IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC +AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw +ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m +iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF +Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ +hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P +Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE +EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV +1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t +CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR +5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw +f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9 +ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK +GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd. 
+# Label: "vTrus ECC Root CA" +# Serial: 630369271402956006249506845124680065938238527194 +# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85 +# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1 +# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3 +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw +RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY +BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz +MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u +LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0 +v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd +e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw +V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA +AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG +GJTO +-----END CERTIFICATE----- + +# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd. +# Label: "vTrus Root CA" +# Serial: 387574501246983434957692974888460947164905180485 +# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc +# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7 +# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87 +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL +BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x +FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx +MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s +THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc +IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU +AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+ +GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9 +8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH +flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt +J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim +0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN +pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ +UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW +OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB +AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet +8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j +bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM +Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv +TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS +S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr +I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9 +b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB +UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P +Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven +sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s= +-----END CERTIFICATE----- + +# 
Issuer: CN=ISRG Root X2 O=Internet Security Research Group +# Subject: CN=ISRG Root X2 O=Internet Security Research Group +# Label: "ISRG Root X2" +# Serial: 87493402998870891108772069816698636114 +# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5 +# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af +# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70 +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw +CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg +R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00 +MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT +ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW ++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9 +ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI +zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW +tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1 +/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd. +# Label: "HiPKI Root CA - G1" +# Serial: 60966262342023497858655262305426234976 +# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3 +# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60 +# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa +Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3 +YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw +qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv +Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6 +lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz +Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ +KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK +FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj +HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr +y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ +/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM +a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6 +fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG +SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc +SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza +ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc +XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg +iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho +L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF +Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr 
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+ +vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU +YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4 +# Label: "GlobalSign ECC Root CA - R4" +# Serial: 159662223612894884239637590694 +# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc +# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28 +# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R1 O=Google Trust Services LLC +# Subject: CN=GTS Root R1 O=Google Trust Services LLC +# Label: "GTS Root R1" +# Serial: 159662320309726417404178440727 +# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40 +# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a +# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ 
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R2 O=Google Trust Services LLC +# Subject: CN=GTS Root R2 O=Google Trust Services LLC +# Label: "GTS Root R2" +# Serial: 159662449406622349769042896298 +# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc +# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94 +# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu +MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R3 O=Google Trust Services LLC +# Subject: CN=GTS Root R3 O=Google Trust Services LLC +# Label: "GTS Root R3" +# Serial: 159662495401136852707857743206 +# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73 +# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46 +# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi 
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Issuer: CN=GTS Root R4 O=Google Trust Services LLC +# Subject: CN=GTS Root R4 O=Google Trust Services LLC +# Label: "GTS Root R4" +# Serial: 159662532700760215368942768210 +# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8 +# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47 +# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj +# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj +# Label: "Telia Root CA v2" +# Serial: 7288924052977061235122729490515358 +# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48 +# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd +# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx +CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE +AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1 +NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ +MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq +AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9 +vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9 +lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD +n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT +7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o +6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC +TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6 +WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R +DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI +pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj +YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy +rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi +0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM 
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS +SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K +TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF +6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er +3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt +Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT +VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW +ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA +rBPuUBQemMc= +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST BR Root CA 1 2020" +# Serial: 165870826978392376648679885835942448534 +# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed +# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67 +# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44 +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5 +NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS +zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0 +QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/ +VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW +wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV +dWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH +# Label: "D-TRUST EV Root CA 1 2020" +# Serial: 126288379621884218666039612629459926992 +# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e +# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07 +# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw +CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS +VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5 +NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG +A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB +BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC +/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD +wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3 +OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g +PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf +Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l +dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1 +c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO +PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA 
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb +gfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS ECC P384 Root G5" +# Serial: 13129116028163249804115411775095713523 +# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed +# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee +# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05 +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp +Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2 +MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ +bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS +7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp +0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS +B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49 +BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ +LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4 +DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc. +# Label: "DigiCert TLS RSA4096 Root G5" +# Serial: 11930366277458970227240571539258396554 +# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1 +# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35 +# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75 +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN +MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT +HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN +NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs +IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+ +ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0 +2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp +wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM +pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD +nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po +sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx +Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd +Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX +KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe +XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL +tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv +TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN +AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H +PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF +O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ +REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik +AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv 
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+ +p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw +MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF +qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK +ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root R1 O=Certainly +# Subject: CN=Certainly Root R1 O=Certainly +# Label: "Certainly Root R1" +# Serial: 188833316161142517227353805653483829216 +# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12 +# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af +# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0 +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw +PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy +dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9 +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0 +YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2 +1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT +vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed +aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0 +1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5 +r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5 +cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ +wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ +6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA +2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH +Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR +eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u +d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr +PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi +1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd +rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di +taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7 +lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj +yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn +Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy +yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n +wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6 +OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +# Issuer: CN=Certainly Root E1 O=Certainly +# Subject: CN=Certainly Root E1 O=Certainly +# Label: "Certainly Root E1" +# Serial: 8168531406727139161245376702891150584 +# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9 +# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b +# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2 +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw +CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu +bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ +BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s +eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK 
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2 +QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4 +hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm +ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG +BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA RSA v3" +# Serial: 75951268308633135324246244059508261641472512052 +# MD5 Fingerprint: 22:be:10:f6:c2:f8:03:88:73:5f:33:29:47:28:47:a4 +# SHA1 Fingerprint: e9:a8:5d:22:14:52:1c:5b:aa:0a:b4:be:24:6a:23:8a:c9:ba:e2:a9 +# SHA256 Fingerprint: ef:66:b0:b1:0a:3c:db:9f:2e:36:48:c7:6b:d2:af:18:ea:d2:bf:e6:f1:17:65:5e:28:c4:06:0d:a1:a3:f4:c2 +-----BEGIN CERTIFICATE----- +MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQEL +BQAwgYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUt +VHVncmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYw +JAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIFJTQSB2MzAeFw0yMDAzMTgw +OTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMG +QW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1 +Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBD +QSBSU0EgdjMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J7 +7gnJY9LTQ91ew6aEOErxjYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscx +uj7X/iWpKo429NEvx7epXTPcMHD4QGxLsqYxYdE0PD0xesevxKenhOGXpOhL9hd8 +7jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF/YP9f4RtNGx/ardLAQO/ +rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8qQedmCeFL +l+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bG +wzrwbMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4 +znKS4iicvObpCdg604nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBO +M/J+JjKsBY04pOZ2PJ8QaQ5tndLBeSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK +5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiMbIedBi3x7+PmBvrFZhNb/FAH +nnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbgh3cXTJ2w2Amo +DVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSy +tK7mLfcm1ap1LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEL +BQADggIBAImocn+M684uGMQQgC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ +6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN438o2Fi+CiJ+8EUdPdk3ILY7r3y18 +Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/qln0F7psTpURs+APQ +3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3sSdPk +vmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn9 +9t2HVhjYsCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQ +mhty3QUBjYZgv6Rn7rWlDdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YA +VSgU7NbHEqIbZULpkejLPoeJVF3Zr52XnGnnCv8PWniLYypMfUeUP95L6VPQMPHF +9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFHIK+WEj5jlB0E5y67hscM +moi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiXYY60MGo8 +bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ +-----END CERTIFICATE----- + +# Issuer: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center +# Subject: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. 
OU=E-Tugra Trust Center +# Label: "E-Tugra Global Root CA ECC v3" +# Serial: 218504919822255052842371958738296604628416471745 +# MD5 Fingerprint: 46:bc:81:bb:f1:b5:1e:f7:4b:96:bc:14:e2:e7:27:64 +# SHA1 Fingerprint: 8a:2f:af:57:53:b1:b0:e6:a1:04:ec:5b:6a:69:71:6d:f6:1c:e2:84 +# SHA256 Fingerprint: 87:3f:46:85:fa:7f:56:36:25:25:2e:6d:36:bc:d7:f1:6f:c2:49:51:f2:64:e4:7e:1b:95:4f:49:08:cd:ca:13 +-----BEGIN CERTIFICATE----- +MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMw +gYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVn +cmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYD +VQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIEVDQyB2MzAeFw0yMDAzMTgwOTQ2 +NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMGQW5r +YXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1Z3Jh +IFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBF +Q0MgdjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQ +KczLWYHMjLiSF4mDKpL2w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YK +fWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMB +Af8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQzPUwHQYDVR0OBBYEFP+C +MXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNp +ADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/6 +7W4WAie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFx +vmjkI6TZraE3 +-----END CERTIFICATE----- + +# Issuer: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD. +# Subject: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD. +# Label: "Security Communication RootCA3" +# Serial: 16247922307909811815 +# MD5 Fingerprint: 1c:9a:16:ff:9e:5c:e0:4d:8a:14:01:f4:35:5d:29:26 +# SHA1 Fingerprint: c3:03:c8:22:74:92:e5:61:a2:9c:5f:79:91:2b:1e:44:13:91:30:3a +# SHA256 Fingerprint: 24:a5:5c:2a:b0:51:44:2d:06:17:76:65:41:23:9a:4a:d0:32:d7:c5:51:75:aa:34:ff:de:2f:bc:4f:5c:52:94 +-----BEGIN CERTIFICATE----- +MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNV +BAYTAkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScw +JQYDVQQDEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2 +MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UEAxMeU2VjdXJpdHkg +Q29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4r +CmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzA +lrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG +TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF7 +9+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK7 +8vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4 +g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3we +GVPKp7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst ++3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M +0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQ +T9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEAAaNCMEAw +HQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS +YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PA +FNr0Y/Dq9HHuTofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd +9XbXv8S2gVj/yP9kaWJ5rW4OH3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQI +UYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+ +OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZXSEIx2C/pHF7uNke 
+gr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml+LLf
+iAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUV
+nuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD
+2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//
+1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8Laad
+TdJ0MN1kURXbg4NR16/9M51NZg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Label: "Security Communication ECC RootCA1"
+# Serial: 15446673492073852651
+# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86
+# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41
+# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11
+-----BEGIN CERTIFICATE-----
+MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT
+AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD
+VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx
+NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT
+HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5
+IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl
+dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK
+ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu
+9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O
+be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k=
+-----END CERTIFICATE----- diff --git a/voice_bridge/default.csv b/voice_bridge/default.csv new file mode 100644 index 0000000000000000000000000000000000000000..709fd8d4592b0cc6125a84114373632d93b9f921 --- /dev/null +++ b/voice_bridge/default.csv @@ -0,0 +1,51 @@ +青山,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,アオヤマ,アオヤマ,2/4,C1
+雨晴,1350,1350,7000,名詞,固有名詞,人名,姓,*,*,*,アメハレ,アメハレ,2/4,C1
+櫻歌,1350,1350,7000,名詞,固有名詞,人名,姓,*,*,*,オウカ,オーカ,1/3,C1
+音街,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,オトマチ,オトマチ,2/4,C1
+春日部,1350,1350,8600,名詞,固有名詞,人名,姓,*,*,*,カスカベ,カスカベ,0/4,C1
+紲星,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,キズナ,キズナ,1/3,C1
+九州,1350,1350,8600,名詞,固有名詞,人名,姓,*,*,*,キュウシュウ,キュウシュウ,1/4,C1
+キョウコ,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,キョウコ,キョウコ,1/3,C1
+玄野,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,クロノ,クロノ,1/3,C1
+剣崎,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,ケンザキ,ケンザキ,1/4,C1
+後鬼,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ゴキ,ゴキ,1/2,C1
+虎太郎,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,コタロウ,コタロー,4/4,C1
+琴葉,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,コトノハ,コトノハ,0/4,C1
+小夜,1351,1351,2200,名詞,固有名詞,人名,名,*,*,*,サヨ,サヨ,1/2,C1
+四国,1350,1350,2200,名詞,固有名詞,人名,姓,*,*,*,シコク,シコク,1/3,C1
+白上,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,シラカミ,シラカミ,4/4,C1
+ずんだもん,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ズンダモン,ズンダモン,1/5,C1
+そら,1351,1351,7000,名詞,固有名詞,人名,名,*,*,*,ソラ,ソラ,1/2,C1
+タイプＴ,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,タイプティー,タイプティー,4/5,C1
+波音,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ナミネ,ナミネ,0/3,C1
+武宏,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,タケヒロ,タケヒロ,2/4,C1
+γ‘γ³εΌγ˜γ„,1351,1351,0,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,チビシキジー,チビシキジー,5/6,C1 +月θͺ­,1350,1350,0,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,γƒ„γ‚―γƒ¨γƒŸ,γƒ„γ‚―γƒ¨γƒŸ,0/4,C1 +γ€γ‚€γŽ,1351,1351,7450,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,ツムγ‚,ツムγ‚,0/3,C1 +γƒŠγƒΌγ‚Ήγƒ­γƒœ,1350,1350,0,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,γƒŠγƒΌγ‚Ήγƒ­γƒœ,γƒŠγƒΌγ‚Ήγƒ­γƒœ,4/5,C1 +οΌο½οΌŽοΌ—,1351,1351,0,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,γƒŠγƒ³γƒγƒΌγ‚»γƒ–γƒ³,γƒŠγƒ³γƒγƒΌγ‚»γƒ–γƒ³,5/7,C1 +はう,1351,1351,5000,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,ハウ,ハウ,1/2,C1 +ζ‘œδΉƒ,1350,1350,0,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,γƒγƒ«γƒŽ,γƒγƒ«γƒŽ,1/3,C1 +γ²γΎγ‚Š,1351,1351,7000,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,γƒ’γƒžγƒͺ,γƒ’γƒžγƒͺ,0/3,C1 +οΌ·ο½ˆο½‰ο½”ο½…οΌ£οΌ΅οΌ¬,1351,1351,0,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,γƒ›γƒ―γ‚€γƒˆγ‚«γƒ«,γƒ›γƒ―γ‚€γƒˆγ‚«γƒ«,5/6,C1 +γƒŸγ‚³,1351,1351,3900,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,γƒŸγ‚³,γƒŸγ‚³,1/2,C1 +ζ°΄ε₯ˆη€¬,1350,1350,0,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,γƒŸγƒŠγ‚»,γƒŸγƒŠγ‚»,2/3,C1 +ε†₯鳴,1350,1350,5000,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,パむパむ,パむパむ,1/4,C1 +鳴花,1350,1350,0,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,パむカ,パむカ,1/3,C1 +γ‚γŸγ‚“,1351,1351,7000,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,パタン,パタン,1/3,C1 +ι›Œι›„,1351,1351,7000,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,パスγ‚ͺ,パスγ‚ͺ,0/3,C1 +もけ子さん,1351,1351,0,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,ヒチコァン,ヒチコァン,1/5,C1 +γƒ’γƒγƒŽ,1350,1350,0,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,γƒ’γƒγƒŽ,γƒ’γƒγƒŽ,0/3,C1 +硐月,1350,1350,0,名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,*,ユヅキ,ユヅキ,1/3,C1 +εΌ“ιΆ΄,1351,1351,0,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,ユヅル,ユヅル,0/3,C1 +γƒͺツ,1351,1351,3900,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,γƒͺツ,γƒͺツ,1/2,C1 +ε…­θŠ±,1351,1351,4900,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,γƒͺッカ,γƒͺッカ,1/3,C1 +龍星,1351,1351,5000,名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,*,γƒͺγƒ₯ウセむ,γƒͺγƒ₯ウセむ,1/4,C1 +οΌ£οΌ―οΌ₯οΌ©οΌ²οΌ―οΌ©οΌοΌ«,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,コエむロむンク,コエむロむンク,5/7,C1 +ο½ƒο½ο½…ο½‰ο½’ο½ο½‰ο½Žο½‹,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,コエむロむンク,コエむロむンク,5/7,C1 +οΌ£ο½ο½…οΌ¦ο½ο½Žο½”,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,γ‚³γ‚¨γƒ•γ‚©γƒ³γƒˆ,γ‚³γ‚¨γƒ•γ‚©γƒ³γƒˆ,3/5,C1 +ο½ƒο½ο½…ο½†ο½ο½Žο½”,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,γ‚³γ‚¨γƒ•γ‚©γƒ³γƒˆ,γ‚³γ‚¨γƒ•γ‚©γƒ³γƒˆ,3/5,C1 +T1LQu,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,γƒˆγƒΌγ‚―,γƒˆγƒΌγ‚―,0/3,C1 +ο½”ο½ο½Œο½‘ο½•,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,γƒˆγƒΌγ‚―,γƒˆγƒΌγ‚―,0/3,C1 +οΌΆοΌ―οΌ©οΌ£οΌ₯οΌΆοΌ―οΌΈ,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,γƒœγ‚€γ‚Ήγƒœγƒƒγ‚―γ‚Ή,γƒœγ‚€γ‚Ήγƒœγƒƒγ‚―γ‚Ή,4/7,C1 +ο½–ο½ο½‰ο½ƒο½…ο½–ο½ο½˜,1348,1348,0,名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,*,γƒœγ‚€γ‚Ήγƒœγƒƒγ‚―γ‚Ή,γƒœγ‚€γ‚Ήγƒœγƒƒγ‚―γ‚Ή,4/7,C1 \ No newline at end of file diff --git a/voice_bridge/default_setting.yml b/voice_bridge/default_setting.yml new file mode 100644 index 0000000000000000000000000000000000000000..3421e7a6a32073e3413444495b3bb37d80d4d351 --- /dev/null +++ b/voice_bridge/default_setting.yml @@ -0,0 +1,2 @@ +allow_origin: null +cors_policy_mode: localapps diff --git a/voice_bridge/engine_manifest.json b/voice_bridge/engine_manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..994f36ff44459d96f5245a2e9d7a0afed793320c --- /dev/null +++ b/voice_bridge/engine_manifest.json @@ -0,0 +1,59 @@ +{ + "manifest_version": "0.13.1", + "name": "Suga Engine", + "brand_name": "Suga", + "uuid": "41d9d6cb-6682-4baa-80b6-875547f71d86", + "version": "999.999.999", + "url": "https://github.com/voicevox-bridge/bridge-plugin", + "command": "run", + "port": 50021, + "icon": "engine_manifest_assets/icon.png", + "default_sampling_rate": 44100, + "terms_of_service": "engine_manifest_assets/terms_of_service.md", + "update_infos": "engine_manifest_assets/update_infos.json", + "dependency_licenses": 
"engine_manifest_assets/dependency_licenses.json", + "downloadable_libraries_path": null, + "downloadable_libraries_url": null, + "supported_features": { + "adjust_mora_pitch": { + "type": "bool", + "value": false, + "name": "ヒーラごとγιŸ³ι«˜γθͺΏζ•΄" + }, + "adjust_phoneme_length": { + "type": "bool", + "value": false, + "name": "ιŸ³η΄ γ”γ¨γι•·γ•γθͺΏζ•΄" + }, + "adjust_speed_scale": { + "type": "bool", + "value": true, + "name": "全体γθ©±ι€ŸγθͺΏζ•΄" + }, + "adjust_pitch_scale": { + "type": "bool", + "value": true, + "name": "全体γιŸ³ι«˜γθͺΏζ•΄" + }, + "adjust_intonation_scale": { + "type": "bool", + "value": true, + "name": "全体γζŠ‘ζšγθͺΏζ•΄" + }, + "adjust_volume_scale": { + "type": "bool", + "value": true, + "name": "全体γιŸ³ι‡γθͺΏζ•΄" + }, + "interrogative_upspeak": { + "type": "bool", + "value": false, + "name": "疑問文γθ‡ͺε‹•θͺΏζ•΄" + }, + "synthesis_morphing" : { + "type": "bool", + "value": false, + "name": "2δΊΊγθ©±θ€…γ§γƒ’γƒΌγƒ•γ‚£γƒ³γ‚°γ—γŸιŸ³ε£°γ‚’εˆζˆ" + } + } +} diff --git a/voice_bridge/engine_manifest_assets/dependency_licenses.json b/voice_bridge/engine_manifest_assets/dependency_licenses.json new file mode 100644 index 0000000000000000000000000000000000000000..4ac28cae5f31e350733bccc58afb349a93ee70ee --- /dev/null +++ b/voice_bridge/engine_manifest_assets/dependency_licenses.json @@ -0,0 +1 @@ +[{"name": "Open JTalk", "version": "1.11", "license": "Modified BSD license", "text": "/* ----------------------------------------------------------------- */\n/* The Japanese TTS System \"Open JTalk\" */\n/* developed by HTS Working Group */\n/* http://open-jtalk.sourceforge.net/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2008-2016 Nagoya Institute of Technology */\n/* Department of Computer Science */\n/* */\n/* All rights reserved. */\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the HTS working group nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. 
*/\n/* ----------------------------------------------------------------- */\n"}, {"name": "MeCab", "version": null, "license": "Modified BSD license", "text": "Copyright (c) 2001-2008, Taku Kudo\nCopyright (c) 2004-2008, Nippon Telegraph and Telephone Corporation\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are\npermitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above\n copyright notice, this list of conditions and the\n following disclaimer.\n\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the\n following disclaimer in the documentation and/or other\n materials provided with the distribution.\n\n * Neither the name of the Nippon Telegraph and Telegraph Corporation\n nor the names of its contributors may be used to endorse or\n promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED\nWARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\nTORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\nADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/* ----------------------------------------------------------------- */\n/* The Japanese TTS System \"Open JTalk\" */\n/* developed by HTS Working Group */\n/* http://open-jtalk.sourceforge.net/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2008-2016 Nagoya Institute of Technology */\n/* Department of Computer Science */\n/* */\n/* All rights reserved. */\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the HTS working group nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. */\n/* ----------------------------------------------------------------- */\n"}, {"name": "NAIST Japanese Dictionary", "version": null, "license": "Modified BSD license", "text": "Copyright (c) 2009, Nara Institute of Science and Technology, Japan.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\nRedistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\nRedistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\nNeither the name of the Nara Institute of Science and Technology\n(NAIST) nor the names of its contributors may be used to endorse or\npromote products derived from this software without specific prior\nwritten permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/* ----------------------------------------------------------------- */\n/* The Japanese TTS System \"Open JTalk\" */\n/* developed by HTS Working Group */\n/* http://open-jtalk.sourceforge.net/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2008-2016 Nagoya Institute of Technology */\n/* Department of Computer Science */\n/* */\n/* All rights reserved. */\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the HTS working group nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. 
*/\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. */\n/* ----------------------------------------------------------------- */\n"}, {"name": "HTS Voice \"Mei\"", "version": null, "license": "Creative Commons Attribution 3.0 license", "text": "# ----------------------------------------------------------------- #\n# HTS Voice \"Mei\" #\n# released by MMDAgent Project Team #\n# http://www.mmdagent.jp/ #\n# ----------------------------------------------------------------- #\n# #\n# Copyright (c) 2009-2013 Nagoya Institute of Technology #\n# Department of Computer Science #\n# #\n# Some rights reserved. #\n# #\n# This work is licensed under the Creative Commons Attribution 3.0 #\n# license. #\n# #\n# You are free: #\n# * to Share - to copy, distribute and transmit the work #\n# * to Remix - to adapt the work #\n# Under the following conditions: #\n# * Attribution - You must attribute the work in the manner #\n# specified by the author or licensor (but not in any way that #\n# suggests that they endorse you or your use of the work). #\n# With the understanding that: #\n# * Waiver - Any of the above conditions can be waived if you get #\n# permission from the copyright holder. #\n# * Public Domain - Where the work or any of its elements is in #\n# the public domain under applicable law, that status is in no #\n# way affected by the license. #\n# * Other Rights - In no way are any of the following rights #\n# affected by the license: #\n# - Your fair dealing or fair use rights, or other applicable #\n# copyright exceptions and limitations; #\n# - The author's moral rights; #\n# - Rights other persons may have either in the work itself or #\n# in how the work is used, such as publicity or privacy #\n# rights. #\n# * Notice - For any reuse or distribution, you must make clear to #\n# others the license terms of this work. The best way to do this #\n# is with a link to this web page. #\n# #\n# See http://creativecommons.org/ for details. 
#\n# ----------------------------------------------------------------- #\n"}, {"name": "Bridge Plugin", "version": null, "license": "Bridge Plugin License", "text": "Bridge Plugin\n\nCopyright (c) 2021 Hiroshiba\nCopyright (c) 2021 VOICEVOX\nCopyright (c) 2022 VOICEVOX-Bridge\n\n\n\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u30ea\u30dd\u30b8\u30c8\u30ea\u306bPull Request\u3092\u9001\u308b\u76ee\u7684\u306b\u9650\u308a\u3001\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u30bd\u30fc\u30b9\u30b3\u30fc\u30c9\u306e\u4f7f\u7528\u3001\u8907\u88fd\u3001\u914d\u5e03\u7b49\u3092\u884c\u3046\u3053\u3068\u3092\u8a31\u53ef\u3057\u307e\u3059\u3002\n\n\n\u5546\u7528\u30fb\u975e\u5546\u7528\u3092\u554f\u308f\u305a\u3001\u30d0\u30a4\u30ca\u30ea\u5f62\u5f0f\u306e\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u5229\u7528\u53ca\u3073\u518d\u914d\u5e03\u3092\u8a31\u53ef\u3057\u307e\u3059\u3002\n\u305f\u3060\u3057\u3001\u4ee5\u4e0b\u3092\u6761\u4ef6\u3068\u3057\u307e\u3059\u3002\n\n- \u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306b\u3088\u3063\u3066\u8aad\u307f\u8fbc\u307e\u308c\u308b\u97f3\u58f0\u5408\u6210\u30e2\u30c7\u30eb\u306f\u3001\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u3092\u901a\u3058\u305f\u97f3\u58f0\u5408\u6210\u5316\u3092\u8a31\u8afe\u3057\u3066\u3044\u308b\u63d0\u4f9b\u8005\u306e\u97f3\u58f0\u3092\u5143\u306b\u4f5c\u3089\u308c\u3066\u3044\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\n\n- \u518d\u914d\u5e03\u3092\u884c\u3046\u5834\u5408\u3001\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u30d5\u30a1\u30a4\u30eb\u306b\u95a2\u3057\u3066\u306f\u3001\u518d\u914d\u5e03\u5f8c\u3082\u672c\u30e9\u30a4\u30bb\u30f3\u30b9\u3092\u9069\u7528\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\n\n\n\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306f\u300c\u73fe\u72b6\u306e\u307e\u307e\u3067\u300d\u3067\u63d0\u4f9b\u3055\u308c\u3001\u660e\u793a\u7684\u3001\u6697\u9ed9\u7684\u304b\u3069\u3046\u304b\u306b\u62d8\u3089\u305a\u3042\u3089\u3086\u308b\u4fdd\u8a3c\u306f\u306a\u3044\u3082\u306e\u3068\u3057\u307e\u3059\u3002\u3053\u3053\u3067\u8a00\u3046\u4fdd\u8a3c\u306f\u3001\u5e02\u8ca9\u6027\u3001\u7279\u5b9a\u7528\u9014\u3078\u306e\u9069\u5408\u6027\u3001\u6a29\u5229\u306e\u4fb5\u5bb3\u304c\u306a\u3044\u3053\u3068\u7b49\u3092\u542b\u307f\u307e\u3059\u304c\u3001\u3053\u308c\u3089\u306b\u9650\u5b9a\u3055\u308c\u307e\u305b\u3093\u3002\n\u88fd\u4f5c\u8005\u306f\u3001\u5951\u7d04\u884c\u70ba\u3001\u4e0d\u6cd5\u884c\u70ba\u3001\u307e\u305f\u306f\u305d\u308c\u4ee5\u5916\u3067\u3042\u308d\u3046\u3068\u3001\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306b\u8d77\u56e0\u307e\u305f\u306f\u95a2\u9023\u3057\u3001\u3042\u308b\u3044\u306f\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u4f7f\u7528\u307e\u305f\u306f\u305d\u306e\u4ed6\u306e\u6271\u3044\u306b\u3088\u3063\u3066\u751f\u3058\u308b\u4e00\u5207\u306e\u8acb\u6c42\u3001\u640d\u5bb3\u3001\u305d\u306e\u4ed6\u306e\u7fa9\u52d9\u306b\u3064\u3044\u3066\u4f55\u3089\u306e\u8cac\u4efb\u3082\u8ca0\u308f\u306a\u3044\u3082\u306e\u3068\u3057\u307e\u3059\u3002"}, {"name": "world", "version": null, "license": "Modified BSD license", "text": "/* ----------------------------------------------------------------- */\n/* WORLD: High-quality speech analysis, */\n/* manipulation and synthesis system */\n/* developed by M. Morise */\n/* http://www.kisc.meiji.ac.jp/~mmorise/world/english/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2010 M. Morise */\n/* */\n/* All rights reserved. 
*/\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the M. Morise nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. */\n/* ----------------------------------------------------------------- */\n"}, {"name": "PyTorch", "version": "1.9.0", "license": "BSD-style license", "text": "From PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. 
If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\n and IDIAP Research Institute nor the names of its contributors may be\n used to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "Python", "version": "3.8.10", "license": "Python Software Foundation License", "text": "A. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC. Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team. In October of the same\nyear, the PythonLabs team moved to Digital Creations, which became\nZope Corporation. In 2001, the Python Software Foundation (PSF, see\nhttps://www.python.org/psf/) was formed, a non-profit organization\ncreated specifically to own Python-related Intellectual Property.\nZope Corporation was a sponsoring member of the PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition). Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n Release Derived Year Owner GPL-\n from compatible? 
(1)\n\n 0.9.0 thru 1.2 1991-1995 CWI yes\n 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes\n 1.6 1.5.2 2000 CNRI no\n 2.0 1.6 2000 BeOpen.com no\n 1.6.1 1.6 2001 CNRI yes (2)\n 2.1 2.0+1.6.1 2001 PSF no\n 2.0.1 2.0+1.6.1 2001 PSF yes\n 2.1.1 2.1+2.0.1 2001 PSF yes\n 2.1.2 2.1.1 2002 PSF yes\n 2.1.3 2.1.2 2002 PSF yes\n 2.2 and above 2.1.1 2001-now PSF yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n the GPL. All Python licenses, unlike the GPL, let you distribute\n a modified version without making your changes open source. The\n GPL-compatible licenses make it possible to combine Python with\n other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n because its license has a choice of law clause. According to\n CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPython software and documentation are licensed under the\nPython Software Foundation License Version 2.\n\nStarting with Python 3.8.6, examples, recipes, and other code in\nthe documentation are dual licensed under the PSF License Version 2\nand the Zero-Clause BSD license.\n\nSome software incorporated into Python is under different licenses.\nThe licenses are listed with code falling under that license.\n\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;\nAll Rights Reserved\" are retained in Python alone or in any derivative version\nprepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee. This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions. Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee. This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party. As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. 
This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee. Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement. This Agreement together with\nPython 1.6.1 may be located on the Internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013. This\nAgreement may also be obtained from a proxy server on the Internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement. Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee. 
This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands. All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION\n----------------------------------------------------------------------\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "ConfigArgParse", "version": "1.5.3", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2015 bw2\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"}, {"name": "Cython", "version": "0.29.24", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n"}, {"name": "Jinja2", "version": "3.1.2", "license": "BSD License", "text": "Copyright 2007 Pallets\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "MarkupSafe", "version": "2.1.1", "license": "BSD License", "text": "Copyright 2010 Pallets\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "PyYAML", "version": "6.0", "license": "MIT License", "text": "Copyright (c) 2017-2021 Ingy d\u00f6t Net\nCopyright (c) 2006-2016 Kirill Simonov\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "SoundFile", "version": "0.10.3.post1", "license": "BSD License", "text": "Copyright (c) 2013, Bastian Bechtold\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n * Neither the name of PySoundFile nor the names\n of its contributors may be used to endorse or promote products\n derived from this software without specific prior written\n permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "aiofiles", "version": "0.7.0", "license": "Other/Proprietary License", "text": "Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright {yyyy} {name of copyright owner}\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n"}, {"name": "anyio", "version": "3.6.2", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2018 Alex Gr\u00f6nholm\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "appdirs", "version": "1.4.4", "license": "MIT License", "text": "# This is the MIT license\n\nCopyright (c) 2010 ActiveState Software Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n"}, {"name": "asgiref", "version": "3.6.0", "license": "BSD License", "text": "Copyright (c) Django Software Foundation and individual contributors.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n 3. Neither the name of Django nor the names of its contributors may be used\n to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "audioread", "version": "3.0.0", "license": "MIT", "text": "Copyright (c) 2011-2018 Adrian Sampson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\nOR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "certifi", "version": "2022.12.7", "license": "Mozilla Public License 2.0 (MPL 2.0)", "text": "This package contains a modified version of ca-bundle.crt:\n\nca-bundle.crt -- Bundle of CA Root Certificates\n\nCertificate data from Mozilla as of: Thu Nov 3 19:04:19 2011#\nThis is a bundle of X.509 certificates of public Certificate Authorities\n(CA). These were automatically extracted from Mozilla's root certificates\nfile (certdata.txt). This file can be found in the mozilla source tree:\nhttps://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt\nIt contains the certificates in PEM format and therefore\ncan be directly used with curl / libcurl / php_curl, or with\nan Apache+mod_ssl webserver for SSL client authentication.\nJust configure this file as the SSLCACertificateFile.#\n\n***** BEGIN LICENSE BLOCK *****\nThis Source Code Form is subject to the terms of the Mozilla Public License,\nv. 2.0. 
If a copy of the MPL was not distributed with this file, You can obtain\none at http://mozilla.org/MPL/2.0/.\n\n***** END LICENSE BLOCK *****\n@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $\n"}, {"name": "cffi", "version": "1.15.1", "license": "MIT License", "text": "\nExcept when otherwise stated (look for LICENSE files in directories or\ninformation at the beginning of each file) all software and\ndocumentation is licensed as follows: \n\n The MIT License\n\n Permission is hereby granted, free of charge, to any person \n obtaining a copy of this software and associated documentation \n files (the \"Software\"), to deal in the Software without \n restriction, including without limitation the rights to use, \n copy, modify, merge, publish, distribute, sublicense, and/or \n sell copies of the Software, and to permit persons to whom the \n Software is furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included \n in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS \n OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL \n THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING \n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \n DEALINGS IN THE SOFTWARE.\n\n"}, {"name": "charset-normalizer", "version": "2.1.1", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2019 TAHRI Ahmed R.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE."}, {"name": "ci-sdr", "version": "0.0.2", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2020 Communications Engineering Group, Paderborn University\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "click", "version": "8.0.4", "license": "BSD License", "text": "Copyright 2014 Pallets\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "colorama", "version": "0.4.4", "license": "BSD License", "text": "Copyright (c) 2010 Jonathan Hartley\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holders, nor those of its contributors\n may be used to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "ctc-segmentation", "version": "1.7.4", "license": "UNKNOWN", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "decorator", "version": "5.1.1", "license": "BSD License", "text": "Copyright (c) 2005-2018, Michele Simionato\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n Redistributions in bytecode form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\nBUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\nOF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\nTORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\nUSE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n"}, {"name": "einops", "version": "0.6.0", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2018 Alex Rogozhnikov\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "espnet", "version": "0.10.7a1", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "espnet-tts-frontend", "version": "0.0.3", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "fastapi", "version": "0.70.0", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2018 Sebasti\u00e1n Ram\u00edrez\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"}, {"name": "filelock", "version": "3.8.2", "license": "The Unlicense (Unlicense)", "text": "This is free and unencumbered software released into the public domain.\n\nAnyone is free to copy, modify, publish, use, compile, sell, or\ndistribute this software, either in source code form or as a compiled\nbinary, for any purpose, commercial or non-commercial, and by any\nmeans.\n\nIn jurisdictions that recognize copyright laws, the author or authors\nof this software dedicate any and all copyright interest in the\nsoftware to the public domain. We make this dedication for the benefit\nof the public at large and to the detriment of our heirs and\nsuccessors. We intend this dedication to be an overt act of\nrelinquishment in perpetuity of all present and future rights to this\nsoftware under copyright law.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\nFor more information, please refer to <https://unlicense.org>\n"}, {"name": "g2p-en", "version": "2.1.0", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1.
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright {yyyy} {name of copyright owner}\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "h11", "version": "0.14.0", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2016 Nathaniel J. Smith and other contributors\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "h5py", "version": "3.7.0", "license": "BSD License", "text": "Copyright (c) 2008 Andrew Collette and contributors\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the\n distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "humanfriendly", "version": "10.0", "license": "MIT License", "text": "Copyright (c) 2021 Peter Odding\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "idna", "version": "3.4", "license": "BSD License", "text": "BSD 3-Clause License\n\nCopyright (c) 2013-2021, Kim Davies\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "importlib-metadata", "version": "4.13.0", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "inflect", "version": "6.0.2", "license": "MIT License", "text": "Copyright Jason R. Coombs\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n"}, {"name": "jaconv", "version": "0.3.3", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2014 Yukino Ikegami\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"}, {"name": "jamo", "version": "0.4.1", "license": "Apache Software License", "text": "Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. 
You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. 
In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2017 Joshua Dong\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n"}, {"name": "joblib", "version": "1.2.0", "license": "BSD License", "text": "BSD 3-Clause License\n\nCopyright (c) 2008-2021, The joblib developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 
THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "librosa", "version": "0.9.2", "license": "ISC License (ISCL)", "text": "## ISC License\n\nCopyright (c) 2013--2017, librosa development team.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "llvmlite", "version": "0.39.1", "license": "BSD", "text": "Copyright (c) 2014-, Continuum Analytics, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\nRedistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "nltk", "version": "3.8.1", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "numba", "version": "0.56.4", "license": "BSD License", "text": "Copyright (c) 2012, Anaconda, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\nRedistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "numpy", "version": "1.20.0", "license": "BSD License", "text": "\n----\n\nThis binary distribution of NumPy also bundles the following software:\n\n\nName: OpenBLAS\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled as a dynamically linked library\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense: 3-clause BSD\n Copyright (c) 2011-2014, The OpenBLAS Project\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n 1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n 3. Neither the name of the OpenBLAS project nor the names of\n its contributors may be used to endorse or promote products\n derived from this software without specific prior written\n permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: LAPACK\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled in OpenBLAS\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense 3-clause BSD\n Copyright (c) 1992-2013 The University of Tennessee and The University\n of Tennessee Research Foundation. All rights\n reserved.\n Copyright (c) 2000-2013 The University of California Berkeley. All\n rights reserved.\n Copyright (c) 2006-2013 The University of Colorado Denver. All rights\n reserved.\n\n $COPYRIGHT$\n\n Additional copyrights may follow\n\n $HEADER$\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer listed\n in this license in the documentation and/or other materials\n provided with the distribution.\n\n - Neither the name of the copyright holders nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\n The copyright holders provide no reassurances that the source code\n provided does not infringe any patent, copyright, or any other\n intellectual property rights of third parties. The copyright holders\n disclaim any liability to any recipient for claims brought against\n recipient by any third party for infringement of that parties\n intellectual property rights.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: GCC runtime library\nFiles: extra-dll\*.dll\nDescription: statically linked, in DLL files compiled with gfortran only\nAvailability: https://gcc.gnu.org/viewcvs/gcc/\nLicense: GPLv3 + runtime exception\n Copyright (C) 2002-2017 Free Software Foundation, Inc.\n\n Libgfortran is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 3, or (at your option)\n any later version.\n\n Libgfortran is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n Under Section 7 of GPL version 3, you are granted additional\n permissions described in the GCC Runtime Library Exception, version\n 3.1, as published by the Free Software Foundation.\n\n You should have received a copy of the GNU General Public License and\n a copy of the GCC Runtime Library Exception along with this program;\n see the files COPYING3 and COPYING.RUNTIME respectively. If not, see\n <http://www.gnu.org/licenses/>.\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\msvcp140.dll\nLicense: MSVC\n https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime\n\n Subject to the License Terms for the software, you may copy and\n distribute with your program any of the files within the following\n folder and its subfolders except as noted below. You may not modify\n these files.\n\n C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\n\n You may not distribute the contents of the following folders:\n\n C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\debug_nonredist\n C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\onecore\debug_nonredist\n\n Subject to the License Terms for the software, you may copy and\n distribute the following files with your program in your program\u2019s\n application local folder or by deploying them into the Global\n Assembly Cache (GAC):\n\n VC\atlmfc\lib\mfcmifc80.dll\n VC\atlmfc\lib\amd64\mfcmifc80.dll\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\msvc*90.dll, extra-dll\Microsoft.VC90.CRT.manifest\nLicense: MSVC\n For your convenience, we have provided the following folders for\n use when redistributing VC++ runtime files. Subject to the license\n terms for the software, you may redistribute the folder\n (unmodified) in the application local folder as a sub-folder with\n no change to the folder name. 
You may also redistribute all the\n files (*.dll and *.manifest) within a folder, listed below the\n folder for your convenience, as an entire set.\n\n \VC\redist\x86\Microsoft.VC90.ATL\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \VC\redist\ia64\Microsoft.VC90.ATL\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \VC\redist\amd64\Microsoft.VC90.ATL\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \VC\redist\x86\Microsoft.VC90.CRT\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n \VC\redist\ia64\Microsoft.VC90.CRT\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n\n----\n\nFull text of license texts referred to above follows (that they are\nlisted below does not necessarily imply the conditions apply to the\npresent binary release):\n\n----\n\nGCC RUNTIME LIBRARY EXCEPTION\n\nVersion 3.1, 31 March 2009\n\nCopyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>\n\nEveryone is permitted to copy and distribute verbatim copies of this\nlicense document, but changing it is not allowed.\n\nThis GCC Runtime Library Exception (\"Exception\") is an additional\npermission under section 7 of the GNU General Public License, version\n3 (\"GPLv3\"). It applies to a given file (the \"Runtime Library\") that\nbears a notice placed by the copyright holder of the file stating that\nthe file is governed by GPLv3 along with this Exception.\n\nWhen you use GCC to compile a program, GCC may combine portions of\ncertain GCC header files and runtime libraries with the compiled\nprogram. The purpose of this Exception is to allow compilation of\nnon-GPL (including proprietary) programs to use, in this way, the\nheader files and runtime libraries covered by this Exception.\n\n0. Definitions.\n\nA file is an \"Independent Module\" if it either requires the Runtime\nLibrary for execution after a Compilation Process, or makes use of an\ninterface provided by the Runtime Library, but is not otherwise based\non the Runtime Library.\n\n\"GCC\" means a version of the GNU Compiler Collection, with or without\nmodifications, governed by version 3 (or a specified later version) of\nthe GNU General Public License (GPL) with the option of using any\nsubsequent versions published by the FSF.\n\n\"GPL-compatible Software\" is software whose conditions of propagation,\nmodification and use would permit combination with GCC in accord with\nthe license of GCC.\n\n\"Target Code\" refers to output from any compiler for a real or virtual\ntarget processor architecture, in executable form or suitable for\ninput to an assembler, loader, linker and/or execution\nphase. Notwithstanding that, Target Code does not include data in any\nformat that is used as a compiler intermediate representation, or used\nfor producing a compiler intermediate representation.\n\nThe \"Compilation Process\" transforms code entirely represented in\nnon-intermediate languages designed for human-written code, and/or in\nJava Virtual Machine byte code, into Target Code. Thus, for example,\nuse of source code generators and preprocessors need not be considered\npart of the Compilation Process, since the Compilation Process can be\nunderstood as starting with the output of the generators or\npreprocessors.\n\nA Compilation Process is \"Eligible\" if it is done using GCC, alone or\nwith other GPL-compatible software, or if it is done without using any\nwork based on GCC. 
For example, using non-GPL-compatible Software to\noptimize any GCC intermediate representations would not qualify as an\nEligible Compilation Process.\n\n1. Grant of Additional Permission.\n\nYou have permission to propagate a work of Target Code formed by\ncombining the Runtime Library with Independent Modules, even if such\npropagation would otherwise violate the terms of GPLv3, provided that\nall Target Code was generated by Eligible Compilation Processes. You\nmay then convey such a combination under terms of your choice,\nconsistent with the licensing of the Independent Modules.\n\n2. No Weakening of GCC Copyleft.\n\nThe availability of this Exception does not imply any general\npresumption that third-party software is unaffected by the copyleft\nrequirements of the license of GCC.\n\n----\n\n GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n Preamble\n\n The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works. By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users. We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors. You can apply it to\nyour programs, too.\n\n When we speak of free software, we are referring to freedom, not\nprice. Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights. Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received. You must make sure that they, too, receive\nor can get the source code. And you must show them these terms so they\nknow their rights.\n\n Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software. For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so. This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software. 
The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable. Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts. If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary. To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n The precise terms and conditions for copying, distribution and\nmodification follow.\n\n TERMS AND CONDITIONS\n\n 0. Definitions.\n\n \"This License\" refers to version 3 of the GNU General Public License.\n\n \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n \"The Program\" refers to any copyrightable work licensed under this\nLicense. Each licensee is addressed as \"you\". \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy. The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy. Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies. Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License. If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n 1. Source Code.\n\n The \"source code\" for a work means the preferred form of the work\nfor making modifications to it. 
\"Object code\" means any non-source\nform of a work.\n\n A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form. A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities. However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work. For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n The Corresponding Source for a work in source code form is that\nsame work.\n\n 2. Basic Permissions.\n\n All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met. This License explicitly affirms your unlimited\npermission to run the unmodified Program. The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work. This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force. You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright. Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n Conveying under any other circumstances is permitted solely under\nthe conditions stated below. Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n 4. Conveying Verbatim Copies.\n\n You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n 5. Conveying Modified Source Versions.\n\n You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n a) The work must carry prominent notices stating that you modified\n it, and giving a relevant date.\n\n b) The work must carry prominent notices stating that it is\n released under this License and any conditions added under section\n 7. This requirement modifies the requirement in section 4 to\n \"keep intact all notices\".\n\n c) You must license the entire work, as a whole, under this\n License to anyone who comes into possession of a copy. This\n License will therefore apply, along with any applicable section 7\n additional terms, to the whole of the work, and all its parts,\n regardless of how they are packaged. This License gives no\n permission to license the work in any other way, but it does not\n invalidate such permission if you have separately received it.\n\n d) If the work has interactive user interfaces, each must display\n Appropriate Legal Notices; however, if the Program has interactive\n interfaces that do not display Appropriate Legal Notices, your\n work need not make them do so.\n\n A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit. Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n 6. 
Conveying Non-Source Forms.\n\n You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n a) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by the\n Corresponding Source fixed on a durable physical medium\n customarily used for software interchange.\n\n b) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by a\n written offer, valid for at least three years and valid for as\n long as you offer spare parts or customer support for that product\n model, to give anyone who possesses the object code either (1) a\n copy of the Corresponding Source for all the software in the\n product that is covered by this License, on a durable physical\n medium customarily used for software interchange, for a price no\n more than your reasonable cost of physically performing this\n conveying of source, or (2) access to copy the\n Corresponding Source from a network server at no charge.\n\n c) Convey individual copies of the object code with a copy of the\n written offer to provide the Corresponding Source. This\n alternative is allowed only occasionally and noncommercially, and\n only if you received the object code with such an offer, in accord\n with subsection 6b.\n\n d) Convey the object code by offering access from a designated\n place (gratis or for a charge), and offer equivalent access to the\n Corresponding Source in the same way through the same place at no\n further charge. You need not require recipients to copy the\n Corresponding Source along with the object code. If the place to\n copy the object code is a network server, the Corresponding Source\n may be on a different server (operated by you or a third party)\n that supports equivalent copying facilities, provided you maintain\n clear directions next to the object code saying where to find the\n Corresponding Source. Regardless of what server hosts the\n Corresponding Source, you remain obligated to ensure that it is\n available for as long as needed to satisfy these requirements.\n\n e) Convey the object code using peer-to-peer transmission, provided\n you inform other peers where the object code and Corresponding\n Source of the work are being offered to the general public at no\n charge under subsection 6d.\n\n A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling. In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage. For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product. 
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source. The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information. But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed. Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n 7. Additional Terms.\n\n \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law. If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit. (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.) 
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n a) Disclaiming warranty or limiting liability differently from the\n terms of sections 15 and 16 of this License; or\n\n b) Requiring preservation of specified reasonable legal notices or\n author attributions in that material or in the Appropriate Legal\n Notices displayed by works containing it; or\n\n c) Prohibiting misrepresentation of the origin of that material, or\n requiring that modified versions of such material be marked in\n reasonable ways as different from the original version; or\n\n d) Limiting the use for publicity purposes of names of licensors or\n authors of the material; or\n\n e) Declining to grant rights under trademark law for use of some\n trade names, trademarks, or service marks; or\n\n f) Requiring indemnification of licensors and authors of that\n material by anyone who conveys the material (or modified versions of\n it) with contractual assumptions of liability to the recipient, for\n any liability that these contractual assumptions directly impose on\n those licensors and authors.\n\n All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10. If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term. If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n 8. Termination.\n\n You may not propagate or modify a covered work except as expressly\nprovided under this License. 
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License. If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n 9. Acceptance Not Required for Having Copies.\n\n You are not required to accept this License in order to receive or\nrun a copy of the Program. Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance. However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work. These actions infringe copyright if you do\nnot accept this License. Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n 10. Automatic Licensing of Downstream Recipients.\n\n Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License. You are not responsible\nfor enforcing compliance by third parties with this License.\n\n An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations. If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License. For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n 11. Patents.\n\n A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based. 
The\nwork thus licensed is called the contributor's \"contributor version\".\n\n A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version. For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement). To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients. \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License. 
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n 12. No Surrender of Others' Freedom.\n\n If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all. For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n 13. Use with the GNU Affero General Public License.\n\n Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work. The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n 14. Revised Versions of this License.\n\n The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time. Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n Each version is given a distinguishing version number. If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation. If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n Later license versions may give you additional or different\npermissions. However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n 15. 
Disclaimer of Warranty.\n\n THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n 16. Limitation of Liability.\n\n IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n 17. Interpretation of Sections 15 and 16.\n\n If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Programs\n\n If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n To do so, attach the following notices to the program. It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n <program> Copyright (C) <year> <name of author>\n This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. 
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<http://www.gnu.org/licenses/>.\n\n The GNU General Public License does not permit incorporating your program\ninto proprietary programs. If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library. If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License. But first, please read\n<http://www.gnu.org/philosophy/why-not-lgpl.html>.\n"}, {"name": "packaging", "version": "22.0", "license": "Apache Software License; BSD License", "text": "This software is made available under the terms of *either* of the licenses\nfound in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made\nunder the terms of *both* these licenses.\n"}, {"name": "pooch", "version": "1.6.0", "license": "BSD License", "text": "Copyright (c) 2018 The Pooch Developers\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n* Neither the name of the copyright holders nor the names of any contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "pycparser", "version": "2.21", "license": "BSD License", "text": "pycparser -- A C parser in Python\n\nCopyright (c) 2008-2020, Eli Bendersky\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this \n list of conditions and the following disclaimer.\n* Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n* Neither the name of Eli Bendersky nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND \nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED \nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE \nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR \nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE \nGOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) \nHOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT \nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT \nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "pydantic", "version": "1.10.2", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2017, 2018, 2019, 2020, 2021 Samuel Colvin and other contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "pyopenjtalk", "version": "0.2.0+f4ade29", "license": "MIT License", "text": "The pyopenjtalk package is licensed under the MIT \"Expat\" License:\n\n> Copyright (c) 2018: Ryuichi Yamamoto.\n>\n> Permission is hereby granted, free of charge, to any person obtaining\n> a copy of this software and associated documentation files (the\n> \"Software\"), to deal in the Software without restriction, including\n> without limitation the rights to use, copy, modify, merge, publish,\n> distribute, sublicense, and/or sell copies of the Software, and to\n> permit persons to whom the Software is furnished to do so, subject to\n> the following conditions:\n>\n> The above copyright notice and this permission notice shall be\n> included in all copies or substantial portions of the Software.\n>\n> THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n> IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n> CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n> TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n> SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "pypinyin", "version": "0.44.0", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2016 mozillazg, \u95f2\u8018 \n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "pyreadline3", "version": "3.4.1", "license": "BSD License", "text": "# LICENSE\n\n## pyreadline3 copyright and licensing notes\n\nUnless indicated otherwise, files in this project are covered by a BSD-type\nlicense, included below.\n\nIndividual authors are the holders of the copyright for their code and are\nlisted in each file.\n\nSome files may be licensed under different conditions. 
Ultimately each file \nindicates clearly the conditions under which its author/authors have \ndecided to publish the code.\n\n## pyreadline3 license\n\npyreadline3 is released under a BSD-type license.\n\nCopyright (c) 2020 Bassem Girgis .\n\nCopyright (c) 2006-2020 J\u00f6rgen Stenarson .\n\nCopyright (c) 2003-2006 Gary Bishop\n\nCopyright (c) 2003-2006 Jack Trainor\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\na. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\nb. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\nc. Neither the name of the copyright holders nor the names of any\n contributors to this software may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n"}, {"name": "python-multipart", "version": "0.0.5", "license": "Apache Software License", "text": "Copyright 2012, Andrew Dunham\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n"}, {"name": "pytorch-wpe", "version": "0.0.1", "license": "UNKNOWN", "text": "SOFTWARE LICENSE AGREEMENT FOR EVALUATION\n\nThis SOFTWARE EVALUATION LICENSE AGREEMENT (this \"Agreement\") is a legal contract between a person who uses or otherwise accesses or installs the Software (\u201cUser(s)\u201d), and Nippon Telegraph and Telephone corporation (\"NTT\").\nREAD THE TERMS AND CONDITIONS OF THIS AGREEMENT CAREFULLY BEFORE INSTALLING OR OTHERWISE ACCESSING OR USING NTT'S PROPRIETARY SOFTWARE ACCOMPANIED BY THIS AGREEMENT (the \"SOFTWARE\"). THE SOFTWARE IS COPYRIGHTED AND IT IS LICENSED TO USER UNDER THIS AGREEMENT, NOT SOLD TO USER. BY INSTALLING OR OTHERWISE ACCESSING OR USING THE SOFTWARE, USER ACKNOWLEDGES THAT USER HAS READ THIS AGREEMENT, THAT USER UNDERSTANDS IT, AND THAT USER ACCEPTS AND AGREES TO BE BOUND BY ITS TERMS. 
IF AT ANY TIME USER IS NOT WILLING TO BE BOUND BY THE TERMS OF THIS AGREEMENT, USER SHOULD TERMINATE THE INSTALLATION PROCESS, IMMEDIATELY CEASE AND REFRAIN FROM ACCESSING OR USING THE SOFTWARE AND DELETE ANY COPIES USER MAY HAVE. THIS AGREEMENT REPRESENTS THE ENTIRE AGREEMENT BETWEEN USER AND NTT CONCERNING THE SOFTWARE.\n\n \nBACKGROUND\nA.\tNTT is the owner of all rights, including all patent rights, copyrights and trade secret rights, in and to the Software and related documentation listed in Exhibit A to this Agreement.\n\nB.\tUser wishes to obtain a royalty free license to use the Software to enable User to evaluate, and NTT wishes to grant such a license to User, pursuant and subject to the terms and conditions of this Agreement.\n\nC.\tAs a condition to NTT's provision of the Software to User, NTT has required User to execute this Agreement.\n\nIn consideration of these premises, and the mutual promises and conditions in this Agreement, the parties hereby agree as follows:\n\n1.\tGrant of Evaluation License. \tNTT hereby grants to User, and User hereby accepts, under the terms and conditions of this Agreement, a royalty free, nontransferable and nonexclusive license to use the Software internally for the purposes of testing, analyzing, and evaluating the methods or mechanisms as shown in the research paper submitted by NTT to a certain academy. User may make a reasonable number of backup copies of the Software solely for User's internal use pursuant to the license granted in this Section 1.\n\n2.\u3000Shipment and Installation. NTT will ship or deliver the Software by any method that NTT deems appropriate. User shall be solely responsible for proper installation of the Software.\n\n3.\u3000Term. This Agreement is effective whichever is earlier (i) upon User\u2019s acceptance of the Agreement, or (ii) upon User\u2019s installing, accessing, and using the Software, even if User has not expressly accepted this Agreement. Without prejudice to any other rights, NTT may terminate this Agreement without notice to User (i) if User breaches or fails to comply with any of the limitations or other requirements described herein, and (ii) if NTT receives a notice from the academy stating that the research paper would not be published, and in any such case User agrees that NTT may, in addition to any other remedies it may have at law or in equity, remotely disable the Software. User may terminate this Agreement at any time by User\u2019s decision to terminate the Agreement to NTT and ceasing use of the Software. Upon any termination or expiration of this Agreement for any reason, User agrees to uninstall the Software and either return to NTT the Software and all copies thereof, or to destroy all such materials and provide written verification of such destruction to NTT.\n\n4.\tProprietary Rights\n(a)\tThe Software is the valuable, confidential, and proprietary property of NTT, and NTT shall retain exclusive title to this property both during the term and after the termination of this Agreement. Without limitation, User acknowledges that all patent rights, copyrights and trade secret rights in the Software shall remain the exclusive property of NTT at all times. User shall use not less than reasonable care in safeguarding the confidentiality of the Software. 
\n(b)\tUSER SHALL NOT, IN WHOLE OR IN PART, AT ANY TIME DURING THE TERM OF OR AFTER THE TERMINATION OF THIS AGREEMENT: (i) SELL, ASSIGN, LEASE, DISTRIBUTE, OR OTHERWISE TRANSFER THE SOFTWARE TO ANY THIRD PARTY; (ii) EXCEPT AS OTHERWISE PROVIDED HEREIN, COPY OR REPRODUCE THE SOFTWARE IN ANY MANNER; (iii) DISCLOSE THE SOFTWARE TO ANY THIRD PARTY, EXCEPT TO USER'S EMPLOYEES WHO REQUIRE ACCESS TO THE SOFTWARE FOR THE PURPOSES OF THIS AGREEMENT; (iv) MODIFY, DISASSEMBLE, DECOMPILE, REVERSE ENGINEER OR TRANSLATE THE SOFTWARE; OR (v) ALLOW ANY PERSON OR ENTITY TO COMMIT ANY OF THE ACTIONS DESCRIBED IN (i) THROUGH (iv) ABOVE.\n(c)\tUser shall take appropriate action, by instruction, agreement, or otherwise, with respect to its employees permitted under this Agreement to have access to the Software to ensure that all of User's obligations under this Section 4 shall be satisfied. \n\n5.\u3000\tIndemnity. User shall defend, indemnify and hold harmless NTT, its agents and employees, from any loss, damage, or liability arising in connection with User's improper or unauthorized use of the Software. NTT SHALL HAVE THE SOLE RIGHT TO CONDUCT OR DEFEND ANY ACTION RELATING TO THE SOFTWARE.\n\n6.\tDisclaimer. THE SOFTWARE IS LICENSED TO USER \"AS IS,\" WITHOUT ANY TRAINING, MAINTENANCE, OR SERVICE OBLIGATIONS WHATSOEVER ON THE PART OF NTT. NTT MAKES NO EXPRESS OR IMPLIED WARRANTIES OF ANY TYPE WHATSOEVER, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR PURPOSE AND OF NON-INFRINGEMENT ON COPYRIGHT OR ANY OTHER RIGHT OF THIRD PARTIES. USER ASSUMES ALL RISKS ASSOCIATED WITH ITS USE OF THE SOFTWARE, INCLUDING WITHOUT LIMITATION RISKS RELATING TO QUALITY, PERFORMANCE, DATA LOSS, AND UTILITY IN A PRODUCTION ENVIRONMENT. \n\n7.\tLimitation of Liability. IN NO EVENT SHALL NTT BE LIABLE TO USER OR TO ANY THIRD PARTY FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING BUT NOT LIMITED TO DAMAGES FOR PERSONAL INJURY, PROPERTY DAMAGE, LOST PROFITS, OR OTHER ECONOMIC LOSS, ARISING IN CONNECTION WITH USER'S USE OF OR INABILITY TO USE THE SOFTWARE, IN CONNECTION WITH NTT'S PROVISION OF OR FAILURE TO PROVIDE SERVICES PERTAINING TO THE SOFTWARE, OR AS A RESULT OF ANY DEFECT IN THE SOFTWARE. THIS DISCLAIMER OF LIABILITY SHALL APPLY REGARDLESS OF THE FORM OF ACTION THAT MAY BE BROUGHT AGAINST NTT, WHETHER IN CONTRACT OR TORT, INCLUDING WITHOUT LIMITATION ANY ACTION FOR NEGLIGENCE. USER'S SOLE REMEDY IN THE EVENT OF ANY BREACH OF THIS AGREEMENT BY NTT SHALL BE TERMINATION PURSUANT TO SECTION 3.\n\n8.\tNo Assignment or Sublicense. Neither this Agreement nor any right or license under this Agreement, nor the Software, may be sublicensed, assigned, or otherwise transferred by User without NTT's prior written consent.\n\n9.\tGeneral\n(a)\tIf any provision, or part of a provision, of this Agreement is or becomes illegal, unenforceable, or invalidated, by operation of law or otherwise, that provision or part shall to that extent be deemed omitted, and the remainder of this Agreement shall remain in full force and effect.\n(b)\tThis Agreement is the complete and exclusive statement of the agreement between the parties with respect to the subject matter hereof, and supersedes all written and oral contracts, proposals, and other communications between the parties relating to that subject matter. \n(c)\tSubject to Section 8, this Agreement shall be binding on, and shall inure to the benefit of, the respective successors and assigns of NTT and User. 
\n(d)\tIf either party to this Agreement initiates a legal action or proceeding to enforce or interpret any part of this Agreement, the prevailing party in such action shall be entitled to recover, as an element of the costs of such action and not as damages, its attorneys' fees and other costs associated with such action or proceeding.\n(e)\tThis Agreement shall be governed by and interpreted under the laws of Japan, without reference to conflicts of law principles. All disputes arising out of or in connection with this Agreement shall be finally settled by arbitration in Tokyo in accordance with the Commercial Arbitration Rules of the Japan Commercial Arbitration Association. The arbitration shall be conducted by three (3) arbitrators and in Japanese. The award rendered by the arbitrators shall be final and binding upon the parties. Judgment upon the award may be entered in any court having jurisdiction thereof.\n(f)\u3000\u3000\tNTT shall not be liable to the User or to any third party for any delay or failure to perform NTT\u2019s obligation set forth under this Agreement due to any cause beyond NTT\u2019s reasonable control.\n\u2003\nEXHIBIT A\nThe Software and related documentation in this repository.\n\nThe set of python code in this repository is an example implementation of DNN-WPE dereverberation algorithm described in the following paper, \n\nK. Kinoshita, M. Delcroix, H. Kwon, T. Mori, T. Nakatani, \"Neural network-based spectrum estimation for online WPE dereverberation\" In proc. of Interspeech, pp.384--388, 2017\n"}, {"name": "pyworld", "version": "0.3.0", "license": "UNKNOWN", "text": "MIT License\n\nCopyright 2016 pyworld contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "regex", "version": "2022.10.31", "license": "Apache Software License", "text": "This work was derived from the 're' module of CPython 2.6 and CPython 3.1,\ncopyright (c) 1998-2001 by Secret Labs AB and licensed under CNRI's Python 1.6\nlicense.\n\nAll additions and alterations are licensed under the Apache 2.0 License.\n\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2020 Matthew Barnett\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "requests", "version": "2.28.1", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n"}, {"name": "resampy", "version": "0.4.2", "license": "ISC License (ISCL)", "text": "ISC License\n\nCopyright (c) 2016, Brian McFee\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "scikit-learn", "version": "1.2.0", "license": "BSD License", "text": "BSD 3-Clause License\n\nCopyright (c) 2007-2022 The scikit-learn developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "scipy", "version": "1.7.1", "license": "BSD License", "text": "Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n\n3. 
Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n----\n\nThis binary distribution of Scipy also bundles the following software:\n\n\nName: OpenBLAS\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled as a dynamically linked library\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense: 3-clause BSD\n Copyright (c) 2011-2014, The OpenBLAS Project\n All rights reserved.\n \n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n \n 1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n \n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n 3. Neither the name of the OpenBLAS project nor the names of \n its contributors may be used to endorse or promote products \n derived from this software without specific prior written \n permission.\n \n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: LAPACK\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled in OpenBLAS\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense 3-clause BSD\n Copyright (c) 1992-2013 The University of Tennessee and The University\n of Tennessee Research Foundation. All rights\n reserved.\n Copyright (c) 2000-2013 The University of California Berkeley. All\n rights reserved.\n Copyright (c) 2006-2013 The University of Colorado Denver. 
All rights\n reserved.\n \n $COPYRIGHT$\n \n Additional copyrights may follow\n \n $HEADER$\n \n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n \n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n \n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer listed\n in this license in the documentation and/or other materials\n provided with the distribution.\n \n - Neither the name of the copyright holders nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n \n The copyright holders provide no reassurances that the source code\n provided does not infringe any patent, copyright, or any other\n intellectual property rights of third parties. The copyright holders\n disclaim any liability to any recipient for claims brought against\n recipient by any third party for infringement of that parties\n intellectual property rights.\n \n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: GCC runtime library\nFiles: extra-dll\\*.dll\nDescription: statically linked, in DLL files compiled with gfortran only\nAvailability: https://gcc.gnu.org/viewcvs/gcc/\nLicense: GPLv3 + runtime exception\n Copyright (C) 2002-2017 Free Software Foundation, Inc.\n \n Libgfortran is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 3, or (at your option)\n any later version.\n \n Libgfortran is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n \n Under Section 7 of GPL version 3, you are granted additional\n permissions described in the GCC Runtime Library Exception, version\n 3.1, as published by the Free Software Foundation.\n \n You should have received a copy of the GNU General Public License and\n a copy of the GCC Runtime Library Exception along with this program;\n see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see\n<http://www.gnu.org/licenses/>.\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\\msvcp140.dll\nLicense: MSVC\n https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime\n\n Subject to the License Terms for the software, you may copy and\n distribute with your program any of the files within the following\n folder and its subfolders except as noted below. You may not modify\n these files.\n\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\n\n You may not distribute the contents of the following folders:\n\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\\debug_nonredist\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\\onecore\\debug_nonredist\n\n Subject to the License Terms for the software, you may copy and\n distribute the following files with your program in your program\u2019s\n application local folder or by deploying them into the Global\n Assembly Cache (GAC):\n\n VC\\atlmfc\\lib\\mfcmifc80.dll\n VC\\atlmfc\\lib\\amd64\\mfcmifc80.dll\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\\msvc*90.dll, extra-dll\\Microsoft.VC90.CRT.manifest\nLicense: MSVC\n For your convenience, we have provided the following folders for\n use when redistributing VC++ runtime files. Subject to the license\n terms for the software, you may redistribute the folder\n (unmodified) in the application local folder as a sub-folder with\n no change to the folder name. You may also redistribute all the\n files (*.dll and *.manifest) within a folder, listed below the\n folder for your convenience, as an entire set.\n\n \\VC\\redist\\x86\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\ia64\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\amd64\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\x86\\Microsoft.VC90.CRT\\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n \\VC\\redist\\ia64\\Microsoft.VC90.CRT\\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n\n----\n\nFull text of license texts referred to above follows (that they are\nlisted below does not necessarily imply the conditions apply to the\npresent binary release):\n\n----\n\nGCC RUNTIME LIBRARY EXCEPTION\n\nVersion 3.1, 31 March 2009\n\nCopyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>\n\nEveryone is permitted to copy and distribute verbatim copies of this\nlicense document, but changing it is not allowed.\n\nThis GCC Runtime Library Exception (\"Exception\") is an additional\npermission under section 7 of the GNU General Public License, version\n3 (\"GPLv3\"). It applies to a given file (the \"Runtime Library\") that\nbears a notice placed by the copyright holder of the file stating that\nthe file is governed by GPLv3 along with this Exception.\n\nWhen you use GCC to compile a program, GCC may combine portions of\ncertain GCC header files and runtime libraries with the compiled\nprogram. The purpose of this Exception is to allow compilation of\nnon-GPL (including proprietary) programs to use, in this way, the\nheader files and runtime libraries covered by this Exception.\n\n0. 
Definitions.\n\nA file is an \"Independent Module\" if it either requires the Runtime\nLibrary for execution after a Compilation Process, or makes use of an\ninterface provided by the Runtime Library, but is not otherwise based\non the Runtime Library.\n\n\"GCC\" means a version of the GNU Compiler Collection, with or without\nmodifications, governed by version 3 (or a specified later version) of\nthe GNU General Public License (GPL) with the option of using any\nsubsequent versions published by the FSF.\n\n\"GPL-compatible Software\" is software whose conditions of propagation,\nmodification and use would permit combination with GCC in accord with\nthe license of GCC.\n\n\"Target Code\" refers to output from any compiler for a real or virtual\ntarget processor architecture, in executable form or suitable for\ninput to an assembler, loader, linker and/or execution\nphase. Notwithstanding that, Target Code does not include data in any\nformat that is used as a compiler intermediate representation, or used\nfor producing a compiler intermediate representation.\n\nThe \"Compilation Process\" transforms code entirely represented in\nnon-intermediate languages designed for human-written code, and/or in\nJava Virtual Machine byte code, into Target Code. Thus, for example,\nuse of source code generators and preprocessors need not be considered\npart of the Compilation Process, since the Compilation Process can be\nunderstood as starting with the output of the generators or\npreprocessors.\n\nA Compilation Process is \"Eligible\" if it is done using GCC, alone or\nwith other GPL-compatible software, or if it is done without using any\nwork based on GCC. For example, using non-GPL-compatible Software to\noptimize any GCC intermediate representations would not qualify as an\nEligible Compilation Process.\n\n1. Grant of Additional Permission.\n\nYou have permission to propagate a work of Target Code formed by\ncombining the Runtime Library with Independent Modules, even if such\npropagation would otherwise violate the terms of GPLv3, provided that\nall Target Code was generated by Eligible Compilation Processes. You\nmay then convey such a combination under terms of your choice,\nconsistent with the licensing of the Independent Modules.\n\n2. No Weakening of GCC Copyleft.\n\nThe availability of this Exception does not imply any general\npresumption that third-party software is unaffected by the copyleft\nrequirements of the license of GCC.\n\n----\n\n GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. \n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n Preamble\n\n The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works. By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users. We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors. You can apply it to\nyour programs, too.\n\n When we speak of free software, we are referring to freedom, not\nprice. 
Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights. Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received. You must make sure that they, too, receive\nor can get the source code. And you must show them these terms so they\nknow their rights.\n\n Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software. For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so. This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software. The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable. Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts. If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary. To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n The precise terms and conditions for copying, distribution and\nmodification follow.\n\n TERMS AND CONDITIONS\n\n 0. Definitions.\n\n \"This License\" refers to version 3 of the GNU General Public License.\n\n \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n \"The Program\" refers to any copyrightable work licensed under this\nLicense. Each licensee is addressed as \"you\". \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy. 
The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy. Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies. Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License. If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n 1. Source Code.\n\n The \"source code\" for a work means the preferred form of the work\nfor making modifications to it. \"Object code\" means any non-source\nform of a work.\n\n A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form. A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities. However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work. For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n The Corresponding Source for a work in source code form is that\nsame work.\n\n 2. 
Basic Permissions.\n\n All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met. This License explicitly affirms your unlimited\npermission to run the unmodified Program. The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work. This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force. You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright. Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n Conveying under any other circumstances is permitted solely under\nthe conditions stated below. Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n 3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n 4. Conveying Verbatim Copies.\n\n You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n 5. Conveying Modified Source Versions.\n\n You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n a) The work must carry prominent notices stating that you modified\n it, and giving a relevant date.\n\n b) The work must carry prominent notices stating that it is\n released under this License and any conditions added under section\n 7. This requirement modifies the requirement in section 4 to\n \"keep intact all notices\".\n\n c) You must license the entire work, as a whole, under this\n License to anyone who comes into possession of a copy. 
This\n License will therefore apply, along with any applicable section 7\n additional terms, to the whole of the work, and all its parts,\n regardless of how they are packaged. This License gives no\n permission to license the work in any other way, but it does not\n invalidate such permission if you have separately received it.\n\n d) If the work has interactive user interfaces, each must display\n Appropriate Legal Notices; however, if the Program has interactive\n interfaces that do not display Appropriate Legal Notices, your\n work need not make them do so.\n\n A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit. Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n 6. Conveying Non-Source Forms.\n\n You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n a) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by the\n Corresponding Source fixed on a durable physical medium\n customarily used for software interchange.\n\n b) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by a\n written offer, valid for at least three years and valid for as\n long as you offer spare parts or customer support for that product\n model, to give anyone who possesses the object code either (1) a\n copy of the Corresponding Source for all the software in the\n product that is covered by this License, on a durable physical\n medium customarily used for software interchange, for a price no\n more than your reasonable cost of physically performing this\n conveying of source, or (2) access to copy the\n Corresponding Source from a network server at no charge.\n\n c) Convey individual copies of the object code with a copy of the\n written offer to provide the Corresponding Source. This\n alternative is allowed only occasionally and noncommercially, and\n only if you received the object code with such an offer, in accord\n with subsection 6b.\n\n d) Convey the object code by offering access from a designated\n place (gratis or for a charge), and offer equivalent access to the\n Corresponding Source in the same way through the same place at no\n further charge. You need not require recipients to copy the\n Corresponding Source along with the object code. If the place to\n copy the object code is a network server, the Corresponding Source\n may be on a different server (operated by you or a third party)\n that supports equivalent copying facilities, provided you maintain\n clear directions next to the object code saying where to find the\n Corresponding Source. 
Regardless of what server hosts the\n Corresponding Source, you remain obligated to ensure that it is\n available for as long as needed to satisfy these requirements.\n\n e) Convey the object code using peer-to-peer transmission, provided\n you inform other peers where the object code and Corresponding\n Source of the work are being offered to the general public at no\n charge under subsection 6d.\n\n A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling. In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage. For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product. A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source. The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information. But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed. Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n 7. 
Additional Terms.\n\n \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law. If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit. (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.) You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n a) Disclaiming warranty or limiting liability differently from the\n terms of sections 15 and 16 of this License; or\n\n b) Requiring preservation of specified reasonable legal notices or\n author attributions in that material or in the Appropriate Legal\n Notices displayed by works containing it; or\n\n c) Prohibiting misrepresentation of the origin of that material, or\n requiring that modified versions of such material be marked in\n reasonable ways as different from the original version; or\n\n d) Limiting the use for publicity purposes of names of licensors or\n authors of the material; or\n\n e) Declining to grant rights under trademark law for use of some\n trade names, trademarks, or service marks; or\n\n f) Requiring indemnification of licensors and authors of that\n material by anyone who conveys the material (or modified versions of\n it) with contractual assumptions of liability to the recipient, for\n any liability that these contractual assumptions directly impose on\n those licensors and authors.\n\n All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10. If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term. If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n 8. Termination.\n\n You may not propagate or modify a covered work except as expressly\nprovided under this License. 
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License. If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n 9. Acceptance Not Required for Having Copies.\n\n You are not required to accept this License in order to receive or\nrun a copy of the Program. Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance. However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work. These actions infringe copyright if you do\nnot accept this License. Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n 10. Automatic Licensing of Downstream Recipients.\n\n Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License. You are not responsible\nfor enforcing compliance by third parties with this License.\n\n An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations. If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License. For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n 11. Patents.\n\n A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based. 
The\nwork thus licensed is called the contributor's \"contributor version\".\n\n A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version. For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement). To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients. \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License. 
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n 12. No Surrender of Others' Freedom.\n\n If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all. For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n 13. Use with the GNU Affero General Public License.\n\n Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work. The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n 14. Revised Versions of this License.\n\n The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time. Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n Each version is given a distinguishing version number. If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation. If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n Later license versions may give you additional or different\npermissions. However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n 15. 
Disclaimer of Warranty.\n\n THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n 16. Limitation of Liability.\n\n IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n 17. Interpretation of Sections 15 and 16.\n\n If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Programs\n\n If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n To do so, attach the following notices to the program. It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n <program> Copyright (C) <year> <name of author>\n This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. 
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n The GNU General Public License does not permit incorporating your program\ninto proprietary programs. If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library. If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License. But first, please read\n<https://www.gnu.org/philosophy/why-not-lgpl.html>.\n"}, {"name": "sentencepiece", "version": "0.1.97", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "six", "version": "1.16.0", "license": "MIT License", "text": "Copyright (c) 2010-2020 Benjamin Peterson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "sniffio", "version": "1.3.0", "license": "Apache Software License; MIT License", "text": "This software is made available under the terms of *either* of the\nlicenses found in LICENSE.APACHE2 or LICENSE.MIT. 
Contributions to sniffio are\nmade under the terms of *both* these licenses.\n"}, {"name": "starlette", "version": "0.16.0", "license": "BSD License", "text": "Copyright \u00a9 2018, [Encode OSS Ltd](https://www.encode.io/).\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "threadpoolctl", "version": "3.1.0", "license": "BSD License", "text": "Copyright (c) 2019, threadpoolctl contributors\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."}, {"name": "torch", "version": "1.13.1", "license": "BSD License", "text": "From PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\n and IDIAP Research Institute nor the names of its contributors may be\n used to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\n\nThe Pytorch repository and source distributions bundle several libraries that are \ncompatibly licensed. We list these here.\n\nName: third_party\\FP16\nLicense: MIT\nFiles: third_party\\FP16\n For details, see the files concatenated below: third_party\\FP16\\LICENSE\n\nName: third_party\\FXdiv\nLicense: MIT\nFiles: third_party\\FXdiv\n For details, see the files concatenated below: third_party\\FXdiv\\LICENSE\n\nName: third_party\\NNPACK\nLicense: BSD-2-Clause\nFiles: third_party\\NNPACK\n For details, see the files concatenated below: third_party\\NNPACK\\LICENSE\n\nName: third_party\\QNNPACK\nLicense: BSD-3-Clause\nFiles: third_party\\QNNPACK\n For details, see the files concatenated below: third_party\\QNNPACK\\LICENSE\n\nName: third_party\\QNNPACK\\deps\\clog\nLicense: BSD-2-Clause\nFiles: third_party\\QNNPACK\\deps\\clog\n For details, see the files concatenated below: third_party\\QNNPACK\\deps\\clog\\LICENSE\n\nName: third_party\\VulkanMemoryAllocator\nLicense: MIT\nFiles: third_party\\VulkanMemoryAllocator\n For details, see the files concatenated below: third_party\\VulkanMemoryAllocator\\LICENSE.txt\n\nName: third_party\\XNNPACK\nLicense: BSD-3-Clause\nFiles: third_party\\XNNPACK\n For details, see the files concatenated below: third_party\\XNNPACK\\LICENSE\n\nName: third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\benchmark\n For details, see the files concatenated below: third_party\\benchmark\\LICENSE\n\nName: third_party\\cpuinfo\nLicense: BSD-2-Clause\nFiles: third_party\\cpuinfo\n For details, see the files concatenated below: third_party\\cpuinfo\\LICENSE\n\nName: third_party\\cpuinfo\\deps\\clog\nLicense: BSD-2-Clause\nFiles: third_party\\cpuinfo\\deps\\clog\n For details, see the files concatenated below: third_party\\cpuinfo\\deps\\clog\\LICENSE\n\nName: third_party\\cudnn_frontend\nLicense: MIT\nFiles: third_party\\cudnn_frontend\n For details, see the files concatenated below: third_party\\cudnn_frontend\\LICENSE.txt\n\nName: third_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\nLicense: MIT\nFiles: third_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\n For details, see the files concatenated below: third_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\\LICENSE.txt\n\nName: third_party\\cutlass\nLicense: BSD-3-Clause\nFiles: third_party\\cutlass\n For details, see the files concatenated below: third_party\\cutlass\\LICENSE.txt\n\nName: third_party\\eigen\nLicense: BSD-3-Clause\nFiles: third_party\\eigen\n For details, see the files concatenated below: third_party\\eigen\\COPYING.BSD\n\nName: third_party\\fbgemm\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\n For details, see the files concatenated below: third_party\\fbgemm\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\cpuinfo\nLicense: BSD-2-Clause\nFiles: third_party\\fbgemm\\third_party\\cpuinfo\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\cpuinfo\\LICENSE\n\nName: 
third_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\nLicense: BSD-2-Clause\nFiles: third_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\\third_party\\googletest\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\hipify_torch\nLicense: MIT\nFiles: third_party\\fbgemm\\third_party\\hipify_torch\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\hipify_torch\\LICENSE.txt\n\nName: third_party\\flatbuffers\nLicense: Apache-2.0\nFiles: third_party\\flatbuffers\n For details, see the files concatenated below: third_party\\flatbuffers\\LICENSE.txt\n\nName: third_party\\flatbuffers\\dart\nLicense: Apache-2.0\nFiles: third_party\\flatbuffers\\dart\n For details, see the files concatenated below: third_party\\flatbuffers\\dart\\LICENSE\n\nName: third_party\\flatbuffers\\swift\nLicense: Apache-2.0\nFiles: third_party\\flatbuffers\\swift\n For details, see the files concatenated below: third_party\\flatbuffers\\swift\\LICENSE\n\nName: third_party\\fmt\nLicense: MIT with exception\nFiles: third_party\\fmt\n For details, see the files concatenated below: third_party\\fmt\\LICENSE.rst\n\nName: third_party\\foxi\nLicense: MIT\nFiles: third_party\\foxi\n For details, see the files concatenated below: third_party\\foxi\\LICENSE\n\nName: third_party\\gemmlowp\\gemmlowp\nLicense: Apache-2.0\nFiles: third_party\\gemmlowp\\gemmlowp\n For details, see the files concatenated below: third_party\\gemmlowp\\gemmlowp\\LICENSE\n\nName: third_party\\gloo\nLicense: BSD-3-Clause\nFiles: third_party\\gloo\n For details, see the files concatenated below: third_party\\gloo\\LICENSE\n\nName: third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\googletest\n For details, see the files concatenated below: third_party\\googletest\\LICENSE\n\nName: third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\ideep\nLicense: MIT\nFiles: third_party\\ideep\n For details, see the files concatenated below: third_party\\ideep\\LICENSE\n\nName: third_party\\ideep\\mkl-dnn\nLicense: Apache-2.0\nFiles: third_party\\ideep\\mkl-dnn\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\LICENSE\n\nName: 
third_party\\ideep\\mkl-dnn\\tests\\gtest\nLicense: BSD-3-Clause\nFiles: third_party\\ideep\\mkl-dnn\\tests\\gtest\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\tests\\gtest\\LICENSE\n\nName: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\nLicense: Apache-2.0\nFiles: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\LICENSE\n\nName: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\nLicense: BSD-3-Clause\nFiles: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\\LICENSE\n\nName: third_party\\ios-cmake\nLicense: BSD-3-Clause\nFiles: third_party\\ios-cmake\n For details, see the files concatenated below: third_party\\ios-cmake\\LICENSE\n\nName: third_party\\kineto\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\n For details, see the files concatenated below: third_party\\kineto\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\fmt\nLicense: MIT with exception\nFiles: third_party\\kineto\\libkineto\\third_party\\fmt\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\fmt\\LICENSE.rst\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\kineto\\tb_plugin\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\tb_plugin\n For details, see the files concatenated below: third_party\\kineto\\tb_plugin\\LICENSE\n\nName: third_party\\miniz-2.1.0\nLicense: MIT\nFiles: third_party\\miniz-2.1.0\n For details, see the files concatenated below: third_party\\miniz-2.1.0\\LICENSE\n\nName: third_party\\nccl\\nccl\nLicense: BSD-3-Clause\nFiles: third_party\\nccl\\nccl\n For details, see the files concatenated below: third_party\\nccl\\nccl\\LICENSE.txt\n\nName: third_party\\neon2sse\nLicense: BSD-Source-Code\nFiles: third_party\\neon2sse\n For details, see the files concatenated below: third_party\\neon2sse\\LICENSE\n\nName: third_party\\nlohmann\\tests\\thirdparty\\doctest\nLicense: MIT\nFiles: third_party\\nlohmann\\tests\\thirdparty\\doctest\n For details, see the files concatenated below: third_party\\nlohmann\\tests\\thirdparty\\doctest\\LICENSE.txt\n\nName: 
third_party\\nlohmann\\tools\\cpplint\nLicense: BSD-3-Clause\nFiles: third_party\\nlohmann\\tools\\cpplint\n For details, see the files concatenated below: third_party\\nlohmann\\tools\\cpplint\\LICENSE\n\nName: third_party\\onnx\nLicense: Apache-2.0\nFiles: third_party\\onnx\n For details, see the files concatenated below: third_party\\onnx\\LICENSE\n\nName: third_party\\onnx-tensorrt\nLicense: MIT\nFiles: third_party\\onnx-tensorrt\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\LICENSE\n\nName: third_party\\onnx-tensorrt\\third_party\\onnx\nLicense: MIT\nFiles: third_party\\onnx-tensorrt\\third_party\\onnx\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\third_party\\onnx\\LICENSE\n\nName: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\benchmark\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\benchmark\\LICENSE\n\nName: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\pybind11\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\pybind11\\LICENSE\n\nName: third_party\\onnx\\third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\onnx\\third_party\\benchmark\n For details, see the files concatenated below: third_party\\onnx\\third_party\\benchmark\\LICENSE\n\nName: third_party\\onnx\\third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\onnx\\third_party\\pybind11\n For details, see the files concatenated below: third_party\\onnx\\third_party\\pybind11\\LICENSE\n\nName: third_party\\protobuf\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\n For details, see the files concatenated below: third_party\\protobuf\\LICENSE\n\nName: third_party\\protobuf\\third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\protobuf\\third_party\\benchmark\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\benchmark\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\\third_party\\googletest\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\protobuf\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\psimd\nLicense: MIT\nFiles: third_party\\psimd\n For details, see the files concatenated below: third_party\\psimd\\LICENSE\n\nName: third_party\\pthreadpool\nLicense: BSD-2-Clause\nFiles: 
third_party\\pthreadpool\n For details, see the files concatenated below: third_party\\pthreadpool\\LICENSE\n\nName: third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\pybind11\n For details, see the files concatenated below: third_party\\pybind11\\LICENSE\n\nName: third_party\\python-enum\\enum\nLicense: BSD-3-Clause\nFiles: third_party\\python-enum\\enum\n For details, see the files concatenated below: third_party\\python-enum\\enum\\LICENSE\n\nName: third_party\\python-peachpy\nLicense: BSD-2-Clause\nFiles: third_party\\python-peachpy\n For details, see the files concatenated below: third_party\\python-peachpy\\LICENSE.rst\n\nName: third_party\\python-six\nLicense: MIT\nFiles: third_party\\python-six\n For details, see the files concatenated below: third_party\\python-six\\LICENSE\n\nName: third_party\\sleef\nLicense: BSL-1.0\nFiles: third_party\\sleef\n For details, see the files concatenated below: third_party\\sleef\\LICENSE.txt\n\nName: third_party\\tbb\nLicense: Apache-2.0\nFiles: third_party\\tbb\n For details, see the files concatenated below: third_party\\tbb\\LICENSE\n\nName: third_party\\tensorpipe\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\n For details, see the files concatenated below: third_party\\tensorpipe\\LICENSE.txt\n\nName: third_party\\tensorpipe\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\googletest\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\libnop\nLicense: Apache-2.0\nFiles: third_party\\tensorpipe\\third_party\\libnop\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\libnop\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\libuv\nLicense: MIT\nFiles: third_party\\tensorpipe\\third_party\\libuv\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\libuv\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\pybind11\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\pybind11\\LICENSE\n\nName: third_party\\zstd\nLicense: BSD-3-Clause\nFiles: third_party\\zstd\n For details, see the files concatenated below: third_party\\zstd\\LICENSE\n\nthird_party\\FP16\\LICENSE\n------------------------\nThe MIT License (MIT)\n\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\nCopyright 2019 Google LLC\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of 
this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\FXdiv\\LICENSE\n-------------------------\nThe MIT License (MIT)\n\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2016-2017 Marat Dukhan\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\NNPACK\\LICENSE\n--------------------------\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2015-2017, Georgia Institute of Technology\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\QNNPACK\\LICENSE\n---------------------------\nBSD License\n\nFor QNNPACK software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\QNNPACK\\deps\\clog\\LICENSE\n-------------------------------------\nCopyright (C) 2018 Marat Dukhan\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\VulkanMemoryAllocator\\LICENSE.txt\n---------------------------------------------\nCopyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\nthird_party\\XNNPACK\\LICENSE\n---------------------------\nBSD License\n\nFor XNNPACK software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\nCopyright 2019 Google LLC\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\benchmark\\LICENSE\n-----------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\cpuinfo\\LICENSE\n---------------------------\nCopyright (c) 2019 Google LLC\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (C) 2012-2017 Georgia Institute of Technology\nCopyright (C) 2010-2012 Marat Dukhan\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\cpuinfo\\deps\\clog\\LICENSE\n-------------------------------------\nCopyright (C) 2018 Marat Dukhan\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\cudnn_frontend\\LICENSE.txt\n--------------------------------------\n/*\n * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */ \n\n\nthird_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\\LICENSE.txt\n--------------------------------------------------------------------\nMIT License \n\nCopyright (c) 2013-2021 Niels Lohmann\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nthird_party\\cutlass\\LICENSE.txt\n-------------------------------\nCopyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\nSPDX-License-Identifier: BSD-3-Clause\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\eigen\\COPYING.BSD\n-----------------------------\n/*\n Copyright (c) 2011, Intel Corporation. 
All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n be used to endorse or promote products derived from this software without\n specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n\n\nthird_party\\fbgemm\\LICENSE\n--------------------------\nBSD License\n\nFor FBGEMM software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\cpuinfo\\LICENSE\n----------------------------------------------\nCopyright (c) 2019 Google LLC\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (C) 2012-2017 Georgia Institute of Technology\nCopyright (C) 2010-2012 Marat Dukhan\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\\LICENSE\n--------------------------------------------------------\nCopyright (C) 2018 Marat Dukhan\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\LICENSE\n-------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\googlemock\\LICENSE\n------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n------------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\googletest\\LICENSE\n------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\hipify_torch\\LICENSE.txt\n-------------------------------------------------------\nMIT License\n\nCopyright (c) 2017 AMD Compute Libraries\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nthird_party\\flatbuffers\\LICENSE.txt\n-----------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\flatbuffers\\dart\\LICENSE\n------------------------------------\nThe code in lib/flat_buffers.dart is based on code that was released under the \nfollowing license:\n\nCopyright 2012, the Dart project authors. All rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n * Neither the name of Google Inc. nor the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nTo the extent permissible, the changes to that code and the other assets in \nthis package are licensed under the Apache 2.0 license:\n\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2014 Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\flatbuffers\\swift\\LICENSE\n-------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\fmt\\LICENSE.rst\n---------------------------\nCopyright (c) 2012 - present, Victor Zverovich\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n--- Optional exception to the license ---\n\nAs an exception, if, as a result of your compiling your source code, portions\nof this Software are embedded into a machine-executable object form of such\nsource code, you may redistribute such embedded portions in such object form\nwithout including the above copyright and permission notices.\n\n\nthird_party\\foxi\\LICENSE\n------------------------\nMIT License\n\nCopyright (c) 2019 Lu Fang\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nthird_party\\gemmlowp\\gemmlowp\\LICENSE\n-------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\gloo\\LICENSE\n------------------------\nBSD License\n\nFor Gloo software\n\nCopyright (c) 2017-present, Facebook, Inc. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\googletest\\LICENSE\n------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n-----------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\ideep\\LICENSE\n-------------------------\nCopyright (c) 2018 Intel Corporation.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\nthird_party\\ideep\\mkl-dnn\\LICENSE\n---------------------------------\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\n\nthird_party\\ideep\\mkl-dnn\\tests\\gtest\\LICENSE\n---------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\LICENSE\n----------------------------------------------------\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n ============================================================================\n\n Copyright 2016-2021 Intel Corporation\n Copyright 2018 YANDEX LLC\n Copyright 2019-2021 FUJITSU LIMITED\n Copyright 2020 Arm Limited and affiliates\n Copyright 2020 Codeplay Software Limited\n Copyright 2021 Alanna Tempest\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n This distribution includes third party software (\"third party programs\").\n This third party software, even if included with the distribution of\n the Intel software, may be governed by separate license terms, including\n without limitation, third party license terms, other Intel software license\n terms, and open source software license terms. 
These separate license terms\n govern your use of the third party programs as set forth in the\n \"THIRD-PARTY-PROGRAMS\" file.\n\n\nthird_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\\LICENSE\n-----------------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\ios-cmake\\LICENSE\n-----------------------------\nCopyright (c) 2011-2014, Andrew Fischer \nCopyright (c) 2016, Bogdan Cristea \nCopyright (c) 2017, Yangqing Jia \n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\nmay be used to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\LICENSE\n--------------------------\nBSD License\n\nFor Kineto software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nAll contributions by Microsoft:\nCopyright (c) Microsoft Corporation. (The Azure AI Platform team)\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\libkineto\\third_party\\fmt\\LICENSE.rst\n--------------------------------------------------------\nCopyright (c) 2012 - present, Victor Zverovich\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n--- Optional exception to the license ---\n\nAs an exception, if, as a result of your compiling your source code, portions\nof this Software are embedded into a machine-executable object form of such\nsource code, you may redistribute such embedded portions in such object form\nwithout including the above copyright and permission notices.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\LICENSE\n-----------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\LICENSE\n----------------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n----------------------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\googletest\\LICENSE\n----------------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\tb_plugin\\LICENSE\n------------------------------------\nBSD License\n\nFor Kineto software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nAll contributions by Microsoft:\nCopyright (c) Microsoft Corporation. 
(The Azure AI Platform team)\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\miniz-2.1.0\\LICENSE\n-------------------------------\nCopyright 2013-2014 RAD Game Tools and Valve Software\nCopyright 2010-2014 Rich Geldreich and Tenacious Software LLC\n\nAll Rights Reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\nthird_party\\nccl\\nccl\\LICENSE.txt\n---------------------------------\n\n Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of NVIDIA CORPORATION, Lawrence Berkeley National\n Laboratory, the U.S. 
Department of Energy, nor the names of their\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n The U.S. Department of Energy funded the development of this software\n under subcontract 7078610 with Lawrence Berkeley National Laboratory.\n\n\nThis code also includes files from the NVIDIA Tools Extension SDK project.\n\nSee:\n\n https://github.com/NVIDIA/NVTX\n\nfor more information and license details.\n\n\nthird_party\\neon2sse\\LICENSE\n----------------------------\ncreated by Victoria Zhislina, the Senior Application Engineer, Intel Corporation, victoria.zhislina@intel.com\n\n*** Copyright (C) 2012-2016 Intel Corporation. All rights reserved.\n\nIMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n\nBy downloading, copying, installing or using the software you agree to this license.\nIf you do not agree to this license, do not download, install, copy or use the software.\n\n License Agreement\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n * The name of the copyright holders may not be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are disclaimed.\nIn no event shall the Intel Corporation or contributors be liable for any direct,\nindirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n\n\nthird_party\\nlohmann\\tests\\thirdparty\\doctest\\LICENSE.txt\n---------------------------------------------------------\nThe MIT License (MIT)\n\nCopyright (c) 2016-2021 Viktor Kirilov\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software 
is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nthird_party\\nlohmann\\tools\\cpplint\\LICENSE\n------------------------------------------\ncpplint.py and its corresponding unit tests are Copyright (C) 2009 Google Inc.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\onnx\\LICENSE\n------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [yyyy] [name of copyright owner]

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\onnx-tensorrt\LICENSE
---------------------------------
MIT License

Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
Copyright (c) 2018 Open Neural Network Exchange

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


third_party\onnx-tensorrt\third_party\onnx\LICENSE
--------------------------------------------------
Open Neural Network Exchange

Copyright (c) Facebook, Inc. and Microsoft Corporation.
All rights reserved.

MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the ""Software""), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


third_party\onnx-tensorrt\third_party\onnx\third_party\benchmark\LICENSE
------------------------------------------------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [yyyy] [name of copyright owner]

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\onnx-tensorrt\third_party\onnx\third_party\pybind11\LICENSE
-----------------------------------------------------------------------
Copyright (c) 2016 Wenzel Jakob , All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
 may be used to endorse or promote products derived from this software
 without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

You are under no obligation whatsoever to provide any bug fixes, patches, or
upgrades to the features, functionality or performance of the source code
("Enhancements") to anyone; however, if you choose to make your Enhancements
available either publicly, or directly to the author of this software, without
imposing a separate written license agreement for such Enhancements, then you
hereby grant the following license: a non-exclusive, royalty-free perpetual
license to install, use, modify, prepare derivative works, incorporate into
other computer software, distribute, and sublicense such enhancements or
derivative works thereof, in binary and source code form.


third_party\onnx\third_party\benchmark\LICENSE
----------------------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [yyyy] [name of copyright owner]

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\onnx\third_party\pybind11\LICENSE
---------------------------------------------
Copyright (c) 2016 Wenzel Jakob , All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
 may be used to endorse or promote products derived from this software
 without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of
external contributions to this project including patches, pull requests, etc.


third_party\protobuf\LICENSE
----------------------------
Copyright 2008 Google Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

 * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
 * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Code generated by the Protocol Buffer compiler is owned by the owner
of the input file used when generating it. This code is not
standalone and requires a support library to be linked with it. This
support library is itself covered by the above license.


third_party\protobuf\third_party\benchmark\LICENSE
--------------------------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [yyyy] [name of copyright owner]

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\protobuf\third_party\googletest\LICENSE
---------------------------------------------------
Copyright 2008, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

 * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
 * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


third_party\protobuf\third_party\googletest\googlemock\LICENSE
--------------------------------------------------------------
Copyright 2008, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

 * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
 * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


third_party\protobuf\third_party\googletest\googlemock\scripts\generator\LICENSE
--------------------------------------------------------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [2007] Neal Norwitz
 Portions Copyright [2007] Google Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\protobuf\third_party\googletest\googletest\LICENSE
--------------------------------------------------------------
Copyright 2008, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

 * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
 * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


third_party\psimd\LICENSE
-------------------------
The MIT License (MIT)

Copyright (c) 2017 Facebook Inc.
Copyright (c) 2014-2017 Georgia Institute of Technology
Copyright 2019 Google LLC

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


third_party\pthreadpool\LICENSE
-------------------------------
Copyright 2019 Google LLC
Copyright (c) 2017 Facebook Inc.
Copyright (c) 2015-2017 Georgia Institute of Technology
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


third_party\pybind11\LICENSE
----------------------------
Copyright (c) 2016 Wenzel Jakob , All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
 may be used to endorse or promote products derived from this software
 without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of
external contributions to this project including patches, pull requests, etc.


third_party\python-enum\enum\LICENSE
------------------------------------
Copyright (c) 2013, Ethan Furman.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

 Redistributions of source code must retain the above
 copyright notice, this list of conditions and the
 following disclaimer.

 Redistributions in binary form must reproduce the above
 copyright notice, this list of conditions and the following
 disclaimer in the documentation and/or other materials
 provided with the distribution.

 Neither the name Ethan Furman nor the names of any
 contributors may be used to endorse or promote products
 derived from this software without specific prior written
 permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


third_party\python-peachpy\LICENSE.rst
--------------------------------------
==============================
PeachPy license (2-clause BSD)
==============================

Copyright (c) 2017, Facebook Inc.
Copyright (c) 2013-2017, Georgia Institute of Technology
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1.
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\python-six\\LICENSE\n------------------------------\nCopyright (c) 2010-2017 Benjamin Peterson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\sleef\\LICENSE.txt\n-----------------------------\nBoost Software License - Version 1.0 - August 17th, 2003\n\nPermission is hereby granted, free of charge, to any person or organization\nobtaining a copy of the software and accompanying documentation covered by\nthis license (the \"Software\") to use, reproduce, display, distribute,\nexecute, and transmit the Software, and to prepare derivative works of the\nSoftware, and to permit third-parties to whom the Software is furnished to\ndo so, all subject to the following:\n\nThe copyright notices in the Software and this entire statement, including\nthe above license grant, this restriction and the following disclaimer,\nmust be included in all copies of the Software, in whole or in part, and\nall derivative works of the Software, unless such copies or derivative\nworks are solely in the form of machine-executable object code generated by\na source language processor.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT\nSHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE\nFOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\n\nthird_party\\tbb\\LICENSE\n-----------------------\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\tensorpipe\\LICENSE.txt\n----------------------------------\nBSD License\n\nFor TensorPipe software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Meta nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\LICENSE\n-----------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\googlemock\\LICENSE\n----------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n----------------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\googletest\\LICENSE\n----------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\libnop\\LICENSE\n-------------------------------------------------\nCopyright 2017 The Native Object Protocols Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\nthird_party\\tensorpipe\\third_party\\libuv\\LICENSE\n------------------------------------------------\nlibuv is licensed for use as follows:\n\n====\nCopyright (c) 2015-present libuv project contributors.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n====\n\nThis license applies to parts of libuv originating from the\nhttps://github.com/joyent/libuv repository:\n\n====\n\nCopyright Joyent, Inc. and other Node contributors. All rights reserved.\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\n====\n\nThis license applies to all parts of libuv that are not externally\nmaintained libraries.\n\nThe externally maintained libraries used by libuv are:\n\n - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.\n\n - inet_pton and inet_ntop implementations, contained in src/inet.c, are\n copyright the Internet Systems Consortium, Inc., and licensed under the ISC\n license.\n\n - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three\n clause BSD license.\n\n - pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.\n Three clause BSD license.\n\n - android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design\n Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement\n n\u00b0 289016). Three clause BSD license.\n\n\nthird_party\\tensorpipe\\third_party\\pybind11\\LICENSE\n---------------------------------------------------\nCopyright (c) 2016 Wenzel Jakob , All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n\n\nthird_party\\zstd\\LICENSE\n------------------------\nBSD License\n\nFor Zstandard software\n\nCopyright (c) 2016-present, Facebook, Inc. 
All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "torch-complex", "version": "0.4.3", "license": "Apache Software License", "text": "Copyright 2021 Naoyuki Kamo\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License."}, {"name": "tqdm", "version": "4.64.1", "license": "MIT License; Mozilla Public License 2.0 (MPL 2.0)", "text": "`tqdm` is a product of collaborative work.\nUnless otherwise stated, all authors (see commit logs) retain copyright\nfor their respective work, and release the work under the MIT licence\n(text below).\n\nExceptions or notable authors are listed below\nin reverse chronological order:\n\n* files: *\n MPLv2.0 2015-2021 (c) Casper da Costa-Luis\n [casperdcl](https://github.com/casperdcl).\n* files: tqdm/_tqdm.py\n MIT 2016 (c) [PR #96] on behalf of Google Inc.\n* files: tqdm/_tqdm.py setup.py README.rst MANIFEST.in .gitignore\n MIT 2013 (c) Noam Yorav-Raphael, original author.\n\n[PR #96]: https://github.com/tqdm/tqdm/pull/96\n\n\nMozilla Public Licence (MPL) v. 2.0 - Exhibit A\n-----------------------------------------------\n\nThis Source Code Form is subject to the terms of the\nMozilla Public License, v. 
2.0.\nIf a copy of the MPL was not distributed with this project,\nYou can obtain one at https://mozilla.org/MPL/2.0/.\n\n\nMIT License (MIT)\n-----------------\n\nCopyright (c) 2013 noamraph\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "typeguard", "version": "2.13.3", "license": "MIT License", "text": "This is the MIT license: http://www.opensource.org/licenses/mit-license.php\n\nCopyright (c) Alex Gr\u00f6nholm\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this\nsoftware and associated documentation files (the \"Software\"), to deal in the Software\nwithout restriction, including without limitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of the Software, and to permit persons\nto whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or\nsubstantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\nPURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\nFOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n"}, {"name": "typing-extensions", "version": "4.4.0", "license": "Python Software Foundation License", "text": "A. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC. Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team. In October of the same\nyear, the PythonLabs team moved to Digital Creations, which became\nZope Corporation. 
In 2001, the Python Software Foundation (PSF, see\nhttps://www.python.org/psf/) was formed, a non-profit organization\ncreated specifically to own Python-related Intellectual Property.\nZope Corporation was a sponsoring member of the PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition). Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n Release Derived Year Owner GPL-\n from compatible? (1)\n\n 0.9.0 thru 1.2 1991-1995 CWI yes\n 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes\n 1.6 1.5.2 2000 CNRI no\n 2.0 1.6 2000 BeOpen.com no\n 1.6.1 1.6 2001 CNRI yes (2)\n 2.1 2.0+1.6.1 2001 PSF no\n 2.0.1 2.0+1.6.1 2001 PSF yes\n 2.1.1 2.1+2.0.1 2001 PSF yes\n 2.1.2 2.1.1 2002 PSF yes\n 2.1.3 2.1.2 2002 PSF yes\n 2.2 and above 2.1.1 2001-now PSF yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n the GPL. All Python licenses, unlike the GPL, let you distribute\n a modified version without making your changes open source. The\n GPL-compatible licenses make it possible to combine Python with\n other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n because its license has a choice of law clause. According to\n CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;\nAll Rights Reserved\" are retained in Python alone or in any derivative version\nprepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee. This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions. Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee. This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party. As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. 
This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee. Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement. This Agreement together with\nPython 1.6.1 may be located on the internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013. This\nAgreement may also be obtained from a proxy server on the internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement. Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee. 
This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands. All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "urllib3", "version": "1.26.13", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2008-2020 Andrey Petrov and contributors (see CONTRIBUTORS.txt)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "uvicorn", "version": "0.15.0", "license": "BSD License", "text": "Copyright \u00a9 2017-present, [Encode OSS Ltd](http://www.encode.io/).\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "zipp", "version": "3.11.0", "license": "MIT License", "text": "Copyright Jason R. Coombs\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n"}, {"name": "OpenBLAS", "version": null, "license": "BSD 3-clause license", "text": "Copyright (c) 2011-2014, The OpenBLAS Project\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n 1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n 2. 
Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n 3. Neither the name of the OpenBLAS project nor the names of \n its contributors may be used to endorse or promote products \n derived from this software without specific prior written \n permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\nUSE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "libsndfile-binaries", "version": "1.0.28", "license": "LGPL-2.1 license", "text": " GNU LESSER GENERAL PUBLIC LICENSE\n Version 2.1, February 1999\n\n Copyright (C) 1991, 1999 Free Software Foundation, Inc.\n 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n[This is the first released version of the Lesser GPL. It also counts\n as the successor of the GNU Library Public License, version 2, hence\n the version number 2.1.]\n\n Preamble\n\n The licenses for most software are designed to take away your\nfreedom to share and change it. By contrast, the GNU General Public\nLicenses are intended to guarantee your freedom to share and change\nfree software--to make sure the software is free for all its users.\n\n This license, the Lesser General Public License, applies to some\nspecially designated software packages--typically libraries--of the\nFree Software Foundation and other authors who decide to use it. You\ncan use it too, but we suggest you first think carefully about whether\nthis license or the ordinary General Public License is the better\nstrategy to use in any particular case, based on the explanations below.\n\n When we speak of free software, we are referring to freedom of use,\nnot price. Our General Public Licenses are designed to make sure that\nyou have the freedom to distribute copies of free software (and charge\nfor this service if you wish); that you receive source code or can get\nit if you want it; that you can change the software and use pieces of\nit in new free programs; and that you are informed that you can do\nthese things.\n\n To protect your rights, we need to make restrictions that forbid\ndistributors to deny you these rights or to ask you to surrender these\nrights. These restrictions translate to certain responsibilities for\nyou if you distribute copies of the library or if you modify it.\n\n For example, if you distribute copies of the library, whether gratis\nor for a fee, you must give the recipients all the rights that we gave\nyou. You must make sure that they, too, receive or can get the source\ncode. 
If you link other code with the library, you must provide\ncomplete object files to the recipients, so that they can relink them\nwith the library after making changes to the library and recompiling\nit. And you must show them these terms so they know their rights.\n\n We protect your rights with a two-step method: (1) we copyright the\nlibrary, and (2) we offer you this license, which gives you legal\npermission to copy, distribute and/or modify the library.\n\n To protect each distributor, we want to make it very clear that\nthere is no warranty for the free library. Also, if the library is\nmodified by someone else and passed on, the recipients should know\nthat what they have is not the original version, so that the original\nauthor's reputation will not be affected by problems that might be\nintroduced by others.\n\n Finally, software patents pose a constant threat to the existence of\nany free program. We wish to make sure that a company cannot\neffectively restrict the users of a free program by obtaining a\nrestrictive license from a patent holder. Therefore, we insist that\nany patent license obtained for a version of the library must be\nconsistent with the full freedom of use specified in this license.\n\n Most GNU software, including some libraries, is covered by the\nordinary GNU General Public License. This license, the GNU Lesser\nGeneral Public License, applies to certain designated libraries, and\nis quite different from the ordinary General Public License. We use\nthis license for certain libraries in order to permit linking those\nlibraries into non-free programs.\n\n When a program is linked with a library, whether statically or using\na shared library, the combination of the two is legally speaking a\ncombined work, a derivative of the original library. The ordinary\nGeneral Public License therefore permits such linking only if the\nentire combination fits its criteria of freedom. The Lesser General\nPublic License permits more lax criteria for linking other code with\nthe library.\n\n We call this license the \"Lesser\" General Public License because it\ndoes Less to protect the user's freedom than the ordinary General\nPublic License. It also provides other free software developers Less\nof an advantage over competing non-free programs. These disadvantages\nare the reason we use the ordinary General Public License for many\nlibraries. However, the Lesser license provides advantages in certain\nspecial circumstances.\n\n For example, on rare occasions, there may be a special need to\nencourage the widest possible use of a certain library, so that it becomes\na de-facto standard. To achieve this, non-free programs must be\nallowed to use the library. A more frequent case is that a free\nlibrary does the same job as widely used non-free libraries. In this\ncase, there is little to gain by limiting the free library to free\nsoftware only, so we use the Lesser General Public License.\n\n In other cases, permission to use a particular library in non-free\nprograms enables a greater number of people to use a large body of\nfree software. 
For example, permission to use the GNU C Library in\nnon-free programs enables many more people to use the whole GNU\noperating system, as well as its variant, the GNU/Linux operating\nsystem.\n\n Although the Lesser General Public License is Less protective of the\nusers' freedom, it does ensure that the user of a program that is\nlinked with the Library has the freedom and the wherewithal to run\nthat program using a modified version of the Library.\n\n The precise terms and conditions for copying, distribution and\nmodification follow. Pay close attention to the difference between a\n\"work based on the library\" and a \"work that uses the library\". The\nformer contains code derived from the library, whereas the latter must\nbe combined with the library in order to run.\n\n GNU LESSER GENERAL PUBLIC LICENSE\n TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n 0. This License Agreement applies to any software library or other\nprogram which contains a notice placed by the copyright holder or\nother authorized party saying it may be distributed under the terms of\nthis Lesser General Public License (also called \"this License\").\nEach licensee is addressed as \"you\".\n\n A \"library\" means a collection of software functions and/or data\nprepared so as to be conveniently linked with application programs\n(which use some of those functions and data) to form executables.\n\n The \"Library\", below, refers to any such software library or work\nwhich has been distributed under these terms. A \"work based on the\nLibrary\" means either the Library or any derivative work under\ncopyright law: that is to say, a work containing the Library or a\nportion of it, either verbatim or with modifications and/or translated\nstraightforwardly into another language. (Hereinafter, translation is\nincluded without limitation in the term \"modification\".)\n\n \"Source code\" for a work means the preferred form of the work for\nmaking modifications to it. For a library, complete source code means\nall the source code for all modules it contains, plus any associated\ninterface definition files, plus the scripts used to control compilation\nand installation of the library.\n\n Activities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope. The act of\nrunning a program using the Library is not restricted, and output from\nsuch a program is covered only if its contents constitute a work based\non the Library (independent of the use of the Library in a tool for\nwriting it). Whether that is true depends on what the Library does\nand what the program that uses the Library does.\n \n 1. You may copy and distribute verbatim copies of the Library's\ncomplete source code as you receive it, in any medium, provided that\nyou conspicuously and appropriately publish on each copy an\nappropriate copyright notice and disclaimer of warranty; keep intact\nall the notices that refer to this License and to the absence of any\nwarranty; and distribute a copy of this License along with the\nLibrary.\n\n You may charge a fee for the physical act of transferring a copy,\nand you may at your option offer warranty protection in exchange for a\nfee.\n\n 2. 
You may modify your copy or copies of the Library or any portion\nof it, thus forming a work based on the Library, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n a) The modified work must itself be a software library.\n\n b) You must cause the files modified to carry prominent notices\n stating that you changed the files and the date of any change.\n\n c) You must cause the whole of the work to be licensed at no\n charge to all third parties under the terms of this License.\n\n d) If a facility in the modified Library refers to a function or a\n table of data to be supplied by an application program that uses\n the facility, other than as an argument passed when the facility\n is invoked, then you must make a good faith effort to ensure that,\n in the event an application does not supply such function or\n table, the facility still operates, and performs whatever part of\n its purpose remains meaningful.\n\n (For example, a function in a library to compute square roots has\n a purpose that is entirely well-defined independent of the\n application. Therefore, Subsection 2d requires that any\n application-supplied function or table used by this function must\n be optional: if the application does not supply it, the square\n root function must still compute square roots.)\n\nThese requirements apply to the modified work as a whole. If\nidentifiable sections of that work are not derived from the Library,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works. But when you\ndistribute the same sections as part of a whole which is a work based\non the Library, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote\nit.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Library.\n\nIn addition, mere aggregation of another work not based on the Library\nwith the Library (or with a work based on the Library) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n 3. You may opt to apply the terms of the ordinary GNU General Public\nLicense instead of this License to a given copy of the Library. To do\nthis, you must alter all the notices that refer to this License, so\nthat they refer to the ordinary GNU General Public License, version 2,\ninstead of to this License. (If a newer version than version 2 of the\nordinary GNU General Public License has appeared, then you can specify\nthat version instead if you wish.) Do not make any other change in\nthese notices.\n\n Once this change is made in a given copy, it is irreversible for\nthat copy, so the ordinary GNU General Public License applies to all\nsubsequent copies and derivative works made from that copy.\n\n This option is useful when you wish to copy part of the code of\nthe Library into a program that is not a library.\n\n 4. 
You may copy and distribute the Library (or a portion or\nderivative of it, under Section 2) in object code or executable form\nunder the terms of Sections 1 and 2 above provided that you accompany\nit with the complete corresponding machine-readable source code, which\nmust be distributed under the terms of Sections 1 and 2 above on a\nmedium customarily used for software interchange.\n\n If distribution of object code is made by offering access to copy\nfrom a designated place, then offering equivalent access to copy the\nsource code from the same place satisfies the requirement to\ndistribute the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n 5. A program that contains no derivative of any portion of the\nLibrary, but is designed to work with the Library by being compiled or\nlinked with it, is called a \"work that uses the Library\". Such a\nwork, in isolation, is not a derivative work of the Library, and\ntherefore falls outside the scope of this License.\n\n However, linking a \"work that uses the Library\" with the Library\ncreates an executable that is a derivative of the Library (because it\ncontains portions of the Library), rather than a \"work that uses the\nlibrary\". The executable is therefore covered by this License.\nSection 6 states terms for distribution of such executables.\n\n When a \"work that uses the Library\" uses material from a header file\nthat is part of the Library, the object code for the work may be a\nderivative work of the Library even though the source code is not.\nWhether this is true is especially significant if the work can be\nlinked without the Library, or if the work is itself a library. The\nthreshold for this to be true is not precisely defined by law.\n\n If such an object file uses only numerical parameters, data\nstructure layouts and accessors, and small macros and small inline\nfunctions (ten lines or less in length), then the use of the object\nfile is unrestricted, regardless of whether it is legally a derivative\nwork. (Executables containing this object code plus portions of the\nLibrary will still fall under Section 6.)\n\n Otherwise, if the work is a derivative of the Library, you may\ndistribute the object code for the work under the terms of Section 6.\nAny executables containing that work also fall under Section 6,\nwhether or not they are linked directly with the Library itself.\n\n 6. As an exception to the Sections above, you may also combine or\nlink a \"work that uses the Library\" with the Library to produce a\nwork containing portions of the Library, and distribute that work\nunder terms of your choice, provided that the terms permit\nmodification of the work for the customer's own use and reverse\nengineering for debugging such modifications.\n\n You must give prominent notice with each copy of the work that the\nLibrary is used in it and that the Library and its use are covered by\nthis License. You must supply a copy of this License. If the work\nduring execution displays copyright notices, you must include the\ncopyright notice for the Library among them, as well as a reference\ndirecting the user to the copy of this License. 
Also, you must do one\nof these things:\n\n a) Accompany the work with the complete corresponding\n machine-readable source code for the Library including whatever\n changes were used in the work (which must be distributed under\n Sections 1 and 2 above); and, if the work is an executable linked\n with the Library, with the complete machine-readable \"work that\n uses the Library\", as object code and/or source code, so that the\n user can modify the Library and then relink to produce a modified\n executable containing the modified Library. (It is understood\n that the user who changes the contents of definitions files in the\n Library will not necessarily be able to recompile the application\n to use the modified definitions.)\n\n b) Use a suitable shared library mechanism for linking with the\n Library. A suitable mechanism is one that (1) uses at run time a\n copy of the library already present on the user's computer system,\n rather than copying library functions into the executable, and (2)\n will operate properly with a modified version of the library, if\n the user installs one, as long as the modified version is\n interface-compatible with the version that the work was made with.\n\n c) Accompany the work with a written offer, valid for at\n least three years, to give the same user the materials\n specified in Subsection 6a, above, for a charge no more\n than the cost of performing this distribution.\n\n d) If distribution of the work is made by offering access to copy\n from a designated place, offer equivalent access to copy the above\n specified materials from the same place.\n\n e) Verify that the user has already received a copy of these\n materials or that you have already sent this user a copy.\n\n For an executable, the required form of the \"work that uses the\nLibrary\" must include any data and utility programs needed for\nreproducing the executable from it. However, as a special exception,\nthe materials to be distributed need not include anything that is\nnormally distributed (in either source or binary form) with the major\ncomponents (compiler, kernel, and so on) of the operating system on\nwhich the executable runs, unless that component itself accompanies\nthe executable.\n\n It may happen that this requirement contradicts the license\nrestrictions of other proprietary libraries that do not normally\naccompany the operating system. Such a contradiction means you cannot\nuse both them and the Library together in an executable that you\ndistribute.\n\n 7. You may place library facilities that are a work based on the\nLibrary side-by-side in a single library together with other library\nfacilities not covered by this License, and distribute such a combined\nlibrary, provided that the separate distribution of the work based on\nthe Library and of the other library facilities is otherwise\npermitted, and provided that you do these two things:\n\n a) Accompany the combined library with a copy of the same work\n based on the Library, uncombined with any other library\n facilities. This must be distributed under the terms of the\n Sections above.\n\n b) Give prominent notice with the combined library of the fact\n that part of it is a work based on the Library, and explaining\n where to find the accompanying uncombined form of the same work.\n\n 8. You may not copy, modify, sublicense, link with, or distribute\nthe Library except as expressly provided under this License. 
Any\nattempt otherwise to copy, modify, sublicense, link with, or\ndistribute the Library is void, and will automatically terminate your\nrights under this License. However, parties who have received copies,\nor rights, from you under this License will not have their licenses\nterminated so long as such parties remain in full compliance.\n\n 9. You are not required to accept this License, since you have not\nsigned it. However, nothing else grants you permission to modify or\ndistribute the Library or its derivative works. These actions are\nprohibited by law if you do not accept this License. Therefore, by\nmodifying or distributing the Library (or any work based on the\nLibrary), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Library or works based on it.\n\n 10. Each time you redistribute the Library (or any work based on the\nLibrary), the recipient automatically receives a license from the\noriginal licensor to copy, distribute, link with or modify the Library\nsubject to these terms and conditions. You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties with\nthis License.\n\n 11. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Library at all. For example, if a patent\nlicense would not permit royalty-free redistribution of the Library by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Library.\n\nIf any portion of this section is held invalid or unenforceable under any\nparticular circumstance, the balance of the section is intended to apply,\nand the section as a whole is intended to apply in other circumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system which is\nimplemented by public license practices. Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n 12. If the distribution and/or use of the Library is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Library under this License may add\nan explicit geographical distribution limitation excluding those countries,\nso that distribution is permitted only in or among countries not thus\nexcluded. 
In such case, this License incorporates the limitation as if\nwritten in the body of this License.\n\n 13. The Free Software Foundation may publish revised and/or new\nversions of the Lesser General Public License from time to time.\nSuch new versions will be similar in spirit to the present version,\nbut may differ in detail to address new problems or concerns.\n\nEach version is given a distinguishing version number. If the Library\nspecifies a version number of this License which applies to it and\n\"any later version\", you have the option of following the terms and\nconditions either of that version or of any later version published by\nthe Free Software Foundation. If the Library does not specify a\nlicense version number, you may choose any version ever published by\nthe Free Software Foundation.\n\n 14. If you wish to incorporate parts of the Library into other free\nprograms whose distribution conditions are incompatible with these,\nwrite to the author to ask for permission. For software which is\ncopyrighted by the Free Software Foundation, write to the Free\nSoftware Foundation; we sometimes make exceptions for this. Our\ndecision will be guided by the two goals of preserving the free status\nof all derivatives of our free software and of promoting the sharing\nand reuse of software generally.\n\n NO WARRANTY\n\n 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO\nWARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.\nEXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR\nOTHER PARTIES PROVIDE THE LIBRARY \"AS IS\" WITHOUT WARRANTY OF ANY\nKIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE\nLIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME\nTHE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN\nWRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY\nAND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU\nFOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR\nCONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE\nLIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING\nRENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A\nFAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF\nSUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGES.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Libraries\n\n If you develop a new library, and you want it to be of the greatest\npossible use to the public, we recommend making it free software that\neveryone can redistribute and change. You can do so by permitting\nredistribution under these terms (or, alternatively, under the terms of the\nordinary General Public License).\n\n To apply these terms, attach the following notices to the library. 
It is\nsafest to attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least the\n\"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the library's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This library is free software; you can redistribute it and/or\n modify it under the terms of the GNU Lesser General Public\n License as published by the Free Software Foundation; either\n version 2.1 of the License, or (at your option) any later version.\n\n This library is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n Lesser General Public License for more details.\n\n You should have received a copy of the GNU Lesser General Public\n License along with this library; if not, write to the Free Software\n Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\nAlso add information on how to contact you by electronic and paper mail.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the library, if\nnecessary. Here is a sample; alter the names:\n\n Yoyodyne, Inc., hereby disclaims all copyright interest in the\n library `Frob' (a library for tweaking knobs) written by James Random Hacker.\n\n <signature of Ty Coon>, 1 April 1990\n Ty Coon, President of Vice\n\nThat's all there is to it!\n\n"}, {"name": "libogg", "version": "1.3.2", "license": "BSD 3-clause license", "text": "Copyright (c) 2002, Xiph.org Foundation\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n- Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n- Neither the name of the Xiph.org Foundation nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION\nOR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "libvorbis", "version": "1.3.5", "license": "BSD 3-clause license", "text": "Copyright (c) 2002-2008 Xiph.org Foundation\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n- Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n- Neither the name of the Xiph.org Foundation nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION\nOR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "FLAC", "version": "1.3.2", "license": "Xiph.org's BSD-like license", "text": "Copyright (C) 2000-2009 Josh Coalson\nCopyright (C) 2011-2016 Xiph.Org Foundation\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n- Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n- Neither the name of the Xiph.org Foundation nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}] \ No newline at end of file diff --git a/voice_bridge/engine_manifest_assets/downloadable_libraries.json b/voice_bridge/engine_manifest_assets/downloadable_libraries.json new file mode 100644 index 0000000000000000000000000000000000000000..fe51488c7066f6687ef680d6bfaa4f7768ef205c --- /dev/null +++ b/voice_bridge/engine_manifest_assets/downloadable_libraries.json @@ -0,0 +1 @@ +[] diff --git a/voice_bridge/engine_manifest_assets/icon.png b/voice_bridge/engine_manifest_assets/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..98064f347fdf8bd4fe42098c79ad293f291d40c8 Binary files /dev/null and b/voice_bridge/engine_manifest_assets/icon.png differ diff --git a/voice_bridge/engine_manifest_assets/terms_of_service.md b/voice_bridge/engine_manifest_assets/terms_of_service.md new file mode 100644 index 0000000000000000000000000000000000000000..c97bb512c679e98b7685a3b3d7013497ca280954 --- /dev/null +++ b/voice_bridge/engine_manifest_assets/terms_of_service.md @@ -0,0 +1 @@ +dummy teams of service \ No newline at end of file diff --git a/voice_bridge/engine_manifest_assets/update_infos.json b/voice_bridge/engine_manifest_assets/update_infos.json new file mode 100644 index 0000000000000000000000000000000000000000..c313a428f135bfd32ca183849f7580bd90b22f1b --- /dev/null +++ b/voice_bridge/engine_manifest_assets/update_infos.json @@ -0,0 +1,7 @@ +[ + { + "version": "0.0.1", + "descriptions": ["dummy descriptions"], + "contributors": ["dummy contributor"] + } +] \ No newline at end of file diff --git a/voice_bridge/espnet/version.txt b/voice_bridge/espnet/version.txt new file mode 100644 index 0000000000000000000000000000000000000000..574cb0d455e5e3c53ebb9e6dbc708d1af50b1d34 --- /dev/null +++ b/voice_bridge/espnet/version.txt @@ -0,0 +1 @@ +0.10.7a1 diff --git a/voice_bridge/fbgemm.dll b/voice_bridge/fbgemm.dll new file mode 100644 index 0000000000000000000000000000000000000000..5c351dbb3409af0342bb93e6b2966fc00100b535 --- /dev/null +++ b/voice_bridge/fbgemm.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:007eec76bb67a4f772b5c60d1553620a6794e80d723013d478bcde66aeef13a2 +size 4785152 diff --git a/voice_bridge/h5py/_conv.pyd b/voice_bridge/h5py/_conv.pyd new file mode 100644 index 0000000000000000000000000000000000000000..491a515ccd32cda00933bdc90de3e1cb62a5303d Binary files /dev/null and b/voice_bridge/h5py/_conv.pyd differ diff --git a/voice_bridge/h5py/_errors.pyd b/voice_bridge/h5py/_errors.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f13f381f89c0523913c2dfaabe76322df42683bf Binary files /dev/null and b/voice_bridge/h5py/_errors.pyd differ diff --git a/voice_bridge/h5py/_objects.pyd b/voice_bridge/h5py/_objects.pyd new file mode 100644 index 0000000000000000000000000000000000000000..649d5f8e49307c8d8a675a64e0edb67c4529cf0a Binary files /dev/null and b/voice_bridge/h5py/_objects.pyd differ diff --git a/voice_bridge/h5py/_proxy.pyd b/voice_bridge/h5py/_proxy.pyd new file mode 100644 index 
0000000000000000000000000000000000000000..977def46cf83f0f645d5c6757932238a4a99333c Binary files /dev/null and b/voice_bridge/h5py/_proxy.pyd differ diff --git a/voice_bridge/h5py/_selector.pyd b/voice_bridge/h5py/_selector.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9f380ea810bdaa8d6a46f044fe1b9a6266dae7aa Binary files /dev/null and b/voice_bridge/h5py/_selector.pyd differ diff --git a/voice_bridge/h5py/defs.pyd b/voice_bridge/h5py/defs.pyd new file mode 100644 index 0000000000000000000000000000000000000000..01be42d376504dd462d62634ae5cf33cd3568dbf Binary files /dev/null and b/voice_bridge/h5py/defs.pyd differ diff --git a/voice_bridge/h5py/h5.pyd b/voice_bridge/h5py/h5.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7ab73eb3b10ada181ca4813ae9f485338e428f45 Binary files /dev/null and b/voice_bridge/h5py/h5.pyd differ diff --git a/voice_bridge/h5py/h5a.pyd b/voice_bridge/h5py/h5a.pyd new file mode 100644 index 0000000000000000000000000000000000000000..290642e559433ef9787b518a250bed8452858c90 Binary files /dev/null and b/voice_bridge/h5py/h5a.pyd differ diff --git a/voice_bridge/h5py/h5ac.pyd b/voice_bridge/h5py/h5ac.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b282d81c00fa08f57436453bbe8763f8843173c2 Binary files /dev/null and b/voice_bridge/h5py/h5ac.pyd differ diff --git a/voice_bridge/h5py/h5d.pyd b/voice_bridge/h5py/h5d.pyd new file mode 100644 index 0000000000000000000000000000000000000000..aa40edaf296c3077b591744b078ee393cef5eae6 Binary files /dev/null and b/voice_bridge/h5py/h5d.pyd differ diff --git a/voice_bridge/h5py/h5ds.pyd b/voice_bridge/h5py/h5ds.pyd new file mode 100644 index 0000000000000000000000000000000000000000..1786c1f34152f3cc03bcd447690f2d0cd79e2c23 Binary files /dev/null and b/voice_bridge/h5py/h5ds.pyd differ diff --git a/voice_bridge/h5py/h5f.pyd b/voice_bridge/h5py/h5f.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7e42231394eb06320bf86b9056002e7b78fcfab7 Binary files /dev/null and b/voice_bridge/h5py/h5f.pyd differ diff --git a/voice_bridge/h5py/h5fd.pyd b/voice_bridge/h5py/h5fd.pyd new file mode 100644 index 0000000000000000000000000000000000000000..37355b289ff3987593d9679257dda86de2919a1b Binary files /dev/null and b/voice_bridge/h5py/h5fd.pyd differ diff --git a/voice_bridge/h5py/h5g.pyd b/voice_bridge/h5py/h5g.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7e058cf4e706f9ac57506266a8b9c29b4cf3c01f Binary files /dev/null and b/voice_bridge/h5py/h5g.pyd differ diff --git a/voice_bridge/h5py/h5i.pyd b/voice_bridge/h5py/h5i.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7efad0f4ac688453d5e11571aa6d6431d883e1d3 Binary files /dev/null and b/voice_bridge/h5py/h5i.pyd differ diff --git a/voice_bridge/h5py/h5l.pyd b/voice_bridge/h5py/h5l.pyd new file mode 100644 index 0000000000000000000000000000000000000000..3767153da5358990f7561f5da33443ec9de11a3e Binary files /dev/null and b/voice_bridge/h5py/h5l.pyd differ diff --git a/voice_bridge/h5py/h5o.pyd b/voice_bridge/h5py/h5o.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9fbbcc056a09e09abf4ce9d62d8bb117c3f18f84 Binary files /dev/null and b/voice_bridge/h5py/h5o.pyd differ diff --git a/voice_bridge/h5py/h5p.pyd b/voice_bridge/h5py/h5p.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e791f0994d915c38a594434163d05fc3667d0e31 Binary files /dev/null and b/voice_bridge/h5py/h5p.pyd differ diff --git 
a/voice_bridge/h5py/h5pl.pyd b/voice_bridge/h5py/h5pl.pyd new file mode 100644 index 0000000000000000000000000000000000000000..96fb471a9c9f8b50820ae6cad4265741ba0e122e Binary files /dev/null and b/voice_bridge/h5py/h5pl.pyd differ diff --git a/voice_bridge/h5py/h5r.pyd b/voice_bridge/h5py/h5r.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b23528565f2496ea20506dbe78e2d421f45fa6eb Binary files /dev/null and b/voice_bridge/h5py/h5r.pyd differ diff --git a/voice_bridge/h5py/h5s.pyd b/voice_bridge/h5py/h5s.pyd new file mode 100644 index 0000000000000000000000000000000000000000..1271004d4fd64834b3309ae5d09e7c0f022706ba Binary files /dev/null and b/voice_bridge/h5py/h5s.pyd differ diff --git a/voice_bridge/h5py/h5t.pyd b/voice_bridge/h5py/h5t.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f9c26adcef3553c6adb814e1672e0db6e5ec5f2d Binary files /dev/null and b/voice_bridge/h5py/h5t.pyd differ diff --git a/voice_bridge/h5py/h5z.pyd b/voice_bridge/h5py/h5z.pyd new file mode 100644 index 0000000000000000000000000000000000000000..c2c3363c774361c85fa874485824564e8c0a878a Binary files /dev/null and b/voice_bridge/h5py/h5z.pyd differ diff --git a/voice_bridge/h5py/utils.pyd b/voice_bridge/h5py/utils.pyd new file mode 100644 index 0000000000000000000000000000000000000000..32a866b6a4dd9458d4f53c22ad54816ed36e5ff7 Binary files /dev/null and b/voice_bridge/h5py/utils.pyd differ diff --git a/voice_bridge/hdf5.dll b/voice_bridge/hdf5.dll new file mode 100644 index 0000000000000000000000000000000000000000..f619e4c5856918340d9413721e301ab2dd9d081b --- /dev/null +++ b/voice_bridge/hdf5.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5cbe8d632c38cf3e633be91130d6123af8f7a5ee1ac40d0f2f93d6964ad5f96 +size 3334144 diff --git a/voice_bridge/hdf5_hl.dll b/voice_bridge/hdf5_hl.dll new file mode 100644 index 0000000000000000000000000000000000000000..8a6ddf565edada011a8f8c91aebd282134638f7f Binary files /dev/null and b/voice_bridge/hdf5_hl.dll differ diff --git a/voice_bridge/jamo/data/U+11xx.json b/voice_bridge/jamo/data/U+11xx.json new file mode 100644 index 0000000000000000000000000000000000000000..c6b38e68222fafe1ae66cf9e47aa2a1b8ef02cdb --- /dev/null +++ b/voice_bridge/jamo/data/U+11xx.json @@ -0,0 +1,258 @@ +{ + "\u1100": "HANGUL CHOSEONG KIYEOK", + "\u1101": "HANGUL CHOSEONG SSANGKIYEOK", + "\u1102": "HANGUL CHOSEONG NIEUN", + "\u1103": "HANGUL CHOSEONG TIKEUT", + "\u1104": "HANGUL CHOSEONG SSANGTIKEUT", + "\u1105": "HANGUL CHOSEONG RIEUL", + "\u1106": "HANGUL CHOSEONG MIEUM", + "\u1107": "HANGUL CHOSEONG PIEUP", + "\u1108": "HANGUL CHOSEONG SSANGPIEUP", + "\u1109": "HANGUL CHOSEONG SIOS", + "\u110a": "HANGUL CHOSEONG SSANGSIOS", + "\u110b": "HANGUL CHOSEONG IEUNG", + "\u110c": "HANGUL CHOSEONG CIEUC", + "\u110d": "HANGUL CHOSEONG SSANGCIEUC", + "\u110e": "HANGUL CHOSEONG CHIEUCH", + "\u110f": "HANGUL CHOSEONG KHIEUKH", + "\u1110": "HANGUL CHOSEONG THIEUTH", + "\u1111": "HANGUL CHOSEONG PHIEUPH", + "\u1112": "HANGUL CHOSEONG HIEUH", + "\u1113": "HANGUL CHOSEONG NIEUN-KIYEOK", + "\u1114": "HANGUL CHOSEONG SSANGNIEUN", + "\u1115": "HANGUL CHOSEONG NIEUN-TIKEUT", + "\u1116": "HANGUL CHOSEONG NIEUN-PIEUP", + "\u1117": "HANGUL CHOSEONG TIKEUT-KIYEOK", + "\u1118": "HANGUL CHOSEONG RIEUL-NIEUN", + "\u1119": "HANGUL CHOSEONG SSANGRIEUL", + "\u111a": "HANGUL CHOSEONG RIEUL-HIEUH", + "\u111b": "HANGUL CHOSEONG KAPYEOUNRIEUL", + "\u111c": "HANGUL CHOSEONG MIEUM-PIEUP", + "\u111d": "HANGUL CHOSEONG KAPYEOUNMIEUM", + "\u111e": "HANGUL CHOSEONG 
PIEUP-KIYEOK", + "\u111f": "HANGUL CHOSEONG PIEUP-NIEUN", + "\u1120": "HANGUL CHOSEONG PIEUP-TIKEUT", + "\u1121": "HANGUL CHOSEONG PIEUP-SIOS", + "\u1122": "HANGUL CHOSEONG PIEUP-SIOS-KIYEOK", + "\u1123": "HANGUL CHOSEONG PIEUP-SIOS-TIKEUT", + "\u1124": "HANGUL CHOSEONG PIEUP-SIOS-PIEUP", + "\u1125": "HANGUL CHOSEONG PIEUP-SSANGSIOS", + "\u1126": "HANGUL CHOSEONG PIEUP-SIOS-CIEUC", + "\u1127": "HANGUL CHOSEONG PIEUP-CIEUC", + "\u1128": "HANGUL CHOSEONG PIEUP-CHIEUCH", + "\u1129": "HANGUL CHOSEONG PIEUP-THIEUTH", + "\u112a": "HANGUL CHOSEONG PIEUP-PHIEUPH", + "\u112b": "HANGUL CHOSEONG KAPYEOUNPIEUP", + "\u112c": "HANGUL CHOSEONG KAPYEOUNSSANGPIEUP", + "\u112d": "HANGUL CHOSEONG SIOS-KIYEOK", + "\u112e": "HANGUL CHOSEONG SIOS-NIEUN", + "\u112f": "HANGUL CHOSEONG SIOS-TIKEUT", + "\u1130": "HANGUL CHOSEONG SIOS-RIEUL", + "\u1131": "HANGUL CHOSEONG SIOS-MIEUM", + "\u1132": "HANGUL CHOSEONG SIOS-PIEUP", + "\u1133": "HANGUL CHOSEONG SIOS-PIEUP-KIYEOK", + "\u1134": "HANGUL CHOSEONG SIOS-SSANGSIOS", + "\u1135": "HANGUL CHOSEONG SIOS-IEUNG", + "\u1136": "HANGUL CHOSEONG SIOS-CIEUC", + "\u1137": "HANGUL CHOSEONG SIOS-CHIEUCH", + "\u1138": "HANGUL CHOSEONG SIOS-KHIEUKH", + "\u1139": "HANGUL CHOSEONG SIOS-THIEUTH", + "\u113a": "HANGUL CHOSEONG SIOS-PHIEUPH", + "\u113b": "HANGUL CHOSEONG SIOS-HIEUH", + "\u113c": "HANGUL CHOSEONG CHITUEUMSIOS", + "\u113d": "HANGUL CHOSEONG CHITUEUMSSANGSIOS", + "\u113e": "HANGUL CHOSEONG CEONGCHIEUMSIOS", + "\u113f": "HANGUL CHOSEONG CEONGCHIEUMSSANGSIOS", + "\u1140": "HANGUL CHOSEONG PANSIOS", + "\u1141": "HANGUL CHOSEONG IEUNG-KIYEOK", + "\u1142": "HANGUL CHOSEONG IEUNG-TIKEUT", + "\u1143": "HANGUL CHOSEONG IEUNG-MIEUM", + "\u1144": "HANGUL CHOSEONG IEUNG-PIEUP", + "\u1145": "HANGUL CHOSEONG IEUNG-SIOS", + "\u1146": "HANGUL CHOSEONG IEUNG-PANSIOS", + "\u1147": "HANGUL CHOSEONG SSANGIEUNG", + "\u1148": "HANGUL CHOSEONG IEUNG-CIEUC", + "\u1149": "HANGUL CHOSEONG IEUNG-CHIEUCH", + "\u114a": "HANGUL CHOSEONG IEUNG-THIEUTH", + "\u114b": "HANGUL CHOSEONG IEUNG-PHIEUPH", + "\u114c": "HANGUL CHOSEONG YESIEUNG", + "\u114d": "HANGUL CHOSEONG CIEUC-IEUNG", + "\u114e": "HANGUL CHOSEONG CHITUEUMCIEUC", + "\u114f": "HANGUL CHOSEONG CHITUEUMSSANGCIEUC", + "\u1150": "HANGUL CHOSEONG CEONGCHIEUMCIEUC", + "\u1151": "HANGUL CHOSEONG CEONGCHIEUMSSANGCIEUC", + "\u1152": "HANGUL CHOSEONG CHIEUCH-KHIEUKH", + "\u1153": "HANGUL CHOSEONG CHIEUCH-HIEUH", + "\u1154": "HANGUL CHOSEONG CHITUEUMCHIEUCH", + "\u1155": "HANGUL CHOSEONG CEONGCHIEUMCHIEUCH", + "\u1156": "HANGUL CHOSEONG PHIEUPH-PIEUP", + "\u1157": "HANGUL CHOSEONG KAPYEOUNPHIEUPH", + "\u1158": "HANGUL CHOSEONG SSANGHIEUH", + "\u1159": "HANGUL CHOSEONG YEORINHIEUH", + "\u115a": "HANGUL CHOSEONG KIYEOK-TIKEUT", + "\u115b": "HANGUL CHOSEONG NIEUN-SIOS", + "\u115c": "HANGUL CHOSEONG NIEUN-CIEUC", + "\u115d": "HANGUL CHOSEONG NIEUN-HIEUH", + "\u115e": "HANGUL CHOSEONG TIKEUT-RIEUL", + "\u115f": "HANGUL CHOSEONG FILLER", + "\u1160": "HANGUL JUNGSEONG FILLER", + "\u1161": "HANGUL JUNGSEONG A", + "\u1162": "HANGUL JUNGSEONG AE", + "\u1163": "HANGUL JUNGSEONG YA", + "\u1164": "HANGUL JUNGSEONG YAE", + "\u1165": "HANGUL JUNGSEONG EO", + "\u1166": "HANGUL JUNGSEONG E", + "\u1167": "HANGUL JUNGSEONG YEO", + "\u1168": "HANGUL JUNGSEONG YE", + "\u1169": "HANGUL JUNGSEONG O", + "\u116a": "HANGUL JUNGSEONG WA", + "\u116b": "HANGUL JUNGSEONG WAE", + "\u116c": "HANGUL JUNGSEONG OE", + "\u116d": "HANGUL JUNGSEONG YO", + "\u116e": "HANGUL JUNGSEONG U", + "\u116f": "HANGUL JUNGSEONG WEO", + "\u1170": "HANGUL JUNGSEONG WE", + "\u1171": "HANGUL JUNGSEONG WI", 
+ "\u1172": "HANGUL JUNGSEONG YU", + "\u1173": "HANGUL JUNGSEONG EU", + "\u1174": "HANGUL JUNGSEONG YI", + "\u1175": "HANGUL JUNGSEONG I", + "\u1176": "HANGUL JUNGSEONG A-O", + "\u1177": "HANGUL JUNGSEONG A-U", + "\u1178": "HANGUL JUNGSEONG YA-O", + "\u1179": "HANGUL JUNGSEONG YA-YO", + "\u117a": "HANGUL JUNGSEONG EO-O", + "\u117b": "HANGUL JUNGSEONG EO-U", + "\u117c": "HANGUL JUNGSEONG EO-EU", + "\u117d": "HANGUL JUNGSEONG YEO-O", + "\u117e": "HANGUL JUNGSEONG YEO-U", + "\u117f": "HANGUL JUNGSEONG O-EO", + "\u1180": "HANGUL JUNGSEONG O-E", + "\u1181": "HANGUL JUNGSEONG O-YE", + "\u1182": "HANGUL JUNGSEONG O-O", + "\u1183": "HANGUL JUNGSEONG O-U", + "\u1184": "HANGUL JUNGSEONG YO-YA", + "\u1185": "HANGUL JUNGSEONG YO-YAE", + "\u1186": "HANGUL JUNGSEONG YO-YEO", + "\u1187": "HANGUL JUNGSEONG YO-O", + "\u1188": "HANGUL JUNGSEONG YO-I", + "\u1189": "HANGUL JUNGSEONG U-A", + "\u118a": "HANGUL JUNGSEONG U-AE", + "\u118b": "HANGUL JUNGSEONG U-EO-EU", + "\u118c": "HANGUL JUNGSEONG U-YE", + "\u118d": "HANGUL JUNGSEONG U-U", + "\u118e": "HANGUL JUNGSEONG YU-A", + "\u118f": "HANGUL JUNGSEONG YU-EO", + "\u1190": "HANGUL JUNGSEONG YU-E", + "\u1191": "HANGUL JUNGSEONG YU-YEO", + "\u1192": "HANGUL JUNGSEONG YU-YE", + "\u1193": "HANGUL JUNGSEONG YU-U", + "\u1194": "HANGUL JUNGSEONG YU-I", + "\u1195": "HANGUL JUNGSEONG EU-U", + "\u1196": "HANGUL JUNGSEONG EU-EU", + "\u1197": "HANGUL JUNGSEONG YI-U", + "\u1198": "HANGUL JUNGSEONG I-A", + "\u1199": "HANGUL JUNGSEONG I-YA", + "\u119a": "HANGUL JUNGSEONG I-O", + "\u119b": "HANGUL JUNGSEONG I-U", + "\u119c": "HANGUL JUNGSEONG I-EU", + "\u119d": "HANGUL JUNGSEONG I-ARAEA", + "\u119e": "HANGUL JUNGSEONG ARAEA", + "\u119f": "HANGUL JUNGSEONG ARAEA-EO", + "\u11a0": "HANGUL JUNGSEONG ARAEA-U", + "\u11a1": "HANGUL JUNGSEONG ARAEA-I", + "\u11a2": "HANGUL JUNGSEONG SSANGARAEA", + "\u11a3": "HANGUL JUNGSEONG A-EU", + "\u11a4": "HANGUL JUNGSEONG YA-U", + "\u11a5": "HANGUL JUNGSEONG YEO-YA", + "\u11a6": "HANGUL JUNGSEONG O-YA", + "\u11a7": "HANGUL JUNGSEONG O-YAE", + "\u11a8": "HANGUL JONGSEONG KIYEOK", + "\u11a9": "HANGUL JONGSEONG SSANGKIYEOK", + "\u11aa": "HANGUL JONGSEONG KIYEOK-SIOS", + "\u11ab": "HANGUL JONGSEONG NIEUN", + "\u11ac": "HANGUL JONGSEONG NIEUN-CIEUC", + "\u11ad": "HANGUL JONGSEONG NIEUN-HIEUH", + "\u11ae": "HANGUL JONGSEONG TIKEUT", + "\u11af": "HANGUL JONGSEONG RIEUL", + "\u11b0": "HANGUL JONGSEONG RIEUL-KIYEOK", + "\u11b1": "HANGUL JONGSEONG RIEUL-MIEUM", + "\u11b2": "HANGUL JONGSEONG RIEUL-PIEUP", + "\u11b3": "HANGUL JONGSEONG RIEUL-SIOS", + "\u11b4": "HANGUL JONGSEONG RIEUL-THIEUTH", + "\u11b5": "HANGUL JONGSEONG RIEUL-PHIEUPH", + "\u11b6": "HANGUL JONGSEONG RIEUL-HIEUH", + "\u11b7": "HANGUL JONGSEONG MIEUM", + "\u11b8": "HANGUL JONGSEONG PIEUP", + "\u11b9": "HANGUL JONGSEONG PIEUP-SIOS", + "\u11ba": "HANGUL JONGSEONG SIOS", + "\u11bb": "HANGUL JONGSEONG SSANGSIOS", + "\u11bc": "HANGUL JONGSEONG IEUNG", + "\u11bd": "HANGUL JONGSEONG CIEUC", + "\u11be": "HANGUL JONGSEONG CHIEUCH", + "\u11bf": "HANGUL JONGSEONG KHIEUKH", + "\u11c0": "HANGUL JONGSEONG THIEUTH", + "\u11c1": "HANGUL JONGSEONG PHIEUPH", + "\u11c2": "HANGUL JONGSEONG HIEUH", + "\u11c3": "HANGUL JONGSEONG KIYEOK-RIEUL", + "\u11c4": "HANGUL JONGSEONG KIYEOK-SIOS-KIYEOK", + "\u11c5": "HANGUL JONGSEONG NIEUN-KIYEOK", + "\u11c6": "HANGUL JONGSEONG NIEUN-TIKEUT", + "\u11c7": "HANGUL JONGSEONG NIEUN-SIOS", + "\u11c8": "HANGUL JONGSEONG NIEUN-PANSIOS", + "\u11c9": "HANGUL JONGSEONG NIEUN-THIEUTH", + "\u11ca": "HANGUL JONGSEONG TIKEUT-KIYEOK", + "\u11cb": "HANGUL JONGSEONG TIKEUT-RIEUL", + 
"\u11cc": "HANGUL JONGSEONG RIEUL-KIYEOK-SIOS", + "\u11cd": "HANGUL JONGSEONG RIEUL-NIEUN", + "\u11ce": "HANGUL JONGSEONG RIEUL-TIKEUT", + "\u11cf": "HANGUL JONGSEONG RIEUL-TIKEUT-HIEUH", + "\u11d0": "HANGUL JONGSEONG SSANGRIEUL", + "\u11d1": "HANGUL JONGSEONG RIEUL-MIEUM-KIYEOK", + "\u11d2": "HANGUL JONGSEONG RIEUL-MIEUM-SIOS", + "\u11d3": "HANGUL JONGSEONG RIEUL-PIEUP-SIOS", + "\u11d4": "HANGUL JONGSEONG RIEUL-PIEUP-HIEUH", + "\u11d5": "HANGUL JONGSEONG RIEUL-KAPYEOUNPIEUP", + "\u11d6": "HANGUL JONGSEONG RIEUL-SSANGSIOS", + "\u11d7": "HANGUL JONGSEONG RIEUL-PANSIOS", + "\u11d8": "HANGUL JONGSEONG RIEUL-KHIEUKH", + "\u11d9": "HANGUL JONGSEONG RIEUL-YEORINHIEUH", + "\u11da": "HANGUL JONGSEONG MIEUM-KIYEOK", + "\u11db": "HANGUL JONGSEONG MIEUM-RIEUL", + "\u11dc": "HANGUL JONGSEONG MIEUM-PIEUP", + "\u11dd": "HANGUL JONGSEONG MIEUM-SIOS", + "\u11de": "HANGUL JONGSEONG MIEUM-SSANGSIOS", + "\u11df": "HANGUL JONGSEONG MIEUM-PANSIOS", + "\u11e0": "HANGUL JONGSEONG MIEUM-CHIEUCH", + "\u11e1": "HANGUL JONGSEONG MIEUM-HIEUH", + "\u11e2": "HANGUL JONGSEONG KAPYEOUNMIEUM", + "\u11e3": "HANGUL JONGSEONG PIEUP-RIEUL", + "\u11e4": "HANGUL JONGSEONG PIEUP-PHIEUPH", + "\u11e5": "HANGUL JONGSEONG PIEUP-HIEUH", + "\u11e6": "HANGUL JONGSEONG KAPYEOUNPIEUP", + "\u11e7": "HANGUL JONGSEONG SIOS-KIYEOK", + "\u11e8": "HANGUL JONGSEONG SIOS-TIKEUT", + "\u11e9": "HANGUL JONGSEONG SIOS-RIEUL", + "\u11ea": "HANGUL JONGSEONG SIOS-PIEUP", + "\u11eb": "HANGUL JONGSEONG PANSIOS", + "\u11ec": "HANGUL JONGSEONG IEUNG-KIYEOK", + "\u11ed": "HANGUL JONGSEONG IEUNG-SSANGKIYEOK", + "\u11ee": "HANGUL JONGSEONG SSANGIEUNG", + "\u11ef": "HANGUL JONGSEONG IEUNG-KHIEUKH", + "\u11f0": "HANGUL JONGSEONG YESIEUNG", + "\u11f1": "HANGUL JONGSEONG YESIEUNG-SIOS", + "\u11f2": "HANGUL JONGSEONG YESIEUNG-PANSIOS", + "\u11f3": "HANGUL JONGSEONG PHIEUPH-PIEUP", + "\u11f4": "HANGUL JONGSEONG KAPYEOUNPHIEUPH", + "\u11f5": "HANGUL JONGSEONG HIEUH-NIEUN", + "\u11f6": "HANGUL JONGSEONG HIEUH-RIEUL", + "\u11f7": "HANGUL JONGSEONG HIEUH-MIEUM", + "\u11f8": "HANGUL JONGSEONG HIEUH-PIEUP", + "\u11f9": "HANGUL JONGSEONG YEORINHIEUH", + "\u11fa": "HANGUL JONGSEONG KIYEOK-NIEUN", + "\u11fb": "HANGUL JONGSEONG KIYEOK-PIEUP", + "\u11fc": "HANGUL JONGSEONG KIYEOK-CHIEUCH", + "\u11fd": "HANGUL JONGSEONG KIYEOK-KHIEUKH", + "\u11fe": "HANGUL JONGSEONG KIYEOK-HIEUH", + "\u11ff": "HANGUL JONGSEONG SSANGNIEUN" +} \ No newline at end of file diff --git a/voice_bridge/jamo/data/U+31xx.json b/voice_bridge/jamo/data/U+31xx.json new file mode 100644 index 0000000000000000000000000000000000000000..ba81d0769c1d2e30874bb4eeac73fb28d25c3b26 --- /dev/null +++ b/voice_bridge/jamo/data/U+31xx.json @@ -0,0 +1,96 @@ +{ + "\u3131": "HANGUL LETTER KIYEOK", + "\u3132": "HANGUL LETTER SSANGKIYEOK", + "\u3133": "HANGUL LETTER KIYEOK-SIOS", + "\u3134": "HANGUL LETTER NIEUN", + "\u3135": "HANGUL LETTER NIEUN-CIEUC", + "\u3136": "HANGUL LETTER NIEUN-HIEUH", + "\u3137": "HANGUL LETTER TIKEUT", + "\u3138": "HANGUL LETTER SSANGTIKEUT", + "\u3139": "HANGUL LETTER RIEUL", + "\u313a": "HANGUL LETTER RIEUL-KIYEOK", + "\u313b": "HANGUL LETTER RIEUL-MIEUM", + "\u313c": "HANGUL LETTER RIEUL-PIEUP", + "\u313d": "HANGUL LETTER RIEUL-SIOS", + "\u313e": "HANGUL LETTER RIEUL-THIEUTH", + "\u313f": "HANGUL LETTER RIEUL-PHIEUPH", + "\u3140": "HANGUL LETTER RIEUL-HIEUH", + "\u3141": "HANGUL LETTER MIEUM", + "\u3142": "HANGUL LETTER PIEUP", + "\u3143": "HANGUL LETTER SSANGPIEUP", + "\u3144": "HANGUL LETTER PIEUP-SIOS", + "\u3145": "HANGUL LETTER SIOS", + "\u3146": "HANGUL LETTER SSANGSIOS", + "\u3147": 
"HANGUL LETTER IEUNG", + "\u3148": "HANGUL LETTER CIEUC", + "\u3149": "HANGUL LETTER SSANGCIEUC", + "\u314a": "HANGUL LETTER CHIEUCH", + "\u314b": "HANGUL LETTER KHIEUKH", + "\u314c": "HANGUL LETTER THIEUTH", + "\u314d": "HANGUL LETTER PHIEUPH", + "\u314e": "HANGUL LETTER HIEUH", + "\u314f": "HANGUL LETTER A", + "\u3150": "HANGUL LETTER AE", + "\u3151": "HANGUL LETTER YA", + "\u3152": "HANGUL LETTER YAE", + "\u3153": "HANGUL LETTER EO", + "\u3154": "HANGUL LETTER E", + "\u3155": "HANGUL LETTER YEO", + "\u3156": "HANGUL LETTER YE", + "\u3157": "HANGUL LETTER O", + "\u3158": "HANGUL LETTER WA", + "\u3159": "HANGUL LETTER WAE", + "\u315a": "HANGUL LETTER OE", + "\u315b": "HANGUL LETTER YO", + "\u315c": "HANGUL LETTER U", + "\u315d": "HANGUL LETTER WEO", + "\u315e": "HANGUL LETTER WE", + "\u315f": "HANGUL LETTER WI", + "\u3160": "HANGUL LETTER YU", + "\u3161": "HANGUL LETTER EU", + "\u3162": "HANGUL LETTER YI", + "\u3163": "HANGUL LETTER I", + "\u3164": "HANGUL FILLER", + "\u3165": "HANGUL LETTER SSANGNIEUN", + "\u3166": "HANGUL LETTER NIEUN-TIKEUT", + "\u3167": "HANGUL LETTER NIEUN-SIOS", + "\u3168": "HANGUL LETTER NIEUN-PANSIOS", + "\u3169": "HANGUL LETTER RIEUL-KIYEOK-SIOS", + "\u316a": "HANGUL LETTER RIEUL-TIKEUT", + "\u316b": "HANGUL LETTER RIEUL-PIEUP-SIOS", + "\u316c": "HANGUL LETTER RIEUL-PANSIOS", + "\u316d": "HANGUL LETTER RIEUL-YEORINHIEUH", + "\u316e": "HANGUL LETTER MIEUM-PIEUP", + "\u316f": "HANGUL LETTER MIEUM-SIOS", + "\u3170": "HANGUL LETTER MIEUM-PANSIOS", + "\u3171": "HANGUL LETTER KAPYEOUNMIEUM", + "\u3172": "HANGUL LETTER PIEUP-KIYEOK", + "\u3173": "HANGUL LETTER PIEUP-TIKEUT", + "\u3174": "HANGUL LETTER PIEUP-SIOS-KIYEOK", + "\u3175": "HANGUL LETTER PIEUP-SIOS-TIKEUT", + "\u3176": "HANGUL LETTER PIEUP-CIEUC", + "\u3177": "HANGUL LETTER PIEUP-THIEUTH", + "\u3178": "HANGUL LETTER KAPYEOUNPIEUP", + "\u3179": "HANGUL LETTER KAPYEOUNSSANGPIEUP", + "\u317a": "HANGUL LETTER SIOS-KIYEOK", + "\u317b": "HANGUL LETTER SIOS-NIEUN", + "\u317c": "HANGUL LETTER SIOS-TIKEUT", + "\u317d": "HANGUL LETTER SIOS-PIEUP", + "\u317e": "HANGUL LETTER SIOS-CIEUC", + "\u317f": "HANGUL LETTER PANSIOS", + "\u3180": "HANGUL LETTER SSANGIEUNG", + "\u3181": "HANGUL LETTER YESIEUNG", + "\u3182": "HANGUL LETTER YESIEUNG-SIOS", + "\u3183": "HANGUL LETTER YESIEUNG-PANSIOS", + "\u3184": "HANGUL LETTER KAPYEOUNPHIEUPH", + "\u3185": "HANGUL LETTER SSANGHIEUH", + "\u3186": "HANGUL LETTER YEORINHIEUH", + "\u3187": "HANGUL LETTER YO-YA", + "\u3188": "HANGUL LETTER YO-YAE", + "\u3189": "HANGUL LETTER YO-I", + "\u318a": "HANGUL LETTER YU-YEO", + "\u318b": "HANGUL LETTER YU-YE", + "\u318c": "HANGUL LETTER YU-I", + "\u318d": "HANGUL LETTER ARAEA", + "\u318e": "HANGUL LETTER ARAEAE" +} \ No newline at end of file diff --git a/voice_bridge/libcrypto-1_1.dll b/voice_bridge/libcrypto-1_1.dll new file mode 100644 index 0000000000000000000000000000000000000000..45127b2f5cb97d9ac3b8d5cfa3fce9f90e958877 --- /dev/null +++ b/voice_bridge/libcrypto-1_1.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:296426e7ce11bc3d1cfa9f2aeb42f60c974da4af3b3efbeb0ba40e92e5299fdf +size 3406016 diff --git a/voice_bridge/libffi-7.dll b/voice_bridge/libffi-7.dll new file mode 100644 index 0000000000000000000000000000000000000000..8fd2e5e07029035e4c437a14517ba7ff5eaed233 Binary files /dev/null and b/voice_bridge/libffi-7.dll differ diff --git a/voice_bridge/libiomp5md.dll b/voice_bridge/libiomp5md.dll new file mode 100644 index 0000000000000000000000000000000000000000..86a78de8a008e4236cee4182e20bce0eb51c83af --- 
/dev/null +++ b/voice_bridge/libiomp5md.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51043532cbb152b15ab3d4b20b85aaa28e18ebfe2b2565ff91950a1b622163e5 +size 1975912 diff --git a/voice_bridge/libopenblas.3hbpcjb5bpqgkwvzavebxnnj2q2g3tup.gfortran-win_amd64.dll b/voice_bridge/libopenblas.3hbpcjb5bpqgkwvzavebxnnj2q2g3tup.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..0fd7d19e00f35455e0a169527fd280843aa9fb2a --- /dev/null +++ b/voice_bridge/libopenblas.3hbpcjb5bpqgkwvzavebxnnj2q2g3tup.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:621a0c6fb360e1028e6beb92eaac748142dd8615ce767e70f82417c7f8cc2826 +size 32862022 diff --git a/voice_bridge/librosa/util/example_data/index.json b/voice_bridge/librosa/util/example_data/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8fff2772d00f7c31595a684414614905dfbe8fd9 --- /dev/null +++ b/voice_bridge/librosa/util/example_data/index.json @@ -0,0 +1 @@ +{"vibeace": {"path": "Kevin_MacLeod_-_Vibe_Ace", "desc": "Kevin MacLeod - Vibe Ace"}, "choice": {"path": "admiralbob77_-_Choice_-_Drum-bass", "desc": "Admiral Bob - Choice (drum+bass)"}, "nutcracker": {"path": "Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy", "desc": "Tchaikovsky - Dance of the Sugar Plum Fairy"}, "brahms": {"path": "Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra)", "desc": "Brahms - Hungarian Dance #5"}, "trumpet": {"path": "sorohanro_-_solo-trumpet-06", "desc": "Mihai Sorohan - Trumpet loop"}, "fishin": {"path": "Karissa_Hobbs_-_Let's_Go_Fishin'", "desc": "Karissa Hobbs - Let's Go Fishin'"}, "sweetwaltz": {"path": "147793__setuniman__sweet-waltz-0i-22mi", "desc": "Setuniman - Sweet Waltz"}, "humpback": {"path": "glacier-bay-humpback", "desc": "Glacier Bay 60-second clip humpback whale song November 2020"}, "libri1": {"path": "5703-47212-0000", "desc": "Ashiel Mystery - A Detective Story, chapter 2, narrated by Garth Comira"}, "libri2": {"path": "3436-172162-0000", "desc": "The Age of Chivalry / Chapter 18: Perceval / Read by Anders Lankford"}, "pistachio": {"path": "442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime", "desc": "The Piano Lady - Pistachio Ice Cream Ragtime"}, "robin": {"path": "456440__inspectorj__bird-whistling-robin-single-13", "desc": "Bird Whistling, Robin, Single, 13.wav / InspectorJ"}, "libri3": {"path": "198-209-0000", "desc": "Sense and Sensibility / Chapter 18 / Jane Austen / Read by Heather Barnett"}} diff --git a/voice_bridge/librosa/util/example_data/registry.txt b/voice_bridge/librosa/util/example_data/registry.txt new file mode 100644 index 0000000000000000000000000000000000000000..c92419f8f7bee9d46579ca9a161ecd022378ce32 --- /dev/null +++ b/voice_bridge/librosa/util/example_data/registry.txt @@ -0,0 +1,39 @@ +147793__setuniman__sweet-waltz-0i-22mi.hq.ogg 248f93b7c729bc0bd6c4cbc107e6b67ec4d60e40c111e313d4d69f77748da26a +147793__setuniman__sweet-waltz-0i-22mi.ogg 4baff8ebf1771c33618b58aa12ac3fbac1e0462894ae74247f2fb8e649d1c63b +147793__setuniman__sweet-waltz-0i-22mi.txt 6e6e9265ae9a9a36033ce305a893abdc15f43bb5a41ec29efba368ad91905f43 +198-209-0000.hq.ogg 6a8d2c16e56dcb27b7f5fe5aa99bfd26b4722a6dabca1968eff951b456936514 +198-209-0000.ogg 0473ba9613befa1026db5e254546f105aeffba8366b5468fc5e8d474d844ca1d +198-209-0000.txt c5bf58b2a4aa0d6bf1e6051690312f3238960d455776a6d37e469d80db1da167 +3436-172162-0000.hq.ogg 8e034529f2f171c7e5e772df6eccc2d169183c39605b45b351c2bfabddf14162 
+3436-172162-0000.ogg 780e482b52f2c8a500356babf60178ce005449cc171a8537eb3ef2961eb3e855 +3436-172162-0000.txt 8cf4b20e148c4366205bc607f447c169256e945d62c64ead43d0a4dfa7d3ab98 +442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime.hq.ogg 1cc83b775e640a8dd409da9e29f5b5e0124bc5ec9bb4fda975919d31d1d893b8 +442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime.ogg 9617c9be55c128177b13c20fbc52178ed482e3545094517efb30a7db2798991e +442789__lena-orsa__happy-music-pistachio-ice-cream-ragtime.txt 99f9c44368918572ac154ae4a2fec4020d1d4bb418ae7bf241864916d68f9d04 +456440__inspectorj__bird-whistling-robin-single-13.hq.ogg a3b3ecf749befde43bdf35f839fdcb8d399a4deb5666de6f399c35ce12936baa +456440__inspectorj__bird-whistling-robin-single-13.ogg 57c2b861d028e25d7c086b48853f20eda1ae9a7a33e1125a1f3bec91539b4208 +456440__inspectorj__bird-whistling-robin-single-13.txt 34d25a379408d9144fbc4ce8cceb72be5196aeb6140cec6c90e6bdcb0567cb6e +5703-47212-0000.hq.ogg f09254a0daf4b14b292868d46dc2e3c8e158d19fafff739ad4c3931e2ce7b1b0 +5703-47212-0000.ogg a284612b46af0535f7e1873758c4387bb8369f6dbbe192ffdec1f171108f98dd +5703-47212-0000.txt 1c5bd5eb792ff0d2fbe895e3a3dd0acf3beb48ee364e64e12cde3a646304a9f4 +Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra).hq.ogg 8e93ff0182a93168b15346c497b164cb49d2a97bf1e987a1149ea579e914532e +Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra).ogg 919b48aa4cc66a0357d2cd5728664c5ab8f15c4b3469460df4b59470d35d3e49 +Hungarian_Dance_number_5_-_Allegro_in_F_sharp_minor_(string_orchestra).txt 0c857ef8a7365e3df9d5c25c2b08ec3169c0585e3ec08c9c6423d2a04d72fe5f +Karissa_Hobbs_-_Let's_Go_Fishin'.hq.ogg 85901bde0bc5f2cad28acb83487ccf99bd6908d7359f1a449fd799d8bebb3319 +Karissa_Hobbs_-_Let's_Go_Fishin'.ogg 27b3667c396c1831511aa3c415fcf582b6e8be560cafb844c5b67b76b72c1cb3 +Karissa_Hobbs_-_Let's_Go_Fishin'.txt 199bf3408b98916cd9d28a22b2b43c1935ab70072b46fa05c0b9f40e7882802e +Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy.hq.ogg f062221a56a227cdb7c067cf2e6ac0e250a50012f7693ca0c8e31f05f83e49b1 +Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy.ogg b5c1a3e26310e6618d3c124f458654cd235650fcb9db7d711302644566600484 +Kevin_MacLeod_-_P_I_Tchaikovsky_Dance_of_the_Sugar_Plum_Fairy.txt 059acb340170385d2bfa4c7ab7c2a06b1d8f8af3e0f11cb4f46ff4049e950915 +Kevin_MacLeod_-_Vibe_Ace.hq.ogg 73d6443ef90a7c022f164e5aa90e56c2291585930b39b1656d0765abbc1f1779 +Kevin_MacLeod_-_Vibe_Ace.ogg 6c23aed3dd5aa57f2b1652ecab68d15d9b82ad257f54e639eb2880ca09bc118a +Kevin_MacLeod_-_Vibe_Ace.txt 6c71e0525cb0452ea74c6d6f5fde6fa1e221223db7b2aa35b9914b98367ee7b9 +admiralbob77_-_Choice_-_Drum-bass.hq.ogg 57b4d95473b92a8441c1d3ab20f836a3e0cbf501bf3afe1ce7a5d0d98d7d4576 +admiralbob77_-_Choice_-_Drum-bass.ogg ac644f9645e7c15174e4a4f8561e4d1448d7f6e59ff6b0556b310ebbced879bc +admiralbob77_-_Choice_-_Drum-bass.txt 76525d6a4fd135053c5ff7463ae43a8ffcf064b575e8ef2fc3eb37786a45342b +glacier-bay-humpback.hq.ogg c3250ff526898aa5528aeb6b81a49aff58f63400ec2ddb8bcb86caf611d33144 +glacier-bay-humpback.ogg 64395c617b28b5e31a03032b99b4617bc96b926f3bc5c141862cd13fb79ccd8e +glacier-bay-humpback.txt bec2c73fe2368604f161ffdd44b7afea5f597473e2d51c03906bb05c22113910 +sorohanro_-_solo-trumpet-06.hq.ogg beb954ae2c9c16919b5ca6973d6d5196cdcb196b46a3c2a201dd8861e7e324de +sorohanro_-_solo-trumpet-06.ogg 8374466fd3951d24509da6e799b132a0db0bdeda69d99c69d989a6888d3d727d +sorohanro_-_solo-trumpet-06.txt 750a191b9d0cc94b2f19cfbf11acc13783f75bedcccfcf562f5b076efd068aba diff --git a/voice_bridge/libssl-1_1.dll 
b/voice_bridge/libssl-1_1.dll new file mode 100644 index 0000000000000000000000000000000000000000..aa5005e0fcba314299f71d97c142c5f434d3d3df Binary files /dev/null and b/voice_bridge/libssl-1_1.dll differ diff --git a/voice_bridge/licenses.json b/voice_bridge/licenses.json new file mode 100644 index 0000000000000000000000000000000000000000..4ac28cae5f31e350733bccc58afb349a93ee70ee --- /dev/null +++ b/voice_bridge/licenses.json @@ -0,0 +1 @@ +[{"name": "Open JTalk", "version": "1.11", "license": "Modified BSD license", "text": "/* ----------------------------------------------------------------- */\n/* The Japanese TTS System \"Open JTalk\" */\n/* developed by HTS Working Group */\n/* http://open-jtalk.sourceforge.net/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2008-2016 Nagoya Institute of Technology */\n/* Department of Computer Science */\n/* */\n/* All rights reserved. */\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the HTS working group nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. 
*/\n/* ----------------------------------------------------------------- */\n"}, {"name": "MeCab", "version": null, "license": "Modified BSD license", "text": "Copyright (c) 2001-2008, Taku Kudo\nCopyright (c) 2004-2008, Nippon Telegraph and Telephone Corporation\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are\npermitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above\n copyright notice, this list of conditions and the\n following disclaimer.\n\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the\n following disclaimer in the documentation and/or other\n materials provided with the distribution.\n\n * Neither the name of the Nippon Telegraph and Telegraph Corporation\n nor the names of its contributors may be used to endorse or\n promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED\nWARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\nTORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\nADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/* ----------------------------------------------------------------- */\n/* The Japanese TTS System \"Open JTalk\" */\n/* developed by HTS Working Group */\n/* http://open-jtalk.sourceforge.net/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2008-2016 Nagoya Institute of Technology */\n/* Department of Computer Science */\n/* */\n/* All rights reserved. */\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the HTS working group nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. */\n/* ----------------------------------------------------------------- */\n"}, {"name": "NAIST Japanese Dictionary", "version": null, "license": "Modified BSD license", "text": "Copyright (c) 2009, Nara Institute of Science and Technology, Japan.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\nRedistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\nRedistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\nNeither the name of the Nara Institute of Science and Technology\n(NAIST) nor the names of its contributors may be used to endorse or\npromote products derived from this software without specific prior\nwritten permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/* ----------------------------------------------------------------- */\n/* The Japanese TTS System \"Open JTalk\" */\n/* developed by HTS Working Group */\n/* http://open-jtalk.sourceforge.net/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2008-2016 Nagoya Institute of Technology */\n/* Department of Computer Science */\n/* */\n/* All rights reserved. */\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the HTS working group nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. 
*/\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. */\n/* ----------------------------------------------------------------- */\n"}, {"name": "HTS Voice \"Mei\"", "version": null, "license": "Creative Commons Attribution 3.0 license", "text": "# ----------------------------------------------------------------- #\n# HTS Voice \"Mei\" #\n# released by MMDAgent Project Team #\n# http://www.mmdagent.jp/ #\n# ----------------------------------------------------------------- #\n# #\n# Copyright (c) 2009-2013 Nagoya Institute of Technology #\n# Department of Computer Science #\n# #\n# Some rights reserved. #\n# #\n# This work is licensed under the Creative Commons Attribution 3.0 #\n# license. #\n# #\n# You are free: #\n# * to Share - to copy, distribute and transmit the work #\n# * to Remix - to adapt the work #\n# Under the following conditions: #\n# * Attribution - You must attribute the work in the manner #\n# specified by the author or licensor (but not in any way that #\n# suggests that they endorse you or your use of the work). #\n# With the understanding that: #\n# * Waiver - Any of the above conditions can be waived if you get #\n# permission from the copyright holder. #\n# * Public Domain - Where the work or any of its elements is in #\n# the public domain under applicable law, that status is in no #\n# way affected by the license. #\n# * Other Rights - In no way are any of the following rights #\n# affected by the license: #\n# - Your fair dealing or fair use rights, or other applicable #\n# copyright exceptions and limitations; #\n# - The author's moral rights; #\n# - Rights other persons may have either in the work itself or #\n# in how the work is used, such as publicity or privacy #\n# rights. #\n# * Notice - For any reuse or distribution, you must make clear to #\n# others the license terms of this work. The best way to do this #\n# is with a link to this web page. #\n# #\n# See http://creativecommons.org/ for details. 
#\n# ----------------------------------------------------------------- #\n"}, {"name": "Bridge Plugin", "version": null, "license": "Bridge Plugin License", "text": "Bridge Plugin\n\nCopyright (c) 2021 Hiroshiba\nCopyright (c) 2021 VOICEVOX\nCopyright (c) 2022 VOICEVOX-Bridge\n\n\n\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u30ea\u30dd\u30b8\u30c8\u30ea\u306bPull Request\u3092\u9001\u308b\u76ee\u7684\u306b\u9650\u308a\u3001\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u30bd\u30fc\u30b9\u30b3\u30fc\u30c9\u306e\u4f7f\u7528\u3001\u8907\u88fd\u3001\u914d\u5e03\u7b49\u3092\u884c\u3046\u3053\u3068\u3092\u8a31\u53ef\u3057\u307e\u3059\u3002\n\n\n\u5546\u7528\u30fb\u975e\u5546\u7528\u3092\u554f\u308f\u305a\u3001\u30d0\u30a4\u30ca\u30ea\u5f62\u5f0f\u306e\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u5229\u7528\u53ca\u3073\u518d\u914d\u5e03\u3092\u8a31\u53ef\u3057\u307e\u3059\u3002\n\u305f\u3060\u3057\u3001\u4ee5\u4e0b\u3092\u6761\u4ef6\u3068\u3057\u307e\u3059\u3002\n\n- \u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306b\u3088\u3063\u3066\u8aad\u307f\u8fbc\u307e\u308c\u308b\u97f3\u58f0\u5408\u6210\u30e2\u30c7\u30eb\u306f\u3001\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u3092\u901a\u3058\u305f\u97f3\u58f0\u5408\u6210\u5316\u3092\u8a31\u8afe\u3057\u3066\u3044\u308b\u63d0\u4f9b\u8005\u306e\u97f3\u58f0\u3092\u5143\u306b\u4f5c\u3089\u308c\u3066\u3044\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\n\n- \u518d\u914d\u5e03\u3092\u884c\u3046\u5834\u5408\u3001\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u30d5\u30a1\u30a4\u30eb\u306b\u95a2\u3057\u3066\u306f\u3001\u518d\u914d\u5e03\u5f8c\u3082\u672c\u30e9\u30a4\u30bb\u30f3\u30b9\u3092\u9069\u7528\u3059\u308b\u5fc5\u8981\u304c\u3042\u308a\u307e\u3059\u3002\n\n\n\u672c\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306f\u300c\u73fe\u72b6\u306e\u307e\u307e\u3067\u300d\u3067\u63d0\u4f9b\u3055\u308c\u3001\u660e\u793a\u7684\u3001\u6697\u9ed9\u7684\u304b\u3069\u3046\u304b\u306b\u62d8\u3089\u305a\u3042\u3089\u3086\u308b\u4fdd\u8a3c\u306f\u306a\u3044\u3082\u306e\u3068\u3057\u307e\u3059\u3002\u3053\u3053\u3067\u8a00\u3046\u4fdd\u8a3c\u306f\u3001\u5e02\u8ca9\u6027\u3001\u7279\u5b9a\u7528\u9014\u3078\u306e\u9069\u5408\u6027\u3001\u6a29\u5229\u306e\u4fb5\u5bb3\u304c\u306a\u3044\u3053\u3068\u7b49\u3092\u542b\u307f\u307e\u3059\u304c\u3001\u3053\u308c\u3089\u306b\u9650\u5b9a\u3055\u308c\u307e\u305b\u3093\u3002\n\u88fd\u4f5c\u8005\u306f\u3001\u5951\u7d04\u884c\u70ba\u3001\u4e0d\u6cd5\u884c\u70ba\u3001\u307e\u305f\u306f\u305d\u308c\u4ee5\u5916\u3067\u3042\u308d\u3046\u3068\u3001\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306b\u8d77\u56e0\u307e\u305f\u306f\u95a2\u9023\u3057\u3001\u3042\u308b\u3044\u306f\u30bd\u30d5\u30c8\u30a6\u30a7\u30a2\u306e\u4f7f\u7528\u307e\u305f\u306f\u305d\u306e\u4ed6\u306e\u6271\u3044\u306b\u3088\u3063\u3066\u751f\u3058\u308b\u4e00\u5207\u306e\u8acb\u6c42\u3001\u640d\u5bb3\u3001\u305d\u306e\u4ed6\u306e\u7fa9\u52d9\u306b\u3064\u3044\u3066\u4f55\u3089\u306e\u8cac\u4efb\u3082\u8ca0\u308f\u306a\u3044\u3082\u306e\u3068\u3057\u307e\u3059\u3002"}, {"name": "world", "version": null, "license": "Modified BSD license", "text": "/* ----------------------------------------------------------------- */\n/* WORLD: High-quality speech analysis, */\n/* manipulation and synthesis system */\n/* developed by M. Morise */\n/* http://www.kisc.meiji.ac.jp/~mmorise/world/english/ */\n/* ----------------------------------------------------------------- */\n/* */\n/* Copyright (c) 2010 M. Morise */\n/* */\n/* All rights reserved. 
*/\n/* */\n/* Redistribution and use in source and binary forms, with or */\n/* without modification, are permitted provided that the following */\n/* conditions are met: */\n/* */\n/* - Redistributions of source code must retain the above copyright */\n/* notice, this list of conditions and the following disclaimer. */\n/* - Redistributions in binary form must reproduce the above */\n/* copyright notice, this list of conditions and the following */\n/* disclaimer in the documentation and/or other materials provided */\n/* with the distribution. */\n/* - Neither the name of the M. Morise nor the names of its */\n/* contributors may be used to endorse or promote products derived */\n/* from this software without specific prior written permission. */\n/* */\n/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */\n/* CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, */\n/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */\n/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */\n/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */\n/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */\n/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */\n/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */\n/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */\n/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */\n/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */\n/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */\n/* POSSIBILITY OF SUCH DAMAGE. */\n/* ----------------------------------------------------------------- */\n"}, {"name": "PyTorch", "version": "1.9.0", "license": "BSD-style license", "text": "From PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. 
If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\n and IDIAP Research Institute nor the names of its contributors may be\n used to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "Python", "version": "3.8.10", "license": "Python Software Foundation License", "text": "A. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC. Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team. In October of the same\nyear, the PythonLabs team moved to Digital Creations, which became\nZope Corporation. In 2001, the Python Software Foundation (PSF, see\nhttps://www.python.org/psf/) was formed, a non-profit organization\ncreated specifically to own Python-related Intellectual Property.\nZope Corporation was a sponsoring member of the PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition). Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n Release Derived Year Owner GPL-\n from compatible? 
(1)\n\n 0.9.0 thru 1.2 1991-1995 CWI yes\n 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes\n 1.6 1.5.2 2000 CNRI no\n 2.0 1.6 2000 BeOpen.com no\n 1.6.1 1.6 2001 CNRI yes (2)\n 2.1 2.0+1.6.1 2001 PSF no\n 2.0.1 2.0+1.6.1 2001 PSF yes\n 2.1.1 2.1+2.0.1 2001 PSF yes\n 2.1.2 2.1.1 2002 PSF yes\n 2.1.3 2.1.2 2002 PSF yes\n 2.2 and above 2.1.1 2001-now PSF yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n the GPL. All Python licenses, unlike the GPL, let you distribute\n a modified version without making your changes open source. The\n GPL-compatible licenses make it possible to combine Python with\n other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n because its license has a choice of law clause. According to\n CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPython software and documentation are licensed under the\nPython Software Foundation License Version 2.\n\nStarting with Python 3.8.6, examples, recipes, and other code in\nthe documentation are dual licensed under the PSF License Version 2\nand the Zero-Clause BSD license.\n\nSome software incorporated into Python is under different licenses.\nThe licenses are listed with code falling under that license.\n\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;\nAll Rights Reserved\" are retained in Python alone or in any derivative version\nprepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee. This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions. Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee. This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party. As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. 
This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee. Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement. This Agreement together with\nPython 1.6.1 may be located on the Internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013. This\nAgreement may also be obtained from a proxy server on the Internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement. Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee. 
This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands. All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION\n----------------------------------------------------------------------\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "ConfigArgParse", "version": "1.5.3", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2015 bw2\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"}, {"name": "Cython", "version": "0.29.24", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n"}, {"name": "Jinja2", "version": "3.1.2", "license": "BSD License", "text": "Copyright 2007 Pallets\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "MarkupSafe", "version": "2.1.1", "license": "BSD License", "text": "Copyright 2010 Pallets\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "PyYAML", "version": "6.0", "license": "MIT License", "text": "Copyright (c) 2017-2021 Ingy d\u00f6t Net\nCopyright (c) 2006-2016 Kirill Simonov\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "SoundFile", "version": "0.10.3.post1", "license": "BSD License", "text": "Copyright (c) 2013, Bastian Bechtold\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n * Neither the name of PySoundFile nor the names\n of its contributors may be used to endorse or promote products\n derived from this software without specific prior written\n permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "aiofiles", "version": "0.7.0", "license": "Other/Proprietary License", "text": "Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright {yyyy} {name of copyright owner}\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n"}, {"name": "anyio", "version": "3.6.2", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2018 Alex Gr\u00f6nholm\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "appdirs", "version": "1.4.4", "license": "MIT License", "text": "# This is the MIT license\n\nCopyright (c) 2010 ActiveState Software Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n"}, {"name": "asgiref", "version": "3.6.0", "license": "BSD License", "text": "Copyright (c) Django Software Foundation and individual contributors.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n 3. Neither the name of Django nor the names of its contributors may be used\n to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "audioread", "version": "3.0.0", "license": "MIT", "text": "Copyright (c) 2011-2018 Adrian Sampson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\nOR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "certifi", "version": "2022.12.7", "license": "Mozilla Public License 2.0 (MPL 2.0)", "text": "This package contains a modified version of ca-bundle.crt:\n\nca-bundle.crt -- Bundle of CA Root Certificates\n\nCertificate data from Mozilla as of: Thu Nov 3 19:04:19 2011#\nThis is a bundle of X.509 certificates of public Certificate Authorities\n(CA). These were automatically extracted from Mozilla's root certificates\nfile (certdata.txt). This file can be found in the mozilla source tree:\nhttps://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt\nIt contains the certificates in PEM format and therefore\ncan be directly used with curl / libcurl / php_curl, or with\nan Apache+mod_ssl webserver for SSL client authentication.\nJust configure this file as the SSLCACertificateFile.#\n\n***** BEGIN LICENSE BLOCK *****\nThis Source Code Form is subject to the terms of the Mozilla Public License,\nv. 2.0. 
If a copy of the MPL was not distributed with this file, You can obtain\none at http://mozilla.org/MPL/2.0/.\n\n***** END LICENSE BLOCK *****\n@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $\n"}, {"name": "cffi", "version": "1.15.1", "license": "MIT License", "text": "\nExcept when otherwise stated (look for LICENSE files in directories or\ninformation at the beginning of each file) all software and\ndocumentation is licensed as follows: \n\n The MIT License\n\n Permission is hereby granted, free of charge, to any person \n obtaining a copy of this software and associated documentation \n files (the \"Software\"), to deal in the Software without \n restriction, including without limitation the rights to use, \n copy, modify, merge, publish, distribute, sublicense, and/or \n sell copies of the Software, and to permit persons to whom the \n Software is furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included \n in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS \n OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL \n THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING \n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \n DEALINGS IN THE SOFTWARE.\n\n"}, {"name": "charset-normalizer", "version": "2.1.1", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2019 TAHRI Ahmed R.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE."}, {"name": "ci-sdr", "version": "0.0.2", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2020 Communications Engineering Group, Paderborn University\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "click", "version": "8.0.4", "license": "BSD License", "text": "Copyright 2014 Pallets\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "colorama", "version": "0.4.4", "license": "BSD License", "text": "Copyright (c) 2010 Jonathan Hartley\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holders, nor those of its contributors\n may be used to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "ctc-segmentation", "version": "1.7.4", "license": "UNKNOWN", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "decorator", "version": "5.1.1", "license": "BSD License", "text": "Copyright (c) 2005-2018, Michele Simionato\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n Redistributions in bytecode form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\nBUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\nOF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\nTORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\nUSE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n"}, {"name": "einops", "version": "0.6.0", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2018 Alex Rogozhnikov\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "espnet", "version": "0.10.7a1", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "espnet-tts-frontend", "version": "0.0.3", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2017 Johns Hopkins University (Shinji Watanabe)\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "fastapi", "version": "0.70.0", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2018 Sebasti\u00e1n Ram\u00edrez\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"}, {"name": "filelock", "version": "3.8.2", "license": "The Unlicense (Unlicense)", "text": "This is free and unencumbered software released into the public domain.\n\nAnyone is free to copy, modify, publish, use, compile, sell, or\ndistribute this software, either in source code form or as a compiled\nbinary, for any purpose, commercial or non-commercial, and by any\nmeans.\n\nIn jurisdictions that recognize copyright laws, the author or authors\nof this software dedicate any and all copyright interest in the\nsoftware to the public domain. We make this dedication for the benefit\nof the public at large and to the detriment of our heirs and\nsuccessors. We intend this dedication to be an overt act of\nrelinquishment in perpetuity of all present and future rights to this\nsoftware under copyright law.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\nFor more information, please refer to \n"}, {"name": "g2p-en", "version": "2.1.0", "license": "Apache Software License", "text": " Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright {yyyy} {name of copyright owner}\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "h11", "version": "0.14.0", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2016 Nathaniel J. Smith and other contributors\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "h5py", "version": "3.7.0", "license": "BSD License", "text": "Copyright (c) 2008 Andrew Collette and contributors\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the\n distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "humanfriendly", "version": "10.0", "license": "MIT License", "text": "Copyright (c) 2021 Peter Odding\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "idna", "version": "3.4", "license": "BSD License", "text": "BSD 3-Clause License\n\nCopyright (c) 2013-2021, Kim Davies\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "importlib-metadata", "version": "4.13.0", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "inflect", "version": "6.0.2", "license": "MIT License", "text": "Copyright Jason R. Coombs\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n"}, {"name": "jaconv", "version": "0.3.3", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2014 Yukino Ikegami\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n"}, {"name": "jamo", "version": "0.4.1", "license": "Apache Software License", "text": "Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. 
You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. 
In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2017 Joshua Dong\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n"}, {"name": "joblib", "version": "1.2.0", "license": "BSD License", "text": "BSD 3-Clause License\n\nCopyright (c) 2008-2021, The joblib developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 
THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "librosa", "version": "0.9.2", "license": "ISC License (ISCL)", "text": "## ISC License\n\nCopyright (c) 2013--2017, librosa development team.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "llvmlite", "version": "0.39.1", "license": "BSD", "text": "Copyright (c) 2014-, Continuum Analytics, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\nRedistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "nltk", "version": "3.8.1", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "numba", "version": "0.56.4", "license": "BSD License", "text": "Copyright (c) 2012, Anaconda, Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\nRedistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "numpy", "version": "1.20.0", "license": "BSD License", "text": "\n----\n\nThis binary distribution of NumPy also bundles the following software:\n\n\nName: OpenBLAS\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled as a dynamically linked library\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense: 3-clause BSD\n Copyright (c) 2011-2014, The OpenBLAS Project\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n 1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n 3. Neither the name of the OpenBLAS project nor the names of\n its contributors may be used to endorse or promote products\n derived from this software without specific prior written\n permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: LAPACK\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled in OpenBLAS\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense 3-clause BSD\n Copyright (c) 1992-2013 The University of Tennessee and The University\n of Tennessee Research Foundation. All rights\n reserved.\n Copyright (c) 2000-2013 The University of California Berkeley. All\n rights reserved.\n Copyright (c) 2006-2013 The University of Colorado Denver. All rights\n reserved.\n\n $COPYRIGHT$\n\n Additional copyrights may follow\n\n $HEADER$\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n\n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer listed\n in this license in the documentation and/or other materials\n provided with the distribution.\n\n - Neither the name of the copyright holders nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\n The copyright holders provide no reassurances that the source code\n provided does not infringe any patent, copyright, or any other\n intellectual property rights of third parties. The copyright holders\n disclaim any liability to any recipient for claims brought against\n recipient by any third party for infringement of that parties\n intellectual property rights.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: GCC runtime library\nFiles: extra-dll\\*.dll\nDescription: statically linked, in DLL files compiled with gfortran only\nAvailability: https://gcc.gnu.org/viewcvs/gcc/\nLicense: GPLv3 + runtime exception\n Copyright (C) 2002-2017 Free Software Foundation, Inc.\n\n Libgfortran is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 3, or (at your option)\n any later version.\n\n Libgfortran is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n Under Section 7 of GPL version 3, you are granted additional\n permissions described in the GCC Runtime Library Exception, version\n 3.1, as published by the Free Software Foundation.\n\n You should have received a copy of the GNU General Public License and\n a copy of the GCC Runtime Library Exception along with this program;\n see the files COPYING3 and COPYING.RUNTIME respectively. If not, see\n <http://www.gnu.org/licenses/>.\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\\msvcp140.dll\nLicense: MSVC\n https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime\n\n Subject to the License Terms for the software, you may copy and\n distribute with your program any of the files within the following\n folder and its subfolders except as noted below. You may not modify\n these files.\n\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\n\n You may not distribute the contents of the following folders:\n\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\\debug_nonredist\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\\onecore\\debug_nonredist\n\n Subject to the License Terms for the software, you may copy and\n distribute the following files with your program in your program\u2019s\n application local folder or by deploying them into the Global\n Assembly Cache (GAC):\n\n VC\\atlmfc\\lib\\mfcmifc80.dll\n VC\\atlmfc\\lib\\amd64\\mfcmifc80.dll\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\\msvc*90.dll, extra-dll\\Microsoft.VC90.CRT.manifest\nLicense: MSVC\n For your convenience, we have provided the following folders for\n use when redistributing VC++ runtime files. Subject to the license\n terms for the software, you may redistribute the folder\n (unmodified) in the application local folder as a sub-folder with\n no change to the folder name. 
You may also redistribute all the\n files (*.dll and *.manifest) within a folder, listed below the\n folder for your convenience, as an entire set.\n\n \\VC\\redist\\x86\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\ia64\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\amd64\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\x86\\Microsoft.VC90.CRT\\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n \\VC\\redist\\ia64\\Microsoft.VC90.CRT\\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n\n----\n\nFull text of license texts referred to above follows (that they are\nlisted below does not necessarily imply the conditions apply to the\npresent binary release):\n\n----\n\nGCC RUNTIME LIBRARY EXCEPTION\n\nVersion 3.1, 31 March 2009\n\nCopyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>\n\nEveryone is permitted to copy and distribute verbatim copies of this\nlicense document, but changing it is not allowed.\n\nThis GCC Runtime Library Exception (\"Exception\") is an additional\npermission under section 7 of the GNU General Public License, version\n3 (\"GPLv3\"). It applies to a given file (the \"Runtime Library\") that\nbears a notice placed by the copyright holder of the file stating that\nthe file is governed by GPLv3 along with this Exception.\n\nWhen you use GCC to compile a program, GCC may combine portions of\ncertain GCC header files and runtime libraries with the compiled\nprogram. The purpose of this Exception is to allow compilation of\nnon-GPL (including proprietary) programs to use, in this way, the\nheader files and runtime libraries covered by this Exception.\n\n0. Definitions.\n\nA file is an \"Independent Module\" if it either requires the Runtime\nLibrary for execution after a Compilation Process, or makes use of an\ninterface provided by the Runtime Library, but is not otherwise based\non the Runtime Library.\n\n\"GCC\" means a version of the GNU Compiler Collection, with or without\nmodifications, governed by version 3 (or a specified later version) of\nthe GNU General Public License (GPL) with the option of using any\nsubsequent versions published by the FSF.\n\n\"GPL-compatible Software\" is software whose conditions of propagation,\nmodification and use would permit combination with GCC in accord with\nthe license of GCC.\n\n\"Target Code\" refers to output from any compiler for a real or virtual\ntarget processor architecture, in executable form or suitable for\ninput to an assembler, loader, linker and/or execution\nphase. Notwithstanding that, Target Code does not include data in any\nformat that is used as a compiler intermediate representation, or used\nfor producing a compiler intermediate representation.\n\nThe \"Compilation Process\" transforms code entirely represented in\nnon-intermediate languages designed for human-written code, and/or in\nJava Virtual Machine byte code, into Target Code. Thus, for example,\nuse of source code generators and preprocessors need not be considered\npart of the Compilation Process, since the Compilation Process can be\nunderstood as starting with the output of the generators or\npreprocessors.\n\nA Compilation Process is \"Eligible\" if it is done using GCC, alone or\nwith other GPL-compatible software, or if it is done without using any\nwork based on GCC. 
For example, using non-GPL-compatible Software to\noptimize any GCC intermediate representations would not qualify as an\nEligible Compilation Process.\n\n1. Grant of Additional Permission.\n\nYou have permission to propagate a work of Target Code formed by\ncombining the Runtime Library with Independent Modules, even if such\npropagation would otherwise violate the terms of GPLv3, provided that\nall Target Code was generated by Eligible Compilation Processes. You\nmay then convey such a combination under terms of your choice,\nconsistent with the licensing of the Independent Modules.\n\n2. No Weakening of GCC Copyleft.\n\nThe availability of this Exception does not imply any general\npresumption that third-party software is unaffected by the copyleft\nrequirements of the license of GCC.\n\n----\n\n GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n Preamble\n\n The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works. By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users. We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors. You can apply it to\nyour programs, too.\n\n When we speak of free software, we are referring to freedom, not\nprice. Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights. Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received. You must make sure that they, too, receive\nor can get the source code. And you must show them these terms so they\nknow their rights.\n\n Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software. For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so. This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software. 
The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable. Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts. If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary. To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n The precise terms and conditions for copying, distribution and\nmodification follow.\n\n TERMS AND CONDITIONS\n\n 0. Definitions.\n\n \"This License\" refers to version 3 of the GNU General Public License.\n\n \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n \"The Program\" refers to any copyrightable work licensed under this\nLicense. Each licensee is addressed as \"you\". \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy. The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy. Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies. Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License. If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n 1. Source Code.\n\n The \"source code\" for a work means the preferred form of the work\nfor making modifications to it. 
\"Object code\" means any non-source\nform of a work.\n\n A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form. A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities. However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work. For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n The Corresponding Source for a work in source code form is that\nsame work.\n\n 2. Basic Permissions.\n\n All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met. This License explicitly affirms your unlimited\npermission to run the unmodified Program. The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work. This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force. You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright. Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n Conveying under any other circumstances is permitted solely under\nthe conditions stated below. Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n 4. Conveying Verbatim Copies.\n\n You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n 5. Conveying Modified Source Versions.\n\n You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n a) The work must carry prominent notices stating that you modified\n it, and giving a relevant date.\n\n b) The work must carry prominent notices stating that it is\n released under this License and any conditions added under section\n 7. This requirement modifies the requirement in section 4 to\n \"keep intact all notices\".\n\n c) You must license the entire work, as a whole, under this\n License to anyone who comes into possession of a copy. This\n License will therefore apply, along with any applicable section 7\n additional terms, to the whole of the work, and all its parts,\n regardless of how they are packaged. This License gives no\n permission to license the work in any other way, but it does not\n invalidate such permission if you have separately received it.\n\n d) If the work has interactive user interfaces, each must display\n Appropriate Legal Notices; however, if the Program has interactive\n interfaces that do not display Appropriate Legal Notices, your\n work need not make them do so.\n\n A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit. Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n 6. 
Conveying Non-Source Forms.\n\n You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n a) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by the\n Corresponding Source fixed on a durable physical medium\n customarily used for software interchange.\n\n b) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by a\n written offer, valid for at least three years and valid for as\n long as you offer spare parts or customer support for that product\n model, to give anyone who possesses the object code either (1) a\n copy of the Corresponding Source for all the software in the\n product that is covered by this License, on a durable physical\n medium customarily used for software interchange, for a price no\n more than your reasonable cost of physically performing this\n conveying of source, or (2) access to copy the\n Corresponding Source from a network server at no charge.\n\n c) Convey individual copies of the object code with a copy of the\n written offer to provide the Corresponding Source. This\n alternative is allowed only occasionally and noncommercially, and\n only if you received the object code with such an offer, in accord\n with subsection 6b.\n\n d) Convey the object code by offering access from a designated\n place (gratis or for a charge), and offer equivalent access to the\n Corresponding Source in the same way through the same place at no\n further charge. You need not require recipients to copy the\n Corresponding Source along with the object code. If the place to\n copy the object code is a network server, the Corresponding Source\n may be on a different server (operated by you or a third party)\n that supports equivalent copying facilities, provided you maintain\n clear directions next to the object code saying where to find the\n Corresponding Source. Regardless of what server hosts the\n Corresponding Source, you remain obligated to ensure that it is\n available for as long as needed to satisfy these requirements.\n\n e) Convey the object code using peer-to-peer transmission, provided\n you inform other peers where the object code and Corresponding\n Source of the work are being offered to the general public at no\n charge under subsection 6d.\n\n A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling. In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage. For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product. 
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source. The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information. But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed. Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n 7. Additional Terms.\n\n \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law. If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit. (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.) 
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n a) Disclaiming warranty or limiting liability differently from the\n terms of sections 15 and 16 of this License; or\n\n b) Requiring preservation of specified reasonable legal notices or\n author attributions in that material or in the Appropriate Legal\n Notices displayed by works containing it; or\n\n c) Prohibiting misrepresentation of the origin of that material, or\n requiring that modified versions of such material be marked in\n reasonable ways as different from the original version; or\n\n d) Limiting the use for publicity purposes of names of licensors or\n authors of the material; or\n\n e) Declining to grant rights under trademark law for use of some\n trade names, trademarks, or service marks; or\n\n f) Requiring indemnification of licensors and authors of that\n material by anyone who conveys the material (or modified versions of\n it) with contractual assumptions of liability to the recipient, for\n any liability that these contractual assumptions directly impose on\n those licensors and authors.\n\n All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10. If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term. If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n 8. Termination.\n\n You may not propagate or modify a covered work except as expressly\nprovided under this License. 
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License. If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n 9. Acceptance Not Required for Having Copies.\n\n You are not required to accept this License in order to receive or\nrun a copy of the Program. Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance. However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work. These actions infringe copyright if you do\nnot accept this License. Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n 10. Automatic Licensing of Downstream Recipients.\n\n Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License. You are not responsible\nfor enforcing compliance by third parties with this License.\n\n An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations. If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License. For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n 11. Patents.\n\n A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based. 
The\nwork thus licensed is called the contributor's \"contributor version\".\n\n A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version. For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement). To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients. \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License. 
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n 12. No Surrender of Others' Freedom.\n\n If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all. For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n 13. Use with the GNU Affero General Public License.\n\n Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work. The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n 14. Revised Versions of this License.\n\n The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time. Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n Each version is given a distinguishing version number. If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation. If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n Later license versions may give you additional or different\npermissions. However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n 15. 
Disclaimer of Warranty.\n\n THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n 16. Limitation of Liability.\n\n IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n 17. Interpretation of Sections 15 and 16.\n\n If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Programs\n\n If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n To do so, attach the following notices to the program. It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n <program> Copyright (C) <year> <name of author>\n This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. 
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<http://www.gnu.org/licenses/>.\n\n The GNU General Public License does not permit incorporating your program\ninto proprietary programs. If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library. If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License. But first, please read\n<http://www.gnu.org/philosophy/why-not-lgpl.html>.\n"}, {"name": "packaging", "version": "22.0", "license": "Apache Software License; BSD License", "text": "This software is made available under the terms of *either* of the licenses\nfound in LICENSE.APACHE or LICENSE.BSD. Contributions to this software are made\nunder the terms of *both* these licenses.\n"}, {"name": "pooch", "version": "1.6.0", "license": "BSD License", "text": "Copyright (c) 2018 The Pooch Developers\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n* Neither the name of the copyright holders nor the names of any contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "pycparser", "version": "2.21", "license": "BSD License", "text": "pycparser -- A C parser in Python\n\nCopyright (c) 2008-2020, Eli Bendersky\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this \n list of conditions and the following disclaimer.\n* Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n* Neither the name of Eli Bendersky nor the names of its contributors may \n be used to endorse or promote products derived from this software without \n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND \nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED \nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE \nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR \nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE \nGOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) \nHOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT \nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT \nOF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "pydantic", "version": "1.10.2", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2017, 2018, 2019, 2020, 2021 Samuel Colvin and other contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "pyopenjtalk", "version": "0.2.0+f4ade29", "license": "MIT License", "text": "The pyopenjtalk package is licensed under the MIT \"Expat\" License:\n\n> Copyright (c) 2018: Ryuichi Yamamoto.\n>\n> Permission is hereby granted, free of charge, to any person obtaining\n> a copy of this software and associated documentation files (the\n> \"Software\"), to deal in the Software without restriction, including\n> without limitation the rights to use, copy, modify, merge, publish,\n> distribute, sublicense, and/or sell copies of the Software, and to\n> permit persons to whom the Software is furnished to do so, subject to\n> the following conditions:\n>\n> The above copyright notice and this permission notice shall be\n> included in all copies or substantial portions of the Software.\n>\n> THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n> MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n> IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n> CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n> TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n> SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "pypinyin", "version": "0.44.0", "license": "MIT License", "text": "The MIT License (MIT)\n\nCopyright (c) 2016 mozillazg, \u95f2\u8018 \n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "pyreadline3", "version": "3.4.1", "license": "BSD License", "text": "# LICENSE\n\n## pyreadline3 copyright and licensing notes\n\nUnless indicated otherwise, files in this project are covered by a BSD-type\nlicense, included below.\n\nIndividual authors are the holders of the copyright for their code and are\nlisted in each file.\n\nSome files may be licensed under different conditions.
Ultimately each file \nindicates clearly the conditions under which its author/authors have \ndecided to publish the code.\n\n## pyreadline3 license\n\npyreadline3 is released under a BSD-type license.\n\nCopyright (c) 2020 Bassem Girgis .\n\nCopyright (c) 2006-2020 J\u00f6rgen Stenarson .\n\nCopyright (c) 2003-2006 Gary Bishop\n\nCopyright (c) 2003-2006 Jack Trainor\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\na. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\nb. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\nc. Neither the name of the copyright holders nor the names of any\n contributors to this software may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n"}, {"name": "python-multipart", "version": "0.0.5", "license": "Apache Software License", "text": "Copyright 2012, Andrew Dunham\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n"}, {"name": "pytorch-wpe", "version": "0.0.1", "license": "UNKNOWN", "text": "SOFTWARE LICENSE AGREEMENT FOR EVALUATION\n\nThis SOFTWARE EVALUATION LICENSE AGREEMENT (this \"Agreement\") is a legal contract between a person who uses or otherwise accesses or installs the Software (\u201cUser(s)\u201d), and Nippon Telegraph and Telephone corporation (\"NTT\").\nREAD THE TERMS AND CONDITIONS OF THIS AGREEMENT CAREFULLY BEFORE INSTALLING OR OTHERWISE ACCESSING OR USING NTT'S PROPRIETARY SOFTWARE ACCOMPANIED BY THIS AGREEMENT (the \"SOFTWARE\"). THE SOFTWARE IS COPYRIGHTED AND IT IS LICENSED TO USER UNDER THIS AGREEMENT, NOT SOLD TO USER. BY INSTALLING OR OTHERWISE ACCESSING OR USING THE SOFTWARE, USER ACKNOWLEDGES THAT USER HAS READ THIS AGREEMENT, THAT USER UNDERSTANDS IT, AND THAT USER ACCEPTS AND AGREES TO BE BOUND BY ITS TERMS.
IF AT ANY TIME USER IS NOT WILLING TO BE BOUND BY THE TERMS OF THIS AGREEMENT, USER SHOULD TERMINATE THE INSTALLATION PROCESS, IMMEDIATELY CEASE AND REFRAIN FROM ACCESSING OR USING THE SOFTWARE AND DELETE ANY COPIES USER MAY HAVE. THIS AGREEMENT REPRESENTS THE ENTIRE AGREEMENT BETWEEN USER AND NTT CONCERNING THE SOFTWARE.\n\n \nBACKGROUND\nA.\tNTT is the owner of all rights, including all patent rights, copyrights and trade secret rights, in and to the Software and related documentation listed in Exhibit A to this Agreement.\n\nB.\tUser wishes to obtain a royalty free license to use the Software to enable User to evaluate, and NTT wishes to grant such a license to User, pursuant and subject to the terms and conditions of this Agreement.\n\nC.\tAs a condition to NTT's provision of the Software to User, NTT has required User to execute this Agreement.\n\nIn consideration of these premises, and the mutual promises and conditions in this Agreement, the parties hereby agree as follows:\n\n1.\tGrant of Evaluation License. \tNTT hereby grants to User, and User hereby accepts, under the terms and conditions of this Agreement, a royalty free, nontransferable and nonexclusive license to use the Software internally for the purposes of testing, analyzing, and evaluating the methods or mechanisms as shown in the research paper submitted by NTT to a certain academy. User may make a reasonable number of backup copies of the Software solely for User's internal use pursuant to the license granted in this Section 1.\n\n2.\u3000Shipment and Installation. NTT will ship or deliver the Software by any method that NTT deems appropriate. User shall be solely responsible for proper installation of the Software.\n\n3.\u3000Term. This Agreement is effective whichever is earlier (i) upon User\u2019s acceptance of the Agreement, or (ii) upon User\u2019s installing, accessing, and using the Software, even if User has not expressly accepted this Agreement. Without prejudice to any other rights, NTT may terminate this Agreement without notice to User (i) if User breaches or fails to comply with any of the limitations or other requirements described herein, and (ii) if NTT receives a notice from the academy stating that the research paper would not be published, and in any such case User agrees that NTT may, in addition to any other remedies it may have at law or in equity, remotely disable the Software. User may terminate this Agreement at any time by User\u2019s decision to terminate the Agreement to NTT and ceasing use of the Software. Upon any termination or expiration of this Agreement for any reason, User agrees to uninstall the Software and either return to NTT the Software and all copies thereof, or to destroy all such materials and provide written verification of such destruction to NTT.\n\n4.\tProprietary Rights\n(a)\tThe Software is the valuable, confidential, and proprietary property of NTT, and NTT shall retain exclusive title to this property both during the term and after the termination of this Agreement. Without limitation, User acknowledges that all patent rights, copyrights and trade secret rights in the Software shall remain the exclusive property of NTT at all times. User shall use not less than reasonable care in safeguarding the confidentiality of the Software. 
\n(b)\tUSER SHALL NOT, IN WHOLE OR IN PART, AT ANY TIME DURING THE TERM OF OR AFTER THE TERMINATION OF THIS AGREEMENT: (i) SELL, ASSIGN, LEASE, DISTRIBUTE, OR OTHERWISE TRANSFER THE SOFTWARE TO ANY THIRD PARTY; (ii) EXCEPT AS OTHERWISE PROVIDED HEREIN, COPY OR REPRODUCE THE SOFTWARE IN ANY MANNER; (iii) DISCLOSE THE SOFTWARE TO ANY THIRD PARTY, EXCEPT TO USER'S EMPLOYEES WHO REQUIRE ACCESS TO THE SOFTWARE FOR THE PURPOSES OF THIS AGREEMENT; (iv) MODIFY, DISASSEMBLE, DECOMPILE, REVERSE ENGINEER OR TRANSLATE THE SOFTWARE; OR (v) ALLOW ANY PERSON OR ENTITY TO COMMIT ANY OF THE ACTIONS DESCRIBED IN (i) THROUGH (iv) ABOVE.\n(c)\tUser shall take appropriate action, by instruction, agreement, or otherwise, with respect to its employees permitted under this Agreement to have access to the Software to ensure that all of User's obligations under this Section 4 shall be satisfied. \n\n5.\u3000\tIndemnity. User shall defend, indemnify and hold harmless NTT, its agents and employees, from any loss, damage, or liability arising in connection with User's improper or unauthorized use of the Software. NTT SHALL HAVE THE SOLE RIGHT TO CONDUCT OR DEFEND ANY ACTION RELATING TO THE SOFTWARE.\n\n6.\tDisclaimer. THE SOFTWARE IS LICENSED TO USER \"AS IS,\" WITHOUT ANY TRAINING, MAINTENANCE, OR SERVICE OBLIGATIONS WHATSOEVER ON THE PART OF NTT. NTT MAKES NO EXPRESS OR IMPLIED WARRANTIES OF ANY TYPE WHATSOEVER, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF MERCHANTABILITY, OF FITNESS FOR A PARTICULAR PURPOSE AND OF NON-INFRINGEMENT ON COPYRIGHT OR ANY OTHER RIGHT OF THIRD PARTIES. USER ASSUMES ALL RISKS ASSOCIATED WITH ITS USE OF THE SOFTWARE, INCLUDING WITHOUT LIMITATION RISKS RELATING TO QUALITY, PERFORMANCE, DATA LOSS, AND UTILITY IN A PRODUCTION ENVIRONMENT. \n\n7.\tLimitation of Liability. IN NO EVENT SHALL NTT BE LIABLE TO USER OR TO ANY THIRD PARTY FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING BUT NOT LIMITED TO DAMAGES FOR PERSONAL INJURY, PROPERTY DAMAGE, LOST PROFITS, OR OTHER ECONOMIC LOSS, ARISING IN CONNECTION WITH USER'S USE OF OR INABILITY TO USE THE SOFTWARE, IN CONNECTION WITH NTT'S PROVISION OF OR FAILURE TO PROVIDE SERVICES PERTAINING TO THE SOFTWARE, OR AS A RESULT OF ANY DEFECT IN THE SOFTWARE. THIS DISCLAIMER OF LIABILITY SHALL APPLY REGARDLESS OF THE FORM OF ACTION THAT MAY BE BROUGHT AGAINST NTT, WHETHER IN CONTRACT OR TORT, INCLUDING WITHOUT LIMITATION ANY ACTION FOR NEGLIGENCE. USER'S SOLE REMEDY IN THE EVENT OF ANY BREACH OF THIS AGREEMENT BY NTT SHALL BE TERMINATION PURSUANT TO SECTION 3.\n\n8.\tNo Assignment or Sublicense. Neither this Agreement nor any right or license under this Agreement, nor the Software, may be sublicensed, assigned, or otherwise transferred by User without NTT's prior written consent.\n\n9.\tGeneral\n(a)\tIf any provision, or part of a provision, of this Agreement is or becomes illegal, unenforceable, or invalidated, by operation of law or otherwise, that provision or part shall to that extent be deemed omitted, and the remainder of this Agreement shall remain in full force and effect.\n(b)\tThis Agreement is the complete and exclusive statement of the agreement between the parties with respect to the subject matter hereof, and supersedes all written and oral contracts, proposals, and other communications between the parties relating to that subject matter. \n(c)\tSubject to Section 8, this Agreement shall be binding on, and shall inure to the benefit of, the respective successors and assigns of NTT and User.
\n(d)\tIf either party to this Agreement initiates a legal action or proceeding to enforce or interpret any part of this Agreement, the prevailing party in such action shall be entitled to recover, as an element of the costs of such action and not as damages, its attorneys' fees and other costs associated with such action or proceeding.\n(e)\tThis Agreement shall be governed by and interpreted under the laws of Japan, without reference to conflicts of law principles. All disputes arising out of or in connection with this Agreement shall be finally settled by arbitration in Tokyo in accordance with the Commercial Arbitration Rules of the Japan Commercial Arbitration Association. The arbitration shall be conducted by three (3) arbitrators and in Japanese. The award rendered by the arbitrators shall be final and binding upon the parties. Judgment upon the award may be entered in any court having jurisdiction thereof.\n(f)\u3000\u3000\tNTT shall not be liable to the User or to any third party for any delay or failure to perform NTT\u2019s obligation set forth under this Agreement due to any cause beyond NTT\u2019s reasonable control.\n\u2003\nEXHIBIT A\nThe Software and related documentation in this repository.\n\nThe set of python code in this repository is an example implementation of DNN-WPE dereverberation algorithm described in the following paper, \n\nK. Kinoshita, M. Delcroix, H. Kwon, T. Mori, T. Nakatani, \"Neural network-based spectrum estimation for online WPE dereverberation\" In proc. of Interspeech, pp.384--388, 2017\n"}, {"name": "pyworld", "version": "0.3.0", "license": "UNKNOWN", "text": "MIT License\n\nCopyright 2016 pyworld contributors\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "regex", "version": "2022.10.31", "license": "Apache Software License", "text": "This work was derived from the 're' module of CPython 2.6 and CPython 3.1,\ncopyright (c) 1998-2001 by Secret Labs AB and licensed under CNRI's Python 1.6\nlicense.\n\nAll additions and alterations are licensed under the Apache 2.0 License.\n\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2020 Matthew Barnett\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "requests", "version": "2.28.1", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n"}, {"name": "resampy", "version": "0.4.2", "license": "ISC License (ISCL)", "text": "ISC License\n\nCopyright (c) 2016, Brian McFee\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "scikit-learn", "version": "1.2.0", "license": "BSD License", "text": "BSD 3-Clause License\n\nCopyright (c) 2007-2022 The scikit-learn developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "scipy", "version": "1.7.1", "license": "BSD License", "text": "Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n\n3. 
Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n----\n\nThis binary distribution of Scipy also bundles the following software:\n\n\nName: OpenBLAS\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled as a dynamically linked library\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense: 3-clause BSD\n Copyright (c) 2011-2014, The OpenBLAS Project\n All rights reserved.\n \n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n \n 1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n \n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n 3. Neither the name of the OpenBLAS project nor the names of \n its contributors may be used to endorse or promote products \n derived from this software without specific prior written \n permission.\n \n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: LAPACK\nFiles: extra-dll\\libopenb*.dll\nDescription: bundled in OpenBLAS\nAvailability: https://github.com/xianyi/OpenBLAS/\nLicense 3-clause BSD\n Copyright (c) 1992-2013 The University of Tennessee and The University\n of Tennessee Research Foundation. All rights\n reserved.\n Copyright (c) 2000-2013 The University of California Berkeley. All\n rights reserved.\n Copyright (c) 2006-2013 The University of Colorado Denver. 
All rights\n reserved.\n \n $COPYRIGHT$\n \n Additional copyrights may follow\n \n $HEADER$\n \n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are\n met:\n \n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n \n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer listed\n in this license in the documentation and/or other materials\n provided with the distribution.\n \n - Neither the name of the copyright holders nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n \n The copyright holders provide no reassurances that the source code\n provided does not infringe any patent, copyright, or any other\n intellectual property rights of third parties. The copyright holders\n disclaim any liability to any recipient for claims brought against\n recipient by any third party for infringement of that parties\n intellectual property rights.\n \n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nName: GCC runtime library\nFiles: extra-dll\\*.dll\nDescription: statically linked, in DLL files compiled with gfortran only\nAvailability: https://gcc.gnu.org/viewcvs/gcc/\nLicense: GPLv3 + runtime exception\n Copyright (C) 2002-2017 Free Software Foundation, Inc.\n \n Libgfortran is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 3, or (at your option)\n any later version.\n \n Libgfortran is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n \n Under Section 7 of GPL version 3, you are granted additional\n permissions described in the GCC Runtime Library Exception, version\n 3.1, as published by the Free Software Foundation.\n \n You should have received a copy of the GNU General Public License and\n a copy of the GCC Runtime Library Exception along with this program;\n see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see\n <http://www.gnu.org/licenses/>.\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\\msvcp140.dll\nLicense: MSVC\n https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime\n\n Subject to the License Terms for the software, you may copy and\n distribute with your program any of the files within the following\n folder and its subfolders except as noted below. You may not modify\n these files.\n\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\n\n You may not distribute the contents of the following folders:\n\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\\debug_nonredist\n C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\redist\\onecore\\debug_nonredist\n\n Subject to the License Terms for the software, you may copy and\n distribute the following files with your program in your program\u2019s\n application local folder or by deploying them into the Global\n Assembly Cache (GAC):\n\n VC\\atlmfc\\lib\\mfcmifc80.dll\n VC\\atlmfc\\lib\\amd64\\mfcmifc80.dll\n\n\nName: Microsoft Visual C++ Runtime Files\nFiles: extra-dll\\msvc*90.dll, extra-dll\\Microsoft.VC90.CRT.manifest\nLicense: MSVC\n For your convenience, we have provided the following folders for\n use when redistributing VC++ runtime files. Subject to the license\n terms for the software, you may redistribute the folder\n (unmodified) in the application local folder as a sub-folder with\n no change to the folder name. You may also redistribute all the\n files (*.dll and *.manifest) within a folder, listed below the\n folder for your convenience, as an entire set.\n\n \\VC\\redist\\x86\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\ia64\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\amd64\\Microsoft.VC90.ATL\\\n atl90.dll\n Microsoft.VC90.ATL.manifest\n \\VC\\redist\\x86\\Microsoft.VC90.CRT\\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n \\VC\\redist\\ia64\\Microsoft.VC90.CRT\\\n msvcm90.dll\n msvcp90.dll\n msvcr90.dll\n Microsoft.VC90.CRT.manifest\n\n----\n\nFull text of license texts referred to above follows (that they are\nlisted below does not necessarily imply the conditions apply to the\npresent binary release):\n\n----\n\nGCC RUNTIME LIBRARY EXCEPTION\n\nVersion 3.1, 31 March 2009\n\nCopyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>\n\nEveryone is permitted to copy and distribute verbatim copies of this\nlicense document, but changing it is not allowed.\n\nThis GCC Runtime Library Exception (\"Exception\") is an additional\npermission under section 7 of the GNU General Public License, version\n3 (\"GPLv3\"). It applies to a given file (the \"Runtime Library\") that\nbears a notice placed by the copyright holder of the file stating that\nthe file is governed by GPLv3 along with this Exception.\n\nWhen you use GCC to compile a program, GCC may combine portions of\ncertain GCC header files and runtime libraries with the compiled\nprogram. The purpose of this Exception is to allow compilation of\nnon-GPL (including proprietary) programs to use, in this way, the\nheader files and runtime libraries covered by this Exception.\n\n0.
Definitions.\n\nA file is an \"Independent Module\" if it either requires the Runtime\nLibrary for execution after a Compilation Process, or makes use of an\ninterface provided by the Runtime Library, but is not otherwise based\non the Runtime Library.\n\n\"GCC\" means a version of the GNU Compiler Collection, with or without\nmodifications, governed by version 3 (or a specified later version) of\nthe GNU General Public License (GPL) with the option of using any\nsubsequent versions published by the FSF.\n\n\"GPL-compatible Software\" is software whose conditions of propagation,\nmodification and use would permit combination with GCC in accord with\nthe license of GCC.\n\n\"Target Code\" refers to output from any compiler for a real or virtual\ntarget processor architecture, in executable form or suitable for\ninput to an assembler, loader, linker and/or execution\nphase. Notwithstanding that, Target Code does not include data in any\nformat that is used as a compiler intermediate representation, or used\nfor producing a compiler intermediate representation.\n\nThe \"Compilation Process\" transforms code entirely represented in\nnon-intermediate languages designed for human-written code, and/or in\nJava Virtual Machine byte code, into Target Code. Thus, for example,\nuse of source code generators and preprocessors need not be considered\npart of the Compilation Process, since the Compilation Process can be\nunderstood as starting with the output of the generators or\npreprocessors.\n\nA Compilation Process is \"Eligible\" if it is done using GCC, alone or\nwith other GPL-compatible software, or if it is done without using any\nwork based on GCC. For example, using non-GPL-compatible Software to\noptimize any GCC intermediate representations would not qualify as an\nEligible Compilation Process.\n\n1. Grant of Additional Permission.\n\nYou have permission to propagate a work of Target Code formed by\ncombining the Runtime Library with Independent Modules, even if such\npropagation would otherwise violate the terms of GPLv3, provided that\nall Target Code was generated by Eligible Compilation Processes. You\nmay then convey such a combination under terms of your choice,\nconsistent with the licensing of the Independent Modules.\n\n2. No Weakening of GCC Copyleft.\n\nThe availability of this Exception does not imply any general\npresumption that third-party software is unaffected by the copyleft\nrequirements of the license of GCC.\n\n----\n\n GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n Preamble\n\n The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works. By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users. We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors. You can apply it to\nyour programs, too.\n\n When we speak of free software, we are referring to freedom, not\nprice.
Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights. Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received. You must make sure that they, too, receive\nor can get the source code. And you must show them these terms so they\nknow their rights.\n\n Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software. For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so. This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software. The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable. Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts. If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary. To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n The precise terms and conditions for copying, distribution and\nmodification follow.\n\n TERMS AND CONDITIONS\n\n 0. Definitions.\n\n \"This License\" refers to version 3 of the GNU General Public License.\n\n \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n \"The Program\" refers to any copyrightable work licensed under this\nLicense. Each licensee is addressed as \"you\". \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy. 
The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy. Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies. Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License. If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n 1. Source Code.\n\n The \"source code\" for a work means the preferred form of the work\nfor making modifications to it. \"Object code\" means any non-source\nform of a work.\n\n A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form. A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities. However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work. For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n The Corresponding Source for a work in source code form is that\nsame work.\n\n 2. 
Basic Permissions.\n\n All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met. This License explicitly affirms your unlimited\npermission to run the unmodified Program. The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work. This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force. You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright. Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n Conveying under any other circumstances is permitted solely under\nthe conditions stated below. Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n 3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n 4. Conveying Verbatim Copies.\n\n You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n 5. Conveying Modified Source Versions.\n\n You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n a) The work must carry prominent notices stating that you modified\n it, and giving a relevant date.\n\n b) The work must carry prominent notices stating that it is\n released under this License and any conditions added under section\n 7. This requirement modifies the requirement in section 4 to\n \"keep intact all notices\".\n\n c) You must license the entire work, as a whole, under this\n License to anyone who comes into possession of a copy. 
This\n License will therefore apply, along with any applicable section 7\n additional terms, to the whole of the work, and all its parts,\n regardless of how they are packaged. This License gives no\n permission to license the work in any other way, but it does not\n invalidate such permission if you have separately received it.\n\n d) If the work has interactive user interfaces, each must display\n Appropriate Legal Notices; however, if the Program has interactive\n interfaces that do not display Appropriate Legal Notices, your\n work need not make them do so.\n\n A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit. Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n 6. Conveying Non-Source Forms.\n\n You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n a) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by the\n Corresponding Source fixed on a durable physical medium\n customarily used for software interchange.\n\n b) Convey the object code in, or embodied in, a physical product\n (including a physical distribution medium), accompanied by a\n written offer, valid for at least three years and valid for as\n long as you offer spare parts or customer support for that product\n model, to give anyone who possesses the object code either (1) a\n copy of the Corresponding Source for all the software in the\n product that is covered by this License, on a durable physical\n medium customarily used for software interchange, for a price no\n more than your reasonable cost of physically performing this\n conveying of source, or (2) access to copy the\n Corresponding Source from a network server at no charge.\n\n c) Convey individual copies of the object code with a copy of the\n written offer to provide the Corresponding Source. This\n alternative is allowed only occasionally and noncommercially, and\n only if you received the object code with such an offer, in accord\n with subsection 6b.\n\n d) Convey the object code by offering access from a designated\n place (gratis or for a charge), and offer equivalent access to the\n Corresponding Source in the same way through the same place at no\n further charge. You need not require recipients to copy the\n Corresponding Source along with the object code. If the place to\n copy the object code is a network server, the Corresponding Source\n may be on a different server (operated by you or a third party)\n that supports equivalent copying facilities, provided you maintain\n clear directions next to the object code saying where to find the\n Corresponding Source. 
Regardless of what server hosts the\n Corresponding Source, you remain obligated to ensure that it is\n available for as long as needed to satisfy these requirements.\n\n e) Convey the object code using peer-to-peer transmission, provided\n you inform other peers where the object code and Corresponding\n Source of the work are being offered to the general public at no\n charge under subsection 6d.\n\n A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling. In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage. For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product. A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source. The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information. But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed. Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n 7. 
Additional Terms.\n\n \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law. If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit. (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.) You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n a) Disclaiming warranty or limiting liability differently from the\n terms of sections 15 and 16 of this License; or\n\n b) Requiring preservation of specified reasonable legal notices or\n author attributions in that material or in the Appropriate Legal\n Notices displayed by works containing it; or\n\n c) Prohibiting misrepresentation of the origin of that material, or\n requiring that modified versions of such material be marked in\n reasonable ways as different from the original version; or\n\n d) Limiting the use for publicity purposes of names of licensors or\n authors of the material; or\n\n e) Declining to grant rights under trademark law for use of some\n trade names, trademarks, or service marks; or\n\n f) Requiring indemnification of licensors and authors of that\n material by anyone who conveys the material (or modified versions of\n it) with contractual assumptions of liability to the recipient, for\n any liability that these contractual assumptions directly impose on\n those licensors and authors.\n\n All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10. If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term. If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n 8. Termination.\n\n You may not propagate or modify a covered work except as expressly\nprovided under this License. 
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License. If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n 9. Acceptance Not Required for Having Copies.\n\n You are not required to accept this License in order to receive or\nrun a copy of the Program. Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance. However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work. These actions infringe copyright if you do\nnot accept this License. Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n 10. Automatic Licensing of Downstream Recipients.\n\n Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License. You are not responsible\nfor enforcing compliance by third parties with this License.\n\n An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations. If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License. For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n 11. Patents.\n\n A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based. 
The\nwork thus licensed is called the contributor's \"contributor version\".\n\n A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version. For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement). To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients. \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License. 
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n 12. No Surrender of Others' Freedom.\n\n If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all. For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n 13. Use with the GNU Affero General Public License.\n\n Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work. The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n 14. Revised Versions of this License.\n\n The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time. Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n Each version is given a distinguishing version number. If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation. If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n Later license versions may give you additional or different\npermissions. However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n 15. 
Disclaimer of Warranty.\n\n THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n 16. Limitation of Liability.\n\n IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n 17. Interpretation of Sections 15 and 16.\n\n If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Programs\n\n If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n To do so, attach the following notices to the program. It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n <program> Copyright (C) <year> <name of author>\n This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. 
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n The GNU General Public License does not permit incorporating your program\ninto proprietary programs. If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library. If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License. But first, please read\n<https://www.gnu.org/philosophy/why-not-lgpl.html>.\n"}, {"name": "sentencepiece", "version": "0.1.97", "license": "Apache Software License", "text": "\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"}, {"name": "six", "version": "1.16.0", "license": "MIT License", "text": "Copyright (c) 2010-2020 Benjamin Peterson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "sniffio", "version": "1.3.0", "license": "Apache Software License; MIT License", "text": "This software is made available under the terms of *either* of the\nlicenses found in LICENSE.APACHE2 or LICENSE.MIT. 
Contributions to sniffio are\nmade under the terms of *both* these licenses.\n"}, {"name": "starlette", "version": "0.16.0", "license": "BSD License", "text": "Copyright \u00a9 2018, [Encode OSS Ltd](https://www.encode.io/).\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "threadpoolctl", "version": "3.1.0", "license": "BSD License", "text": "Copyright (c) 2019, threadpoolctl contributors\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."}, {"name": "torch", "version": "1.13.1", "license": "BSD License", "text": "From PyTorch:\n\nCopyright (c) 2016- Facebook, Inc (Adam Paszke)\nCopyright (c) 2014- Facebook, Inc (Soumith Chintala)\nCopyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\nCopyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\nCopyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\nCopyright (c) 2011-2013 NYU (Clement Farabet)\nCopyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\nCopyright (c) 2006 Idiap Research Institute (Samy Bengio)\nCopyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n\nFrom Caffe2:\n\nCopyright (c) 2016-present, Facebook Inc. All rights reserved.\n\nAll contributions by Facebook:\nCopyright (c) 2016 Facebook Inc.\n\nAll contributions by Google:\nCopyright (c) 2015 Google Inc.\nAll rights reserved.\n\nAll contributions by Yangqing Jia:\nCopyright (c) 2015 Yangqing Jia\nAll rights reserved.\n\nAll contributions by Kakao Brain:\nCopyright 2019-2020 Kakao Brain\n\nAll contributions by Cruise LLC:\nCopyright (c) 2022 Cruise LLC.\nAll rights reserved.\n\nAll contributions from Caffe:\nCopyright(c) 2013, 2014, 2015, the respective contributors\nAll rights reserved.\n\nAll other contributions:\nCopyright(c) 2015, 2016 the respective contributors\nAll rights reserved.\n\nCaffe2 uses a copyright model similar to Caffe: each contributor holds\ncopyright over their contributions to Caffe2. The project versioning records\nall such contribution and copyright details. If a contributor wants to further\nmark their specific copyright on a particular contribution, they should\nindicate their copyright solely in the commit message of the change when it is\ncommitted.\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America\n and IDIAP Research Institute nor the names of its contributors may be\n used to endorse or promote products derived from this software without\n specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\n\nThe Pytorch repository and source distributions bundle several libraries that are \ncompatibly licensed. We list these here.\n\nName: third_party\\FP16\nLicense: MIT\nFiles: third_party\\FP16\n For details, see the files concatenated below: third_party\\FP16\\LICENSE\n\nName: third_party\\FXdiv\nLicense: MIT\nFiles: third_party\\FXdiv\n For details, see the files concatenated below: third_party\\FXdiv\\LICENSE\n\nName: third_party\\NNPACK\nLicense: BSD-2-Clause\nFiles: third_party\\NNPACK\n For details, see the files concatenated below: third_party\\NNPACK\\LICENSE\n\nName: third_party\\QNNPACK\nLicense: BSD-3-Clause\nFiles: third_party\\QNNPACK\n For details, see the files concatenated below: third_party\\QNNPACK\\LICENSE\n\nName: third_party\\QNNPACK\\deps\\clog\nLicense: BSD-2-Clause\nFiles: third_party\\QNNPACK\\deps\\clog\n For details, see the files concatenated below: third_party\\QNNPACK\\deps\\clog\\LICENSE\n\nName: third_party\\VulkanMemoryAllocator\nLicense: MIT\nFiles: third_party\\VulkanMemoryAllocator\n For details, see the files concatenated below: third_party\\VulkanMemoryAllocator\\LICENSE.txt\n\nName: third_party\\XNNPACK\nLicense: BSD-3-Clause\nFiles: third_party\\XNNPACK\n For details, see the files concatenated below: third_party\\XNNPACK\\LICENSE\n\nName: third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\benchmark\n For details, see the files concatenated below: third_party\\benchmark\\LICENSE\n\nName: third_party\\cpuinfo\nLicense: BSD-2-Clause\nFiles: third_party\\cpuinfo\n For details, see the files concatenated below: third_party\\cpuinfo\\LICENSE\n\nName: third_party\\cpuinfo\\deps\\clog\nLicense: BSD-2-Clause\nFiles: third_party\\cpuinfo\\deps\\clog\n For details, see the files concatenated below: third_party\\cpuinfo\\deps\\clog\\LICENSE\n\nName: third_party\\cudnn_frontend\nLicense: MIT\nFiles: third_party\\cudnn_frontend\n For details, see the files concatenated below: third_party\\cudnn_frontend\\LICENSE.txt\n\nName: third_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\nLicense: MIT\nFiles: third_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\n For details, see the files concatenated below: third_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\\LICENSE.txt\n\nName: third_party\\cutlass\nLicense: BSD-3-Clause\nFiles: third_party\\cutlass\n For details, see the files concatenated below: third_party\\cutlass\\LICENSE.txt\n\nName: third_party\\eigen\nLicense: BSD-3-Clause\nFiles: third_party\\eigen\n For details, see the files concatenated below: third_party\\eigen\\COPYING.BSD\n\nName: third_party\\fbgemm\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\n For details, see the files concatenated below: third_party\\fbgemm\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\cpuinfo\nLicense: BSD-2-Clause\nFiles: third_party\\fbgemm\\third_party\\cpuinfo\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\cpuinfo\\LICENSE\n\nName: 
third_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\nLicense: BSD-2-Clause\nFiles: third_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\\third_party\\googletest\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\fbgemm\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\fbgemm\\third_party\\hipify_torch\nLicense: MIT\nFiles: third_party\\fbgemm\\third_party\\hipify_torch\n For details, see the files concatenated below: third_party\\fbgemm\\third_party\\hipify_torch\\LICENSE.txt\n\nName: third_party\\flatbuffers\nLicense: Apache-2.0\nFiles: third_party\\flatbuffers\n For details, see the files concatenated below: third_party\\flatbuffers\\LICENSE.txt\n\nName: third_party\\flatbuffers\\dart\nLicense: Apache-2.0\nFiles: third_party\\flatbuffers\\dart\n For details, see the files concatenated below: third_party\\flatbuffers\\dart\\LICENSE\n\nName: third_party\\flatbuffers\\swift\nLicense: Apache-2.0\nFiles: third_party\\flatbuffers\\swift\n For details, see the files concatenated below: third_party\\flatbuffers\\swift\\LICENSE\n\nName: third_party\\fmt\nLicense: MIT with exception\nFiles: third_party\\fmt\n For details, see the files concatenated below: third_party\\fmt\\LICENSE.rst\n\nName: third_party\\foxi\nLicense: MIT\nFiles: third_party\\foxi\n For details, see the files concatenated below: third_party\\foxi\\LICENSE\n\nName: third_party\\gemmlowp\\gemmlowp\nLicense: Apache-2.0\nFiles: third_party\\gemmlowp\\gemmlowp\n For details, see the files concatenated below: third_party\\gemmlowp\\gemmlowp\\LICENSE\n\nName: third_party\\gloo\nLicense: BSD-3-Clause\nFiles: third_party\\gloo\n For details, see the files concatenated below: third_party\\gloo\\LICENSE\n\nName: third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\googletest\n For details, see the files concatenated below: third_party\\googletest\\LICENSE\n\nName: third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\ideep\nLicense: MIT\nFiles: third_party\\ideep\n For details, see the files concatenated below: third_party\\ideep\\LICENSE\n\nName: third_party\\ideep\\mkl-dnn\nLicense: Apache-2.0\nFiles: third_party\\ideep\\mkl-dnn\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\LICENSE\n\nName: 
third_party\\ideep\\mkl-dnn\\tests\\gtest\nLicense: BSD-3-Clause\nFiles: third_party\\ideep\\mkl-dnn\\tests\\gtest\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\tests\\gtest\\LICENSE\n\nName: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\nLicense: Apache-2.0\nFiles: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\LICENSE\n\nName: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\nLicense: BSD-3-Clause\nFiles: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\n For details, see the files concatenated below: third_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\\LICENSE\n\nName: third_party\\ios-cmake\nLicense: BSD-3-Clause\nFiles: third_party\\ios-cmake\n For details, see the files concatenated below: third_party\\ios-cmake\\LICENSE\n\nName: third_party\\kineto\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\n For details, see the files concatenated below: third_party\\kineto\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\fmt\nLicense: MIT with exception\nFiles: third_party\\kineto\\libkineto\\third_party\\fmt\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\fmt\\LICENSE.rst\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\kineto\\libkineto\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\libkineto\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\kineto\\libkineto\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\kineto\\tb_plugin\nLicense: BSD-3-Clause\nFiles: third_party\\kineto\\tb_plugin\n For details, see the files concatenated below: third_party\\kineto\\tb_plugin\\LICENSE\n\nName: third_party\\miniz-2.1.0\nLicense: MIT\nFiles: third_party\\miniz-2.1.0\n For details, see the files concatenated below: third_party\\miniz-2.1.0\\LICENSE\n\nName: third_party\\nccl\\nccl\nLicense: BSD-3-Clause\nFiles: third_party\\nccl\\nccl\n For details, see the files concatenated below: third_party\\nccl\\nccl\\LICENSE.txt\n\nName: third_party\\neon2sse\nLicense: BSD-Source-Code\nFiles: third_party\\neon2sse\n For details, see the files concatenated below: third_party\\neon2sse\\LICENSE\n\nName: third_party\\nlohmann\\tests\\thirdparty\\doctest\nLicense: MIT\nFiles: third_party\\nlohmann\\tests\\thirdparty\\doctest\n For details, see the files concatenated below: third_party\\nlohmann\\tests\\thirdparty\\doctest\\LICENSE.txt\n\nName: 
third_party\\nlohmann\\tools\\cpplint\nLicense: BSD-3-Clause\nFiles: third_party\\nlohmann\\tools\\cpplint\n For details, see the files concatenated below: third_party\\nlohmann\\tools\\cpplint\\LICENSE\n\nName: third_party\\onnx\nLicense: Apache-2.0\nFiles: third_party\\onnx\n For details, see the files concatenated below: third_party\\onnx\\LICENSE\n\nName: third_party\\onnx-tensorrt\nLicense: MIT\nFiles: third_party\\onnx-tensorrt\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\LICENSE\n\nName: third_party\\onnx-tensorrt\\third_party\\onnx\nLicense: MIT\nFiles: third_party\\onnx-tensorrt\\third_party\\onnx\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\third_party\\onnx\\LICENSE\n\nName: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\benchmark\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\benchmark\\LICENSE\n\nName: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\pybind11\n For details, see the files concatenated below: third_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\pybind11\\LICENSE\n\nName: third_party\\onnx\\third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\onnx\\third_party\\benchmark\n For details, see the files concatenated below: third_party\\onnx\\third_party\\benchmark\\LICENSE\n\nName: third_party\\onnx\\third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\onnx\\third_party\\pybind11\n For details, see the files concatenated below: third_party\\onnx\\third_party\\pybind11\\LICENSE\n\nName: third_party\\protobuf\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\n For details, see the files concatenated below: third_party\\protobuf\\LICENSE\n\nName: third_party\\protobuf\\third_party\\benchmark\nLicense: Apache-2.0\nFiles: third_party\\protobuf\\third_party\\benchmark\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\benchmark\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\\third_party\\googletest\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\protobuf\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\protobuf\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\protobuf\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\protobuf\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\psimd\nLicense: MIT\nFiles: third_party\\psimd\n For details, see the files concatenated below: third_party\\psimd\\LICENSE\n\nName: third_party\\pthreadpool\nLicense: BSD-2-Clause\nFiles: 
third_party\\pthreadpool\n For details, see the files concatenated below: third_party\\pthreadpool\\LICENSE\n\nName: third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\pybind11\n For details, see the files concatenated below: third_party\\pybind11\\LICENSE\n\nName: third_party\\python-enum\\enum\nLicense: BSD-3-Clause\nFiles: third_party\\python-enum\\enum\n For details, see the files concatenated below: third_party\\python-enum\\enum\\LICENSE\n\nName: third_party\\python-peachpy\nLicense: BSD-2-Clause\nFiles: third_party\\python-peachpy\n For details, see the files concatenated below: third_party\\python-peachpy\\LICENSE.rst\n\nName: third_party\\python-six\nLicense: MIT\nFiles: third_party\\python-six\n For details, see the files concatenated below: third_party\\python-six\\LICENSE\n\nName: third_party\\sleef\nLicense: BSL-1.0\nFiles: third_party\\sleef\n For details, see the files concatenated below: third_party\\sleef\\LICENSE.txt\n\nName: third_party\\tbb\nLicense: Apache-2.0\nFiles: third_party\\tbb\n For details, see the files concatenated below: third_party\\tbb\\LICENSE\n\nName: third_party\\tensorpipe\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\n For details, see the files concatenated below: third_party\\tensorpipe\\LICENSE.txt\n\nName: third_party\\tensorpipe\\third_party\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\googletest\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\googletest\\googlemock\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\googletest\\googlemock\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\nLicense: Apache-2.0\nFiles: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\googletest\\googletest\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\googletest\\googletest\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\googletest\\googletest\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\libnop\nLicense: Apache-2.0\nFiles: third_party\\tensorpipe\\third_party\\libnop\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\libnop\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\libuv\nLicense: MIT\nFiles: third_party\\tensorpipe\\third_party\\libuv\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\libuv\\LICENSE\n\nName: third_party\\tensorpipe\\third_party\\pybind11\nLicense: BSD-3-Clause\nFiles: third_party\\tensorpipe\\third_party\\pybind11\n For details, see the files concatenated below: third_party\\tensorpipe\\third_party\\pybind11\\LICENSE\n\nName: third_party\\zstd\nLicense: BSD-3-Clause\nFiles: third_party\\zstd\n For details, see the files concatenated below: third_party\\zstd\\LICENSE\n\nthird_party\\FP16\\LICENSE\n------------------------\nThe MIT License (MIT)\n\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\nCopyright 2019 Google LLC\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of 
this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\FXdiv\\LICENSE\n-------------------------\nThe MIT License (MIT)\n\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2016-2017 Marat Dukhan\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\NNPACK\\LICENSE\n--------------------------\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2015-2017, Georgia Institute of Technology\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\QNNPACK\\LICENSE\n---------------------------\nBSD License\n\nFor QNNPACK software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\QNNPACK\\deps\\clog\\LICENSE\n-------------------------------------\nCopyright (C) 2018 Marat Dukhan\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\VulkanMemoryAllocator\\LICENSE.txt\n---------------------------------------------\nCopyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\nthird_party\\XNNPACK\\LICENSE\n---------------------------\nBSD License\n\nFor XNNPACK software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\nCopyright 2019 Google LLC\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\benchmark\\LICENSE\n-----------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\cpuinfo\\LICENSE\n---------------------------\nCopyright (c) 2019 Google LLC\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (C) 2012-2017 Georgia Institute of Technology\nCopyright (C) 2010-2012 Marat Dukhan\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\cpuinfo\\deps\\clog\\LICENSE\n-------------------------------------\nCopyright (C) 2018 Marat Dukhan\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\cudnn_frontend\\LICENSE.txt\n--------------------------------------\n/*\n * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */ \n\n\nthird_party\\cudnn_frontend\\include\\contrib\\nlohmann\\json\\LICENSE.txt\n--------------------------------------------------------------------\nMIT License \n\nCopyright (c) 2013-2021 Niels Lohmann\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nthird_party\\cutlass\\LICENSE.txt\n-------------------------------\nCopyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\nSPDX-License-Identifier: BSD-3-Clause\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\eigen\\COPYING.BSD\n-----------------------------\n/*\n Copyright (c) 2011, Intel Corporation. 
All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n be used to endorse or promote products derived from this software without\n specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n\n\nthird_party\\fbgemm\\LICENSE\n--------------------------\nBSD License\n\nFor FBGEMM software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\cpuinfo\\LICENSE\n----------------------------------------------\nCopyright (c) 2019 Google LLC\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (C) 2012-2017 Georgia Institute of Technology\nCopyright (C) 2010-2012 Marat Dukhan\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\cpuinfo\\deps\\clog\\LICENSE\n--------------------------------------------------------\nCopyright (C) 2018 Marat Dukhan\nCopyright (c) 2017-2018 Facebook Inc.\nCopyright (c) 2017 Georgia Institute of Technology\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\LICENSE\n-------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\googlemock\\LICENSE\n------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n------------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\fbgemm\\third_party\\googletest\\googletest\\LICENSE\n------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


third_party\fbgemm\third_party\hipify_torch\LICENSE.txt
-------------------------------------------------------
MIT License

Copyright (c) 2017 AMD Compute Libraries

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


third_party\flatbuffers\LICENSE.txt
-----------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [yyyy] [name of copyright owner]

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\flatbuffers\dart\LICENSE
------------------------------------
The code in lib/flat_buffers.dart is based on code that was releases under the
following license:

Copyright 2012, the Dart project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
 * Redistributions of source code must retain the above copyright
 notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above
 copyright notice, this list of conditions and the following
 disclaimer in the documentation and/or other materials provided
 with the distribution.
 * Neither the name of Google Inc. nor the names of its
 contributors may be used to endorse or promote products derived
 from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

To the extent permissible, the changes to that code and the other assets in
this package are licensed under the Apache2 license:


 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright 2014 Google Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\flatbuffers\swift\LICENSE
-------------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [yyyy] [name of copyright owner]

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\fmt\LICENSE.rst
---------------------------
Copyright (c) 2012 - present, Victor Zverovich

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

--- Optional exception to the license ---

As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into a machine-executable object form of such
source code, you may redistribute such embedded portions in such object form
without including the above copyright and permission notices.


third_party\foxi\LICENSE
------------------------
MIT License

Copyright (c) 2019 Lu Fang

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


third_party\gemmlowp\gemmlowp\LICENSE
-------------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [yyyy] [name of copyright owner]

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\gloo\LICENSE
------------------------
BSD License

For Gloo software

Copyright (c) 2017-present, Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
 list of conditions and the following disclaimer.

 * Redistributions in binary form must reproduce the above copyright notice,
 this list of conditions and the following disclaimer in the documentation
 and/or other materials provided with the distribution.

 * Neither the name Facebook nor the names of its contributors may be used to
 endorse or promote products derived from this software without specific
 prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


third_party\googletest\LICENSE
------------------------------
Copyright 2008, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

 * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
 * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


third_party\googletest\googlemock\scripts\generator\LICENSE
-----------------------------------------------------------

 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks. This License does not grant permission to use the trade
 names, trademarks, service marks, or product names of the Licensor,
 except as required for reasonable and customary use in describing the
 origin of the Work and reproducing the content of the NOTICE file.

 7. Disclaimer of Warranty. Unless required by applicable law or
 agreed to in writing, Licensor provides the Work (and each
 Contributor provides its Contributions) on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 implied, including, without limitation, any warranties or conditions
 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 PARTICULAR PURPOSE. You are solely responsible for determining the
 appropriateness of using or redistributing the Work and assume any
 risks associated with Your exercise of permissions under this License.

 8. Limitation of Liability. In no event and under no legal theory,
 whether in tort (including negligence), contract, or otherwise,
 unless required by applicable law (such as deliberate and grossly
 negligent acts) or agreed to in writing, shall any Contributor be
 liable to You for damages, including any direct, indirect, special,
 incidental, or consequential damages of any character arising as a
 result of this License or out of the use or inability to use the
 Work (including but not limited to damages for loss of goodwill,
 work stoppage, computer failure or malfunction, or any and all
 other commercial damages or losses), even if such Contributor
 has been advised of the possibility of such damages.

 9. Accepting Warranty or Additional Liability. While redistributing
 the Work or Derivative Works thereof, You may choose to offer,
 and charge a fee for, acceptance of support, warranty, indemnity,
 or other liability obligations and/or rights consistent with this
 License. However, in accepting such obligations, You may act only
 on Your own behalf and on Your sole responsibility, not on behalf
 of any other Contributor, and only if You agree to indemnify,
 defend, and hold each Contributor harmless for any liability
 incurred by, or claims asserted against, such Contributor by reason
 of your accepting any such warranty or additional liability.

 END OF TERMS AND CONDITIONS

 APPENDIX: How to apply the Apache License to your work.

 To apply the Apache License to your work, attach the following
 boilerplate notice, with the fields enclosed by brackets "[]"
 replaced with your own identifying information. (Don't include
 the brackets!) The text should be enclosed in the appropriate
 comment syntax for the file format. We also recommend that a
 file or class name and description of purpose be included on the
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

 Copyright [2007] Neal Norwitz
 Portions Copyright [2007] Google Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.


third_party\ideep\LICENSE
-------------------------
Copyright (c) 2018 Intel Corporation.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


third_party\ideep\mkl-dnn\LICENSE
---------------------------------
 Apache License
 Version 2.0, January 2004
 http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

 1. Definitions.

 "License" shall mean the terms and conditions for use, reproduction,
 and distribution as defined by Sections 1 through 9 of this document.

 "Licensor" shall mean the copyright owner or entity authorized by
 the copyright owner that is granting the License.

 "Legal Entity" shall mean the union of the acting entity and all
 other entities that control, are controlled by, or are under common
 control with that entity. For the purposes of this definition,
 "control" means (i) the power, direct or indirect, to cause the
 direction or management of such entity, whether by contract or
 otherwise, or (ii) ownership of fifty percent (50%) or more of the
 outstanding shares, or (iii) beneficial ownership of such entity.

 "You" (or "Your") shall mean an individual or Legal Entity
 exercising permissions granted by this License.

 "Source" form shall mean the preferred form for making modifications,
 including but not limited to software source code, documentation
 source, and configuration files.

 "Object" form shall mean any form resulting from mechanical
 transformation or translation of a Source form, including but
 not limited to compiled object code, generated documentation,
 and conversions to other media types.

 "Work" shall mean the work of authorship, whether in Source or
 Object form, made available under the License, as indicated by a
 copyright notice that is included in or attached to the work
 (an example is provided in the Appendix below).

 "Derivative Works" shall mean any work, whether in Source or Object
 form, that is based on (or derived from) the Work and for which the
 editorial revisions, annotations, elaborations, or other modifications
 represent, as a whole, an original work of authorship. For the purposes
 of this License, Derivative Works shall not include works that remain
 separable from, or merely link (or bind by name) to the interfaces of,
 the Work and Derivative Works thereof.

 "Contribution" shall mean any work of authorship, including
 the original version of the Work and any modifications or additions
 to that Work or Derivative Works thereof, that is intentionally
 submitted to Licensor for inclusion in the Work by the copyright owner
 or by an individual or Legal Entity authorized to submit on behalf of
 the copyright owner. For the purposes of this definition, "submitted"
 means any form of electronic, verbal, or written communication sent
 to the Licensor or its representatives, including but not limited to
 communication on electronic mailing lists, source code control systems,
 and issue tracking systems that are managed by, or on behalf of, the
 Licensor for the purpose of discussing and improving the Work, but
 excluding communication that is conspicuously marked or otherwise
 designated in writing by the copyright owner as "Not a Contribution."

 "Contributor" shall mean Licensor and any individual or Legal Entity
 on behalf of whom a Contribution has been received by Licensor and
 subsequently incorporated within the Work.

 2. Grant of Copyright License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 copyright license to reproduce, prepare Derivative Works of,
 publicly display, publicly perform, sublicense, and distribute the
 Work and such Derivative Works in Source or Object form.

 3. Grant of Patent License. Subject to the terms and conditions of
 this License, each Contributor hereby grants to You a perpetual,
 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 (except as stated in this section) patent license to make, have made,
 use, offer to sell, sell, import, and otherwise transfer the Work,
 where such license applies only to those patent claims licensable
 by such Contributor that are necessarily infringed by their
 Contribution(s) alone or by combination of their Contribution(s)
 with the Work to which such Contribution(s) was submitted. If You
 institute patent litigation against any entity (including a
 cross-claim or counterclaim in a lawsuit) alleging that the Work
 or a Contribution incorporated within the Work constitutes direct
 or contributory patent infringement, then any patent licenses
 granted to You under this License for that Work shall terminate
 as of the date such litigation is filed.

 4. Redistribution. You may reproduce and distribute copies of the
 Work or Derivative Works thereof in any medium, with or without
 modifications, and in Source or Object form, provided that You
 meet the following conditions:

 (a) You must give any other recipients of the Work or
 Derivative Works a copy of this License; and

 (b) You must cause any modified files to carry prominent notices
 stating that You changed the files; and

 (c) You must retain, in the Source form of any Derivative Works
 that You distribute, all copyright, patent, trademark, and
 attribution notices from the Source form of the Work,
 excluding those notices that do not pertain to any part of
 the Derivative Works; and

 (d) If the Work includes a "NOTICE" text file as part of its
 distribution, then any Derivative Works that You distribute must
 include a readable copy of the attribution notices contained
 within such NOTICE file, excluding those notices that do not
 pertain to any part of the Derivative Works, in at least one
 of the following places: within a NOTICE text file distributed
 as part of the Derivative Works; within the Source form or
 documentation, if provided along with the Derivative Works; or,
 within a display generated by the Derivative Works, if and
 wherever such third-party notices normally appear. The contents
 of the NOTICE file are for informational purposes only and
 do not modify the License. You may add Your own attribution
 notices within Derivative Works that You distribute, alongside
 or as an addendum to the NOTICE text from the Work, provided
 that such additional attribution notices cannot be construed
 as modifying the License.

 You may add Your own copyright statement to Your modifications and
 may provide additional or different license terms and conditions
 for use, reproduction, or distribution of Your modifications, or
 for any such Derivative Works as a whole, provided Your use,
 reproduction, and distribution of the Work otherwise complies with
 the conditions stated in this License.

 5. Submission of Contributions. Unless You explicitly state otherwise,
 any Contribution intentionally submitted for inclusion in the Work
 by You to the Licensor shall be under the terms and conditions of
 this License, without any additional terms or conditions.
 Notwithstanding the above, nothing herein shall supersede or modify
 the terms of any separate license agreement you may have executed
 with Licensor regarding such Contributions.

 6. Trademarks.
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"{}\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright 2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\n\nthird_party\\ideep\\mkl-dnn\\tests\\gtest\\LICENSE\n---------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\LICENSE\n----------------------------------------------------\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n ============================================================================\n\n Copyright 2016-2021 Intel Corporation\n Copyright 2018 YANDEX LLC\n Copyright 2019-2021 FUJITSU LIMITED\n Copyright 2020 Arm Limited and affiliates\n Copyright 2020 Codeplay Software Limited\n Copyright 2021 Alanna Tempest\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n This distribution includes third party software (\"third party programs\").\n This third party software, even if included with the distribution of\n the Intel software, may be governed by separate license terms, including\n without limitation, third party license terms, other Intel software license\n terms, and open source software license terms. 
These separate license terms\n govern your use of the third party programs as set forth in the\n \"THIRD-PARTY-PROGRAMS\" file.\n\n\nthird_party\\ideep\\mkl-dnn\\third_party\\oneDNN\\tests\\gtests\\gtest\\LICENSE\n-----------------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\ios-cmake\\LICENSE\n-----------------------------\nCopyright (c) 2011-2014, Andrew Fischer \nCopyright (c) 2016, Bogdan Cristea \nCopyright (c) 2017, Yangqing Jia \n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\nmay be used to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\LICENSE\n--------------------------\nBSD License\n\nFor Kineto software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nAll contributions by Microsoft:\nCopyright (c) Microsoft Corporation. (The Azure AI Platform team)\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\libkineto\\third_party\\fmt\\LICENSE.rst\n--------------------------------------------------------\nCopyright (c) 2012 - present, Victor Zverovich\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n--- Optional exception to the license ---\n\nAs an exception, if, as a result of your compiling your source code, portions\nof this Software are embedded into a machine-executable object form of such\nsource code, you may redistribute such embedded portions in such object form\nwithout including the above copyright and permission notices.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\LICENSE\n-----------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\LICENSE\n----------------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n----------------------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\kineto\\libkineto\\third_party\\googletest\\googletest\\LICENSE\n----------------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\kineto\\tb_plugin\\LICENSE\n------------------------------------\nBSD License\n\nFor Kineto software\n\nCopyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nAll contributions by Microsoft:\nCopyright (c) Microsoft Corporation. 
(The Azure AI Platform team)\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\miniz-2.1.0\\LICENSE\n-------------------------------\nCopyright 2013-2014 RAD Game Tools and Valve Software\nCopyright 2010-2014 Rich Geldreich and Tenacious Software LLC\n\nAll Rights Reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\nthird_party\\nccl\\nccl\\LICENSE.txt\n---------------------------------\n\n Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions\n are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of NVIDIA CORPORATION, Lawrence Berkeley National\n Laboratory, the U.S. 
Department of Energy, nor the names of their\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n The U.S. Department of Energy funded the development of this software\n under subcontract 7078610 with Lawrence Berkeley National Laboratory.\n\n\nThis code also includes files from the NVIDIA Tools Extension SDK project.\n\nSee:\n\n https://github.com/NVIDIA/NVTX\n\nfor more information and license details.\n\n\nthird_party\\neon2sse\\LICENSE\n----------------------------\ncreated by Victoria Zhislina, the Senior Application Engineer, Intel Corporation, victoria.zhislina@intel.com\n\n*** Copyright (C) 2012-2016 Intel Corporation. All rights reserved.\n\nIMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\n\nBy downloading, copying, installing or using the software you agree to this license.\nIf you do not agree to this license, do not download, install, copy or use the software.\n\n License Agreement\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n * The name of the copyright holders may not be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nThis software is provided by the copyright holders and contributors \"as is\" and\nany express or implied warranties, including, but not limited to, the implied\nwarranties of merchantability and fitness for a particular purpose are disclaimed.\nIn no event shall the Intel Corporation or contributors be liable for any direct,\nindirect, incidental, special, exemplary, or consequential damages\n(including, but not limited to, procurement of substitute goods or services;\nloss of use, data, or profits; or business interruption) however caused\nand on any theory of liability, whether in contract, strict liability,\nor tort (including negligence or otherwise) arising in any way out of\nthe use of this software, even if advised of the possibility of such damage.\n\n\nthird_party\\nlohmann\\tests\\thirdparty\\doctest\\LICENSE.txt\n---------------------------------------------------------\nThe MIT License (MIT)\n\nCopyright (c) 2016-2021 Viktor Kirilov\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software 
is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nthird_party\\nlohmann\\tools\\cpplint\\LICENSE\n------------------------------------------\ncpplint.py and its corresponding unit tests are Copyright (C) 2009 Google Inc.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\onnx\\LICENSE\n------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\onnx-tensorrt\\LICENSE\n---------------------------------\nMIT License\n\nCopyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\nCopyright (c) 2018 Open Neural Network Exchange\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\nthird_party\\onnx-tensorrt\\third_party\\onnx\\LICENSE\n--------------------------------------------------\nOpen Neural Network Exchange\n\nCopyright (c) Facebook, Inc. and Microsoft Corporation.\nAll rights reserved. \n\nMIT License\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"\"Software\"\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\onnx-tensorrt\\third_party\\onnx\\third_party\\benchmark\\LICENSE\n------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\onnx-tensorrt\third_party\onnx\third_party\pybind11\LICENSE\n-----------------------------------------------------------------------\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nYou are under no obligation whatsoever to provide any bug fixes, patches, or\nupgrades to the features, functionality or performance of the source code\n(\"Enhancements\") to anyone; however, if you choose to make your Enhancements\navailable either publicly, or directly to the author of this software, without\nimposing a separate written license agreement for such Enhancements, then you\nhereby grant the following license: a non-exclusive, royalty-free perpetual\nlicense to install, use, modify, prepare derivative works, incorporate into\nother computer software, distribute, and sublicense such enhancements or\nderivative works thereof, in binary and source code form.\n\n\nthird_party\\onnx\\third_party\\benchmark\\LICENSE\n----------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. 
For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. 
You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. 
In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\onnx\third_party\pybind11\LICENSE\n---------------------------------------------\nCopyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3.
Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n\n\nthird_party\\protobuf\\LICENSE\n----------------------------\nCopyright 2008 Google Inc. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nCode generated by the Protocol Buffer compiler is owned by the owner\nof the input file used when generating it. This code is not\nstandalone and requires a support library to be linked with it. This\nsupport library is itself covered by the above license.\n\n\nthird_party\\protobuf\\third_party\\benchmark\\LICENSE\n--------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. 
Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\protobuf\\third_party\\googletest\\LICENSE\n---------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\protobuf\\third_party\\googletest\\googlemock\\LICENSE\n--------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\protobuf\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n--------------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\protobuf\\third_party\\googletest\\googletest\\LICENSE\n--------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\psimd\\LICENSE\n-------------------------\nThe MIT License (MIT)\n\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2014-2017 Georgia Institute of Technology\nCopyright 2019 Google LLC\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\pthreadpool\\LICENSE\n-------------------------------\nCopyright 2019 Google LLC\nCopyright (c) 2017 Facebook Inc.\nCopyright (c) 2015-2017 Georgia Institute of Technology\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n\nthird_party\\pybind11\\LICENSE\n----------------------------\nCopyright (c) 2016 Wenzel Jakob , All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. 
Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n\n\nthird_party\\python-enum\\enum\\LICENSE\n------------------------------------\nCopyright (c) 2013, Ethan Furman.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n Redistributions of source code must retain the above\n copyright notice, this list of conditions and the\n following disclaimer.\n\n Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials\n provided with the distribution.\n\n Neither the name Ethan Furman nor the names of any\n contributors may be used to endorse or promote products\n derived from this software without specific prior written\n permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\python-peachpy\\LICENSE.rst\n--------------------------------------\n==============================\nPeachPy license (2-clause BSD)\n==============================\n\nCopyright (c) 2017, Facebook Inc.\nCopyright (c) 2013-2017, Georgia Institute of Technology\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\python-six\\LICENSE\n------------------------------\nCopyright (c) 2010-2017 Benjamin Peterson\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\nthird_party\\sleef\\LICENSE.txt\n-----------------------------\nBoost Software License - Version 1.0 - August 17th, 2003\n\nPermission is hereby granted, free of charge, to any person or organization\nobtaining a copy of the software and accompanying documentation covered by\nthis license (the \"Software\") to use, reproduce, display, distribute,\nexecute, and transmit the Software, and to prepare derivative works of the\nSoftware, and to permit third-parties to whom the Software is furnished to\ndo so, all subject to the following:\n\nThe copyright notices in the Software and this entire statement, including\nthe above license grant, this restriction and the following disclaimer,\nmust be included in all copies of the Software, in whole or in part, and\nall derivative works of the Software, unless such copies or derivative\nworks are solely in the form of machine-executable object code generated by\na source language processor.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT\nSHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE\nFOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\n\nthird_party\\tbb\\LICENSE\n-----------------------\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\tensorpipe\\LICENSE.txt\n----------------------------------\nBSD License\n\nFor TensorPipe software\n\nCopyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Meta nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\LICENSE\n-----------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\googlemock\\LICENSE\n----------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\googlemock\\scripts\\generator\\LICENSE\n----------------------------------------------------------------------------------\n\n Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n 1. Definitions.\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. 
For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n 3. Grant of Patent License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. 
The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n 6. Trademarks. This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. 
However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n END OF TERMS AND CONDITIONS\n\n APPENDIX: How to apply the Apache License to your work.\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n Copyright [2007] Neal Norwitz\n Portions Copyright [2007] Google Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\nthird_party\\tensorpipe\\third_party\\googletest\\googletest\\LICENSE\n----------------------------------------------------------------\nCopyright 2008, Google Inc.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nthird_party\\tensorpipe\\third_party\\libnop\\LICENSE\n-------------------------------------------------\nCopyright 2017 The Native Object Protocols Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\nthird_party\\tensorpipe\\third_party\\libuv\\LICENSE\n------------------------------------------------\nlibuv is licensed for use as follows:\n\n====\nCopyright (c) 2015-present libuv project contributors.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n====\n\nThis license applies to parts of libuv originating from the\nhttps://github.com/joyent/libuv repository:\n\n====\n\nCopyright Joyent, Inc. and other Node contributors. All rights reserved.\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n\n====\n\nThis license applies to all parts of libuv that are not externally\nmaintained libraries.\n\nThe externally maintained libraries used by libuv are:\n\n - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.\n\n - inet_pton and inet_ntop implementations, contained in src/inet.c, are\n copyright the Internet Systems Consortium, Inc., and licensed under the ISC\n license.\n\n - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three\n clause BSD license.\n\n - pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.\n Three clause BSD license.\n\n - android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design\n Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement\n n\u00b0 289016). Three clause BSD license.\n\n\nthird_party\\tensorpipe\\third_party\\pybind11\\LICENSE\n---------------------------------------------------\nCopyright (c) 2016 Wenzel Jakob , All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nPlease also refer to the file CONTRIBUTING.md, which clarifies licensing of\nexternal contributions to this project including patches, pull requests, etc.\n\n\nthird_party\\zstd\\LICENSE\n------------------------\nBSD License\n\nFor Zstandard software\n\nCopyright (c) 2016-present, Facebook, Inc. 
All rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n * Neither the name Facebook nor the names of its contributors may be used to\n endorse or promote products derived from this software without specific\n prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "torch-complex", "version": "0.4.3", "license": "Apache Software License", "text": "Copyright 2021 Naoyuki Kamo\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License."}, {"name": "tqdm", "version": "4.64.1", "license": "MIT License; Mozilla Public License 2.0 (MPL 2.0)", "text": "`tqdm` is a product of collaborative work.\nUnless otherwise stated, all authors (see commit logs) retain copyright\nfor their respective work, and release the work under the MIT licence\n(text below).\n\nExceptions or notable authors are listed below\nin reverse chronological order:\n\n* files: *\n MPLv2.0 2015-2021 (c) Casper da Costa-Luis\n [casperdcl](https://github.com/casperdcl).\n* files: tqdm/_tqdm.py\n MIT 2016 (c) [PR #96] on behalf of Google Inc.\n* files: tqdm/_tqdm.py setup.py README.rst MANIFEST.in .gitignore\n MIT 2013 (c) Noam Yorav-Raphael, original author.\n\n[PR #96]: https://github.com/tqdm/tqdm/pull/96\n\n\nMozilla Public Licence (MPL) v. 2.0 - Exhibit A\n-----------------------------------------------\n\nThis Source Code Form is subject to the terms of the\nMozilla Public License, v. 
2.0.\nIf a copy of the MPL was not distributed with this project,\nYou can obtain one at https://mozilla.org/MPL/2.0/.\n\n\nMIT License (MIT)\n-----------------\n\nCopyright (c) 2013 noamraph\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"}, {"name": "typeguard", "version": "2.13.3", "license": "MIT License", "text": "This is the MIT license: http://www.opensource.org/licenses/mit-license.php\n\nCopyright (c) Alex Gr\u00f6nholm\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this\nsoftware and associated documentation files (the \"Software\"), to deal in the Software\nwithout restriction, including without limitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of the Software, and to permit persons\nto whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or\nsubstantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\nPURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE\nFOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\nOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n"}, {"name": "typing-extensions", "version": "4.4.0", "license": "Python Software Foundation License", "text": "A. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC. Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team. In October of the same\nyear, the PythonLabs team moved to Digital Creations, which became\nZope Corporation. 
In 2001, the Python Software Foundation (PSF, see\nhttps://www.python.org/psf/) was formed, a non-profit organization\ncreated specifically to own Python-related Intellectual Property.\nZope Corporation was a sponsoring member of the PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition). Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n Release Derived Year Owner GPL-\n from compatible? (1)\n\n 0.9.0 thru 1.2 1991-1995 CWI yes\n 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes\n 1.6 1.5.2 2000 CNRI no\n 2.0 1.6 2000 BeOpen.com no\n 1.6.1 1.6 2001 CNRI yes (2)\n 2.1 2.0+1.6.1 2001 PSF no\n 2.0.1 2.0+1.6.1 2001 PSF yes\n 2.1.1 2.1+2.0.1 2001 PSF yes\n 2.1.2 2.1.1 2002 PSF yes\n 2.1.3 2.1.2 2002 PSF yes\n 2.2 and above 2.1.1 2001-now PSF yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n the GPL. All Python licenses, unlike the GPL, let you distribute\n a modified version without making your changes open source. The\n GPL-compatible licenses make it possible to combine Python with\n other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n because its license has a choice of law clause. According to\n CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;\nAll Rights Reserved\" are retained in Python alone or in any derivative version\nprepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee. This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions. Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee. This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party. As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. 
This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee. Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement. This Agreement together with\nPython 1.6.1 may be located on the internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013. This\nAgreement may also be obtained from a proxy server on the internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement. Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee. 
This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands. All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"}, {"name": "urllib3", "version": "1.26.13", "license": "MIT License", "text": "MIT License\n\nCopyright (c) 2008-2020 Andrey Petrov and contributors (see CONTRIBUTORS.txt)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"}, {"name": "uvicorn", "version": "0.15.0", "license": "BSD License", "text": "Copyright \u00a9 2017-present, [Encode OSS Ltd](http://www.encode.io/).\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "zipp", "version": "3.11.0", "license": "MIT License", "text": "Copyright Jason R. Coombs\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to\ndeal in the Software without restriction, including without limitation the\nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or\nsell copies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\nIN THE SOFTWARE.\n"}, {"name": "OpenBLAS", "version": null, "license": "BSD 3-clause license", "text": "Copyright (c) 2011-2014, The OpenBLAS Project\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n 1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n 2. 
Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in\n the documentation and/or other materials provided with the\n distribution.\n 3. Neither the name of the OpenBLAS project nor the names of \n its contributors may be used to endorse or promote products \n derived from this software without specific prior written \n permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\nUSE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "libsndfile-binaries", "version": "1.0.28", "license": "LGPL-2.1 license", "text": " GNU LESSER GENERAL PUBLIC LICENSE\n Version 2.1, February 1999\n\n Copyright (C) 1991, 1999 Free Software Foundation, Inc.\n 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n[This is the first released version of the Lesser GPL. It also counts\n as the successor of the GNU Library Public License, version 2, hence\n the version number 2.1.]\n\n Preamble\n\n The licenses for most software are designed to take away your\nfreedom to share and change it. By contrast, the GNU General Public\nLicenses are intended to guarantee your freedom to share and change\nfree software--to make sure the software is free for all its users.\n\n This license, the Lesser General Public License, applies to some\nspecially designated software packages--typically libraries--of the\nFree Software Foundation and other authors who decide to use it. You\ncan use it too, but we suggest you first think carefully about whether\nthis license or the ordinary General Public License is the better\nstrategy to use in any particular case, based on the explanations below.\n\n When we speak of free software, we are referring to freedom of use,\nnot price. Our General Public Licenses are designed to make sure that\nyou have the freedom to distribute copies of free software (and charge\nfor this service if you wish); that you receive source code or can get\nit if you want it; that you can change the software and use pieces of\nit in new free programs; and that you are informed that you can do\nthese things.\n\n To protect your rights, we need to make restrictions that forbid\ndistributors to deny you these rights or to ask you to surrender these\nrights. These restrictions translate to certain responsibilities for\nyou if you distribute copies of the library or if you modify it.\n\n For example, if you distribute copies of the library, whether gratis\nor for a fee, you must give the recipients all the rights that we gave\nyou. You must make sure that they, too, receive or can get the source\ncode. 
If you link other code with the library, you must provide\ncomplete object files to the recipients, so that they can relink them\nwith the library after making changes to the library and recompiling\nit. And you must show them these terms so they know their rights.\n\n We protect your rights with a two-step method: (1) we copyright the\nlibrary, and (2) we offer you this license, which gives you legal\npermission to copy, distribute and/or modify the library.\n\n To protect each distributor, we want to make it very clear that\nthere is no warranty for the free library. Also, if the library is\nmodified by someone else and passed on, the recipients should know\nthat what they have is not the original version, so that the original\nauthor's reputation will not be affected by problems that might be\nintroduced by others.\n\n Finally, software patents pose a constant threat to the existence of\nany free program. We wish to make sure that a company cannot\neffectively restrict the users of a free program by obtaining a\nrestrictive license from a patent holder. Therefore, we insist that\nany patent license obtained for a version of the library must be\nconsistent with the full freedom of use specified in this license.\n\n Most GNU software, including some libraries, is covered by the\nordinary GNU General Public License. This license, the GNU Lesser\nGeneral Public License, applies to certain designated libraries, and\nis quite different from the ordinary General Public License. We use\nthis license for certain libraries in order to permit linking those\nlibraries into non-free programs.\n\n When a program is linked with a library, whether statically or using\na shared library, the combination of the two is legally speaking a\ncombined work, a derivative of the original library. The ordinary\nGeneral Public License therefore permits such linking only if the\nentire combination fits its criteria of freedom. The Lesser General\nPublic License permits more lax criteria for linking other code with\nthe library.\n\n We call this license the \"Lesser\" General Public License because it\ndoes Less to protect the user's freedom than the ordinary General\nPublic License. It also provides other free software developers Less\nof an advantage over competing non-free programs. These disadvantages\nare the reason we use the ordinary General Public License for many\nlibraries. However, the Lesser license provides advantages in certain\nspecial circumstances.\n\n For example, on rare occasions, there may be a special need to\nencourage the widest possible use of a certain library, so that it becomes\na de-facto standard. To achieve this, non-free programs must be\nallowed to use the library. A more frequent case is that a free\nlibrary does the same job as widely used non-free libraries. In this\ncase, there is little to gain by limiting the free library to free\nsoftware only, so we use the Lesser General Public License.\n\n In other cases, permission to use a particular library in non-free\nprograms enables a greater number of people to use a large body of\nfree software. 
For example, permission to use the GNU C Library in\nnon-free programs enables many more people to use the whole GNU\noperating system, as well as its variant, the GNU/Linux operating\nsystem.\n\n Although the Lesser General Public License is Less protective of the\nusers' freedom, it does ensure that the user of a program that is\nlinked with the Library has the freedom and the wherewithal to run\nthat program using a modified version of the Library.\n\n The precise terms and conditions for copying, distribution and\nmodification follow. Pay close attention to the difference between a\n\"work based on the library\" and a \"work that uses the library\". The\nformer contains code derived from the library, whereas the latter must\nbe combined with the library in order to run.\n\n GNU LESSER GENERAL PUBLIC LICENSE\n TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n 0. This License Agreement applies to any software library or other\nprogram which contains a notice placed by the copyright holder or\nother authorized party saying it may be distributed under the terms of\nthis Lesser General Public License (also called \"this License\").\nEach licensee is addressed as \"you\".\n\n A \"library\" means a collection of software functions and/or data\nprepared so as to be conveniently linked with application programs\n(which use some of those functions and data) to form executables.\n\n The \"Library\", below, refers to any such software library or work\nwhich has been distributed under these terms. A \"work based on the\nLibrary\" means either the Library or any derivative work under\ncopyright law: that is to say, a work containing the Library or a\nportion of it, either verbatim or with modifications and/or translated\nstraightforwardly into another language. (Hereinafter, translation is\nincluded without limitation in the term \"modification\".)\n\n \"Source code\" for a work means the preferred form of the work for\nmaking modifications to it. For a library, complete source code means\nall the source code for all modules it contains, plus any associated\ninterface definition files, plus the scripts used to control compilation\nand installation of the library.\n\n Activities other than copying, distribution and modification are not\ncovered by this License; they are outside its scope. The act of\nrunning a program using the Library is not restricted, and output from\nsuch a program is covered only if its contents constitute a work based\non the Library (independent of the use of the Library in a tool for\nwriting it). Whether that is true depends on what the Library does\nand what the program that uses the Library does.\n \n 1. You may copy and distribute verbatim copies of the Library's\ncomplete source code as you receive it, in any medium, provided that\nyou conspicuously and appropriately publish on each copy an\nappropriate copyright notice and disclaimer of warranty; keep intact\nall the notices that refer to this License and to the absence of any\nwarranty; and distribute a copy of this License along with the\nLibrary.\n\n You may charge a fee for the physical act of transferring a copy,\nand you may at your option offer warranty protection in exchange for a\nfee.\n\n 2. 
You may modify your copy or copies of the Library or any portion\nof it, thus forming a work based on the Library, and copy and\ndistribute such modifications or work under the terms of Section 1\nabove, provided that you also meet all of these conditions:\n\n a) The modified work must itself be a software library.\n\n b) You must cause the files modified to carry prominent notices\n stating that you changed the files and the date of any change.\n\n c) You must cause the whole of the work to be licensed at no\n charge to all third parties under the terms of this License.\n\n d) If a facility in the modified Library refers to a function or a\n table of data to be supplied by an application program that uses\n the facility, other than as an argument passed when the facility\n is invoked, then you must make a good faith effort to ensure that,\n in the event an application does not supply such function or\n table, the facility still operates, and performs whatever part of\n its purpose remains meaningful.\n\n (For example, a function in a library to compute square roots has\n a purpose that is entirely well-defined independent of the\n application. Therefore, Subsection 2d requires that any\n application-supplied function or table used by this function must\n be optional: if the application does not supply it, the square\n root function must still compute square roots.)\n\nThese requirements apply to the modified work as a whole. If\nidentifiable sections of that work are not derived from the Library,\nand can be reasonably considered independent and separate works in\nthemselves, then this License, and its terms, do not apply to those\nsections when you distribute them as separate works. But when you\ndistribute the same sections as part of a whole which is a work based\non the Library, the distribution of the whole must be on the terms of\nthis License, whose permissions for other licensees extend to the\nentire whole, and thus to each and every part regardless of who wrote\nit.\n\nThus, it is not the intent of this section to claim rights or contest\nyour rights to work written entirely by you; rather, the intent is to\nexercise the right to control the distribution of derivative or\ncollective works based on the Library.\n\nIn addition, mere aggregation of another work not based on the Library\nwith the Library (or with a work based on the Library) on a volume of\na storage or distribution medium does not bring the other work under\nthe scope of this License.\n\n 3. You may opt to apply the terms of the ordinary GNU General Public\nLicense instead of this License to a given copy of the Library. To do\nthis, you must alter all the notices that refer to this License, so\nthat they refer to the ordinary GNU General Public License, version 2,\ninstead of to this License. (If a newer version than version 2 of the\nordinary GNU General Public License has appeared, then you can specify\nthat version instead if you wish.) Do not make any other change in\nthese notices.\n\n Once this change is made in a given copy, it is irreversible for\nthat copy, so the ordinary GNU General Public License applies to all\nsubsequent copies and derivative works made from that copy.\n\n This option is useful when you wish to copy part of the code of\nthe Library into a program that is not a library.\n\n 4. 
You may copy and distribute the Library (or a portion or\nderivative of it, under Section 2) in object code or executable form\nunder the terms of Sections 1 and 2 above provided that you accompany\nit with the complete corresponding machine-readable source code, which\nmust be distributed under the terms of Sections 1 and 2 above on a\nmedium customarily used for software interchange.\n\n If distribution of object code is made by offering access to copy\nfrom a designated place, then offering equivalent access to copy the\nsource code from the same place satisfies the requirement to\ndistribute the source code, even though third parties are not\ncompelled to copy the source along with the object code.\n\n 5. A program that contains no derivative of any portion of the\nLibrary, but is designed to work with the Library by being compiled or\nlinked with it, is called a \"work that uses the Library\". Such a\nwork, in isolation, is not a derivative work of the Library, and\ntherefore falls outside the scope of this License.\n\n However, linking a \"work that uses the Library\" with the Library\ncreates an executable that is a derivative of the Library (because it\ncontains portions of the Library), rather than a \"work that uses the\nlibrary\". The executable is therefore covered by this License.\nSection 6 states terms for distribution of such executables.\n\n When a \"work that uses the Library\" uses material from a header file\nthat is part of the Library, the object code for the work may be a\nderivative work of the Library even though the source code is not.\nWhether this is true is especially significant if the work can be\nlinked without the Library, or if the work is itself a library. The\nthreshold for this to be true is not precisely defined by law.\n\n If such an object file uses only numerical parameters, data\nstructure layouts and accessors, and small macros and small inline\nfunctions (ten lines or less in length), then the use of the object\nfile is unrestricted, regardless of whether it is legally a derivative\nwork. (Executables containing this object code plus portions of the\nLibrary will still fall under Section 6.)\n\n Otherwise, if the work is a derivative of the Library, you may\ndistribute the object code for the work under the terms of Section 6.\nAny executables containing that work also fall under Section 6,\nwhether or not they are linked directly with the Library itself.\n\n 6. As an exception to the Sections above, you may also combine or\nlink a \"work that uses the Library\" with the Library to produce a\nwork containing portions of the Library, and distribute that work\nunder terms of your choice, provided that the terms permit\nmodification of the work for the customer's own use and reverse\nengineering for debugging such modifications.\n\n You must give prominent notice with each copy of the work that the\nLibrary is used in it and that the Library and its use are covered by\nthis License. You must supply a copy of this License. If the work\nduring execution displays copyright notices, you must include the\ncopyright notice for the Library among them, as well as a reference\ndirecting the user to the copy of this License. 
Also, you must do one\nof these things:\n\n a) Accompany the work with the complete corresponding\n machine-readable source code for the Library including whatever\n changes were used in the work (which must be distributed under\n Sections 1 and 2 above); and, if the work is an executable linked\n with the Library, with the complete machine-readable \"work that\n uses the Library\", as object code and/or source code, so that the\n user can modify the Library and then relink to produce a modified\n executable containing the modified Library. (It is understood\n that the user who changes the contents of definitions files in the\n Library will not necessarily be able to recompile the application\n to use the modified definitions.)\n\n b) Use a suitable shared library mechanism for linking with the\n Library. A suitable mechanism is one that (1) uses at run time a\n copy of the library already present on the user's computer system,\n rather than copying library functions into the executable, and (2)\n will operate properly with a modified version of the library, if\n the user installs one, as long as the modified version is\n interface-compatible with the version that the work was made with.\n\n c) Accompany the work with a written offer, valid for at\n least three years, to give the same user the materials\n specified in Subsection 6a, above, for a charge no more\n than the cost of performing this distribution.\n\n d) If distribution of the work is made by offering access to copy\n from a designated place, offer equivalent access to copy the above\n specified materials from the same place.\n\n e) Verify that the user has already received a copy of these\n materials or that you have already sent this user a copy.\n\n For an executable, the required form of the \"work that uses the\nLibrary\" must include any data and utility programs needed for\nreproducing the executable from it. However, as a special exception,\nthe materials to be distributed need not include anything that is\nnormally distributed (in either source or binary form) with the major\ncomponents (compiler, kernel, and so on) of the operating system on\nwhich the executable runs, unless that component itself accompanies\nthe executable.\n\n It may happen that this requirement contradicts the license\nrestrictions of other proprietary libraries that do not normally\naccompany the operating system. Such a contradiction means you cannot\nuse both them and the Library together in an executable that you\ndistribute.\n\n 7. You may place library facilities that are a work based on the\nLibrary side-by-side in a single library together with other library\nfacilities not covered by this License, and distribute such a combined\nlibrary, provided that the separate distribution of the work based on\nthe Library and of the other library facilities is otherwise\npermitted, and provided that you do these two things:\n\n a) Accompany the combined library with a copy of the same work\n based on the Library, uncombined with any other library\n facilities. This must be distributed under the terms of the\n Sections above.\n\n b) Give prominent notice with the combined library of the fact\n that part of it is a work based on the Library, and explaining\n where to find the accompanying uncombined form of the same work.\n\n 8. You may not copy, modify, sublicense, link with, or distribute\nthe Library except as expressly provided under this License. 
Any\nattempt otherwise to copy, modify, sublicense, link with, or\ndistribute the Library is void, and will automatically terminate your\nrights under this License. However, parties who have received copies,\nor rights, from you under this License will not have their licenses\nterminated so long as such parties remain in full compliance.\n\n 9. You are not required to accept this License, since you have not\nsigned it. However, nothing else grants you permission to modify or\ndistribute the Library or its derivative works. These actions are\nprohibited by law if you do not accept this License. Therefore, by\nmodifying or distributing the Library (or any work based on the\nLibrary), you indicate your acceptance of this License to do so, and\nall its terms and conditions for copying, distributing or modifying\nthe Library or works based on it.\n\n 10. Each time you redistribute the Library (or any work based on the\nLibrary), the recipient automatically receives a license from the\noriginal licensor to copy, distribute, link with or modify the Library\nsubject to these terms and conditions. You may not impose any further\nrestrictions on the recipients' exercise of the rights granted herein.\nYou are not responsible for enforcing compliance by third parties with\nthis License.\n\n 11. If, as a consequence of a court judgment or allegation of patent\ninfringement or for any other reason (not limited to patent issues),\nconditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License. If you cannot\ndistribute so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you\nmay not distribute the Library at all. For example, if a patent\nlicense would not permit royalty-free redistribution of the Library by\nall those who receive copies directly or indirectly through you, then\nthe only way you could satisfy both it and this License would be to\nrefrain entirely from distribution of the Library.\n\nIf any portion of this section is held invalid or unenforceable under any\nparticular circumstance, the balance of the section is intended to apply,\nand the section as a whole is intended to apply in other circumstances.\n\nIt is not the purpose of this section to induce you to infringe any\npatents or other property right claims or to contest validity of any\nsuch claims; this section has the sole purpose of protecting the\nintegrity of the free software distribution system which is\nimplemented by public license practices. Many people have made\ngenerous contributions to the wide range of software distributed\nthrough that system in reliance on consistent application of that\nsystem; it is up to the author/donor to decide if he or she is willing\nto distribute software through any other system and a licensee cannot\nimpose that choice.\n\nThis section is intended to make thoroughly clear what is believed to\nbe a consequence of the rest of this License.\n\n 12. If the distribution and/or use of the Library is restricted in\ncertain countries either by patents or by copyrighted interfaces, the\noriginal copyright holder who places the Library under this License may add\nan explicit geographical distribution limitation excluding those countries,\nso that distribution is permitted only in or among countries not thus\nexcluded. 
In such case, this License incorporates the limitation as if\nwritten in the body of this License.\n\n 13. The Free Software Foundation may publish revised and/or new\nversions of the Lesser General Public License from time to time.\nSuch new versions will be similar in spirit to the present version,\nbut may differ in detail to address new problems or concerns.\n\nEach version is given a distinguishing version number. If the Library\nspecifies a version number of this License which applies to it and\n\"any later version\", you have the option of following the terms and\nconditions either of that version or of any later version published by\nthe Free Software Foundation. If the Library does not specify a\nlicense version number, you may choose any version ever published by\nthe Free Software Foundation.\n\n 14. If you wish to incorporate parts of the Library into other free\nprograms whose distribution conditions are incompatible with these,\nwrite to the author to ask for permission. For software which is\ncopyrighted by the Free Software Foundation, write to the Free\nSoftware Foundation; we sometimes make exceptions for this. Our\ndecision will be guided by the two goals of preserving the free status\nof all derivatives of our free software and of promoting the sharing\nand reuse of software generally.\n\n NO WARRANTY\n\n 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO\nWARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.\nEXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR\nOTHER PARTIES PROVIDE THE LIBRARY \"AS IS\" WITHOUT WARRANTY OF ANY\nKIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE\nLIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME\nTHE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN\nWRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY\nAND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU\nFOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR\nCONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE\nLIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING\nRENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A\nFAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF\nSUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGES.\n\n END OF TERMS AND CONDITIONS\n\n How to Apply These Terms to Your New Libraries\n\n If you develop a new library, and you want it to be of the greatest\npossible use to the public, we recommend making it free software that\neveryone can redistribute and change. You can do so by permitting\nredistribution under these terms (or, alternatively, under the terms of the\nordinary General Public License).\n\n To apply these terms, attach the following notices to the library. 
It is\nsafest to attach them to the start of each source file to most effectively\nconvey the exclusion of warranty; and each file should have at least the\n\"copyright\" line and a pointer to where the full notice is found.\n\n <one line to give the library's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n This library is free software; you can redistribute it and/or\n modify it under the terms of the GNU Lesser General Public\n License as published by the Free Software Foundation; either\n version 2.1 of the License, or (at your option) any later version.\n\n This library is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n Lesser General Public License for more details.\n\n You should have received a copy of the GNU Lesser General Public\n License along with this library; if not, write to the Free Software\n Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\nAlso add information on how to contact you by electronic and paper mail.\n\nYou should also get your employer (if you work as a programmer) or your\nschool, if any, to sign a \"copyright disclaimer\" for the library, if\nnecessary. Here is a sample; alter the names:\n\n Yoyodyne, Inc., hereby disclaims all copyright interest in the\n library `Frob' (a library for tweaking knobs) written by James Random Hacker.\n\n <signature of Ty Coon>, 1 April 1990\n Ty Coon, President of Vice\n\nThat's all there is to it!\n\n"}, {"name": "libogg", "version": "1.3.2", "license": "BSD 3-clause license", "text": "Copyright (c) 2002, Xiph.org Foundation\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n- Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n- Neither the name of the Xiph.org Foundation nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION\nOR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "libvorbis", "version": "1.3.5", "license": "BSD 3-clause license", "text": "Copyright (c) 2002-2008 Xiph.org Foundation\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n- Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n- Neither the name of the Xiph.org Foundation nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION\nOR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}, {"name": "FLAC", "version": "1.3.2", "license": "Xiph.org's BSD-like license", "text": "Copyright (C) 2000-2009 Josh Coalson\nCopyright (C) 2011-2016 Xiph.Org Foundation\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n- Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n\n- Neither the name of the Xiph.org Foundation nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"}] \ No newline at end of file diff --git a/voice_bridge/llvmlite/binding/llvmlite.dll b/voice_bridge/llvmlite/binding/llvmlite.dll new file mode 100644 index 0000000000000000000000000000000000000000..cead7471d07025e250dac95b82ea1ccf63479276 --- /dev/null +++ b/voice_bridge/llvmlite/binding/llvmlite.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79e8a65bf8444ecfe417e4db7ea813b0c1074bb16def5c17208bba29b160f7b0 +size 63802880 diff --git a/voice_bridge/markupsafe/_speedups.pyd b/voice_bridge/markupsafe/_speedups.pyd new file mode 100644 index 0000000000000000000000000000000000000000..48067ba24ab904cb2f31e4781f1b1979ba41df28 Binary files /dev/null and b/voice_bridge/markupsafe/_speedups.pyd differ diff --git a/voice_bridge/model/100epoch.pth b/voice_bridge/model/100epoch.pth new file mode 100644 index 0000000000000000000000000000000000000000..0f265dd2517354b6950ec5359440b0a314e5a49c --- /dev/null +++ b/voice_bridge/model/100epoch.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b393b091ed1da1ef2bd4ec1d067aecc94ed7a19b4c00d4ea16d0d3ff4d233471 +size 373275392 diff --git a/voice_bridge/model/config.yaml b/voice_bridge/model/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3bbb0942dbfe92a4355a4080133924634bb51b3 --- /dev/null +++ b/voice_bridge/model/config.yaml @@ -0,0 +1,401 @@ +config: ./conf/tuning/finetune_full_band_vits.yaml +print_config: false +log_level: INFO +dry_run: false +iterator_type: sequence +output_dir: exp/tts_full_band_vits +ngpu: 1 +seed: 777 +num_workers: 4 +num_att_plot: 3 +dist_backend: nccl +dist_init_method: env:// +dist_world_size: null +dist_rank: null +local_rank: 0 +dist_master_addr: null +dist_master_port: null +dist_launcher: null +multiprocessing_distributed: false +unused_parameters: true +sharded_ddp: false +cudnn_enabled: true +cudnn_benchmark: false +cudnn_deterministic: false +collect_stats: false +write_collected_feats: false +max_epoch: 100 +patience: null +val_scheduler_criterion: +- valid +- loss +early_stopping_criterion: +- valid +- loss +- min +best_model_criterion: +- - train + - total_count + - max +keep_nbest_models: 10 +nbest_averaging_interval: 0 +grad_clip: -1 +grad_clip_type: 2.0 +grad_noise: false +accum_grad: 1 +no_forward_run: false +resume: true +train_dtype: float32 +use_amp: false +log_interval: 50 +use_matplotlib: true +use_tensorboard: true +create_graph_in_tensorboard: false +use_wandb: false +wandb_project: null +wandb_id: null +wandb_entity: null +wandb_name: null +wandb_model_log_interval: -1 +detect_anomaly: false +pretrain_path: null +init_param: +- downloads/full_band_vits_accent_with_pause_pretrain/exp/tts_train_full_band_vits_raw_phn_jaconv_pyopenjtalk_accent_with_pause/train.total_count.ave_10best.pth:tts:tts +ignore_init_mismatch: false +freeze_param: [] +num_iters_per_epoch: 1000 +batch_size: 20 +valid_batch_size: null +batch_bins: 100000 +valid_batch_bins: null +train_shape_file: +- 
exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_accent_with_pause/train/text_shape.phn +- exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_accent_with_pause/train/speech_shape +valid_shape_file: +- exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_accent_with_pause/valid/text_shape.phn +- exp/tts_stats_raw_linear_spectrogram_phn_jaconv_pyopenjtalk_accent_with_pause/valid/speech_shape +batch_type: numel +valid_batch_type: null +fold_length: +- 150 +- 409600 +sort_in_batch: descending +sort_batch: descending +multiple_iterator: false +chunk_length: 500 +chunk_shift_ratio: 0.5 +num_cache_chunks: 1024 +chunk_excluded_key_prefixes: [] +train_data_path_and_name_and_type: +- - dump/44k/raw/tr_no_dev/text + - text + - text +- - dump/44k/raw/tr_no_dev/wav.scp + - speech + - sound +valid_data_path_and_name_and_type: +- - dump/44k/raw/dev/text + - text + - text +- - dump/44k/raw/dev/wav.scp + - speech + - sound +allow_variable_data_keys: false +max_cache_size: 0.0 +max_cache_fd: 32 +valid_max_cache_size: null +exclude_weight_decay: false +exclude_weight_decay_conf: {} +optim: adamw +optim_conf: + lr: 0.0001 + betas: + - 0.8 + - 0.99 + eps: 1.0e-09 + weight_decay: 0.0 +scheduler: exponentiallr +scheduler_conf: + gamma: 0.999875 +optim2: adamw +optim2_conf: + lr: 0.0001 + betas: + - 0.8 + - 0.99 + eps: 1.0e-09 + weight_decay: 0.0 +scheduler2: exponentiallr +scheduler2_conf: + gamma: 0.999875 +generator_first: false +token_list: +- <blank> +- <unk> +- '1' +- '2' +- '0' +- '3' +- '4' +- '-1' +- '5' +- a +- o +- '-2' +- i +- '-3' +- u +- e +- k +- n +- t +- '6' +- r +- '-4' +- s +- N +- m +- pau +- '7' +- sh +- d +- g +- w +- '8' +- U +- '-5' +- I +- cl +- h +- y +- b +- '9' +- j +- ts +- ch +- '-6' +- z +- p +- '-7' +- f +- ky +- ry +- '-8' +- gy +- '-9' +- hy +- ny +- '-10' +- by +- my +- '-11' +- '-12' +- '-13' +- py +- '-14' +- '-15' +- v +- '10' +- '-16' +- '-17' +- '11' +- '-21' +- '-20' +- '12' +- '-19' +- '13' +- '-18' +- '14' +- dy +- '15' +- ty +- '-22' +- '16' +- '18' +- '19' +- '17' +- <sos/eos> +odim: null +model_conf: {} +use_preprocessor: true +token_type: phn +bpemodel: null +non_linguistic_symbols: null +cleaner: jaconv +g2p: pyopenjtalk_accent_with_pause +feats_extract: linear_spectrogram +feats_extract_conf: + n_fft: 2048 + hop_length: 512 + win_length: null +normalize: null +normalize_conf: {} +tts: vits +tts_conf: + generator_type: vits_generator + generator_params: + hidden_channels: 192 + spks: -1 + global_channels: -1 + segment_size: 32 + text_encoder_attention_heads: 2 + text_encoder_ffn_expand: 4 + text_encoder_blocks: 6 + text_encoder_positionwise_layer_type: conv1d + text_encoder_positionwise_conv_kernel_size: 3 + text_encoder_positional_encoding_layer_type: rel_pos + text_encoder_self_attention_layer_type: rel_selfattn + text_encoder_activation_type: swish + text_encoder_normalize_before: true + text_encoder_dropout_rate: 0.1 + text_encoder_positional_dropout_rate: 0.0 + text_encoder_attention_dropout_rate: 0.1 + use_macaron_style_in_text_encoder: true + use_conformer_conv_in_text_encoder: false + text_encoder_conformer_kernel_size: -1 + decoder_kernel_size: 7 + decoder_channels: 512 + decoder_upsample_scales: + - 8 + - 8 + - 2 + - 2 + - 2 + decoder_upsample_kernel_sizes: + - 16 + - 16 + - 4 + - 4 + - 4 + decoder_resblock_kernel_sizes: + - 3 + - 7 + - 11 + decoder_resblock_dilations: + - - 1 + - 3 + - 5 + - - 1 + - 3 + - 5 + - - 1 + - 3 + - 5 + use_weight_norm_in_decoder: true + posterior_encoder_kernel_size: 5 + posterior_encoder_layers: 16 + 
posterior_encoder_stacks: 1 + posterior_encoder_base_dilation: 1 + posterior_encoder_dropout_rate: 0.0 + use_weight_norm_in_posterior_encoder: true + flow_flows: 4 + flow_kernel_size: 5 + flow_base_dilation: 1 + flow_layers: 4 + flow_dropout_rate: 0.0 + use_weight_norm_in_flow: true + use_only_mean_in_flow: true + stochastic_duration_predictor_kernel_size: 3 + stochastic_duration_predictor_dropout_rate: 0.5 + stochastic_duration_predictor_flows: 4 + stochastic_duration_predictor_dds_conv_layers: 3 + vocabs: 85 + aux_channels: 1025 + discriminator_type: hifigan_multi_scale_multi_period_discriminator + discriminator_params: + scales: 1 + scale_downsample_pooling: AvgPool1d + scale_downsample_pooling_params: + kernel_size: 4 + stride: 2 + padding: 2 + scale_discriminator_params: + in_channels: 1 + out_channels: 1 + kernel_sizes: + - 15 + - 41 + - 5 + - 3 + channels: 128 + max_downsample_channels: 1024 + max_groups: 16 + bias: true + downsample_scales: + - 2 + - 2 + - 4 + - 4 + - 1 + nonlinear_activation: LeakyReLU + nonlinear_activation_params: + negative_slope: 0.1 + use_weight_norm: true + use_spectral_norm: false + follow_official_norm: false + periods: + - 2 + - 3 + - 5 + - 7 + - 11 + period_discriminator_params: + in_channels: 1 + out_channels: 1 + kernel_sizes: + - 5 + - 3 + channels: 32 + downsample_scales: + - 3 + - 3 + - 3 + - 3 + - 1 + max_downsample_channels: 1024 + bias: true + nonlinear_activation: LeakyReLU + nonlinear_activation_params: + negative_slope: 0.1 + use_weight_norm: true + use_spectral_norm: false + generator_adv_loss_params: + average_by_discriminators: false + loss_type: mse + discriminator_adv_loss_params: + average_by_discriminators: false + loss_type: mse + feat_match_loss_params: + average_by_discriminators: false + average_by_layers: false + include_final_outputs: true + mel_loss_params: + fs: 44100 + n_fft: 2048 + hop_length: 512 + win_length: null + window: hann + n_mels: 80 + fmin: 0 + fmax: null + log_base: null + lambda_adv: 1.0 + lambda_mel: 45.0 + lambda_feat_match: 2.0 + lambda_dur: 1.0 + lambda_kl: 1.0 + sampling_rate: 44100 + cache_generator_outputs: true +pitch_extract: null +pitch_extract_conf: {} +pitch_normalize: null +pitch_normalize_conf: {} +energy_extract: null +energy_extract_conf: {} +energy_normalize: null +energy_normalize_conf: {} +required: +- output_dir +- token_list +version: '202301' +distributed: false diff --git a/voice_bridge/msvcp140.dll b/voice_bridge/msvcp140.dll new file mode 100644 index 0000000000000000000000000000000000000000..cafd558f5cb9638272f2880992a8bddedfe059b2 Binary files /dev/null and b/voice_bridge/msvcp140.dll differ diff --git a/voice_bridge/numba/_devicearray.pyd b/voice_bridge/numba/_devicearray.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e83258a2494746a18d9e1c628bfe17faab18a077 Binary files /dev/null and b/voice_bridge/numba/_devicearray.pyd differ diff --git a/voice_bridge/numba/_dispatcher.pyd b/voice_bridge/numba/_dispatcher.pyd new file mode 100644 index 0000000000000000000000000000000000000000..fb2a3a543830f9a959692ebbfb50a993f43002cb Binary files /dev/null and b/voice_bridge/numba/_dispatcher.pyd differ diff --git a/voice_bridge/numba/_dynfunc.pyd b/voice_bridge/numba/_dynfunc.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b6041e87544dd641d2205341b3593851d1e953cb Binary files /dev/null and b/voice_bridge/numba/_dynfunc.pyd differ diff --git a/voice_bridge/numba/_helperlib.pyd b/voice_bridge/numba/_helperlib.pyd new file mode 100644 index 
0000000000000000000000000000000000000000..7ad95fb1e4adfb16a2733a17b84dcb964491111c Binary files /dev/null and b/voice_bridge/numba/_helperlib.pyd differ diff --git a/voice_bridge/numba/core/runtime/_nrt_python.pyd b/voice_bridge/numba/core/runtime/_nrt_python.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f6a73b239ac9045f85ce26ee3739313cc56e74c6 Binary files /dev/null and b/voice_bridge/numba/core/runtime/_nrt_python.pyd differ diff --git a/voice_bridge/numba/core/typeconv/_typeconv.pyd b/voice_bridge/numba/core/typeconv/_typeconv.pyd new file mode 100644 index 0000000000000000000000000000000000000000..3e5e4c788e913508e2bacaf0fa8101053974f243 Binary files /dev/null and b/voice_bridge/numba/core/typeconv/_typeconv.pyd differ diff --git a/voice_bridge/numba/cuda/cudadrv/_extras.pyd b/voice_bridge/numba/cuda/cudadrv/_extras.pyd new file mode 100644 index 0000000000000000000000000000000000000000..69322621167d89260d9b1a27c3c0ded901e0f04f Binary files /dev/null and b/voice_bridge/numba/cuda/cudadrv/_extras.pyd differ diff --git a/voice_bridge/numba/experimental/jitclass/_box.pyd b/voice_bridge/numba/experimental/jitclass/_box.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f2d6f01df0d8ddafcad9ca7b43ebbe15229065ba Binary files /dev/null and b/voice_bridge/numba/experimental/jitclass/_box.pyd differ diff --git a/voice_bridge/numba/mviewbuf.pyd b/voice_bridge/numba/mviewbuf.pyd new file mode 100644 index 0000000000000000000000000000000000000000..ac02e6f3b456a987c968615e0def9b626ca68ffa Binary files /dev/null and b/voice_bridge/numba/mviewbuf.pyd differ diff --git a/voice_bridge/numba/np/ufunc/_internal.pyd b/voice_bridge/numba/np/ufunc/_internal.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e5793e093365c0c0845bd6946d4c01e85a5a8bb6 Binary files /dev/null and b/voice_bridge/numba/np/ufunc/_internal.pyd differ diff --git a/voice_bridge/numba/np/ufunc/omppool.pyd b/voice_bridge/numba/np/ufunc/omppool.pyd new file mode 100644 index 0000000000000000000000000000000000000000..cf61e66653132628ec099a414ae49ca5fd17aafe Binary files /dev/null and b/voice_bridge/numba/np/ufunc/omppool.pyd differ diff --git a/voice_bridge/numba/np/ufunc/workqueue.pyd b/voice_bridge/numba/np/ufunc/workqueue.pyd new file mode 100644 index 0000000000000000000000000000000000000000..ecb43ad4be1d6626b7d4ece6e20afc6b288877ff Binary files /dev/null and b/voice_bridge/numba/np/ufunc/workqueue.pyd differ diff --git a/voice_bridge/numpy/.libs/libopenblas.4SP5SUA7CBGXUEOC35YP2ASOICYYEQZZ.gfortran-win_amd64.dll b/voice_bridge/numpy/.libs/libopenblas.4SP5SUA7CBGXUEOC35YP2ASOICYYEQZZ.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..30b1ff9e4349032af476207e642a7ec5bc62d226 --- /dev/null +++ b/voice_bridge/numpy/.libs/libopenblas.4SP5SUA7CBGXUEOC35YP2ASOICYYEQZZ.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f0d72e5fd784f27097b21df7a0d0e93ce95a788bcd8fcff756a39698ffcd863 +size 34413408 diff --git a/voice_bridge/numpy/core/_multiarray_tests.pyd b/voice_bridge/numpy/core/_multiarray_tests.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0c80f8394549c6a7354867c03ca7bf8e47fe8e15 Binary files /dev/null and b/voice_bridge/numpy/core/_multiarray_tests.pyd differ diff --git a/voice_bridge/numpy/core/_multiarray_umath.pyd b/voice_bridge/numpy/core/_multiarray_umath.pyd new file mode 100644 index 
0000000000000000000000000000000000000000..a9345b7917dfdc881c7646e6c7b3f7845dfa9be8 --- /dev/null +++ b/voice_bridge/numpy/core/_multiarray_umath.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01f25df5998117120d277bb4746eff626b608e75cf733f2c750de362c7c34f68 +size 2817536 diff --git a/voice_bridge/numpy/fft/_pocketfft_internal.pyd b/voice_bridge/numpy/fft/_pocketfft_internal.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f1ef8cbc2a12d7faefad36181b5739b90ccb2d1e Binary files /dev/null and b/voice_bridge/numpy/fft/_pocketfft_internal.pyd differ diff --git a/voice_bridge/numpy/linalg/_umath_linalg.pyd b/voice_bridge/numpy/linalg/_umath_linalg.pyd new file mode 100644 index 0000000000000000000000000000000000000000..a4059896e296287ff29d6a728f86f25e256092f3 Binary files /dev/null and b/voice_bridge/numpy/linalg/_umath_linalg.pyd differ diff --git a/voice_bridge/numpy/linalg/lapack_lite.pyd b/voice_bridge/numpy/linalg/lapack_lite.pyd new file mode 100644 index 0000000000000000000000000000000000000000..fbebce482676f2ad8a91ec8a74a75704a8a22d34 Binary files /dev/null and b/voice_bridge/numpy/linalg/lapack_lite.pyd differ diff --git a/voice_bridge/numpy/random/_bounded_integers.pyd b/voice_bridge/numpy/random/_bounded_integers.pyd new file mode 100644 index 0000000000000000000000000000000000000000..77298c13100e0d729a2983bd41999cd867b608f9 Binary files /dev/null and b/voice_bridge/numpy/random/_bounded_integers.pyd differ diff --git a/voice_bridge/numpy/random/_common.pyd b/voice_bridge/numpy/random/_common.pyd new file mode 100644 index 0000000000000000000000000000000000000000..83c3623d0f13b92cb617636401f33c9e9b3473c8 Binary files /dev/null and b/voice_bridge/numpy/random/_common.pyd differ diff --git a/voice_bridge/numpy/random/_generator.pyd b/voice_bridge/numpy/random/_generator.pyd new file mode 100644 index 0000000000000000000000000000000000000000..12a618aa52e5494a87d973cbf435079c3e5e95fe Binary files /dev/null and b/voice_bridge/numpy/random/_generator.pyd differ diff --git a/voice_bridge/numpy/random/_mt19937.pyd b/voice_bridge/numpy/random/_mt19937.pyd new file mode 100644 index 0000000000000000000000000000000000000000..614ba3f69f2003fb3fe964e05ad7f1a7c7e63ee8 Binary files /dev/null and b/voice_bridge/numpy/random/_mt19937.pyd differ diff --git a/voice_bridge/numpy/random/_pcg64.pyd b/voice_bridge/numpy/random/_pcg64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..1d66f449b947b36974570b75dc2cfb19bb471aa6 Binary files /dev/null and b/voice_bridge/numpy/random/_pcg64.pyd differ diff --git a/voice_bridge/numpy/random/_philox.pyd b/voice_bridge/numpy/random/_philox.pyd new file mode 100644 index 0000000000000000000000000000000000000000..aa5db6c47cdf11ed9643380232956dcf2087ed49 Binary files /dev/null and b/voice_bridge/numpy/random/_philox.pyd differ diff --git a/voice_bridge/numpy/random/_sfc64.pyd b/voice_bridge/numpy/random/_sfc64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7b473e6813fa195fac3b2b2fbfb4f8bf9372f007 Binary files /dev/null and b/voice_bridge/numpy/random/_sfc64.pyd differ diff --git a/voice_bridge/numpy/random/bit_generator.pyd b/voice_bridge/numpy/random/bit_generator.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d98f6f7b575f702fa4f102e60a9a821317c87956 Binary files /dev/null and b/voice_bridge/numpy/random/bit_generator.pyd differ diff --git a/voice_bridge/numpy/random/mtrand.pyd b/voice_bridge/numpy/random/mtrand.pyd new file mode 100644 index 
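Many of the large binaries in this diff, including `_multiarray_umath.pyd` just above, are committed as Git LFS pointer files rather than raw bytes: three `key value` lines giving the spec version, a `sha256` object ID, and the byte size. A small illustrative parser (the `parse_lfs_pointer` helper is ours, not part of any LFS tooling), fed the pointer shown above:

```python
# Sketch: parse a Git LFS pointer of the kind added throughout this diff.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:01f25df5998117120d277bb4746eff626b608e75cf733f2c750de362c7c34f68
size 2817536"""

info = parse_lfs_pointer(pointer)
assert info["version"] == "https://git-lfs.github.com/spec/v1"
assert int(info["size"]) == 2817536   # ~2.8 MB for _multiarray_umath.pyd
```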
0000000000000000000000000000000000000000..0197675f54fab0a7b8d1f7fb282e7fd3b7e90e6f Binary files /dev/null and b/voice_bridge/numpy/random/mtrand.pyd differ diff --git a/voice_bridge/presets.yaml b/voice_bridge/presets.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f554e61cba1005bba8af9de5bda1f1740ef4dbe --- /dev/null +++ b/voice_bridge/presets.yaml @@ -0,0 +1,10 @@ +- id: 1 + name: ァンプルプγƒͺγ‚»γƒƒγƒˆ + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: 0 + speedScale: 1 + pitchScale: 0 + intonationScale: 1 + volumeScale: 1 + prePhonemeLength: 0.1 + postPhonemeLength: 0.1 diff --git a/voice_bridge/pydantic/annotated_types.pyd b/voice_bridge/pydantic/annotated_types.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e460361797191a7cef2e410327675d80c18cd491 Binary files /dev/null and b/voice_bridge/pydantic/annotated_types.pyd differ diff --git a/voice_bridge/pydantic/class_validators.pyd b/voice_bridge/pydantic/class_validators.pyd new file mode 100644 index 0000000000000000000000000000000000000000..649a940753b72f935fb34490a2c4445c2daa98b6 Binary files /dev/null and b/voice_bridge/pydantic/class_validators.pyd differ diff --git a/voice_bridge/pydantic/color.pyd b/voice_bridge/pydantic/color.pyd new file mode 100644 index 0000000000000000000000000000000000000000..8f505ca0ccf3a8f30e96591ce6bab6198d87ffd7 Binary files /dev/null and b/voice_bridge/pydantic/color.pyd differ diff --git a/voice_bridge/pydantic/config.pyd b/voice_bridge/pydantic/config.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0f8c385bd535b497b9bc8d1e7dc8479c0fcf6293 Binary files /dev/null and b/voice_bridge/pydantic/config.pyd differ diff --git a/voice_bridge/pydantic/dataclasses.pyd b/voice_bridge/pydantic/dataclasses.pyd new file mode 100644 index 0000000000000000000000000000000000000000..751740e4423d2a84150d6b99caa52a3210a7f894 Binary files /dev/null and b/voice_bridge/pydantic/dataclasses.pyd differ diff --git a/voice_bridge/pydantic/datetime_parse.pyd b/voice_bridge/pydantic/datetime_parse.pyd new file mode 100644 index 0000000000000000000000000000000000000000..37c9f2f2676760b870ae92378f4794e4556be03c Binary files /dev/null and b/voice_bridge/pydantic/datetime_parse.pyd differ diff --git a/voice_bridge/pydantic/decorator.pyd b/voice_bridge/pydantic/decorator.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9ed395d6dd16bebbef6bcea15fbe7b360c172eb2 Binary files /dev/null and b/voice_bridge/pydantic/decorator.pyd differ diff --git a/voice_bridge/pydantic/env_settings.pyd b/voice_bridge/pydantic/env_settings.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0338bacb9ec0da2321a02a19d6b619d83d6481c2 Binary files /dev/null and b/voice_bridge/pydantic/env_settings.pyd differ diff --git a/voice_bridge/pydantic/error_wrappers.pyd b/voice_bridge/pydantic/error_wrappers.pyd new file mode 100644 index 0000000000000000000000000000000000000000..882bbb255ac14d8437afe5dfa449ce28ca16c4ea Binary files /dev/null and b/voice_bridge/pydantic/error_wrappers.pyd differ diff --git a/voice_bridge/pydantic/errors.pyd b/voice_bridge/pydantic/errors.pyd new file mode 100644 index 0000000000000000000000000000000000000000..c03e56babb9b02e4784beaff022106cde05a220c Binary files /dev/null and b/voice_bridge/pydantic/errors.pyd differ diff --git a/voice_bridge/pydantic/fields.pyd b/voice_bridge/pydantic/fields.pyd new file mode 100644 index 0000000000000000000000000000000000000000..619a80af097ddab05027ea6434c00a53dd9df8cf 
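The `presets.yaml` added above defines a single default preset: a speaker UUID plus style ID and five synthesis knobs (speed, pitch, intonation, volume scales, and pre/post-phoneme silence lengths in seconds). A sketch of reading it with PyYAML, assuming the layout shown; the engine's own loader may validate these fields differently:

```python
# Sketch: load voice_bridge/presets.yaml (layout as shown above).
import yaml

with open("voice_bridge/presets.yaml", encoding="utf-8") as f:
    presets = yaml.safe_load(f)

preset = presets[0]
assert preset["id"] == 1
assert preset["speedScale"] == 1          # 1 = unmodified speaking rate
assert preset["prePhonemeLength"] == 0.1  # 100 ms of leading silence
print(preset["speaker_uuid"], preset["style_id"])
```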
Binary files /dev/null and b/voice_bridge/pydantic/fields.pyd differ diff --git a/voice_bridge/pydantic/json.pyd b/voice_bridge/pydantic/json.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b0a54dabd6c5ef1295193ccd0f78b21ab75f3039 Binary files /dev/null and b/voice_bridge/pydantic/json.pyd differ diff --git a/voice_bridge/pydantic/main.pyd b/voice_bridge/pydantic/main.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d50027bd4c8069215ffd0df5e9360c8d16d0c50a Binary files /dev/null and b/voice_bridge/pydantic/main.pyd differ diff --git a/voice_bridge/pydantic/mypy.pyd b/voice_bridge/pydantic/mypy.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9ce90878c3ddf5b038c92fb43fa2619b107daed5 Binary files /dev/null and b/voice_bridge/pydantic/mypy.pyd differ diff --git a/voice_bridge/pydantic/networks.pyd b/voice_bridge/pydantic/networks.pyd new file mode 100644 index 0000000000000000000000000000000000000000..6e8109260ac62161772bc1d5020be269e84b8210 Binary files /dev/null and b/voice_bridge/pydantic/networks.pyd differ diff --git a/voice_bridge/pydantic/parse.pyd b/voice_bridge/pydantic/parse.pyd new file mode 100644 index 0000000000000000000000000000000000000000..4b6a9e5e25244146caf52423bab80d70099938cd Binary files /dev/null and b/voice_bridge/pydantic/parse.pyd differ diff --git a/voice_bridge/pydantic/schema.pyd b/voice_bridge/pydantic/schema.pyd new file mode 100644 index 0000000000000000000000000000000000000000..46aedbf6e3be8cbf606c7130026a23df38b5de79 Binary files /dev/null and b/voice_bridge/pydantic/schema.pyd differ diff --git a/voice_bridge/pydantic/tools.pyd b/voice_bridge/pydantic/tools.pyd new file mode 100644 index 0000000000000000000000000000000000000000..dcce8c392ed6212eba580f1d752be100ee4ba2fd Binary files /dev/null and b/voice_bridge/pydantic/tools.pyd differ diff --git a/voice_bridge/pydantic/types.pyd b/voice_bridge/pydantic/types.pyd new file mode 100644 index 0000000000000000000000000000000000000000..3322d8ccc4c54848f77eac3ebb2439135823c4ff Binary files /dev/null and b/voice_bridge/pydantic/types.pyd differ diff --git a/voice_bridge/pydantic/typing.pyd b/voice_bridge/pydantic/typing.pyd new file mode 100644 index 0000000000000000000000000000000000000000..a8d263df0ff248e8b6b39bb5bd1e2ad56072840d Binary files /dev/null and b/voice_bridge/pydantic/typing.pyd differ diff --git a/voice_bridge/pydantic/utils.pyd b/voice_bridge/pydantic/utils.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0210ff96ed1a624a04beceb26200ce9a7a6c0035 Binary files /dev/null and b/voice_bridge/pydantic/utils.pyd differ diff --git a/voice_bridge/pydantic/validators.pyd b/voice_bridge/pydantic/validators.pyd new file mode 100644 index 0000000000000000000000000000000000000000..818fb72613a38badb4a0d8222dc5f355fee65b17 Binary files /dev/null and b/voice_bridge/pydantic/validators.pyd differ diff --git a/voice_bridge/pydantic/version.pyd b/voice_bridge/pydantic/version.pyd new file mode 100644 index 0000000000000000000000000000000000000000..abce4ec22def3dfb15ef3cb305d7e28144723f6e Binary files /dev/null and b/voice_bridge/pydantic/version.pyd differ diff --git a/voice_bridge/pyexpat.pyd b/voice_bridge/pyexpat.pyd new file mode 100644 index 0000000000000000000000000000000000000000..2c5fb1d1fb7ae89b6268ce5358ff568a0982298d Binary files /dev/null and b/voice_bridge/pyexpat.pyd differ diff --git a/voice_bridge/pyopenjtalk/htsengine.pyd b/voice_bridge/pyopenjtalk/htsengine.pyd new file mode 100644 index 
0000000000000000000000000000000000000000..9d3581947254e9204fd4894896dd9202364a6e8e Binary files /dev/null and b/voice_bridge/pyopenjtalk/htsengine.pyd differ diff --git a/voice_bridge/pyopenjtalk/htsvoice/LICENSE_mei_normal.htsvoice b/voice_bridge/pyopenjtalk/htsvoice/LICENSE_mei_normal.htsvoice new file mode 100644 index 0000000000000000000000000000000000000000..753611721aea5b6ab7f713229c04cdbf8e63dff5 --- /dev/null +++ b/voice_bridge/pyopenjtalk/htsvoice/LICENSE_mei_normal.htsvoice @@ -0,0 +1,41 @@ +# ----------------------------------------------------------------- # +# HTS Voice "Mei" # +# released by MMDAgent Project Team # +# http://www.mmdagent.jp/ # +# ----------------------------------------------------------------- # +# # +# Copyright (c) 2009-2013 Nagoya Institute of Technology # +# Department of Computer Science # +# # +# Some rights reserved. # +# # +# This work is licensed under the Creative Commons Attribution 3.0 # +# license. # +# # +# You are free: # +# * to Share - to copy, distribute and transmit the work # +# * to Remix - to adapt the work # +# Under the following conditions: # +# * Attribution - You must attribute the work in the manner # +# specified by the author or licensor (but not in any way that # +# suggests that they endorse you or your use of the work). # +# With the understanding that: # +# * Waiver - Any of the above conditions can be waived if you get # +# permission from the copyright holder. # +# * Public Domain - Where the work or any of its elements is in # +# the public domain under applicable law, that status is in no # +# way affected by the license. # +# * Other Rights - In no way are any of the following rights # +# affected by the license: # +# - Your fair dealing or fair use rights, or other applicable # +# copyright exceptions and limitations; # +# - The author's moral rights; # +# - Rights other persons may have either in the work itself or # +# in how the work is used, such as publicity or privacy # +# rights. # +# * Notice - For any reuse or distribution, you must make clear to # +# others the license terms of this work. The best way to do this # +# is with a link to this web page. # +# # +# See http://creativecommons.org/ for details. # +# ----------------------------------------------------------------- # diff --git a/voice_bridge/pyopenjtalk/htsvoice/README.md b/voice_bridge/pyopenjtalk/htsvoice/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6dff758c2495c69157ea84c05aff9a114eeb3f2c --- /dev/null +++ b/voice_bridge/pyopenjtalk/htsvoice/README.md @@ -0,0 +1,3 @@ +# LICENSE for test data + +- [LICENSE_mei](LICENSE_mei_normal.htsvoice): LICENSE for mei_normal.voice diff --git a/voice_bridge/pyopenjtalk/htsvoice/mei_normal.htsvoice b/voice_bridge/pyopenjtalk/htsvoice/mei_normal.htsvoice new file mode 100644 index 0000000000000000000000000000000000000000..edcb095075784ff19f072b8af8a491eeedaabd47 Binary files /dev/null and b/voice_bridge/pyopenjtalk/htsvoice/mei_normal.htsvoice differ diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/COPYING b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..05d9789fde8883f09b0b9a814a53e6000346e964 --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/COPYING @@ -0,0 +1,100 @@ +Copyright (c) 2009, Nara Institute of Science and Technology, Japan. + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +Neither the name of the Nara Institute of Science and Technology +(NAIST) nor the names of its contributors may be used to endorse or +promote products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Copyright (c) 2011-2017, The UniDic Consortium +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + + * Neither the name of the UniDic Consortium nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* ----------------------------------------------------------------- */ +/* The Japanese TTS System "Open JTalk" */ +/* developed by HTS Working Group */ +/* http://open-jtalk.sourceforge.net/ */ +/* ----------------------------------------------------------------- */ +/* */ +/* Copyright (c) 2008-2016 Nagoya Institute of Technology */ +/* Department of Computer Science */ +/* */ +/* All rights reserved. 
*/ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* - Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* - Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials provided */ +/* with the distribution. */ +/* - Neither the name of the HTS working group nor the names of its */ +/* contributors may be used to endorse or promote products derived */ +/* from this software without specific prior written permission. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */ +/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */ +/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */ +/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */ +/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */ +/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */ +/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */ +/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* ----------------------------------------------------------------- */ diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/char.bin b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/char.bin new file mode 100644 index 0000000000000000000000000000000000000000..9f6369901c7383d9038c9c49939279e47f6a4db9 --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/char.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:888ee94c5a8a7a26d24ab3f1b7155441351954fd51ea06b4a2f78bd742492b2f +size 262496 diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/left-id.def b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/left-id.def new file mode 100644 index 0000000000000000000000000000000000000000..51c269688b9591d76eab882b6339c83c960b3e5b --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/left-id.def @@ -0,0 +1,1377 @@ +0 BOS/EOS,*,*,*,*,*,BOS/EOS +1 そγδ»–,ι–“ζŠ•,*,*,*,*,* +2 フィラー,*,*,*,*,*,* +3 ζ„Ÿε‹•θ©ž,*,*,*,*,*,* +4 記号,γ‚’γƒ«γƒ•γ‚‘γƒ™γƒƒγƒˆ,*,*,*,*,* +5 記号,δΈ€θˆ¬,*,*,*,*,* +6 記号,括弧開,*,*,*,*,BOS/EOS +7 記号,括弧閉,*,*,*,*,BOS/EOS +8 記号,ε₯η‚Ή,*,*,*,*,BOS/EOS +9 記号,η©Ίη™½,*,*,*,*,* +10 記号,θͺ­η‚Ή,*,*,*,*,* +11 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,*,* +12 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,* +13 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,焑い +14 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,* +15 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,焑い +16 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,* +17 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,焑い +18 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,* +19 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,焑い +20 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,* +21 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,焑い +22 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,* +23 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,焑い +24 
ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +25 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,焑い +26 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,* +27 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,焑い +28 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +29 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,焑い +30 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,* +31 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,焑い +32 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,* +33 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,焑い +34 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,* +35 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,焑い +36 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,* +37 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,焑い +38 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,* +39 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,* +40 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,* +41 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,* +42 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,* +43 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,* +44 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +45 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,* +46 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +47 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,* +48 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,* +49 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,* +50 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,* +51 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,δΈε€‰εŒ–εž‹,基本归,* +52 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,* +53 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,γŸγ‚‰γ—γ„ +54 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,臭い +55 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,* +56 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,γŸγ‚‰γ—γ„ +57 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,臭い +58 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,* +59 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,γŸγ‚‰γ—γ„ +60 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,臭い +61 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,* +62 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,γŸγ‚‰γ—γ„ +63 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,臭い +64 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,* +65 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,γŸγ‚‰γ—γ„ +66 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,臭い +67 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,* +68 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,γŸγ‚‰γ—γ„ +69 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,臭い +70 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +71 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,γŸγ‚‰γ—γ„ +72 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,臭い +73 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,* +74 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,γŸγ‚‰γ—γ„ +75 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,臭い +76 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +77 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,γŸγ‚‰γ—γ„ +78 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,臭い +79 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,* +80 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,γŸγ‚‰γ—γ„ +81 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,臭い +82 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,* +83 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,γŸγ‚‰γ—γ„ +84 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,臭い +85 
ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,* +86 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,γŸγ‚‰γ—γ„ +87 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,臭い +88 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,* +89 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,γŸγ‚‰γ—γ„ +90 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,臭い +91 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,* +92 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,γŸγ‚‰γ—γ„ +93 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,* +94 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,γŸγ‚‰γ—γ„ +95 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,* +96 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,γŸγ‚‰γ—γ„ +97 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,* +98 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,γŸγ‚‰γ—γ„ +99 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,* +100 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,γŸγ‚‰γ—γ„ +101 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,* +102 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,γŸγ‚‰γ—γ„ +103 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +104 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,γŸγ‚‰γ—γ„ +105 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,* +106 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,γŸγ‚‰γ—γ„ +107 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +108 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,γŸγ‚‰γ—γ„ +109 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,* +110 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,γŸγ‚‰γ—γ„ +111 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,* +112 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,γŸγ‚‰γ—γ„ +113 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,* +114 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,γŸγ‚‰γ—γ„ +115 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,* +116 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,γŸγ‚‰γ—γ„ +117 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,*,* +118 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,* +119 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,難い +120 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,良い +121 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,* +122 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,難い +123 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,良い +124 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,* +125 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,難い +126 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,良い +127 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,* +128 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,難い +129 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,良い +130 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,難い +131 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,良い +132 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,* +133 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,難い +134 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,良い +135 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +136 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,難い +137 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,良い +138 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,* +139 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,難い +140 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,良い +141 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +142 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,難い +143 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,良い +144 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,* +145 
ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,難い +146 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,良い +147 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,* +148 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,難い +149 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,良い +150 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,* +151 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,難い +152 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,良い +153 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,* +154 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,難い +155 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,良い +156 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,欲しい +157 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,欲しい +158 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,欲しい +159 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,欲しい +160 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,欲しい +161 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,欲しい +162 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,欲しい +163 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,欲しい +164 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,欲しい +165 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,欲しい +166 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,欲しい +167 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,欲しい +168 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,欲しい +169 助詞,格助詞,δΈ€θˆ¬,*,*,*,から +170 助詞,格助詞,δΈ€θˆ¬,*,*,*,が +171 助詞,格助詞,δΈ€θˆ¬,*,*,*,぀ +172 助詞,格助詞,δΈ€θˆ¬,*,*,*,で +173 助詞,格助詞,δΈ€θˆ¬,*,*,*,と +174 助詞,格助詞,δΈ€θˆ¬,*,*,*,に +175 助詞,格助詞,δΈ€θˆ¬,*,*,*,にて +176 助詞,格助詞,δΈ€θˆ¬,*,*,*,だ+177 助詞,格助詞,δΈ€θˆ¬,*,*,*,へ +178 助詞,格助詞,δΈ€θˆ¬,*,*,*,γ‚ˆγ‚Š +179 助詞,格助詞,δΈ€θˆ¬,*,*,*,γ‚’ +180 助詞,格助詞,δΈ€θˆ¬,*,*,*,γ‚“ +181 助詞,格助詞,δΈ€θˆ¬,*,*,*,デ +182 助詞,格助詞,δΈ€θˆ¬,*,*,*,γƒŽ +183 助詞,格助詞,δΈ€θˆ¬,*,*,*,γƒ˜ +184 助詞,格助詞,δΈ€θˆ¬,*,*,*,ヲ +185 助詞,格助詞,δΈ€θˆ¬,*,*,*,δΉ‹ +186 助詞,格助詞,引用,*,*,*,っと +187 助詞,格助詞,引用,*,*,*,と +188 助詞,格助詞,ι€£θͺž,*,*,*,γ˜γ‚ƒ +189 助詞,格助詞,ι€£θͺž,*,*,*,っけゅう +190 助詞,格助詞,ι€£θͺž,*,*,*,って +191 助詞,格助詞,ι€£θͺž,*,*,*,っていう +192 助詞,格助詞,ι€£θͺž,*,*,*,ってγͺ +193 助詞,格助詞,ι€£θͺž,*,*,*,て +194 助詞,格助詞,ι€£θͺž,*,*,*,ていう +195 助詞,格助詞,ι€£θͺž,*,*,*,といいます +196 助詞,格助詞,ι€£θͺž,*,*,*,という +197 助詞,格助詞,ι€£θͺž,*,*,*,γ¨γ„γ£γŸ +198 助詞,格助詞,ι€£θͺž,*,*,*,といち +199 助詞,格助詞,ι€£θͺž,*,*,*,とかいいます +200 助詞,格助詞,ι€£θͺž,*,*,*,とかいう +201 助詞,格助詞,ι€£θͺž,*,*,*,とかいち +202 助詞,格助詞,ι€£θͺž,*,*,*,として +203 助詞,格助詞,ι€£θͺž,*,*,*,γ¨γ—γΎγ—γŸγ‚‰ +204 助詞,格助詞,ι€£θͺž,*,*,*,としまして +205 助詞,格助詞,ι€£θͺž,*,*,*,とともに +206 助詞,格助詞,ι€£θͺž,*,*,*,と共に +207 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ£γ¦ +208 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚Š +209 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ—γ¦ +210 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ™ +211 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚‹ +212 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŠγ„γ¦ +213 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŠγγΎγ—γ¦ +214 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŠγ‘γ‚‹ +215 助詞,格助詞,ι€£θͺž,*,*,*,にかけ +216 助詞,格助詞,ι€£θͺž,*,*,*,にかけて +217 助詞,格助詞,ι€£θͺž,*,*,*,にかけまして +218 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ—γ¦ +219 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ—γΎγ—γ¦ +220 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ—γΎγ™ +221 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ™γ‚‹ +222 助詞,格助詞,ι€£θͺž,*,*,*,に぀いて +223 助詞,格助詞,ι€£θͺž,*,*,*,に぀き +224 助詞,格助詞,ι€£θͺž,*,*,*,に぀きまして +225 助詞,格助詞,ι€£θͺž,*,*,*,に぀け +226 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ€γ‚Œ +227 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ€γ‚Œγ¦ +228 助詞,格助詞,ι€£θͺž,*,*,*,にとって +229 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ¨γ‚Š +230 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ¨γ‚ŠγΎγ—γ¦ +231 助詞,格助詞,ι€£θͺž,*,*,*,γ«γΎγ€γ‚γ‚ŠγΎγ™ +232 助詞,格助詞,ι€£θͺž,*,*,*,にま぀わる +233 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ£γ¦ +234 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚Š +235 
助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚ŠγΎγ—γ¦ +236 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚ŠγΎγ™ +237 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚‹ +238 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ£γ¦ +239 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚Š +240 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ—γ¦ +241 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ™ +242 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚‹ +243 助詞,格助詞,ι€£θͺž,*,*,*,に閒し +244 助詞,格助詞,ι€£θͺž,*,*,*,に閒して +245 助詞,格助詞,ι€£θͺž,*,*,*,に閒しまして +246 助詞,格助詞,ι€£θͺž,*,*,*,に閒します +247 助詞,格助詞,ι€£θͺž,*,*,*,に閒する +248 助詞,格助詞,ι€£θͺž,*,*,*,γ«ιš›γ— +249 助詞,格助詞,ι€£θͺž,*,*,*,γ«ιš›γ—γ¦ +250 助詞,格助詞,ι€£θͺž,*,*,*,γ«ιš›γ—γΎγ—γ¦ +251 助詞,格助詞,ι€£θͺž,*,*,*,に従い +252 助詞,格助詞,ι€£θͺž,*,*,*,に従いまして +253 助詞,格助詞,ι€£θͺž,*,*,*,に従います +254 助詞,格助詞,ι€£θͺž,*,*,*,に従う +255 助詞,格助詞,ι€£θͺž,*,*,*,に従って +256 助詞,格助詞,ι€£θͺž,*,*,*,に対し +257 助詞,格助詞,ι€£θͺž,*,*,*,に対して +258 助詞,格助詞,ι€£θͺž,*,*,*,に対しまして +259 助詞,格助詞,ι€£θͺž,*,*,*,に対します +260 助詞,格助詞,ι€£θͺž,*,*,*,に対する +261 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ£γ¦ +262 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚Š +263 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚ŠγΎγ—γ¦ +264 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚ŠγΎγ™ +265 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚‹ +266 助詞,格助詞,ι€£θͺž,*,*,*,をめぐって +267 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’γ‚γγ‚ŠγΎγ—γ¦ +268 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’γ‚γγ‚ŠγΎγ™ +269 助詞,格助詞,ι€£θͺž,*,*,*,をめぐる +270 助詞,格助詞,ι€£θͺž,*,*,*,をもけまして +271 助詞,格助詞,ι€£θͺž,*,*,*,をもって +272 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’δ»₯て +273 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ—γ¦ +274 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ—γΎγ—γ¦ +275 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ˜ +276 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ˜γ¦ +277 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ˜γΎγ—γ¦ +278 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,こそ +279 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,γ•γˆ +280 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,しか +281 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,すら +282 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,ぞ +283 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,っきゃ +284 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,は +285 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,γ‚‚ +286 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,γ‚„ +287 助詞,η΅‚εŠ©θ©ž,*,*,*,*,かぁ +288 助詞,η΅‚εŠ©θ©ž,*,*,*,*,かい +289 助詞,η΅‚εŠ©θ©ž,*,*,*,*,かしら +290 助詞,η΅‚εŠ©θ©ž,*,*,*,*,け +291 助詞,η΅‚εŠ©θ©ž,*,*,*,*,さ +292 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ぜ +293 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ぞ +294 助詞,η΅‚εŠ©θ©ž,*,*,*,*,だって +295 助詞,η΅‚εŠ©θ©ž,*,*,*,*,っけ +296 助詞,η΅‚εŠ©θ©ž,*,*,*,*,てん +297 助詞,η΅‚εŠ©θ©ž,*,*,*,*,で +298 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺ +299 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺγƒΌ +300 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺぁー +301 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺγ‚‘ +302 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ね +303 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ねー +304 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ねん +305 助詞,η΅‚εŠ©θ©ž,*,*,*,*,だ+306 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γγ† +307 助詞,η΅‚εŠ©θ©ž,*,*,*,*,べ +308 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚‚γ‚“ +309 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚„ +310 助詞,η΅‚εŠ©θ©ž,*,*,*,*,やら +311 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚ˆ +312 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚ˆγƒΌ +313 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚ˆγ† +314 助詞,η΅‚εŠ©θ©ž,*,*,*,*,わ +315 助詞,η΅‚εŠ©θ©ž,*,*,*,*,わい +316 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚“ +317 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ヨー +318 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γƒ― +319 助詞,ζŽ₯碚助詞,*,*,*,*,γŠγ‚ˆγ³ +320 助詞,ζŽ₯碚助詞,*,*,*,*,から +321 助詞,ζŽ₯碚助詞,*,*,*,*,からには +322 助詞,ζŽ₯碚助詞,*,*,*,*,が +323 助詞,ζŽ₯碚助詞,*,*,*,*,けども +324 助詞,ζŽ₯碚助詞,*,*,*,*,γ‘γ‚Œγ© +325 助詞,ζŽ₯碚助詞,*,*,*,*,γ‘γ‚Œγ©γ‚‚ +326 助詞,ζŽ₯碚助詞,*,*,*,*,さかい +327 助詞,ζŽ₯碚助詞,*,*,*,*,し +328 助詞,ζŽ₯碚助詞,*,*,*,*,たって +329 助詞,ζŽ₯碚助詞,*,*,*,*,぀぀ +330 助詞,ζŽ₯碚助詞,*,*,*,*,て +331 助詞,ζŽ₯碚助詞,*,*,*,*,で +332 助詞,ζŽ₯碚助詞,*,*,*,*,と +333 助詞,ζŽ₯碚助詞,*,*,*,*,とも +334 助詞,ζŽ₯碚助詞,*,*,*,*,ど +335 助詞,ζŽ₯碚助詞,*,*,*,*,どころか +336 助詞,ζŽ₯碚助詞,*,*,*,*,ども +337 助詞,ζŽ₯碚助詞,*,*,*,*,γͺγŒγ‚‰ +338 助詞,ζŽ₯碚助詞,*,*,*,*,γͺγ‚Š +339 助詞,ζŽ₯碚助詞,*,*,*,*,γγ§ +340 助詞,ζŽ₯碚助詞,*,*,*,*,γγ« +341 助詞,ζŽ₯碚助詞,*,*,*,*,ば +342 助詞,ζŽ₯碚助詞,*,*,*,*,γ‚‚γγ +343 助詞,ζŽ₯碚助詞,*,*,*,*,γ‚„ +344 助詞,ζŽ₯碚助詞,*,*,*,*,やいγͺγ‚„ +345 助詞,ζŽ₯碚助詞,*,*,*,*,や否や +346 助詞,ζŽ₯碚助詞,*,*,*,*,んで +347 
助詞,η‰ΉζŠ,*,*,*,*,かγͺ +348 助詞,η‰ΉζŠ,*,*,*,*,けむ +349 助詞,η‰ΉζŠ,*,*,*,*,に +350 助詞,η‰ΉζŠ,*,*,*,*,にゃ +351 助詞,η‰ΉζŠ,*,*,*,*,γ‚“ +352 助詞,ε‰―θ©žεŒ–,*,*,*,*,と +353 助詞,ε‰―θ©žεŒ–,*,*,*,*,に +354 助詞,ε‰―εŠ©θ©ž,*,*,*,*,かも +355 助詞,ε‰―εŠ©θ©ž,*,*,*,*,くらい +356 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ぐらい +357 助詞,ε‰―εŠ©θ©ž,*,*,*,*,しも +358 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ˜γ‚ƒ +359 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ˜γ‚ƒγ‚ +360 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ˜γ‚ƒγ‚‘ +361 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ず぀ +362 助詞,ε‰―εŠ©θ©ž,*,*,*,*,だけ +363 助詞,ε‰―εŠ©θ©ž,*,*,*,*,だって +364 助詞,ε‰―εŠ©θ©ž,*,*,*,*,だに +365 助詞,ε‰―εŠ©θ©ž,*,*,*,*,でも +366 助詞,ε‰―εŠ©θ©ž,*,*,*,*,とも +367 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺぞ +368 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺど +369 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺγ‚Š +370 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺんか +371 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺγ‚“γž +372 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺんて +373 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γγΏ +374 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ばかし +375 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ°γ‹γ‚Š +376 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ばっか +377 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ°γ£γ‹γ‚Š +378 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ほど +379 助詞,ε‰―εŠ©θ©ž,*,*,*,*,まで +380 助詞,ε‰―εŠ©θ©ž,*,*,*,*,やら +381 助詞,ε‰―εŠ©θ©ž,*,*,*,*,程 +382 助詞,ε‰―εŠ©θ©ž,*,*,*,*,θΏ„ +383 助詞,ε‰―εŠ©θ©žοΌδΈ¦η«‹εŠ©θ©žοΌη΅‚εŠ©θ©ž,*,*,*,*,か +384 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γŸγ‚Š +385 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,だだ+386 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γ γ‚Š +387 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,と +388 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,とか +389 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γͺγ‚Š +390 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γ‚„ +391 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,やら +392 助詞,ι€£δ½“εŒ–,*,*,*,*,だ+393 助詞,ι€£δ½“εŒ–,*,*,*,*,γƒŽ +394 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,δ»εšε½’,぀ +395 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,基本归,぀ +396 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,体言ζŽ₯碚,぀ +397 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,぀ +398 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,命什yo,぀ +399 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,連用归,぀ +400 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,らしい +401 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,焑い +402 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,らしい +403 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,焑い +404 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,らしい +405 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,焑い +406 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,らしい +407 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,焑い +408 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,らしい +409 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,焑い +410 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,らしい +411 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,焑い +412 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,らしい +413 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,焑い +414 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,らしい +415 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,焑い +416 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,らしい +417 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,焑い +418 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,らしい +419 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,焑い +420 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,らしい +421 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,焑い +422 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,らしい +423 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,焑い +424 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,らしい +425 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,焑い +426 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,δ»εšε½’,ある +427 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,δ»εšηΈη΄„οΌ‘,ある +428 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,基本归,ある +429 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,体言ζŽ₯ηΆšη‰ΉζŠ,ある +430 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,ζœͺ焢ウζŽ₯碚,ある +431 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,ζœͺη„Άε½’,ある +432 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,命什e,ある +433 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,連用タζŽ₯碚,ある +434 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,連用归,ある +435 
εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,ござる +436 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,εΎ‘εΊ§γ‚‹ +437 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,ござる +438 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,εΎ‘εΊ§γ‚‹ +439 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,ござる +440 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,εΎ‘εΊ§γ‚‹ +441 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,ござる +442 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,εΎ‘εΊ§γ‚‹ +443 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,ござる +444 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,εΎ‘εΊ§γ‚‹ +445 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,ござる +446 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,εΎ‘εΊ§γ‚‹ +447 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,ござる +448 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,εΎ‘εΊ§γ‚‹ +449 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,ござる +450 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,εΎ‘εΊ§γ‚‹ +451 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,ござる +452 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,εΎ‘εΊ§γ‚‹ +453 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,ござる +454 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,εΎ‘εΊ§γ‚‹ +455 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Έγƒ£,基本归,γ˜γ‚ƒ +456 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Έγƒ£,ζœͺη„Άε½’,γ˜γ‚ƒ +457 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Έγƒ£,連用归,γ˜γ‚ƒ +458 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,δ»εšε½’,た +459 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,δ»εšε½’,だ +460 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,基本归,た +461 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,基本归,だ +462 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,ζœͺη„Άε½’,た +463 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,ζœͺη„Άε½’,だ +464 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ガルζŽ₯碚,γŸγ„ +465 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,音便基本归,γŸγ„ +466 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,δ»εšε½’,γŸγ„ +467 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,δ»εšηΈη΄„οΌ‘,γŸγ„ +468 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,δ»εšηΈη΄„οΌ’,γŸγ„ +469 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,基本归,γŸγ„ +470 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,体言ζŽ₯碚,γŸγ„ +471 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ζ–‡θͺžεŸΊζœ¬ε½’,γŸγ„ +472 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ζœͺ焢ウζŽ₯碚,γŸγ„ +473 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ζœͺη„ΆγƒŒζŽ₯碚,γŸγ„ +474 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,連用ゴアむζŽ₯碚,γŸγ„ +475 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,連用タζŽ₯碚,γŸγ„ +476 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,連用テζŽ₯碚,γŸγ„ +477 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,δ»εšε½’,だ +478 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,基本归,だ +479 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,体言ζŽ₯碚,だ +480 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,ζœͺη„Άε½’,だ +481 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,命什e,だ +482 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,連用タζŽ₯碚,だ +483 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,連用归,だ +484 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,基本归,っす +485 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,基本归,です +486 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,基本归,どす +487 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,ζœͺη„Άε½’,っす +488 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,ζœͺη„Άε½’,です +489 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,ζœͺη„Άε½’,どす +490 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,連用归,っす +491 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,連用归,です +492 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,連用归,どす +493 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ガルζŽ₯碚,焑い +494 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,音便基本归,焑い +495 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,δ»εšε½’,焑い +496 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,δ»εšηΈη΄„οΌ‘,焑い +497 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,δ»εšηΈη΄„οΌ’,焑い +498 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,基本归,焑い +499 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,体言ζŽ₯碚,焑い +500 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ζ–‡θͺžεŸΊζœ¬ε½’,焑い +501 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ζœͺ焢ウζŽ₯碚,焑い +502 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ζœͺη„ΆγƒŒζŽ₯碚,焑い +503 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,命什e,焑い +504 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用ゴアむζŽ₯碚,焑い +505 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用タζŽ₯碚,焑い +506 
εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用テζŽ₯碚,焑い +507 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用デζŽ₯碚,焑い +508 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,δ»εšε½’,ぬ +509 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,基本归,ぬ +510 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,体言ζŽ₯碚,ぬ +511 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,ζ–‡θͺžεŸΊζœ¬ε½’,ぬ +512 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,連用ニζŽ₯碚,ぬ +513 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,連用归,ぬ +514 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,δ»εšε½’,ます +515 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,δ»εšε½’,やす +516 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,基本归,ます +517 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,基本归,やす +518 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺ焢ウζŽ₯碚,ます +519 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺ焢ウζŽ₯碚,やす +520 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺη„Άε½’,ます +521 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺη„Άε½’,やす +522 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什e,ます +523 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什e,やす +524 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什i,ます +525 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什i,やす +526 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,連用归,ます +527 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,連用归,やす +528 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,基本归,γ‚„ +529 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,ζœͺη„Άε½’,γ‚„ +530 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,連用归,γ‚„ +531 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,う +532 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,じ +533 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ˜γ‚ƒγ‚“ +534 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ˜γ‚ƒγƒ³ +535 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,ぬ +536 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,ひん +537 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,へん +538 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,まい +539 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ‚„γ‚“ +540 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ‚“ +541 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚­,基本归,き +542 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚­,体言ζŽ₯碚,き +543 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚­,命什e,き +544 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚±γƒͺ,基本归,γ‘γ‚Š +545 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚±γƒͺ,体言ζŽ₯碚,γ‘γ‚Š +546 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,基本归,ごとし +547 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,基本归,如し +548 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,体言ζŽ₯碚,ごとし +549 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,体言ζŽ₯碚,如し +550 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,連用归,ごとし +551 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,連用归,如し +552 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,δ»εšε½’,γŸγ‚Š +553 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,δ»εšε½’,γͺγ‚Š +554 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,基本归,γŸγ‚Š +555 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,基本归,γͺγ‚Š +556 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,体言ζŽ₯碚,γŸγ‚Š +557 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,体言ζŽ₯碚,γͺγ‚Š +558 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,ζœͺη„Άε½’,γŸγ‚Š +559 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,ζœͺη„Άε½’,γͺγ‚Š +560 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,命什e,γŸγ‚Š +561 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,命什e,γͺγ‚Š +562 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,δ»εšε½’,べし +563 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,基本归,べし +564 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,体言ζŽ₯碚,べし +565 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,ζœͺη„Άε½’,べし +566 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,連用归,べし +567 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,δ»εšε½’,まじ +568 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,基本归,まじ +569 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,体言ζŽ₯碚,まじ +570 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,連用归,まじ +571 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒͺ,基本归,γ‚Š +572 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒͺ,体言ζŽ₯碚,γ‚Š +573 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,δ»εšε½’,γ‚‹ +574 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,基本归,γ‚‹ +575 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,体言ζŽ₯碚,γ‚‹ +576 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,ζœͺη„Άε½’,γ‚‹ +577 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,命什e,γ‚‹ +578 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,命什yo,γ‚‹ +579 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,連用归,γ‚‹ +580 ζŽ₯碚詞,*,*,*,*,*,* +581 ζŽ₯碚詞,*,*,*,*,*,γŠγ‚ˆγ³ +582 ζŽ₯頭詞,ε½’εΉθ©žζŽ₯碚,*,*,*,*,* +583 ζŽ₯頭詞,ζ•°ζŽ₯碚,*,*,*,*,* +584 ζŽ₯頭詞,ε‹•θ©žζŽ₯碚,*,*,*,*,* +585 ζŽ₯頭詞,名詞ζŽ₯碚,*,*,*,*,* +586 
ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,δ»εšε½’,* +587 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,δ»εšηΈη΄„οΌ‘,* +588 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,基本归,* +589 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠ,* +590 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +591 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,ζœͺ焢ウζŽ₯碚,* +592 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,ζœͺη„Άε½’,* +593 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,命什i,* +594 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,命什yo,* +595 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,連用归,* +596 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšε½’,* +597 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšηΈη΄„οΌ‘,* +598 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,基本归,* +599 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠ,* +600 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +601 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺ焢ウζŽ₯碚,* +602 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺη„Άε½’,* +603 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什i,* +604 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什yo,* +605 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,連用归,* +606 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,δ»εšε½’,* +607 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,δ»εšηΈη΄„οΌ‘,* +608 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,基本归,* +609 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζ–‡θͺžεŸΊζœ¬ε½’,* +610 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζœͺ焢ウζŽ₯碚,* +611 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζœͺ焢レルζŽ₯碚,* +612 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζœͺη„Άε½’,* +613 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,命什ro,* +614 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,命什yo,* +615 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,δ»εšε½’,* +616 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,δ»εšηΈη΄„οΌ‘,* +617 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,基本归,* +618 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,ζ–‡θͺžεŸΊζœ¬ε½’,* +619 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,ζœͺ焢ウζŽ₯碚,* +620 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,ζœͺη„Άε½’,* +621 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,命什yo,* +622 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,δ»εšε½’,する +623 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,δ»εšηΈη΄„οΌ‘,する +624 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,基本归,する +625 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,体言ζŽ₯ηΆšη‰ΉζŠ,する +626 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,する +627 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζ–‡θͺžεŸΊζœ¬ε½’,する +628 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺ焢ウζŽ₯碚,する +629 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺη„ΆγƒŒζŽ₯碚,する +630 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺ焢レルζŽ₯碚,する +631 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺη„Άε½’,する +632 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,命什i,する +633 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,命什ro,する +634 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,命什yo,する +635 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,連用归,する +636 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,δ»εšε½’,* +637 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,基本归,* +638 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,体言ζŽ₯碚,* +639 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,ζœͺη„Άε½’,* +640 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,命什e,* +641 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,連用归,* +642 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,*,* +643 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšε½’,* +644 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,* +645 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归,* +646 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,* +647 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,* +648 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,* +649 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺη„Άε½’,* +650 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,命什ro,* +651 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,命什yo,* +652 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,連用归,* +653 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšε½’,* +654 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšηΈη΄„οΌ‘,* +655 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,基本归,* +656 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺ焢ウζŽ₯碚,* +657 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άε½’,* +658 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άη‰ΉζŠ,* +659 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什e,* +660 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什ro,* +661 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什yo,* +662 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,連用归,* +663 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,δ»εšε½’,* +664 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,基本归,* +665 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,δ»εšε½’,* +666 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,基本归,* +667 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,体言ζŽ₯碚,* +668 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,ζœͺη„Άε½’,* +669 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,命什yo,* +670 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,連用归,* +671 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,δ»εšε½’,* +672 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,基本归,* +673 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,体言ζŽ₯碚,* +674 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,ζœͺη„Άε½’,* +675 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,命什yo,* +676 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,連用归,* +677 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,δ»εšε½’,* +678 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,基本归,* +679 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,体言ζŽ₯碚,* +680 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,ζœͺη„Άε½’,* +681 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,命什yo,* +682 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,連用归,* +683 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,δ»εšε½’,* +684 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,基本归,* +685 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,体言ζŽ₯碚,* +686 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +687 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,命什yo,* +688 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,連用归,* +689 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,δ»εšε½’,* +690 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,基本归,* +691 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,体言ζŽ₯碚,* +692 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,ζœͺη„Άε½’,* +693 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,命什yo,* +694 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,連用归,* +695 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,δ»εšε½’,* +696 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,基本归,* +697 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,体言ζŽ₯碚,* +698 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,ζœͺ焢ウζŽ₯碚,* +699 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,ζœͺη„Άε½’,* +700 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,命什yo,* +701 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,連用归,* +702 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,*,* +703 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,* +704 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,葌く +705 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +706 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,葌く +707 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,* +708 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,葌く +709 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +710 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,葌く +711 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,* +712 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,葌く +713 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,* +714 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,葌く +715 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,* +716 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,葌く +717 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,* +718 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,葌く +719 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +720 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,葌く +721 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +722 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,葌く +723 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +724 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,葌く +725 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +726 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,葌く +727 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +728 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,葌く +729 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +730 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,葌く +731 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +732 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,葌く +733 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +734 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,葌く +735 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,* +736 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,葌く +737 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,* +738 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,葌く +739 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,* +740 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,葌く +741 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,* +742 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,葌く +743 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,* +744 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,葌く +745 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,* +746 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,葌く +747 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,* +748 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,葌く +749 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,*,* +750 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,δ»εšε½’,* +751 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,δ»εšηΈη΄„οΌ‘,* +752 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,基本归,* +753 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,ζœͺ焢ウζŽ₯碚,* +754 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,ζœͺη„Άε½’,* +755 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,命什e,* +756 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,連用タζŽ₯碚,* +757 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,連用归,* +758 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,*,* +759 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +760 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,* +761 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,* +762 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,* +763 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +764 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +765 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +766 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,*,* +767 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšε½’,* +768 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšηΈη΄„οΌ‘,* +769 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,基本归,* +770 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺ焢ウζŽ₯碚,* +771 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,* +772 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,命什e,* +773 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用タζŽ₯碚,* +774 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用归,* +775 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšε½’,* +776 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšηΈη΄„οΌ‘,* +777 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,基本归,* +778 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺ焢ウζŽ₯碚,* +779 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺη„Άε½’,* +780 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,命什e,* +781 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用タζŽ₯碚,* +782 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用归,* +783 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,*,* +784 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,δ»εšε½’,* +785 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,δ»εšηΈη΄„οΌ‘,* +786 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,基本归,* +787 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,ζœͺ焢ウζŽ₯碚,* +788 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +789 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,命什e,* +790 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,連用タζŽ₯碚,* +791 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,連用归,* +792 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,*,* +793 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšε½’,* +794 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšηΈη΄„οΌ‘,* +795 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,基本归,* +796 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺ焢ウζŽ₯碚,* +797 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺη„Άε½’,* +798 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,命什e,* +799 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用タζŽ₯碚,* +800 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用归,* +801 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,* +802 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,する +803 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,* +804 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,する +805 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,* +806 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,する +807 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,* +808 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,する +809 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,* +810 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,する +811 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +812 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,する +813 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,* +814 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,する +815 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,* +816 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,する +817 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,* +818 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,する +819 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,* +820 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,する +821 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,* +822 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,する +823 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,* +824 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,する +825 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,* +826 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,* +827 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,* +828 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,* +829 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,* +830 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,* +831 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,* +832 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,* +833 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,* +834 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,* +835 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,*,* +836 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,δ»εšε½’,* +837 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,δ»εšε½’,言う +838 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,基本归,* +839 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,基本归,言う +840 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +841 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,言う +842 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺη„Άε½’,* +843 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺη„Άε½’,言う +844 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,命什e,* +845 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,命什e,言う +846 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用タζŽ₯碚,* +847 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用タζŽ₯碚,言う +848 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用归,* +849 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用归,言う +850 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,*,* +851 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +852 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,言う +853 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,θ‘Œγ† +854 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +855 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,言う +856 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,θ‘Œγ† +857 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +858 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,言う +859 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,θ‘Œγ† +860 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +861 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,言う +862 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,θ‘Œγ† +863 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +864 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,言う +865 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,θ‘Œγ† +866 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +867 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,言う +868 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,θ‘Œγ† +869 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +870 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,言う +871 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,θ‘Œγ† +872 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +873 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,基本归,* +874 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +875 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +876 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +877 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšε½’,* +878 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,基本归,* +879 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,* +880 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,命什e,* +881 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,連用归,* +882 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,δ»εšε½’,* +883 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,基本归,* +884 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +885 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,命什e,* +886 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,連用归,* +887 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,δ»εšε½’,* +888 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,基本归,* +889 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +890 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,命什e,* +891 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,連用归,* +892 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,δ»εšε½’,* +893 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,基本归,* +894 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,現代基本归,* +895 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,体言ζŽ₯碚,* +896 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,ζœͺη„Άε½’,* +897 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,命什yo,* +898 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,連用归,* +899 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,δ»εšε½’,* +900 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,基本归,* +901 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,体言ζŽ₯碚,* +902 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +903 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,命什yo,* +904 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,連用归,* +905 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,δ»εšε½’,* +906 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,* +907 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,基本归,* +908 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,* +909 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,* +910 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,* +911 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,ζœͺη„Άε½’,* +912 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,命什ro,* +913 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,命什yo,* +914 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,連用归,* +915 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +916 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,* +917 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,* +918 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,* +919 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +920 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +921 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +922 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,* +923 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,* +924 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,* +925 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,* +926 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +927 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,* +928 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,* +929 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,* +930 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,* +931 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,* +932 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,* +933 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,δ»εšε½’,ζ₯γ‚‹ +934 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,δ»εšηΈη΄„οΌ‘,ζ₯γ‚‹ +935 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,基本归,ζ₯γ‚‹ +936 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠ,ζ₯γ‚‹ +937 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ζ₯γ‚‹ +938 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,ζœͺ焢ウζŽ₯碚,ζ₯γ‚‹ +939 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,ζœͺη„Άε½’,ζ₯γ‚‹ +940 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,命什i,ζ₯γ‚‹ +941 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,命什yo,ζ₯γ‚‹ +942 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,連用归,ζ₯γ‚‹ +943 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšε½’,ζ₯γ‚‹ +944 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšηΈη΄„οΌ‘,ζ₯γ‚‹ +945 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,基本归,ζ₯γ‚‹ +946 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠ,ζ₯γ‚‹ +947 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ζ₯γ‚‹ +948 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺ焢ウζŽ₯碚,ζ₯γ‚‹ +949 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺη„Άε½’,ζ₯γ‚‹ +950 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什i,ζ₯γ‚‹ +951 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什yo,ζ₯γ‚‹ +952 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,連用归,ζ₯γ‚‹ +953 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,*,* +954 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšε½’,* +955 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšε½’,γ‚‹ +956 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,* +957 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,γ‚‹ +958 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归,* +959 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归,γ‚‹ +960 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,* +961 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,γ‚‹ +962 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,* +963 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,γ‚‹ +964 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,* +965 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,γ‚‹ +966 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺη„Άε½’,* +967 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什ro,* +968 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什ro,γ‚‹ +969 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什yo,* +970 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什yo,γ‚‹ +971 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,連用归,* +972 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšε½’,γγ‚Œγ‚‹ +973 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšηΈη΄„οΌ‘,γγ‚Œγ‚‹ +974 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,基本归,γγ‚Œγ‚‹ +975 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺ焢ウζŽ₯碚,γγ‚Œγ‚‹ +976 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άε½’,γγ‚Œγ‚‹ +977 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άη‰ΉζŠ,γγ‚Œγ‚‹ +978 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什e,γγ‚Œγ‚‹ +979 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什ro,γγ‚Œγ‚‹ +980 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什yo,γγ‚Œγ‚‹ +981 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,連用归,γγ‚Œγ‚‹ +982 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,δ»εšε½’,* +983 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,基本归,* +984 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,*,* +985 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,* +986 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,おく +987 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,碚く +988 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,抜く +989 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +990 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,おく +991 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,碚く +992 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,抜く +993 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,* +994 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,おく +995 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,碚く +996 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,抜く +997 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +998 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,おく +999 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,碚く +1000 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,抜く +1001 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,* +1002 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,おく +1003 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,碚く +1004 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,抜く +1005 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,* +1006 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,おく +1007 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,碚く +1008 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,抜く +1009 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,* +1010 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,おく +1011 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,碚く +1012 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,抜く +1013 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,* +1014 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,おく +1015 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,碚く +1016 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,抜く +1017 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +1018 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,いく +1019 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,く +1020 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,葌く +1021 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +1022 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,いく +1023 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,く +1024 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,葌く +1025 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +1026 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,いく +1027 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,く +1028 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,葌く +1029 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +1030 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,いく +1031 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,く +1032 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,葌く +1033 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +1034 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,いく +1035 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,く +1036 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,葌く +1037 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +1038 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,いく +1039 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,く +1040 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,葌く +1041 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +1042 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,いく +1043 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,く +1044 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,葌く +1045 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +1046 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,いく +1047 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,く +1048 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,葌く +1049 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,ゆく +1050 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,葌く +1051 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,ゆく +1052 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,葌く +1053 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,ゆく +1054 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,葌く +1055 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,ゆく +1056 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,葌く +1057 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,ゆく +1058 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,葌く +1059 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,ゆく +1060 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,葌く +1061 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,ゆく +1062 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,葌く +1063 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,*,* +1064 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,*,尽くす +1065 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +1066 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,出す +1067 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,尽くす +1068 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,直す +1069 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,* +1070 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,出す +1071 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,尽くす +1072 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,直す +1073 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,出す +1074 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,尽くす +1075 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,直す +1076 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,* +1077 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,出す +1078 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,尽くす +1079 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,直す +1080 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +1081 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,出す +1082 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,尽くす +1083 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,直す +1084 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +1085 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,出す +1086 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,尽くす +1087 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,直す +1088 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +1089 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,出す +1090 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,尽くす +1091 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,直す +1092 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,*,* +1093 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšε½’,* +1094 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšηΈη΄„οΌ‘,* +1095 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺ焢ウζŽ₯碚,* +1096 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,* +1097 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,命什e,* +1098 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用タζŽ₯碚,* +1099 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用归,* +1100 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,*,* +1101 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšε½’,* +1102 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšηΈη΄„οΌ‘,* +1103 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺ焢ウζŽ₯碚,* +1104 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺη„Άε½’,* +1105 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,命什e,* +1106 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用タζŽ₯碚,* +1107 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用归,* +1108 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,*,* +1109 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšε½’,* +1110 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšε½’,θΎΌγ‚€ +1111 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšηΈη΄„οΌ‘,* +1112 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšηΈη΄„οΌ‘,θΎΌγ‚€ +1113 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,基本归,θΎΌγ‚€ +1114 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺ焢ウζŽ₯碚,* +1115 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺ焢ウζŽ₯碚,θΎΌγ‚€ +1116 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺη„Άε½’,* +1117 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺη„Άε½’,θΎΌγ‚€ +1118 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,命什e,* +1119 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,命什e,θΎΌγ‚€ +1120 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用タζŽ₯碚,* +1121 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用タζŽ₯碚,θΎΌγ‚€ +1122 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用归,* +1123 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用归,θΎΌγ‚€ +1124 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,* +1125 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,εˆ‡γ‚‹ +1126 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,* +1127 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,ある +1128 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,γŠγ‚‹ +1129 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,かかる +1130 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,きる +1131 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,γͺγ‚‹ +1132 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,まいる +1133 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,まわる +1134 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,γ‚„γ‚‹ +1135 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,ε›žγ‚‹ +1136 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,参る +1137 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,硂わる +1138 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,εˆ‡γ‚‹ +1139 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,* +1140 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,ある +1141 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,γŠγ‚‹ +1142 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,かかる +1143 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,きる +1144 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,γͺγ‚‹ +1145 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,まいる +1146 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,まわる +1147 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,γ‚„γ‚‹ +1148 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,ε›žγ‚‹ +1149 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,参る +1150 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,硂わる +1151 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,εˆ‡γ‚‹ +1152 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,* +1153 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,ある +1154 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,γŠγ‚‹ +1155 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,かかる +1156 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,きる +1157 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,γͺγ‚‹ +1158 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,まいる +1159 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,まわる +1160 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,γ‚„γ‚‹ +1161 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,ε›žγ‚‹ +1162 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,参る +1163 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,硂わる +1164 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,εˆ‡γ‚‹ +1165 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,* +1166 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,ある +1167 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,γŠγ‚‹ +1168 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,かかる +1169 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,きる +1170 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,γͺγ‚‹ +1171 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,まいる +1172 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,まわる +1173 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,γ‚„γ‚‹ +1174 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,ε›žγ‚‹ +1175 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,参る +1176 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,硂わる +1177 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,εˆ‡γ‚‹ +1178 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +1179 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ある +1180 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,γŠγ‚‹ +1181 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,かかる +1182 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,きる +1183 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,γͺγ‚‹ +1184 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,まいる +1185 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,まわる +1186 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,γ‚„γ‚‹ +1187 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ε›žγ‚‹ +1188 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,参る +1189 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,硂わる +1190 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,εˆ‡γ‚‹ +1191 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,* +1192 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,ある +1193 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,γŠγ‚‹ +1194 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,かかる +1195 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,きる +1196 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,γͺγ‚‹ +1197 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,まいる +1198 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,まわる +1199 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,γ‚„γ‚‹ +1200 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,ε›žγ‚‹ +1201 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,参る +1202 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,硂わる +1203 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,εˆ‡γ‚‹ +1204 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,* +1205 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,ある +1206 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,γŠγ‚‹ +1207 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,かかる +1208 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,きる +1209 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,γͺγ‚‹ +1210 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,まいる +1211 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,まわる +1212 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,γ‚„γ‚‹ +1213 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,ε›žγ‚‹ +1214 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,参る +1215 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,硂わる +1216 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,εˆ‡γ‚‹ +1217 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,* +1218 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,ある +1219 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,γŠγ‚‹ +1220 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,かかる +1221 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,きる +1222 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,γͺγ‚‹ +1223 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,まいる +1224 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,まわる +1225 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,γ‚„γ‚‹ +1226 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,ε›žγ‚‹ +1227 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,参る +1228 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,硂わる +1229 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,εˆ‡γ‚‹ +1230 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,* +1231 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,ある +1232 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,γŠγ‚‹ +1233 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,かかる +1234 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,きる +1235 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,γͺγ‚‹ +1236 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,まいる +1237 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,まわる +1238 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,γ‚„γ‚‹ +1239 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,ε›žγ‚‹ +1240 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,参る +1241 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,硂わる +1242 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,εˆ‡γ‚‹ +1243 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,* +1244 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,ある +1245 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,γŠγ‚‹ +1246 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,かかる +1247 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,きる +1248 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,γͺγ‚‹ +1249 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,まいる +1250 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,まわる +1251 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,γ‚„γ‚‹ +1252 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,ε›žγ‚‹ +1253 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,参る +1254 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,硂わる +1255 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,εˆ‡γ‚‹ +1256 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,* +1257 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,ある +1258 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,γŠγ‚‹ +1259 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,かかる +1260 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,きる +1261 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,γͺγ‚‹ +1262 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,まいる +1263 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,まわる +1264 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,γ‚„γ‚‹ +1265 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,ε›žγ‚‹ +1266 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,参る +1267 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,硂わる +1268 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,εˆ‡γ‚‹ +1269 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,γͺさる +1270 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,らっしゃる +1271 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,下さる +1272 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,γͺさる +1273 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,らっしゃる +1274 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,下さる +1275 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,γͺさる +1276 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,らっしゃる +1277 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,下さる +1278 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,γͺさる +1279 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,らっしゃる +1280 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,下さる +1281 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,γͺさる +1282 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,らっしゃる +1283 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,下さる +1284 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,γͺさる +1285 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,らっしゃる +1286 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,下さる +1287 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,γͺさる +1288 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,らっしゃる +1289 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,下さる +1290 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,γͺさる +1291 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,らっしゃる +1292 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,下さる +1293 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,γͺさる +1294 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,らっしゃる +1295 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,下さる +1296 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,γͺさる +1297 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,らっしゃる +1298 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,下さる +1299 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,*,* +1300 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,δ»εšε½’,* +1301 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +1302 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺη„Άε½’,* +1303 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,命什e,* +1304 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用タζŽ₯碚,* +1305 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用归,* +1306 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,*,* +1307 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +1308 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,しまう +1309 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,もらう +1310 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,εˆγ† +1311 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +1312 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,しまう +1313 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,もらう +1314 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,εˆγ† +1315 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +1316 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,しまう +1317 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,もらう +1318 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,εˆγ† +1319 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +1320 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,しまう +1321 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,もらう +1322 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,εˆγ† +1323 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +1324 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,しまう +1325 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,もらう +1326 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,εˆγ† +1327 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +1328 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,しまう +1329 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,もらう +1330 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,εˆγ† +1331 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +1332 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,しまう +1333 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,五段・ワ葌促音便,連用形,もらう +1334 ε‹•θ©ž,非θ‡ͺη«‹,*,*,五段・ワ葌促音便,連用形,εˆγ† +1335 ε‹•θ©ž,非θ‡ͺη«‹,*,*,四段・ハ葌,仮定形,* +1336 ε‹•θ©ž,非θ‡ͺη«‹,*,*,四段・ハ葌,基本形,* +1337 ε‹•θ©ž,非θ‡ͺη«‹,*,*,四段・ハ葌,ζœͺη„Άε½’,* +1338 ε‹•θ©ž,非θ‡ͺη«‹,*,*,四段・ハ葌,命令e,* +1339 ε‹•θ©ž,非θ‡ͺη«‹,*,*,四段・ハ葌,連用形,* +1340 ε‰―θ©ž,*,*,*,*,*,* +1341 ε‰―θ©ž,δΈ€θˆ¬,*,*,*,*,* +1342 ε‰―θ©ž,助詞類ζŽ₯続,*,*,*,*,* +1343 名詞,ァ倉ζŽ₯続,*,*,*,*,* +1344 名詞,γƒŠγ‚€ε½’εΉθ©žθͺžεΉΉ,*,*,*,*,* +1345 名詞,δΈ€θˆ¬,*,*,*,*,* +1346 名詞,δΈ€θˆ¬,*,*,*,0,* +1347 名詞,ε½’εΉε‹•θ©žθͺžεΉΉ,*,*,*,*,* +1348 名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,* +1349 名詞,ε›Ίζœ‰εθ©ž,人名,δΈ€θˆ¬,*,*,* +1350 名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,* +1351 名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,* +1352 名詞,ε›Ίζœ‰εθ©ž,組織,*,*,*,* +1353 名詞,ε›Ίζœ‰εθ©ž,地域,δΈ€θˆ¬,*,*,* +1354 名詞,ε›Ίζœ‰εθ©ž,地域,ε›½,*,*,* +1355 名詞,ζ•°,*,*,*,*,* +1356 名詞,ζŽ₯続詞的,*,*,*,*,* +1357 名詞,ζŽ₯ε°Ύ,ァ倉ζŽ₯続,*,*,*,* +1358 名詞,ζŽ₯ε°Ύ,δΈ€θˆ¬,*,*,*,* +1359 名詞,ζŽ₯ε°Ύ,ε½’εΉε‹•θ©žθͺžεΉΉ,*,*,*,* +1360 名詞,ζŽ₯ε°Ύ,εŠ©ζ•°θ©ž,*,*,*,* +1361 名詞,ζŽ₯ε°Ύ,εŠ©ε‹•θ©žθͺžεΉΉ,*,*,*,* +1362 名詞,ζŽ₯ε°Ύ,人名,*,*,*,* +1363 名詞,ζŽ₯ε°Ύ,地域,*,*,*,* +1364 名詞,ζŽ₯ε°Ύ,η‰ΉζŽŠ,*,*,*,* +1365 名詞,ζŽ₯ε°Ύ,ε‰―θ©žε―θƒ½,*,*,*,* +1366 名詞,代名詞,δΈ€θˆ¬,*,*,*,* +1367 名詞,代名詞,縮約,*,*,*,* +1368 名詞,ε‹•θ©žιžθ‡ͺη«‹ηš„,*,*,*,*,* +1369 名詞,η‰ΉζŽŠ,εŠ©ε‹•θ©žθͺžεΉΉ,*,*,*,* +1370 名詞,非θ‡ͺη«‹,*,*,*,*,* +1371 名詞,非θ‡ͺη«‹,δΈ€θˆ¬,*,*,*,* +1372 名詞,非θ‡ͺη«‹,ε½’εΉε‹•θ©žθͺžεΉΉ,*,*,*,* +1373 名詞,非θ‡ͺη«‹,εŠ©ε‹•θ©žθͺžεΉΉ,*,*,*,* +1374 名詞,非θ‡ͺη«‹,ε‰―θ©žε―θƒ½,*,*,*,* +1375 名詞,ε‰―θ©žε―θƒ½,*,*,*,*,* +1376 ι€£δ½“θ©ž,*,*,*,*,*,* diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/matrix.bin b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/matrix.bin new file mode 100644 index 0000000000000000000000000000000000000000..bf7a464c5c6768b506986b1f01f8c82bb707b2b8 --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/matrix.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62fd16b4f64c851d5dc352ef0d5740c5fc83ddc7c203b2b0b1fc5271969a14ce +size 3792262 diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/pos-id.def b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/pos-id.def new file mode 100644 index 0000000000000000000000000000000000000000..ffd74a8e933dc71a0464c8123c5b887d3c425a7d --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/pos-id.def @@ -0,0 +1,69 @@ +γγδ»–,間投,*,* 0 +フィラー,*,*,* 1 +ζ„Ÿε‹•θ©ž,*,*,* 2 +記号,γ‚’γƒ«γƒ•γ‚‘γƒ™γƒƒγƒˆ,*,* 3 +記号,δΈ€θˆ¬,*,* 4 +記号,括弧開,*,* 5 +記号,括弧閉,*,* 6 +記号,ε₯η‚Ή,*,* 7 +記号,η©Ίη™½,*,* 8 +記号,θͺ­η‚Ή,*,* 9 +形容詞,θ‡ͺη«‹,*,* 10 +形容詞,ζŽ₯ε°Ύ,*,* 11 +形容詞,非θ‡ͺη«‹,*,* 12 +助詞,格助詞,δΈ€θˆ¬,* 13 +助詞,格助詞,引用,* 14 +助詞,格助詞,連語,* 15 +助詞,δΏ‚εŠ©θ©ž,*,* 16 +助詞,η΅‚εŠ©θ©ž,*,* 17 +助詞,ζŽ₯続助詞,*,* 18 +助詞,η‰ΉζŽŠ,*,* 19 +助詞,副詞化,*,* 20 +助詞,ε‰―εŠ©θ©ž,*,* 21 +助詞,ε‰―εŠ©θ©žοΌδΈ¦η«‹εŠ©θ©žοΌη΅‚εŠ©θ©ž,*,* 22 +助詞,δΈ¦η«‹εŠ©θ©ž,*,* 23 +助詞,連体化,*,* 24 +εŠ©ε‹•θ©ž,*,*,* 25 +ζŽ₯続詞,*,*,* 26 +ζŽ₯頭詞,形容詞ζŽ₯続,*,* 27 +ζŽ₯頭詞,ζ•°ζŽ₯続,*,* 28 +ζŽ₯頭詞,ε‹•θ©žζŽ₯続,*,* 29 +ζŽ₯頭詞,名詞ζŽ₯続,*,* 30 +ε‹•θ©ž,θ‡ͺη«‹,*,* 31 +ε‹•θ©ž,ζŽ₯ε°Ύ,*,* 32 +ε‹•θ©ž,非θ‡ͺη«‹,*,* 33 +ε‰―θ©ž,δΈ€θˆ¬,*,* 34 +ε‰―θ©ž,助詞類ζŽ₯続,*,* 35 +名詞,ァ倉ζŽ₯続,*,* 36 +名詞,γƒŠγ‚€ε½’εΉθ©žθͺžεΉΉ,*,* 37 +名詞,δΈ€θˆ¬,*,* 38 +名詞,εΌ•η”¨ζ–‡ε­—εˆ—,*,* 39 +名詞,ε½’εΉε‹•θ©žθͺžεΉΉ,*,* 40 +名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,* 41 +名詞,ε›Ίζœ‰εθ©ž,人名,δΈ€θˆ¬ 42 +名詞,ε›Ίζœ‰εθ©ž,人名,姓 43 +名詞,ε›Ίζœ‰εθ©ž,人名,名 44 +名詞,ε›Ίζœ‰εθ©ž,組織,* 45 +名詞,ε›Ίζœ‰εθ©ž,地域,δΈ€θˆ¬ 46 +名詞,ε›Ίζœ‰εθ©ž,地域,ε›½ 47 +名詞,ζ•°,*,* 48 +名詞,ζŽ₯続詞的,*,* 49 +名詞,ζŽ₯ε°Ύ,ァ倉ζŽ₯続,* 50
+名詞,ζŽ₯ε°Ύ,δΈ€θˆ¬,* 51 +名詞,ζŽ₯ε°Ύ,ε½’εΉε‹•θ©žθͺžεΉΉ,* 52 +名詞,ζŽ₯ε°Ύ,εŠ©ζ•°θ©ž,* 53 +名詞,ζŽ₯ε°Ύ,εŠ©ε‹•θ©žθͺžεΉΉ,* 54 +名詞,ζŽ₯ε°Ύ,人名,* 55 +名詞,ζŽ₯ε°Ύ,地域,* 56 +名詞,ζŽ₯ε°Ύ,η‰ΉζŽŠ,* 57 +名詞,ζŽ₯ε°Ύ,ε‰―θ©žε―θƒ½,* 58 +名詞,代名詞,δΈ€θˆ¬,* 59 +名詞,代名詞,縮約,* 60 +名詞,ε‹•θ©žιžθ‡ͺη«‹ηš„,*,* 61 +名詞,η‰ΉζŽŠ,εŠ©ε‹•θ©žθͺžεΉΉ,* 62 +名詞,非θ‡ͺη«‹,δΈ€θˆ¬,* 63 +名詞,非θ‡ͺη«‹,ε½’εΉε‹•θ©žθͺžεΉΉ,* 64 +名詞,非θ‡ͺη«‹,εŠ©ε‹•θ©žθͺžεΉΉ,* 65 +名詞,非θ‡ͺη«‹,ε‰―θ©žε―θƒ½,* 66 +名詞,ε‰―θ©žε―θƒ½,*,* 67 +ι€£δ½“θ©ž,*,*,* 68 diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/rewrite.def b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/rewrite.def new file mode 100644 index 0000000000000000000000000000000000000000..81727723b1826e5cd765efb1685c27d129b937c4 --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/rewrite.def @@ -0,0 +1,94 @@ +# +# Feature(POS) to Internal State mapping +# +[unigram rewrite] +# θͺ­γΏ,η™ΊιŸ³γ‚’γ¨γ‚Šγγžγ„γ¦, ε“θ©ž1,2,3,4,ζ΄»η”¨ε½’,ζ΄»η”¨εž‹,εŽŸε½’,γ‚ˆγΏ を使う +*,*,*,*,*,*,*,* $1,$2,$3,$4,$5,$6,$7,$8 +# θͺ­γΏγŒγͺγ„ε ΄εˆγ―η„‘θ¦– +*,*,*,*,*,*,* $1,$2,$3,$4,$5,$6,$7,* + +[left rewrite] +(助詞|εŠ©ε‹•θ©ž),*,*,*,*,*,(γͺい|η„‘い) $1,$2,$3,$4,$5,$6,η„‘い +(助詞|εŠ©ε‹•θ©ž),η΅‚εŠ©θ©ž,*,*,*,*,(γ‚ˆ|ヨ) $1,$2,$3,$4,$5,$6,γ‚ˆ +(助詞|εŠ©ε‹•θ©ž),η΅‚εŠ©θ©ž,*,*,*,*,(γͺ|γͺぁ|γͺあ|γƒŠ) $1,$2,$3,$4,$5,$6,γͺ +(助詞|εŠ©ε‹•θ©ž),η΅‚εŠ©θ©ž,*,*,*,*,(ね|ねぇ|ねえ|ねェ|ねエ|ねっ|ねッ|ネ) $1,$2,$3,$4,$5,$6,ね +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(て|ちゃ|ちゃあ) $1,$2,$3,$4,$5,$6,て +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(ちゃあ|ちゃ) $1,$2,$3,$4,$5,$6,ちゃ +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(で|γ˜γ‚ƒ) $1,$2,$3,$4,$5,$6,で +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(けど|γ‘γ‚Œγ©) $1,$2,$3,$4,$5,$6,γ‘γ‚Œγ© +(助詞|εŠ©ε‹•θ©ž),*,*,*,*,*,* $1,$2,$3,$4,$5,$6,$7 +記号,(ε₯η‚Ή|括弧閉|括弧開),*,*,*,*,* $1,$2,$3,$4,$5,$6,BOS/EOS +BOS/EOS,*,*,*,*,*,* $1,$2,$3,$4,$5,$6,BOS/EOS +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,(θ‘Œγ†|行γͺう) $1,$2,$3,$4,$5,$6,θ‘Œγ† +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,(いう|言う|云う) $1,$2,$3,$4,$5,$6,言う +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,(いく|行く) $1,$2,$3,$4,$5,$6,行く +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,する $1,$2,$3,$4,$5,$6,する +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,* $1,$2,$3,$4,$5,$6,* +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(ある|γŠγ‚‹|かかる|きる|γͺγ‚‹|まいる|まわる|γ‚„γ‚‹|ε›žγ‚‹|終わる|εˆ‡γ‚‹|参る|いらっしゃる|らっしゃる|γͺさる|γ‚‹|もらう|しまう|続く|いく|ゆく|行く|く|γγ‚Œγ‚‹|おく|する) $1,$2,$3,$4,$5,$6,$7 +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(ζ₯γ‚‹|くる) $1,$2,$3,$4,$5,$6,ζ₯γ‚‹ +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(ぬく|抜く) $1,$2,$3,$4,$5,$6,抜く +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(頂く|いただく) $1,$2,$3,$4,$5,$6,頂く +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(γ„γŸγ™|致す) $1,$2,$3,$4,$5,$6,致す +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(だす|出す) $1,$2,$3,$4,$5,$6,出す +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(぀くす|尽くす|尽す) $1,$2,$3,$4,$5,$6,尽くす +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(直す|γͺγŠγ™) $1,$2,$3,$4,$5,$6,直す +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(θΎΌγ‚€|こむ) $1,$2,$3,$4,$5,$6,θΎΌγ‚€ +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(くださる|下さる) $1,$2,$3,$4,$5,$6,下さる +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(εˆγ†|あう) $1,$2,$3,$4,$5,$6,εˆγ† +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,* $1,$2,$3,$4,$5,$6,* +形容詞,*,*,*,*,*,(γͺい|η„‘い|いい|らしい) $1,$2,$3,$4,$5,$6,η„‘い +形容詞,ζŽ₯ε°Ύ,*,*,*,*,(臭い|くさい) $1,$2,$3,$4,$5,$6,臭い +形容詞,ζŽ₯ε°Ύ,*,*,*,*,(欲しい|ほしい) $1,$2,$3,$4,$5,$6,欲しい +形容詞,ζŽ₯ε°Ύ,*,*,*,*,(γ£γŸγ‚‰γ—γ„|γŸγ‚‰γ—γ„|っぽい|ぽい) $1,$2,$3,$4,$5,$6,γŸγ‚‰γ—γ„ +形容詞,ζŽ₯ε°Ύ,*,*,*,*,* $1,$2,$3,$4,$5,$6,* +形容詞,非θ‡ͺη«‹,*,*,*,*,(難い|γŒγŸγ„|γ₯らい|にくい|やすい) $1,$2,$3,$4,$5,$6,難い +形容詞,非θ‡ͺη«‹,*,*,*,*,(γ‚ˆγ„|良い) $1,$2,$3,$4,$5,$6,良い +形容詞,非θ‡ͺη«‹,*,*,*,*,(欲しい|ほしい) $1,$2,$3,$4,$5,$6,欲しい +形容詞,非θ‡ͺη«‹,*,*,*,*,(γ˜γΎγ†|γ˜γ‚ƒγ†|でく|どく|でる|どる) $1,$2,$3,$4,$5,$6,でる +形容詞,非θ‡ͺη«‹,*,*,*,*,(ちまう|ちゃう|てく|とく|てる|とる) $1,$2,$3,$4,$5,$6,てる +形容詞,非θ‡ͺη«‹,*,*,*,*,* $1,$2,$3,$4,$5,$6,*
+ζŽ₯続詞,*,*,*,*,*,(及び|γŠγ‚ˆγ³|あるいは|ζˆ–γ„γ―|ζˆ–γ―|または|又は|γͺいし|γͺらびに|並びに|もしくは|θ‹₯しくは) $1,$2,$3,$4,$5,$6,γŠγ‚ˆγ³ +*,*,*,*,*,*,* $1,$2,$3,$4,$5,$6,* + +[right rewrite] +(助詞|εŠ©ε‹•θ©ž),*,*,*,*,*,(γͺい|η„‘い) $1,$2,$3,$4,$5,$6,η„‘い +(助詞|εŠ©ε‹•θ©ž),η΅‚εŠ©θ©ž,*,*,*,*,(γ‚ˆ|ヨ) $1,$2,$3,$4,$5,$6,γ‚ˆ +(助詞|εŠ©ε‹•θ©ž),η΅‚εŠ©θ©ž,*,*,*,*,(γͺ|γͺぁ|γͺあ|γƒŠ) $1,$2,$3,$4,$5,$6,γͺ +(助詞|εŠ©ε‹•θ©ž),η΅‚εŠ©θ©ž,*,*,*,*,(ね|ねぇ|ねえ|ねェ|ねエ|ねっ|ねッ|ネ) $1,$2,$3,$4,$5,$6,ね +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(て|ちゃ|ちゃあ) $1,$2,$3,$4,$5,$6,て +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(ちゃあ|ちゃ) $1,$2,$3,$4,$5,$6,ちゃ +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(で|γ˜γ‚ƒ) $1,$2,$3,$4,$5,$6,で +(助詞|εŠ©ε‹•θ©ž),ζŽ₯続助詞,*,*,*,*,(けど|γ‘γ‚Œγ©) $1,$2,$3,$4,$5,$6,γ‘γ‚Œγ© +(助詞|εŠ©ε‹•θ©ž),*,*,*,*,*,* $1,$2,$3,$4,$5,$6,$7 +記号,(ε₯η‚Ή|括弧閉|括弧開),*,*,*,*,* $1,$2,$3,$4,$5,$6,BOS/EOS +BOS/EOS,*,*,*,*,*,* $1,$2,$3,$4,$5,$6,BOS/EOS +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,(θ‘Œγ†|行γͺう) $1,$2,$3,$4,$5,$6,θ‘Œγ† +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,(いう|言う|云う) $1,$2,$3,$4,$5,$6,言う +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,(いく|行く) $1,$2,$3,$4,$5,$6,行く +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,する $1,$2,$3,$4,$5,$6,する +ε‹•θ©ž,θ‡ͺη«‹,*,*,*,*,* $1,$2,$3,$4,$5,$6,* +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(ある|γŠγ‚‹|かかる|きる|γͺγ‚‹|まいる|まわる|γ‚„γ‚‹|ε›žγ‚‹|終わる|εˆ‡γ‚‹|参る|いらっしゃる|らっしゃる|γͺさる|γ‚‹|もらう|しまう|続く|いく|ゆく|行く|く|γγ‚Œγ‚‹|おく|する) $1,$2,$3,$4,$5,$6,$7 +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(ζ₯γ‚‹|くる) $1,$2,$3,$4,$5,$6,ζ₯γ‚‹ +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(ぬく|抜く) $1,$2,$3,$4,$5,$6,抜く +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(頂く|いただく) $1,$2,$3,$4,$5,$6,頂く +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(γ„γŸγ™|致す) $1,$2,$3,$4,$5,$6,致す +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(だす|出す) $1,$2,$3,$4,$5,$6,出す +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(぀くす|尽くす|尽す) $1,$2,$3,$4,$5,$6,尽くす +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(直す|γͺγŠγ™) $1,$2,$3,$4,$5,$6,直す +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(θΎΌγ‚€|こむ) $1,$2,$3,$4,$5,$6,θΎΌγ‚€ +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(くださる|下さる) $1,$2,$3,$4,$5,$6,下さる +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,(εˆγ†|あう) $1,$2,$3,$4,$5,$6,εˆγ† +ε‹•θ©ž,非θ‡ͺη«‹,*,*,*,*,* $1,$2,$3,$4,$5,$6,* +形容詞,*,*,*,*,*,(γͺい|η„‘い|いい|らしい) $1,$2,$3,$4,$5,$6,η„‘い +形容詞,ζŽ₯ε°Ύ,*,*,*,*,(臭い|くさい) $1,$2,$3,$4,$5,$6,臭い +形容詞,ζŽ₯ε°Ύ,*,*,*,*,(欲しい|ほしい) $1,$2,$3,$4,$5,$6,欲しい +形容詞,ζŽ₯ε°Ύ,*,*,*,*,(γ£γŸγ‚‰γ—γ„|γŸγ‚‰γ—γ„|っぽい|ぽい) $1,$2,$3,$4,$5,$6,γŸγ‚‰γ—γ„ +形容詞,ζŽ₯ε°Ύ,*,*,*,*,* $1,$2,$3,$4,$5,$6,* +形容詞,非θ‡ͺη«‹,*,*,*,*,(難い|γŒγŸγ„|γ₯らい|にくい|やすい) $1,$2,$3,$4,$5,$6,難い +形容詞,非θ‡ͺη«‹,*,*,*,*,(γ‚ˆγ„|良い) $1,$2,$3,$4,$5,$6,良い +形容詞,非θ‡ͺη«‹,*,*,*,*,(欲しい|ほしい) $1,$2,$3,$4,$5,$6,欲しい +形容詞,非θ‡ͺη«‹,*,*,*,*,(γ˜γΎγ†|γ˜γ‚ƒγ†|でく|どく|でる|どる) $1,$2,$3,$4,$5,$6,でる +形容詞,非θ‡ͺη«‹,*,*,*,*,(ちまう|ちゃう|てく|とく|てる|とる) $1,$2,$3,$4,$5,$6,てる +形容詞,非θ‡ͺη«‹,*,*,*,*,* $1,$2,$3,$4,$5,$6,* +ζŽ₯続詞,*,*,*,*,*,(及び|γŠγ‚ˆγ³|あるいは|ζˆ–γ„γ―|ζˆ–γ―|または|又は|γͺいし|γͺらびに|並びに|もしくは|θ‹₯しくは) $1,$2,$3,$4,$5,$6,γŠγ‚ˆγ³ +*,*,*,*,*,*,* $1,$2,$3,$4,$5,$6,* diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/right-id.def b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/right-id.def new file mode 100644 index 0000000000000000000000000000000000000000..51c269688b9591d76eab882b6339c83c960b3e5b --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/right-id.def @@ -0,0 +1,1377 @@ +0 BOS/EOS,*,*,*,*,*,BOS/EOS +1 γγδ»–,間投,*,*,*,*,* +2 フィラー,*,*,*,*,*,* +3 ζ„Ÿε‹•θ©ž,*,*,*,*,*,* +4 記号,γ‚’γƒ«γƒ•γ‚‘γƒ™γƒƒγƒˆ,*,*,*,*,* +5 記号,δΈ€θˆ¬,*,*,*,*,* +6 記号,括弧開,*,*,*,*,BOS/EOS +7 記号,括弧閉,*,*,*,*,BOS/EOS +8 記号,ε₯η‚Ή,*,*,*,*,BOS/EOS +9 記号,η©Ίη™½,*,*,*,*,* +10 記号,θͺ­η‚Ή,*,*,*,*,* +11 形容詞,θ‡ͺη«‹,*,*,形容詞・ァウγ‚ͺ段,*,* +12 形容詞,θ‡ͺη«‹,*,*,形容詞・ァウγ‚ͺ段,ガルζŽ₯続,* +13
ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,焑い +14 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,* +15 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,焑い +16 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,* +17 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,焑い +18 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,* +19 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,焑い +20 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,* +21 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,焑い +22 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,* +23 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,焑い +24 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +25 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,焑い +26 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,* +27 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,焑い +28 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +29 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,焑い +30 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,* +31 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,焑い +32 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,* +33 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,焑い +34 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,* +35 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,焑い +36 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,* +37 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,焑い +38 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,* +39 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,* +40 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,* +41 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,* +42 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,* +43 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,* +44 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +45 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,* +46 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +47 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,* +48 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,* +49 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,* +50 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,* +51 ε½’εΉθ©ž,θ‡ͺη«‹,*,*,δΈε€‰εŒ–εž‹,基本归,* +52 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,* +53 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,γŸγ‚‰γ—γ„ +54 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,臭い +55 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,* +56 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,γŸγ‚‰γ—γ„ +57 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,臭い +58 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,* +59 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,γŸγ‚‰γ—γ„ +60 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,臭い +61 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,* +62 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,γŸγ‚‰γ—γ„ +63 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,臭い +64 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,* +65 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,γŸγ‚‰γ—γ„ +66 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,臭い +67 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,* +68 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,γŸγ‚‰γ—γ„ +69 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,臭い +70 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +71 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,γŸγ‚‰γ—γ„ +72 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,臭い +73 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,* +74 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,γŸγ‚‰γ—γ„ +75 
ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,臭い +76 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +77 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,γŸγ‚‰γ—γ„ +78 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,臭い +79 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,* +80 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,γŸγ‚‰γ—γ„ +81 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,臭い +82 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,* +83 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,γŸγ‚‰γ—γ„ +84 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,臭い +85 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,* +86 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,γŸγ‚‰γ—γ„ +87 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,臭い +88 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,* +89 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,γŸγ‚‰γ—γ„ +90 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,臭い +91 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,* +92 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,γŸγ‚‰γ—γ„ +93 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,* +94 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,γŸγ‚‰γ—γ„ +95 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,* +96 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,γŸγ‚‰γ—γ„ +97 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,* +98 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,γŸγ‚‰γ—γ„ +99 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,* +100 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,γŸγ‚‰γ—γ„ +101 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,* +102 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,γŸγ‚‰γ—γ„ +103 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +104 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,γŸγ‚‰γ—γ„ +105 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,* +106 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,γŸγ‚‰γ—γ„ +107 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +108 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,γŸγ‚‰γ—γ„ +109 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,* +110 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,γŸγ‚‰γ—γ„ +111 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,* +112 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,γŸγ‚‰γ—γ„ +113 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,* +114 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,γŸγ‚‰γ—γ„ +115 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,* +116 ε½’εΉθ©ž,ζŽ₯ε°Ύ,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,γŸγ‚‰γ—γ„ +117 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,*,* +118 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,* +119 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,難い +120 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ガルζŽ₯碚,良い +121 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,* +122 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,難い +123 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšε½’,良い +124 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,* +125 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,難い +126 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ‘,良い +127 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,* +128 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,難い +129 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,δ»εšηΈη΄„οΌ’,良い +130 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,難い +131 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,基本归,良い +132 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,* +133 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,難い +134 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,体言ζŽ₯碚,良い +135 
ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,* +136 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,難い +137 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,良い +138 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,* +139 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,難い +140 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺ焢ウζŽ₯碚,良い +141 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,* +142 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,難い +143 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,ζœͺη„ΆγƒŒζŽ₯碚,良い +144 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,* +145 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,難い +146 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,命什e,良い +147 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,* +148 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,難い +149 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用ゴアむζŽ₯碚,良い +150 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,* +151 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,難い +152 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用タζŽ₯碚,良い +153 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,* +154 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,難い +155 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚’ウγ‚ͺζ΅,連用テζŽ₯碚,良い +156 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,欲しい +157 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,欲しい +158 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,欲しい +159 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,欲しい +160 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,欲しい +161 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,欲しい +162 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,欲しい +163 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,欲しい +164 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,欲しい +165 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,欲しい +166 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,欲しい +167 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,欲しい +168 ε½’εΉθ©ž,非θ‡ͺη«‹,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,欲しい +169 助詞,格助詞,δΈ€θˆ¬,*,*,*,から +170 助詞,格助詞,δΈ€θˆ¬,*,*,*,が +171 助詞,格助詞,δΈ€θˆ¬,*,*,*,぀ +172 助詞,格助詞,δΈ€θˆ¬,*,*,*,で +173 助詞,格助詞,δΈ€θˆ¬,*,*,*,と +174 助詞,格助詞,δΈ€θˆ¬,*,*,*,に +175 助詞,格助詞,δΈ€θˆ¬,*,*,*,にて +176 助詞,格助詞,δΈ€θˆ¬,*,*,*,だ+177 助詞,格助詞,δΈ€θˆ¬,*,*,*,へ +178 助詞,格助詞,δΈ€θˆ¬,*,*,*,γ‚ˆγ‚Š +179 助詞,格助詞,δΈ€θˆ¬,*,*,*,γ‚’ +180 助詞,格助詞,δΈ€θˆ¬,*,*,*,γ‚“ +181 助詞,格助詞,δΈ€θˆ¬,*,*,*,デ +182 助詞,格助詞,δΈ€θˆ¬,*,*,*,γƒŽ +183 助詞,格助詞,δΈ€θˆ¬,*,*,*,γƒ˜ +184 助詞,格助詞,δΈ€θˆ¬,*,*,*,ヲ +185 助詞,格助詞,δΈ€θˆ¬,*,*,*,δΉ‹ +186 助詞,格助詞,引用,*,*,*,っと +187 助詞,格助詞,引用,*,*,*,と +188 助詞,格助詞,ι€£θͺž,*,*,*,γ˜γ‚ƒ +189 助詞,格助詞,ι€£θͺž,*,*,*,っけゅう +190 助詞,格助詞,ι€£θͺž,*,*,*,って +191 助詞,格助詞,ι€£θͺž,*,*,*,っていう +192 助詞,格助詞,ι€£θͺž,*,*,*,ってγͺ +193 助詞,格助詞,ι€£θͺž,*,*,*,て +194 助詞,格助詞,ι€£θͺž,*,*,*,ていう +195 助詞,格助詞,ι€£θͺž,*,*,*,といいます +196 助詞,格助詞,ι€£θͺž,*,*,*,という +197 助詞,格助詞,ι€£θͺž,*,*,*,γ¨γ„γ£γŸ +198 助詞,格助詞,ι€£θͺž,*,*,*,といち +199 助詞,格助詞,ι€£θͺž,*,*,*,とかいいます +200 助詞,格助詞,ι€£θͺž,*,*,*,とかいう +201 助詞,格助詞,ι€£θͺž,*,*,*,とかいち +202 助詞,格助詞,ι€£θͺž,*,*,*,として +203 助詞,格助詞,ι€£θͺž,*,*,*,γ¨γ—γΎγ—γŸγ‚‰ +204 助詞,格助詞,ι€£θͺž,*,*,*,としまして +205 助詞,格助詞,ι€£θͺž,*,*,*,とともに +206 助詞,格助詞,ι€£θͺž,*,*,*,と共に +207 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ£γ¦ +208 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚Š +209 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ—γ¦ +210 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ™ +211 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚‹ +212 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŠγ„γ¦ +213 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŠγγΎγ—γ¦ +214 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŠγ‘γ‚‹ +215 助詞,格助詞,ι€£θͺž,*,*,*,にかけ +216 助詞,格助詞,ι€£θͺž,*,*,*,にかけて +217 助詞,格助詞,ι€£θͺž,*,*,*,にかけまして +218 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ—γ¦ 
+219 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ—γΎγ—γ¦ +220 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ—γΎγ™ +221 助詞,格助詞,ι€£θͺž,*,*,*,γ«γŸγ„γ™γ‚‹ +222 助詞,格助詞,ι€£θͺž,*,*,*,に぀いて +223 助詞,格助詞,ι€£θͺž,*,*,*,に぀き +224 助詞,格助詞,ι€£θͺž,*,*,*,に぀きまして +225 助詞,格助詞,ι€£θͺž,*,*,*,に぀け +226 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ€γ‚Œ +227 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ€γ‚Œγ¦ +228 助詞,格助詞,ι€£θͺž,*,*,*,にとって +229 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ¨γ‚Š +230 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ¨γ‚ŠγΎγ—γ¦ +231 助詞,格助詞,ι€£θͺž,*,*,*,γ«γΎγ€γ‚γ‚ŠγΎγ™ +232 助詞,格助詞,ι€£θͺž,*,*,*,にま぀わる +233 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ£γ¦ +234 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚Š +235 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚ŠγΎγ—γ¦ +236 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚ŠγΎγ™ +237 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚ˆγ‚‹ +238 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ£γ¦ +239 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚Š +240 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ—γ¦ +241 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚ŠγΎγ™ +242 助詞,格助詞,ι€£θͺž,*,*,*,γ«γ‚γŸγ‚‹ +243 助詞,格助詞,ι€£θͺž,*,*,*,に閒し +244 助詞,格助詞,ι€£θͺž,*,*,*,に閒して +245 助詞,格助詞,ι€£θͺž,*,*,*,に閒しまして +246 助詞,格助詞,ι€£θͺž,*,*,*,に閒します +247 助詞,格助詞,ι€£θͺž,*,*,*,に閒する +248 助詞,格助詞,ι€£θͺž,*,*,*,γ«ιš›γ— +249 助詞,格助詞,ι€£θͺž,*,*,*,γ«ιš›γ—γ¦ +250 助詞,格助詞,ι€£θͺž,*,*,*,γ«ιš›γ—γΎγ—γ¦ +251 助詞,格助詞,ι€£θͺž,*,*,*,に従い +252 助詞,格助詞,ι€£θͺž,*,*,*,に従いまして +253 助詞,格助詞,ι€£θͺž,*,*,*,に従います +254 助詞,格助詞,ι€£θͺž,*,*,*,に従う +255 助詞,格助詞,ι€£θͺž,*,*,*,に従って +256 助詞,格助詞,ι€£θͺž,*,*,*,に対し +257 助詞,格助詞,ι€£θͺž,*,*,*,に対して +258 助詞,格助詞,ι€£θͺž,*,*,*,に対しまして +259 助詞,格助詞,ι€£θͺž,*,*,*,に対します +260 助詞,格助詞,ι€£θͺž,*,*,*,に対する +261 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ£γ¦ +262 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚Š +263 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚ŠγΎγ—γ¦ +264 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚ŠγΎγ™ +265 助詞,格助詞,ι€£θͺž,*,*,*,γ«ε½“γŸγ‚‹ +266 助詞,格助詞,ι€£θͺž,*,*,*,をめぐって +267 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’γ‚γγ‚ŠγΎγ—γ¦ +268 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’γ‚γγ‚ŠγΎγ™ +269 助詞,格助詞,ι€£θͺž,*,*,*,をめぐる +270 助詞,格助詞,ι€£θͺž,*,*,*,をもけまして +271 助詞,格助詞,ι€£θͺž,*,*,*,をもって +272 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’δ»₯て +273 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ—γ¦ +274 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ—γΎγ—γ¦ +275 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ˜ +276 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ˜γ¦ +277 助詞,格助詞,ι€£θͺž,*,*,*,γ‚’ι€šγ˜γΎγ—γ¦ +278 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,こそ +279 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,γ•γˆ +280 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,しか +281 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,すら +282 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,ぞ +283 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,っきゃ +284 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,は +285 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,γ‚‚ +286 助詞,δΏ‚εŠ©θ©ž,*,*,*,*,γ‚„ +287 助詞,η΅‚εŠ©θ©ž,*,*,*,*,かぁ +288 助詞,η΅‚εŠ©θ©ž,*,*,*,*,かい +289 助詞,η΅‚εŠ©θ©ž,*,*,*,*,かしら +290 助詞,η΅‚εŠ©θ©ž,*,*,*,*,け +291 助詞,η΅‚εŠ©θ©ž,*,*,*,*,さ +292 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ぜ +293 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ぞ +294 助詞,η΅‚εŠ©θ©ž,*,*,*,*,だって +295 助詞,η΅‚εŠ©θ©ž,*,*,*,*,っけ +296 助詞,η΅‚εŠ©θ©ž,*,*,*,*,てん +297 助詞,η΅‚εŠ©θ©ž,*,*,*,*,で +298 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺ +299 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺγƒΌ +300 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺぁー +301 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γͺγ‚‘ +302 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ね +303 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ねー +304 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ねん +305 助詞,η΅‚εŠ©θ©ž,*,*,*,*,だ+306 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γγ† +307 助詞,η΅‚εŠ©θ©ž,*,*,*,*,べ +308 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚‚γ‚“ +309 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚„ +310 助詞,η΅‚εŠ©θ©ž,*,*,*,*,やら +311 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚ˆ +312 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚ˆγƒΌ +313 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚ˆγ† +314 助詞,η΅‚εŠ©θ©ž,*,*,*,*,わ +315 助詞,η΅‚εŠ©θ©ž,*,*,*,*,わい +316 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γ‚“ +317 助詞,η΅‚εŠ©θ©ž,*,*,*,*,ヨー +318 助詞,η΅‚εŠ©θ©ž,*,*,*,*,γƒ― +319 助詞,ζŽ₯碚助詞,*,*,*,*,γŠγ‚ˆγ³ +320 助詞,ζŽ₯碚助詞,*,*,*,*,から +321 助詞,ζŽ₯碚助詞,*,*,*,*,からには +322 助詞,ζŽ₯碚助詞,*,*,*,*,が +323 助詞,ζŽ₯碚助詞,*,*,*,*,けども +324 助詞,ζŽ₯碚助詞,*,*,*,*,γ‘γ‚Œγ© +325 
助詞,ζŽ₯碚助詞,*,*,*,*,γ‘γ‚Œγ©γ‚‚ +326 助詞,ζŽ₯碚助詞,*,*,*,*,さかい +327 助詞,ζŽ₯碚助詞,*,*,*,*,し +328 助詞,ζŽ₯碚助詞,*,*,*,*,たって +329 助詞,ζŽ₯碚助詞,*,*,*,*,぀぀ +330 助詞,ζŽ₯碚助詞,*,*,*,*,て +331 助詞,ζŽ₯碚助詞,*,*,*,*,で +332 助詞,ζŽ₯碚助詞,*,*,*,*,と +333 助詞,ζŽ₯碚助詞,*,*,*,*,とも +334 助詞,ζŽ₯碚助詞,*,*,*,*,ど +335 助詞,ζŽ₯碚助詞,*,*,*,*,どころか +336 助詞,ζŽ₯碚助詞,*,*,*,*,ども +337 助詞,ζŽ₯碚助詞,*,*,*,*,γͺγŒγ‚‰ +338 助詞,ζŽ₯碚助詞,*,*,*,*,γͺγ‚Š +339 助詞,ζŽ₯碚助詞,*,*,*,*,γγ§ +340 助詞,ζŽ₯碚助詞,*,*,*,*,γγ« +341 助詞,ζŽ₯碚助詞,*,*,*,*,ば +342 助詞,ζŽ₯碚助詞,*,*,*,*,γ‚‚γγ +343 助詞,ζŽ₯碚助詞,*,*,*,*,γ‚„ +344 助詞,ζŽ₯碚助詞,*,*,*,*,やいγͺγ‚„ +345 助詞,ζŽ₯碚助詞,*,*,*,*,や否や +346 助詞,ζŽ₯碚助詞,*,*,*,*,んで +347 助詞,η‰ΉζŠ,*,*,*,*,かγͺ +348 助詞,η‰ΉζŠ,*,*,*,*,けむ +349 助詞,η‰ΉζŠ,*,*,*,*,に +350 助詞,η‰ΉζŠ,*,*,*,*,にゃ +351 助詞,η‰ΉζŠ,*,*,*,*,γ‚“ +352 助詞,ε‰―θ©žεŒ–,*,*,*,*,と +353 助詞,ε‰―θ©žεŒ–,*,*,*,*,に +354 助詞,ε‰―εŠ©θ©ž,*,*,*,*,かも +355 助詞,ε‰―εŠ©θ©ž,*,*,*,*,くらい +356 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ぐらい +357 助詞,ε‰―εŠ©θ©ž,*,*,*,*,しも +358 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ˜γ‚ƒ +359 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ˜γ‚ƒγ‚ +360 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ˜γ‚ƒγ‚‘ +361 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ず぀ +362 助詞,ε‰―εŠ©θ©ž,*,*,*,*,だけ +363 助詞,ε‰―εŠ©θ©ž,*,*,*,*,だって +364 助詞,ε‰―εŠ©θ©ž,*,*,*,*,だに +365 助詞,ε‰―εŠ©θ©ž,*,*,*,*,でも +366 助詞,ε‰―εŠ©θ©ž,*,*,*,*,とも +367 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺぞ +368 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺど +369 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺγ‚Š +370 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺんか +371 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺγ‚“γž +372 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γͺんて +373 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γγΏ +374 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ばかし +375 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ°γ‹γ‚Š +376 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ばっか +377 助詞,ε‰―εŠ©θ©ž,*,*,*,*,γ°γ£γ‹γ‚Š +378 助詞,ε‰―εŠ©θ©ž,*,*,*,*,ほど +379 助詞,ε‰―εŠ©θ©ž,*,*,*,*,まで +380 助詞,ε‰―εŠ©θ©ž,*,*,*,*,やら +381 助詞,ε‰―εŠ©θ©ž,*,*,*,*,程 +382 助詞,ε‰―εŠ©θ©ž,*,*,*,*,θΏ„ +383 助詞,ε‰―εŠ©θ©žοΌδΈ¦η«‹εŠ©θ©žοΌη΅‚εŠ©θ©ž,*,*,*,*,か +384 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γŸγ‚Š +385 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,だだ+386 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γ γ‚Š +387 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,と +388 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,とか +389 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γͺγ‚Š +390 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,γ‚„ +391 助詞,δΈ¦η«‹εŠ©θ©ž,*,*,*,*,やら +392 助詞,ι€£δ½“εŒ–,*,*,*,*,だ+393 助詞,ι€£δ½“εŒ–,*,*,*,*,γƒŽ +394 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,δ»εšε½’,぀ +395 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,基本归,぀ +396 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,体言ζŽ₯碚,぀ +397 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,぀ +398 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,命什yo,぀ +399 εŠ©ε‹•θ©ž,*,*,*,δΈ‹δΊŒγƒ»γ‚Ώθ‘Œ,連用归,぀ +400 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,らしい +401 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ガルζŽ₯碚,焑い +402 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,らしい +403 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšε½’,焑い +404 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,らしい +405 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ‘,焑い +406 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,らしい +407 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,δ»εšηΈη΄„οΌ’,焑い +408 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,らしい +409 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,基本归,焑い +410 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,らしい +411 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,体言ζŽ₯碚,焑い +412 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,らしい +413 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζ–‡θͺžεŸΊζœ¬ε½’,焑い +414 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,らしい +415 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺ焢ウζŽ₯碚,焑い +416 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,らしい +417 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,ζœͺη„ΆγƒŒζŽ₯碚,焑い +418 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,らしい +419 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,命什e,焑い +420 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,らしい +421 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用ゴアむζŽ₯碚,焑い +422 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,らしい +423 
εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用タζŽ₯碚,焑い +424 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,らしい +425 εŠ©ε‹•θ©ž,*,*,*,ε½’εΉθ©žγƒ»γ‚€ζ΅,連用テζŽ₯碚,焑い +426 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,δ»εšε½’,ある +427 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,δ»εšηΈη΄„οΌ‘,ある +428 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,基本归,ある +429 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,体言ζŽ₯ηΆšη‰ΉζŠ,ある +430 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,ζœͺ焢ウζŽ₯碚,ある +431 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,ζœͺη„Άε½’,ある +432 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,命什e,ある +433 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,連用タζŽ₯碚,ある +434 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œγ‚’ル,連用归,ある +435 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,ござる +436 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,εΎ‘εΊ§γ‚‹ +437 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,ござる +438 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,εΎ‘εΊ§γ‚‹ +439 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,ござる +440 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,εΎ‘εΊ§γ‚‹ +441 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,ござる +442 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,εΎ‘εΊ§γ‚‹ +443 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,ござる +444 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,εΎ‘εΊ§γ‚‹ +445 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,ござる +446 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,εΎ‘εΊ§γ‚‹ +447 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,ござる +448 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,εΎ‘εΊ§γ‚‹ +449 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,ござる +450 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,εΎ‘εΊ§γ‚‹ +451 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,ござる +452 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,εΎ‘εΊ§γ‚‹ +453 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,ござる +454 εŠ©ε‹•θ©ž,*,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,εΎ‘εΊ§γ‚‹ +455 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Έγƒ£,基本归,γ˜γ‚ƒ +456 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Έγƒ£,ζœͺη„Άε½’,γ˜γ‚ƒ +457 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Έγƒ£,連用归,γ˜γ‚ƒ +458 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,δ»εšε½’,た +459 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,δ»εšε½’,だ +460 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,基本归,た +461 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,基本归,だ +462 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,ζœͺη„Άε½’,た +463 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώ,ζœͺη„Άε½’,だ +464 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ガルζŽ₯碚,γŸγ„ +465 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,音便基本归,γŸγ„ +466 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,δ»εšε½’,γŸγ„ +467 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,δ»εšηΈη΄„οΌ‘,γŸγ„ +468 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,δ»εšηΈη΄„οΌ’,γŸγ„ +469 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,基本归,γŸγ„ +470 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,体言ζŽ₯碚,γŸγ„ +471 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ζ–‡θͺžεŸΊζœ¬ε½’,γŸγ„ +472 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ζœͺ焢ウζŽ₯碚,γŸγ„ +473 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,ζœͺη„ΆγƒŒζŽ₯碚,γŸγ„ +474 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,連用ゴアむζŽ₯碚,γŸγ„ +475 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,連用タζŽ₯碚,γŸγ„ +476 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γ‚Ώγ‚€,連用テζŽ₯碚,γŸγ„ +477 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,δ»εšε½’,だ +478 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,基本归,だ +479 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,体言ζŽ₯碚,だ +480 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,ζœͺη„Άε½’,だ +481 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,命什e,だ +482 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,連用タζŽ₯碚,だ +483 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,連用归,だ +484 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,基本归,っす +485 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,基本归,です +486 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,基本归,どす +487 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,ζœͺη„Άε½’,っす +488 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,ζœͺη„Άε½’,です +489 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,ζœͺη„Άε½’,どす +490 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,連用归,っす +491 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,連用归,です +492 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ‡γ‚Ή,連用归,どす +493 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ガルζŽ₯碚,焑い +494 
εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,音便基本归,焑い +495 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,δ»εšε½’,焑い +496 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,δ»εšηΈη΄„οΌ‘,焑い +497 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,δ»εšηΈη΄„οΌ’,焑い +498 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,基本归,焑い +499 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,体言ζŽ₯碚,焑い +500 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ζ–‡θͺžεŸΊζœ¬ε½’,焑い +501 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ζœͺ焢ウζŽ₯碚,焑い +502 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,ζœͺη„ΆγƒŒζŽ₯碚,焑い +503 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,命什e,焑い +504 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用ゴアむζŽ₯碚,焑い +505 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用タζŽ₯碚,焑い +506 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用テζŽ₯碚,焑い +507 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŠγ‚€,連用デζŽ₯碚,焑い +508 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,δ»εšε½’,ぬ +509 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,基本归,ぬ +510 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,体言ζŽ₯碚,ぬ +511 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,ζ–‡θͺžεŸΊζœ¬ε½’,ぬ +512 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,連用ニζŽ₯碚,ぬ +513 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒŒ,連用归,ぬ +514 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,δ»εšε½’,ます +515 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,δ»εšε½’,やす +516 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,基本归,ます +517 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,基本归,やす +518 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺ焢ウζŽ₯碚,ます +519 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺ焢ウζŽ₯碚,やす +520 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺη„Άε½’,ます +521 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,ζœͺη„Άε½’,やす +522 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什e,ます +523 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什e,やす +524 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什i,ます +525 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,命什i,やす +526 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,連用归,ます +527 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒžγ‚Ή,連用归,やす +528 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,基本归,γ‚„ +529 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,ζœͺη„Άε½’,γ‚„ +530 εŠ©ε‹•θ©ž,*,*,*,η‰ΉζŠγƒ»γƒ€,連用归,γ‚„ +531 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,う +532 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,じ +533 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ˜γ‚ƒγ‚“ +534 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ˜γ‚ƒγƒ³ +535 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,ぬ +536 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,ひん +537 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,へん +538 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,まい +539 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ‚„γ‚“ +540 εŠ©ε‹•θ©ž,*,*,*,δΈε€‰εŒ–εž‹,基本归,γ‚“ +541 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚­,基本归,き +542 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚­,体言ζŽ₯碚,き +543 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚­,命什e,き +544 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚±γƒͺ,基本归,γ‘γ‚Š +545 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚±γƒͺ,体言ζŽ₯碚,γ‘γ‚Š +546 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,基本归,ごとし +547 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,基本归,如し +548 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,体言ζŽ₯碚,ごとし +549 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,体言ζŽ₯碚,如し +550 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,連用归,ごとし +551 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γ‚΄γƒˆγ‚·,連用归,如し +552 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,δ»εšε½’,γŸγ‚Š +553 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,δ»εšε½’,γͺγ‚Š +554 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,基本归,γŸγ‚Š +555 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,基本归,γͺγ‚Š +556 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,体言ζŽ₯碚,γŸγ‚Š +557 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,体言ζŽ₯碚,γͺγ‚Š +558 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,ζœͺη„Άε½’,γŸγ‚Š +559 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,ζœͺη„Άε½’,γͺγ‚Š +560 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,命什e,γŸγ‚Š +561 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒŠγƒͺ,命什e,γͺγ‚Š +562 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,δ»εšε½’,べし +563 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,基本归,べし +564 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,体言ζŽ₯碚,べし +565 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,ζœͺη„Άε½’,べし +566 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ™γ‚·,連用归,べし +567 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,δ»εšε½’,まじ +568 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,基本归,まじ +569 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,体言ζŽ₯碚,まじ +570 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒžγ‚Έ,連用归,まじ 
+571 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒͺ,基本归,γ‚Š +572 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒͺ,体言ζŽ₯碚,γ‚Š +573 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,δ»εšε½’,γ‚‹ +574 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,基本归,γ‚‹ +575 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,体言ζŽ₯碚,γ‚‹ +576 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,ζœͺη„Άε½’,γ‚‹ +577 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,命什e,γ‚‹ +578 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,命什yo,γ‚‹ +579 εŠ©ε‹•θ©ž,*,*,*,ζ–‡θͺžγƒ»γƒ«,連用归,γ‚‹ +580 ζŽ₯碚詞,*,*,*,*,*,* +581 ζŽ₯碚詞,*,*,*,*,*,γŠγ‚ˆγ³ +582 ζŽ₯頭詞,ε½’εΉθ©žζŽ₯碚,*,*,*,*,* +583 ζŽ₯頭詞,ζ•°ζŽ₯碚,*,*,*,*,* +584 ζŽ₯頭詞,ε‹•θ©žζŽ₯碚,*,*,*,*,* +585 ζŽ₯頭詞,名詞ζŽ₯碚,*,*,*,*,* +586 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,δ»εšε½’,* +587 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,δ»εšηΈη΄„οΌ‘,* +588 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,基本归,* +589 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠ,* +590 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +591 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,ζœͺ焢ウζŽ₯碚,* +592 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,ζœͺη„Άε½’,* +593 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,命什i,* +594 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,命什yo,* +595 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・クル,連用归,* +596 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšε½’,* +597 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšηΈη΄„οΌ‘,* +598 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,基本归,* +599 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠ,* +600 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +601 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺ焢ウζŽ₯碚,* +602 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺη„Άε½’,* +603 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什i,* +604 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什yo,* +605 ε‹•θ©ž,θ‡ͺη«‹,*,*,カ倉・ζ₯ル,連用归,* +606 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,δ»εšε½’,* +607 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,δ»εšηΈη΄„οΌ‘,* +608 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,基本归,* +609 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζ–‡θͺžεŸΊζœ¬ε½’,* +610 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζœͺ焢ウζŽ₯碚,* +611 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζœͺ焢レルζŽ₯碚,* +612 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,ζœͺη„Άε½’,* +613 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,命什ro,* +614 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ήγƒ«,命什yo,* +615 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,δ»εšε½’,* +616 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,δ»εšηΈη΄„οΌ‘,* +617 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,基本归,* +618 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,ζ–‡θͺžεŸΊζœ¬ε½’,* +619 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,ζœͺ焢ウζŽ₯碚,* +620 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,ζœͺη„Άε½’,* +621 ε‹•θ©ž,θ‡ͺη«‹,*,*,γ‚΅ε€‰γƒ»βˆ’γ‚Ίγƒ«,命什yo,* +622 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,δ»εšε½’,する +623 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,δ»εšηΈη΄„οΌ‘,する +624 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,基本归,する +625 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,体言ζŽ₯ηΆšη‰ΉζŠ,する +626 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,する +627 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζ–‡θͺžεŸΊζœ¬ε½’,する +628 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺ焢ウζŽ₯碚,する +629 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺη„ΆγƒŒζŽ₯碚,する +630 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺ焢レルζŽ₯碚,する +631 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,ζœͺη„Άε½’,する +632 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,命什i,する +633 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,命什ro,する +634 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,命什yo,する +635 ε‹•θ©ž,θ‡ͺη«‹,*,*,ァ倉・スル,連用归,する +636 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,δ»εšε½’,* +637 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,基本归,* +638 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,体言ζŽ₯碚,* +639 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,ζœͺη„Άε½’,* +640 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,命什e,* +641 ε‹•θ©ž,θ‡ͺη«‹,*,*,ラ倉,連用归,* +642 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,*,* +643 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšε½’,* +644 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,* +645 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归,* +646 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,* +647 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,* +648 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,* +649 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺη„Άε½’,* +650 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,命什ro,* +651 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,命什yo,* +652 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅,連用归,* +653 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšε½’,* +654 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšηΈη΄„οΌ‘,* +655 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,基本归,* +656 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺ焢ウζŽ₯碚,* +657 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άε½’,* +658 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άη‰ΉζŠ,* +659 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什e,* +660 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什ro,* +661 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什yo,* +662 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,連用归,* +663 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,δ»εšε½’,* +664 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,基本归,* +665 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,δ»εšε½’,* +666 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,基本归,* +667 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,体言ζŽ₯碚,* +668 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,ζœͺη„Άε½’,* +669 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,命什yo,* +670 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚«θ‘Œ,連用归,* +671 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,δ»εšε½’,* +672 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,基本归,* +673 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,体言ζŽ₯碚,* +674 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,ζœͺη„Άε½’,* +675 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,命什yo,* +676 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γ‚¬θ‘Œ,連用归,* +677 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,δ»εšε½’,* +678 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,基本归,* +679 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,体言ζŽ₯碚,* +680 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,ζœͺη„Άε½’,* +681 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,命什yo,* +682 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒ€θ‘Œ,連用归,* +683 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,δ»εšε½’,* +684 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,基本归,* +685 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,体言ζŽ₯碚,* +686 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +687 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,命什yo,* +688 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒθ‘Œ,連用归,* +689 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,δ»εšε½’,* +690 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,基本归,* +691 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,体言ζŽ₯碚,* +692 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,ζœͺη„Άε½’,* +693 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,命什yo,* +694 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»γƒžθ‘Œ,連用归,* +695 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,δ»εšε½’,* +696 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,基本归,* +697 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,体言ζŽ₯碚,* +698 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,ζœͺ焢ウζŽ₯碚,* +699 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,ζœͺη„Άε½’,* +700 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,命什yo,* +701 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈ‹δΊŒγƒ»εΎ—,連用归,* +702 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,*,* +703 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,* +704 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,葌く +705 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +706 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,葌く +707 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,* +708 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,葌く +709 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +710 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,葌く +711 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,* +712 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,葌く +713 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,* +714 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,葌く +715 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,* +716 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,葌く +717 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,* +718 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,葌く +719 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +720 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,葌く +721 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +722 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,葌く +723 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +724 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,葌く +725 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +726 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,葌く +727 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +728 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,葌く +729 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +730 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,葌く +731 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +732 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,葌く +733 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +734 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,葌く +735 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,* +736 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,葌く +737 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,* +738 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,葌く +739 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,* +740 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,葌く +741 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,* +742 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,葌く +743 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,* +744 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,葌く +745 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,* +746 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,葌く +747 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,* +748 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,葌く +749 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,*,* +750 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,δ»εšε½’,* +751 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,δ»εšηΈη΄„οΌ‘,* +752 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,基本归,* +753 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,ζœͺ焢ウζŽ₯碚,* +754 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,ζœͺη„Άε½’,* +755 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,命什e,* +756 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,連用タζŽ₯碚,* +757 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚¬θ‘Œ,連用归,* +758 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,*,* +759 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +760 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,* +761 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,* +762 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,* +763 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +764 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +765 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +766 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,*,* +767 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšε½’,* +768 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšηΈη΄„οΌ‘,* +769 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,基本归,* +770 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺ焢ウζŽ₯碚,* +771 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,* +772 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,命什e,* +773 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用タζŽ₯碚,* +774 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用归,* +775 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšε½’,* +776 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšηΈη΄„οΌ‘,* +777 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,基本归,* +778 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺ焢ウζŽ₯碚,* +779 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺη„Άε½’,* +780 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,命什e,* +781 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用タζŽ₯碚,* +782 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用归,* +783 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,*,* +784 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,δ»εšε½’,* +785 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,δ»εšηΈη΄„οΌ‘,* +786 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,基本归,* +787 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,ζœͺ焢ウζŽ₯碚,* +788 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +789 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,命什e,* +790 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,連用タζŽ₯碚,* +791 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒθ‘Œ,連用归,* +792 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,*,* +793 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšε½’,* +794 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšηΈη΄„οΌ‘,* +795 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,基本归,* +796 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺ焢ウζŽ₯碚,* +797 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺη„Άε½’,* +798 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,命什e,* +799 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用タζŽ₯碚,* +800 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用归,* +801 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,* +802 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,する +803 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,* +804 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,する +805 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,* +806 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,する +807 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,* +808 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,する +809 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,* +810 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,する +811 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +812 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,する +813 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,* +814 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,する +815 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,* +816 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,する +817 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,* +818 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,する +819 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,* +820 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,する +821 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,* +822 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,する +823 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,* +824 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,する +825 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,* +826 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,* +827 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,* +828 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,* +829 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,* +830 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,* +831 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,* +832 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,* +833 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,* +834 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,* +835 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,*,* +836 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,δ»εšε½’,* +837 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,δ»εšε½’,言う +838 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,基本归,* +839 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,基本归,言う +840 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +841 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,言う +842 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺη„Άε½’,* +843 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺη„Άε½’,言う +844 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,命什e,* +845 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,命什e,言う +846 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用タζŽ₯碚,* +847 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用タζŽ₯碚,言う +848 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用归,* +849 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用归,言う +850 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,*,* +851 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +852 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,言う +853 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,θ‘Œγ† +854 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +855 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,言う +856 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,θ‘Œγ† +857 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +858 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,言う +859 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,θ‘Œγ† +860 
ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +861 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,言う +862 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,θ‘Œγ† +863 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +864 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,言う +865 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,θ‘Œγ† +866 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +867 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,言う +868 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,θ‘Œγ† +869 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +870 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,言う +871 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,θ‘Œγ† +872 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +873 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,基本归,* +874 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +875 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +876 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +877 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšε½’,* +878 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,基本归,* +879 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,* +880 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,命什e,* +881 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γ‚Ώθ‘Œ,連用归,* +882 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,δ»εšε½’,* +883 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,基本归,* +884 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +885 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,命什e,* +886 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,連用归,* +887 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,δ»εšε½’,* +888 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,基本归,* +889 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +890 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,命什e,* +891 ε‹•θ©ž,θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,連用归,* +892 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,δ»εšε½’,* +893 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,基本归,* +894 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,現代基本归,* +895 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,体言ζŽ₯碚,* +896 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,ζœͺη„Άε½’,* +897 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,命什yo,* +898 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒ€θ‘Œ,連用归,* +899 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,δ»εšε½’,* +900 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,基本归,* +901 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,体言ζŽ₯碚,* +902 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +903 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,命什yo,* +904 ε‹•θ©ž,θ‡ͺη«‹,*,*,δΈŠδΊŒγƒ»γƒθ‘Œ,連用归,* +905 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,δ»εšε½’,* +906 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,* +907 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,基本归,* +908 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,* +909 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,* +910 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,* +911 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,ζœͺη„Άε½’,* +912 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,命什ro,* +913 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,命什yo,* +914 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΈ€ζ΅,連用归,* +915 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +916 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,* +917 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,* +918 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,* +919 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +920 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +921 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +922 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,* +923 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,* +924 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,* +925 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,* +926 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +927 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,* +928 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,* +929 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,* +930 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,* +931 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,* +932 ε‹•θ©ž,ζŽ₯ε°Ύ,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,* +933 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,δ»εšε½’,ζ₯γ‚‹ +934 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,δ»εšηΈη΄„οΌ‘,ζ₯γ‚‹ +935 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,基本归,ζ₯γ‚‹ +936 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠ,ζ₯γ‚‹ +937 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ζ₯γ‚‹ +938 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,ζœͺ焢ウζŽ₯碚,ζ₯γ‚‹ +939 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,ζœͺη„Άε½’,ζ₯γ‚‹ +940 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,命什i,ζ₯γ‚‹ +941 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,命什yo,ζ₯γ‚‹ +942 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・クル,連用归,ζ₯γ‚‹ +943 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšε½’,ζ₯γ‚‹ +944 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,δ»εšηΈη΄„οΌ‘,ζ₯γ‚‹ +945 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,基本归,ζ₯γ‚‹ +946 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠ,ζ₯γ‚‹ +947 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ζ₯γ‚‹ +948 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺ焢ウζŽ₯碚,ζ₯γ‚‹ +949 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,ζœͺη„Άε½’,ζ₯γ‚‹ +950 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什i,ζ₯γ‚‹ +951 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,命什yo,ζ₯γ‚‹ +952 ε‹•θ©ž,非θ‡ͺη«‹,*,*,カ倉・ζ₯ル,連用归,ζ₯γ‚‹ +953 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,*,* +954 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšε½’,* +955 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšε½’,γ‚‹ +956 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,* +957 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,δ»εšηΈη΄„οΌ‘,γ‚‹ +958 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归,* +959 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归,γ‚‹ +960 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,* +961 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,基本归-δΏƒιŸ³δΎΏ,γ‚‹ +962 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,* +963 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,体言ζŽ₯ηΆšη‰ΉζŠ,γ‚‹ +964 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,* +965 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺ焢ウζŽ₯碚,γ‚‹ +966 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,ζœͺη„Άε½’,* +967 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什ro,* +968 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什ro,γ‚‹ +969 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什yo,* +970 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,命什yo,γ‚‹ +971 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅,連用归,* +972 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšε½’,γγ‚Œγ‚‹ +973 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,δ»εšηΈη΄„οΌ‘,γγ‚Œγ‚‹ +974 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,基本归,γγ‚Œγ‚‹ +975 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺ焢ウζŽ₯碚,γγ‚Œγ‚‹ +976 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άε½’,γγ‚Œγ‚‹ +977 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,ζœͺη„Άη‰ΉζŠ,γγ‚Œγ‚‹ +978 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什e,γγ‚Œγ‚‹ +979 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什ro,γγ‚Œγ‚‹ +980 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,命什yo,γγ‚Œγ‚‹ +981 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»γ‚―レル,連用归,γγ‚Œγ‚‹ +982 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,δ»εšε½’,* +983 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΈ€ζ΅γƒ»εΎ—ル,基本归,* +984 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,*,* +985 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,* +986 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,おく +987 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,碚く +988 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšε½’,抜く +989 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +990 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,おく +991 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,碚く +992 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,抜く +993 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,* +994 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,おく +995 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,碚く +996 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,基本归,抜く +997 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +998 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,おく +999 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,碚く +1000 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,抜く +1001 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,* +1002 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,おく +1003 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,碚く +1004 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,ζœͺη„Άε½’,抜く +1005 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,* +1006 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,おく +1007 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,碚く +1008 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,命什e,抜く +1009 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,* +1010 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,おく +1011 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,碚く +1012 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用タζŽ₯碚,抜く +1013 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,* +1014 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,おく +1015 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,碚く +1016 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘Œγ‚€ιŸ³δΎΏ,連用归,抜く +1017 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +1018 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,いく +1019 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,く +1020 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,葌く +1021 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,* +1022 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,いく +1023 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,く +1024 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,δ»εšηΈη΄„οΌ‘,葌く +1025 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +1026 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,いく +1027 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,く +1028 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,基本归,葌く +1029 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +1030 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,いく +1031 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,く +1032 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,葌く +1033 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +1034 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,いく +1035 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,く +1036 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,葌く +1037 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +1038 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,いく +1039 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,く +1040 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,命什e,葌く +1041 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +1042 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,いく +1043 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,く +1044 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,葌く +1045 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +1046 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,いく +1047 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,く +1048 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏ,連用归,葌く +1049 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,ゆく +1050 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšε½’,葌く +1051 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,ゆく +1052 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,δ»εšηΈη΄„οΌ‘,葌く +1053 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,ゆく +1054 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,基本归,葌く +1055 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,ゆく +1056 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺ焢ウζŽ₯碚,葌く +1057 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,ゆく +1058 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,ζœͺη„Άε½’,葌く +1059 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,ゆく +1060 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,命什e,葌く +1061 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,ゆく +1062 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚«θ‘ŒδΏƒιŸ³δΎΏγƒ¦γ‚―,連用归,葌く 
+1063 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,*,* +1064 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,*,尽くす +1065 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,* +1066 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,出す +1067 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,尽くす +1068 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšε½’,直す +1069 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,* +1070 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,出す +1071 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,尽くす +1072 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,δ»εšηΈη΄„οΌ‘,直す +1073 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,出す +1074 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,尽くす +1075 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,基本归,直す +1076 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,* +1077 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,出す +1078 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,尽くす +1079 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺ焢ウζŽ₯碚,直す +1080 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,* +1081 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,出す +1082 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,尽くす +1083 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,ζœͺη„Άε½’,直す +1084 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,* +1085 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,出す +1086 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,尽くす +1087 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,命什e,直す +1088 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,* +1089 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,出す +1090 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,尽くす +1091 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚΅θ‘Œ,連用归,直す +1092 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,*,* +1093 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšε½’,* +1094 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,δ»εšηΈη΄„οΌ‘,* +1095 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺ焢ウζŽ₯碚,* +1096 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,ζœͺη„Άε½’,* +1097 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,命什e,* +1098 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用タζŽ₯碚,* +1099 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γ‚Ώθ‘Œ,連用归,* +1100 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,*,* +1101 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšε½’,* +1102 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,δ»εšηΈη΄„οΌ‘,* +1103 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺ焢ウζŽ₯碚,* +1104 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,ζœͺη„Άε½’,* +1105 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,命什e,* +1106 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用タζŽ₯碚,* +1107 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒŠθ‘Œ,連用归,* +1108 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,*,* +1109 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšε½’,* +1110 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšε½’,θΎΌγ‚€ +1111 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšηΈη΄„οΌ‘,* +1112 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,δ»εšηΈη΄„οΌ‘,θΎΌγ‚€ +1113 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,基本归,θΎΌγ‚€ +1114 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺ焢ウζŽ₯碚,* +1115 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺ焢ウζŽ₯碚,θΎΌγ‚€ +1116 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺη„Άε½’,* +1117 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,ζœͺη„Άε½’,θΎΌγ‚€ +1118 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,命什e,* +1119 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,命什e,θΎΌγ‚€ +1120 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用タζŽ₯碚,* +1121 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用タζŽ₯碚,θΎΌγ‚€ +1122 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用归,* +1123 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒžθ‘Œ,連用归,θΎΌγ‚€ +1124 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,* +1125 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,*,εˆ‡γ‚‹ +1126 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,* +1127 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,ある +1128 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,γŠγ‚‹ +1129 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,かかる +1130 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,きる +1131 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,γͺγ‚‹ +1132 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,まいる +1133 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,まわる +1134 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,γ‚„γ‚‹ +1135 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,ε›žγ‚‹ +1136 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,参る +1137 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,硂わる +1138 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšε½’,εˆ‡γ‚‹ +1139 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,* +1140 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,ある +1141 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,γŠγ‚‹ +1142 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,かかる +1143 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,きる +1144 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,γͺγ‚‹ +1145 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,まいる +1146 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,まわる +1147 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,γ‚„γ‚‹ +1148 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,ε›žγ‚‹ +1149 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,参る +1150 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,硂わる +1151 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,δ»εšηΈη΄„οΌ‘,εˆ‡γ‚‹ +1152 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,* +1153 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,ある +1154 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,γŠγ‚‹ +1155 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,かかる +1156 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,きる +1157 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,γͺγ‚‹ +1158 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,まいる +1159 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,まわる +1160 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,γ‚„γ‚‹ +1161 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,ε›žγ‚‹ +1162 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,参る +1163 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,硂わる +1164 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,基本归,εˆ‡γ‚‹ +1165 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,* +1166 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,ある +1167 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,γŠγ‚‹ +1168 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,かかる +1169 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,きる +1170 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,γͺγ‚‹ +1171 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,まいる +1172 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,まわる +1173 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,γ‚„γ‚‹ +1174 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,ε›žγ‚‹ +1175 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,参る +1176 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,硂わる +1177 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠ,εˆ‡γ‚‹ +1178 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,* +1179 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ある +1180 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,γŠγ‚‹ +1181 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,かかる +1182 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,きる +1183 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,γͺγ‚‹ +1184 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,まいる +1185 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,まわる +1186 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,γ‚„γ‚‹ +1187 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,ε›žγ‚‹ +1188 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,参る +1189 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,硂わる +1190 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,体言ζŽ₯ηΆšη‰ΉζŠοΌ’,εˆ‡γ‚‹ +1191 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,* +1192 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,ある +1193 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,γŠγ‚‹ +1194 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,かかる +1195 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,きる +1196 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,γͺγ‚‹ +1197 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,まいる +1198 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,まわる +1199 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,γ‚„γ‚‹ +1200 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,ε›žγ‚‹ +1201 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,参る +1202 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,硂わる +1203 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺ焢ウζŽ₯碚,εˆ‡γ‚‹ +1204 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,* +1205 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,ある +1206 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,γŠγ‚‹ +1207 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,かかる +1208 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,きる +1209 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,γͺγ‚‹ +1210 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,まいる +1211 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,まわる +1212 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,γ‚„γ‚‹ +1213 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,ε›žγ‚‹ +1214 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,参る +1215 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,硂わる +1216 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άε½’,εˆ‡γ‚‹ +1217 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,* +1218 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,ある +1219 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,γŠγ‚‹ +1220 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,かかる +1221 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,きる +1222 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,γͺγ‚‹ +1223 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,まいる +1224 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,まわる +1225 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,γ‚„γ‚‹ +1226 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,ε›žγ‚‹ +1227 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,参る +1228 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,硂わる +1229 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,ζœͺη„Άη‰ΉζŠ,εˆ‡γ‚‹ +1230 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,* +1231 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,ある +1232 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,γŠγ‚‹ +1233 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,かかる +1234 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,きる +1235 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,γͺγ‚‹ +1236 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,まいる +1237 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,まわる +1238 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,γ‚„γ‚‹ +1239 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,ε›žγ‚‹ +1240 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,参る +1241 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,硂わる +1242 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,命什e,εˆ‡γ‚‹ +1243 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,* +1244 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,ある +1245 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,γŠγ‚‹ +1246 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,かかる +1247 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,きる +1248 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,γͺγ‚‹ +1249 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,まいる +1250 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,まわる +1251 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,γ‚„γ‚‹ +1252 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,ε›žγ‚‹ +1253 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,参る +1254 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,硂わる +1255 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用タζŽ₯碚,εˆ‡γ‚‹ +1256 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,* +1257 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,ある +1258 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,γŠγ‚‹ +1259 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,かかる +1260 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,きる +1261 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,γͺγ‚‹ +1262 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,まいる +1263 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,まわる +1264 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,γ‚„γ‚‹ +1265 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,ε›žγ‚‹ +1266 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,参る +1267 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,硂わる +1268 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œ,連用归,εˆ‡γ‚‹ +1269 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,γͺさる +1270 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,らっしゃる +1271 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšε½’,下さる +1272 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,γͺさる +1273 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,らっしゃる +1274 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,δ»εšηΈη΄„οΌ‘,下さる +1275 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,γͺさる +1276 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,らっしゃる +1277 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,基本归,下さる +1278 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,γͺさる +1279 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,らっしゃる +1280 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺ焢ウζŽ₯碚,下さる +1281 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,γͺさる +1282 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,らっしゃる +1283 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άε½’,下さる +1284 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,γͺさる +1285 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,らっしゃる +1286 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,ζœͺη„Άη‰ΉζŠ,下さる +1287 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,γͺさる +1288 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,らっしゃる +1289 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什e,下さる +1290 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,γͺさる +1291 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,らっしゃる +1292 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,命什i,下さる +1293 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,γͺさる +1294 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,らっしゃる +1295 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用タζŽ₯碚,下さる +1296 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,γͺさる +1297 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,らっしゃる +1298 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ©θ‘Œη‰ΉζŠ,連用归,下さる +1299 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,*,* +1300 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,δ»εšε½’,* +1301 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +1302 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,ζœͺη„Άε½’,* +1303 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,命什e,* +1304 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用タζŽ₯碚,* +1305 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘Œγ‚¦ιŸ³δΎΏ,連用归,* +1306 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,*,* +1307 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,* +1308 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,しまう +1309 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,もらう +1310 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,δ»εšε½’,εˆγ† +1311 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,* +1312 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,しまう +1313 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,もらう +1314 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,基本归,εˆγ† +1315 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,* +1316 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,しまう +1317 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,もらう +1318 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺ焢ウζŽ₯碚,εˆγ† +1319 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,* +1320 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,しまう +1321 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,もらう +1322 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,ζœͺη„Άε½’,εˆγ† +1323 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,* +1324 
ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,しまう +1325 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,もらう +1326 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,命什e,εˆγ† +1327 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,* +1328 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,しまう +1329 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,もらう +1330 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用タζŽ₯碚,εˆγ† +1331 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,* +1332 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,しまう +1333 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,もらう +1334 ε‹•θ©ž,非θ‡ͺη«‹,*,*,δΊ”ζ΅γƒ»γƒ―θ‘ŒδΏƒιŸ³δΎΏ,連用归,εˆγ† +1335 ε‹•θ©ž,非θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,δ»εšε½’,* +1336 ε‹•θ©ž,非θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,基本归,* +1337 ε‹•θ©ž,非θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,ζœͺη„Άε½’,* +1338 ε‹•θ©ž,非θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,命什e,* +1339 ε‹•θ©ž,非θ‡ͺη«‹,*,*,ε››ζ΅γƒ»γƒθ‘Œ,連用归,* +1340 ε‰―θ©ž,*,*,*,*,*,* +1341 ε‰―θ©ž,δΈ€θˆ¬,*,*,*,*,* +1342 ε‰―θ©ž,助詞鑞ζŽ₯碚,*,*,*,*,* +1343 名詞,ァ倉ζŽ₯碚,*,*,*,*,* +1344 名詞,γƒŠγ‚€ε½’εΉθ©žθͺžεΉΉ,*,*,*,*,* +1345 名詞,δΈ€θˆ¬,*,*,*,*,* +1346 名詞,δΈ€θˆ¬,*,*,*,0,* +1347 名詞,ε½’εΉε‹•θ©žθͺžεΉΉ,*,*,*,*,* +1348 名詞,ε›Ίζœ‰εθ©ž,δΈ€θˆ¬,*,*,*,* +1349 名詞,ε›Ίζœ‰εθ©ž,人名,δΈ€θˆ¬,*,*,* +1350 名詞,ε›Ίζœ‰εθ©ž,人名,姓,*,*,* +1351 名詞,ε›Ίζœ‰εθ©ž,人名,名,*,*,* +1352 名詞,ε›Ίζœ‰εθ©ž,η΅„ηΉ”,*,*,*,* +1353 名詞,ε›Ίζœ‰εθ©ž,地域,δΈ€θˆ¬,*,*,* +1354 名詞,ε›Ίζœ‰εθ©ž,地域,ε›½,*,*,* +1355 名詞,ζ•°,*,*,*,*,* +1356 名詞,ζŽ₯ηΆšθ©žηš„,*,*,*,*,* +1357 名詞,ζŽ₯ε°Ύ,ァ倉ζŽ₯碚,*,*,*,* +1358 名詞,ζŽ₯ε°Ύ,δΈ€θˆ¬,*,*,*,* +1359 名詞,ζŽ₯ε°Ύ,ε½’εΉε‹•θ©žθͺžεΉΉ,*,*,*,* +1360 名詞,ζŽ₯ε°Ύ,εŠ©ζ•°θ©ž,*,*,*,* +1361 名詞,ζŽ₯ε°Ύ,εŠ©ε‹•θ©žθͺžεΉΉ,*,*,*,* +1362 名詞,ζŽ₯ε°Ύ,人名,*,*,*,* +1363 名詞,ζŽ₯ε°Ύ,地域,*,*,*,* +1364 名詞,ζŽ₯ε°Ύ,η‰ΉζŠ,*,*,*,* +1365 名詞,ζŽ₯ε°Ύ,ε‰―θ©žε―θƒ½,*,*,*,* +1366 名詞,代名詞,δΈ€θˆ¬,*,*,*,* +1367 名詞,代名詞,ηΈη΄„,*,*,*,* +1368 名詞,ε‹•θ©žιžθ‡ͺη«‹ηš„,*,*,*,*,* +1369 名詞,η‰ΉζŠ,εŠ©ε‹•θ©žθͺžεΉΉ,*,*,*,* +1370 名詞,非θ‡ͺη«‹,*,*,*,*,* +1371 名詞,非θ‡ͺη«‹,δΈ€θˆ¬,*,*,*,* +1372 名詞,非θ‡ͺη«‹,ε½’εΉε‹•θ©žθͺžεΉΉ,*,*,*,* +1373 名詞,非θ‡ͺη«‹,εŠ©ε‹•θ©žθͺžεΉΉ,*,*,*,* +1374 名詞,非θ‡ͺη«‹,ε‰―θ©žε―θƒ½,*,*,*,* +1375 名詞,ε‰―θ©žε―θƒ½,*,*,*,*,* +1376 ι€£δ½“θ©ž,*,*,*,*,*,* diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/sys.dic b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/sys.dic new file mode 100644 index 0000000000000000000000000000000000000000..700526dd4ccbd412d38d057307f5980d1be5a422 --- /dev/null +++ b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/sys.dic @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca57d9029691a70a5dfb99afc2844180256161d7130da65b1a867510e129b9a6 +size 103073776 diff --git a/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/unk.dic b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/unk.dic new file mode 100644 index 0000000000000000000000000000000000000000..2635af4613f6c6012259743bad97594ab0c85bc0 Binary files /dev/null and b/voice_bridge/pyopenjtalk/open_jtalk_dic_utf_8-1.11/unk.dic differ diff --git a/voice_bridge/pyopenjtalk/openjtalk.pyd b/voice_bridge/pyopenjtalk/openjtalk.pyd new file mode 100644 index 0000000000000000000000000000000000000000..fc9adaaf275a3141b3e5e2c6b869cb684e2c988f --- /dev/null +++ b/voice_bridge/pyopenjtalk/openjtalk.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb832ebe3d838f10879a074a67196ff30b7808b709b2487fabda74410808a5a0 +size 1639424 diff --git a/voice_bridge/python38.dll b/voice_bridge/python38.dll new file mode 100644 index 0000000000000000000000000000000000000000..6133b1e4025d1811cef39b88393dc400b6bc4f59 --- /dev/null +++ b/voice_bridge/python38.dll @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f3e368f5bcc1dda5e951682008a509751e6395f7328fd0f02c4e1a11f67c128 +size 4211376 diff --git a/voice_bridge/pyworld/pyworld.pyd b/voice_bridge/pyworld/pyworld.pyd new file mode 100644 index 0000000000000000000000000000000000000000..677e77a184eae3cac81519d0a5e6d6f2e3d0393b Binary files /dev/null and b/voice_bridge/pyworld/pyworld.pyd differ diff --git a/voice_bridge/regex/_regex.pyd b/voice_bridge/regex/_regex.pyd new file mode 100644 index 0000000000000000000000000000000000000000..13794dff3f02f861d1e8a1b483fbfa4099d0f9ad Binary files /dev/null and b/voice_bridge/regex/_regex.pyd differ diff --git a/voice_bridge/resampy/data/kaiser_best.npz b/voice_bridge/resampy/data/kaiser_best.npz new file mode 100644 index 0000000000000000000000000000000000000000..155deeddde16699bad1fa936b1c7b73eb5db12ef --- /dev/null +++ b/voice_bridge/resampy/data/kaiser_best.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:344c4eb7d35ef9845192a506aaa5836ca243033a90e6363a9d25fc5cd12a5328 +size 3277596 diff --git a/voice_bridge/resampy/data/kaiser_fast.npz b/voice_bridge/resampy/data/kaiser_fast.npz new file mode 100644 index 0000000000000000000000000000000000000000..a2a4c78f2fc30b05d9a3786d7ad233bea0bc7aaa --- /dev/null +++ b/voice_bridge/resampy/data/kaiser_fast.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8289150d2cd8f5cae2b60af483fcce0b9dd8a740c6294277f569317573e0a095 +size 99100 diff --git a/voice_bridge/run.exe b/voice_bridge/run.exe new file mode 100644 index 0000000000000000000000000000000000000000..a97bf27ef266cf86438a26753114a52eddee5b54 --- /dev/null +++ b/voice_bridge/run.exe @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75b7befad704d03dec5102c51e0ebd1d66610210ea4266cdb8ddff125061e42b +size 170618368 diff --git a/voice_bridge/scipy/.libs/lib_arpack-.7LJHSWCJNV2L27UVBY6IEIQMV7LQJHJM.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/lib_arpack-.7LJHSWCJNV2L27UVBY6IEIQMV7LQJHJM.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..01cfe9e83b98eb2161bfb32f2cabada81c44b8c8 --- /dev/null +++ b/voice_bridge/scipy/.libs/lib_arpack-.7LJHSWCJNV2L27UVBY6IEIQMV7LQJHJM.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e956c618a3ec67960578786bd6923a0b56280288bed25350d2bdaf897867ecfe +size 1812493 diff --git a/voice_bridge/scipy/.libs/lib_blas_su.LZA4TR6XGWADVORUYXZXR5S4RO6WWEF4.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/lib_blas_su.LZA4TR6XGWADVORUYXZXR5S4RO6WWEF4.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..a61354ced527e2b6a7ae671cae7d8bc8dd261d96 Binary files /dev/null and b/voice_bridge/scipy/.libs/lib_blas_su.LZA4TR6XGWADVORUYXZXR5S4RO6WWEF4.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/lib_dop-f2p.MT27WXU45SIAX6ASQY7CWYIOK7BE6F7J.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/lib_dop-f2p.MT27WXU45SIAX6ASQY7CWYIOK7BE6F7J.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..3b426194f38886cf122c318e4be6da4fd85b37af Binary files /dev/null and b/voice_bridge/scipy/.libs/lib_dop-f2p.MT27WXU45SIAX6ASQY7CWYIOK7BE6F7J.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/lib_test_fo.JF5HTWMUPBXWGAYEBVEJU3OZAHTSVKCT.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/lib_test_fo.JF5HTWMUPBXWGAYEBVEJU3OZAHTSVKCT.gfortran-win_amd64.dll new file mode 100644 index 
0000000000000000000000000000000000000000..5d3a4ffdc058f50d7d9ef53b0714601904791398 Binary files /dev/null and b/voice_bridge/scipy/.libs/lib_test_fo.JF5HTWMUPBXWGAYEBVEJU3OZAHTSVKCT.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libansari.R6EA3HQP5KZ6TAXU4Y4ZVTRPT7UVA53Z.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libansari.R6EA3HQP5KZ6TAXU4Y4ZVTRPT7UVA53Z.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..b2c5debc4643e78bcd190ab6cef3c1bf0ed872e0 Binary files /dev/null and b/voice_bridge/scipy/.libs/libansari.R6EA3HQP5KZ6TAXU4Y4ZVTRPT7UVA53Z.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libbanded5x.7J4WS2QZKMXGIZDNNWWXUXE52PU2TOEI.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libbanded5x.7J4WS2QZKMXGIZDNNWWXUXE52PU2TOEI.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..d9ee8c362508d2a47bd56fb0f5f89a8d31f08b29 Binary files /dev/null and b/voice_bridge/scipy/.libs/libbanded5x.7J4WS2QZKMXGIZDNNWWXUXE52PU2TOEI.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libbispeu.7AH3PCQ2E2NGLC3AQD7FFAH73KGJTZCJ.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libbispeu.7AH3PCQ2E2NGLC3AQD7FFAH73KGJTZCJ.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..65243363bd58ae3986ad81eb4c17b1506ecfc793 --- /dev/null +++ b/voice_bridge/scipy/.libs/libbispeu.7AH3PCQ2E2NGLC3AQD7FFAH73KGJTZCJ.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:039a248365e7fe9e706cff003a7d6f0edc07a015498753fee42f5bc477eef553 +size 1982740 diff --git a/voice_bridge/scipy/.libs/libblkdta00.MVYK4IMPWHTMFZE66OC6K7KR6X3K76U2.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libblkdta00.MVYK4IMPWHTMFZE66OC6K7KR6X3K76U2.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..f8c42cbb75fb95f5f460ca1ccf66e7cae690333c Binary files /dev/null and b/voice_bridge/scipy/.libs/libblkdta00.MVYK4IMPWHTMFZE66OC6K7KR6X3K76U2.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libchkder.G7WSOGIYYQO3UWFVEZ3PPXCXR53ADVPA.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libchkder.G7WSOGIYYQO3UWFVEZ3PPXCXR53ADVPA.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..946bcc9f3dc0cf020e4abcdaf80a905d4961ed7c Binary files /dev/null and b/voice_bridge/scipy/.libs/libchkder.G7WSOGIYYQO3UWFVEZ3PPXCXR53ADVPA.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libcobyla2.25EVUSEBAW7VKISARB7LO3UGZPN2HXE3.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libcobyla2.25EVUSEBAW7VKISARB7LO3UGZPN2HXE3.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..4cac58b588f152cc86a8fe4503953586f5d46837 Binary files /dev/null and b/voice_bridge/scipy/.libs/libcobyla2.25EVUSEBAW7VKISARB7LO3UGZPN2HXE3.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libd_odr.SJ2UBBXLPDDMD64MKGXB66R2CWD5IC45.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libd_odr.SJ2UBBXLPDDMD64MKGXB66R2CWD5IC45.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..f85637eb5ddab16577c23611b8a176633a8c85b6 --- /dev/null +++ b/voice_bridge/scipy/.libs/libd_odr.SJ2UBBXLPDDMD64MKGXB66R2CWD5IC45.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:356da6ec8c63264776f17f2c0ec8adeaecc2674ea639b2469473cd2d8ff1ca39 +size 1289117 diff --git 
a/voice_bridge/scipy/.libs/libdcsrch.I2AOPDCXAPDRFNPWY55H5UE7XZSU5CVN.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libdcsrch.I2AOPDCXAPDRFNPWY55H5UE7XZSU5CVN.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..b4a78698ade406c905e3e7b258d646cdfeebe98f Binary files /dev/null and b/voice_bridge/scipy/.libs/libdcsrch.I2AOPDCXAPDRFNPWY55H5UE7XZSU5CVN.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libdet.Q2ZFL7LN3KNWTO52QNZLI5JKVAYGTYI2.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libdet.Q2ZFL7LN3KNWTO52QNZLI5JKVAYGTYI2.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..012b81e9c5b4c34a3438a29c84ce2f976e35dfd8 Binary files /dev/null and b/voice_bridge/scipy/.libs/libdet.Q2ZFL7LN3KNWTO52QNZLI5JKVAYGTYI2.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libdfft.NU4EIZBEDIVVXBWR26HLW3PTNEKKIRCU.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libdfft.NU4EIZBEDIVVXBWR26HLW3PTNEKKIRCU.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..e6c0864b4146687d8c3d82b8522720c12a7b80c6 --- /dev/null +++ b/voice_bridge/scipy/.libs/libdfft.NU4EIZBEDIVVXBWR26HLW3PTNEKKIRCU.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:850659410fcdf535a82f57ef0ca53cb9a4b8bdf09fc26b5f5ab71a89db71c591 +size 1751241 diff --git a/voice_bridge/scipy/.libs/libdfitpack.LMAPXDO5462XTHNWXJBZFJU252ZVABKI.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libdfitpack.LMAPXDO5462XTHNWXJBZFJU252ZVABKI.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..11e897566a0787bbe45885c809961d4759baaea5 --- /dev/null +++ b/voice_bridge/scipy/.libs/libdfitpack.LMAPXDO5462XTHNWXJBZFJU252ZVABKI.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c249fc8a0311777024242f6b9fbceb265b771ecb358e749f008a40c3eda3369 +size 1986953 diff --git a/voice_bridge/scipy/.libs/libdgamln.WP5Q52HGLVXILXN6MQ6JUKPFUZEHPO3N.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libdgamln.WP5Q52HGLVXILXN6MQ6JUKPFUZEHPO3N.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..b27933d65439cc0e1955810577cf14ef0b053b83 --- /dev/null +++ b/voice_bridge/scipy/.libs/libdgamln.WP5Q52HGLVXILXN6MQ6JUKPFUZEHPO3N.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c129b73a5afdb00f0b565d25053fc313aade9f03f004e37ab9540d776ced9b8 +size 2718599 diff --git a/voice_bridge/scipy/.libs/libdqag.ZURFXVIEQYOXVDGJZPA7RPIDKH2QX2JW.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libdqag.ZURFXVIEQYOXVDGJZPA7RPIDKH2QX2JW.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..a266b6f274d99d10e942288e79a6adaff6410aa2 Binary files /dev/null and b/voice_bridge/scipy/.libs/libdqag.ZURFXVIEQYOXVDGJZPA7RPIDKH2QX2JW.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libgetbreak.BLFUV4GPXDJ6X5ZESRVZCTKZWNFK2DT2.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libgetbreak.BLFUV4GPXDJ6X5ZESRVZCTKZWNFK2DT2.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..bfbbe6e161a7c441a02aec1a5a0fa2c78dc8cbeb Binary files /dev/null and b/voice_bridge/scipy/.libs/libgetbreak.BLFUV4GPXDJ6X5ZESRVZCTKZWNFK2DT2.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/liblbfgsb.VKTSXPVDTQUXOHORKJLKUQ3XKRYUULUD.gfortran-win_amd64.dll 
b/voice_bridge/scipy/.libs/liblbfgsb.VKTSXPVDTQUXOHORKJLKUQ3XKRYUULUD.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..d671e41ae1f534fd00309fa1cd50f51422ecc01c Binary files /dev/null and b/voice_bridge/scipy/.libs/liblbfgsb.VKTSXPVDTQUXOHORKJLKUQ3XKRYUULUD.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/liblsoda-f2.A3SHF7Q77FQIZL4ZYZSJVXJVPEF674WY.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/liblsoda-f2.A3SHF7Q77FQIZL4ZYZSJVXJVPEF674WY.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..41a05badc14992be4bd9d3e781d7d6667b70416b Binary files /dev/null and b/voice_bridge/scipy/.libs/liblsoda-f2.A3SHF7Q77FQIZL4ZYZSJVXJVPEF674WY.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libmvndst.IUWFZM2WSUQ3UTGQHFQ26ATH2A2TIUVI.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libmvndst.IUWFZM2WSUQ3UTGQHFQ26ATH2A2TIUVI.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..19ab99dc6629f6a50b4a9a2811641d14e3e76f78 Binary files /dev/null and b/voice_bridge/scipy/.libs/libmvndst.IUWFZM2WSUQ3UTGQHFQ26ATH2A2TIUVI.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libnnls.4HUTGAJQTI623WTX372VAIIWXRLC62YU.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libnnls.4HUTGAJQTI623WTX372VAIIWXRLC62YU.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..2e18fa879135028c3c32f3fc73c20938cd75971c Binary files /dev/null and b/voice_bridge/scipy/.libs/libnnls.4HUTGAJQTI623WTX372VAIIWXRLC62YU.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libopenblas.3HBPCJB5BPQGKWVZAVEBXNNJ2Q2G3TUP.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libopenblas.3HBPCJB5BPQGKWVZAVEBXNNJ2Q2G3TUP.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..0fd7d19e00f35455e0a169527fd280843aa9fb2a --- /dev/null +++ b/voice_bridge/scipy/.libs/libopenblas.3HBPCJB5BPQGKWVZAVEBXNNJ2Q2G3TUP.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:621a0c6fb360e1028e6beb92eaac748142dd8615ce767e70f82417c7f8cc2826 +size 32862022 diff --git a/voice_bridge/scipy/.libs/libslsqp_op.RGGN6ZOFD2K47X7YRNDYCM7JFP4AGLER.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libslsqp_op.RGGN6ZOFD2K47X7YRNDYCM7JFP4AGLER.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..aad0d1dd62376ce34b160263ea39b31944dd027e Binary files /dev/null and b/voice_bridge/scipy/.libs/libslsqp_op.RGGN6ZOFD2K47X7YRNDYCM7JFP4AGLER.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libspecfun.LQCTHMCYNULEOOGKIO6AGREE6D6V37RU.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libspecfun.LQCTHMCYNULEOOGKIO6AGREE6D6V37RU.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..4e4b925895ae7f1345a5bbb41b0e47d17518000d --- /dev/null +++ b/voice_bridge/scipy/.libs/libspecfun.LQCTHMCYNULEOOGKIO6AGREE6D6V37RU.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec8dedad81e95125a0b7d106ecad088c19d91eeef1fc2f2acd969fcef958d30a +size 1402857 diff --git a/voice_bridge/scipy/.libs/libvode-f2p.CPK3WLWI3UO7R5A2TENGVYGYTZJJVIU5.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libvode-f2p.CPK3WLWI3UO7R5A2TENGVYGYTZJJVIU5.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..296dfb3f65b95f8f4cc623c22acaab9128a18ec5 --- /dev/null +++ 
b/voice_bridge/scipy/.libs/libvode-f2p.CPK3WLWI3UO7R5A2TENGVYGYTZJJVIU5.gfortran-win_amd64.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a32c78c9ba62ca77a549b9d2e1495d881b4c85c33f84172bd45551ee280dbff1 +size 1018595 diff --git a/voice_bridge/scipy/.libs/libwrap_dum.YPEDV6SMZ3M5ZJQQYA6QHCHD6F7YIEG5.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libwrap_dum.YPEDV6SMZ3M5ZJQQYA6QHCHD6F7YIEG5.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..eff5f01759a43b0353adb725d36f103d16c3b43b Binary files /dev/null and b/voice_bridge/scipy/.libs/libwrap_dum.YPEDV6SMZ3M5ZJQQYA6QHCHD6F7YIEG5.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/libwrap_dum.YZL2SEQKGO26ANDNTTBRMKWBFZP2LBDX.gfortran-win_amd64.dll b/voice_bridge/scipy/.libs/libwrap_dum.YZL2SEQKGO26ANDNTTBRMKWBFZP2LBDX.gfortran-win_amd64.dll new file mode 100644 index 0000000000000000000000000000000000000000..589da6a2c8fb43fc96c7d6b59d3dfefbac8fc8d0 Binary files /dev/null and b/voice_bridge/scipy/.libs/libwrap_dum.YZL2SEQKGO26ANDNTTBRMKWBFZP2LBDX.gfortran-win_amd64.dll differ diff --git a/voice_bridge/scipy/.libs/msvcp140.dll b/voice_bridge/scipy/.libs/msvcp140.dll new file mode 100644 index 0000000000000000000000000000000000000000..4a5ca4f47a741e3ed243ae1d34e0e1f7b046ec32 Binary files /dev/null and b/voice_bridge/scipy/.libs/msvcp140.dll differ diff --git a/voice_bridge/scipy/HACKING.rst.txt b/voice_bridge/scipy/HACKING.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..6d007b22ff0d231aec16f9de41553e7385a9c3f7 --- /dev/null +++ b/voice_bridge/scipy/HACKING.rst.txt @@ -0,0 +1,297 @@ +.. _hacking: + +================== +Ways to Contribute +================== + +This document aims to give an overview of the ways to contribute to SciPy. It +tries to answer commonly asked questions and provide some insight into how the +community process works in practice. Readers who are familiar with the SciPy +community and are experienced Python coders may want to jump straight to the +:ref:`contributor-toc`. + +There are a lot of ways you can contribute: + +- Contributing new code +- Fixing bugs, improving documentation, and other maintenance work +- Reviewing open pull requests +- Triaging issues +- Working on the `scipy.org`_ website +- Answering questions and participating on the scipy-dev and scipy-user + `mailing lists`_. + +Contributing new code +===================== + +If you have been working with the scientific Python toolstack for a while, you +probably have some code lying around of which you think "this could be useful +for others too". Perhaps it's a good idea then to contribute it to SciPy or +another open source project. The first question to ask is then, where does +this code belong? That question is hard to answer here, so we start with a +more specific one: *what code is suitable for putting into SciPy?* +Almost all of the new code added to SciPy has in common that it's potentially +useful in multiple scientific domains and it fits in the scope of existing +SciPy subpackages (see :ref:`deciding-on-new-features`). In principle new +subpackages can be added too, but this is far less common. For code that is +specific to a single application, there may be an existing project that can +use the code. Some SciKits (`scikit-learn`_, `scikit-image`_, `statsmodels`_, +etc.) are good examples here; they have a narrower focus and because of that +more domain-specific code than SciPy. 
+ +Now if you have code that you would like to see included in SciPy, how do you +go about it? After checking that your code can be distributed in SciPy under a +compatible license (see :ref:`license-considerations`), the first step is to +discuss on the scipy-dev mailing list. All new features, as well as changes to +existing code, are discussed and decided on there. You can, and probably +should, already start this discussion before your code is finished. Remember +that in order to be added to SciPy your code will need to be reviewed by +someone else, so try to find someone willing to review your work while you're +at it. + +Assuming the outcome of the discussion on the mailing list is positive and you +have a function or piece of code that does what you need it to do, what next? +Before code is added to SciPy, it at least has to have good documentation, unit +tests, benchmarks, and correct code style. + +1. Unit tests + In principle you should aim to create unit tests that exercise all the code + that you are adding. This gives some degree of confidence that your code + runs correctly, also on Python versions and hardware or OSes that you don't + have available yourself. An extensive description of how to write unit + tests is given in :doc:`numpy:reference/testing`, and :ref:`runtests` + documents how to run them. + +2. Benchmarks + Unit tests check for correct functionality; benchmarks measure code + performance. Not all existing SciPy code has benchmarks, but it should: + as SciPy grows it is increasingly important to monitor execution times in + order to catch unexpected regressions. More information about writing + and running benchmarks is available in :ref:`benchmarking-with-asv`. + +3. Documentation + Clear and complete documentation is essential in order for users to be able + to find and understand the code. Documentation for individual functions + and classes -- which includes at least a basic description, type and + meaning of all parameters and return values, and usage examples in + `doctest`_ format -- is put in docstrings. Those docstrings can be read + within the interpreter, and are compiled into a reference guide in html and + pdf format. Higher-level documentation for key (areas of) functionality is + provided in tutorial format and/or in module docstrings. A guide on how to + write documentation is given in :ref:`numpy:howto-document`, and + :ref:`rendering-documentation` explains how to preview the documentation + as it will appear online. + +4. Code style + Uniformity of the style in which code is written is important to others trying + to understand the code. SciPy follows the standard Python guidelines for + code style, `PEP8`_. In order to check that your code conforms to PEP8, + you can use the `pep8 package`_ style checker. Most IDEs and text editors + have settings that can help you follow PEP8, for example by translating + tabs to four spaces. Using `pyflakes`_ to check your code is also a good + idea. More information is available in :ref:`pep8-scipy`. + +A :ref:`checklist`, including these and other requirements, is +available at the end of the example :ref:`development-workflow`. + +Another question you may have is: *where exactly do I put my code*? To answer +this, it is useful to understand how the SciPy public API (application +programming interface) is defined. For most modules the API is two levels +deep, which means your new function should appear as +``scipy.subpackage.my_new_func``.
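For concreteness, here is a minimal, hypothetical sketch of that two-level layout (only ``scipy.subpackage.my_new_func`` comes from the text; the file name, docstring contents, and private helper are invented for illustration)::

    # scipy/subpackage/_my_new_file.py -- hypothetical file name; the
    # leading underscore keeps the module itself private.

    __all__ = ['my_new_func']  # names listed here form the public API


    def my_new_func(x):
        """Return ``x`` unchanged (placeholder behaviour for this sketch).

        Parameters
        ----------
        x : object
            Any input value.

        Returns
        -------
        object
            The input, unchanged.

        Examples
        --------
        >>> my_new_func(1)
        1
        """
        return x


    def _private_helper(x):
        # Leading underscore: private, not exported via ``__all__``.
        return x

``scipy/subpackage/__init__.py`` would then re-export the public names, for example with ``from ._my_new_file import *``.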
As the sketch above suggests, ``my_new_func`` can be put in an existing or +new file under ``/scipy/<subpackage>/``, its name is added to the ``__all__`` +list in that file (which lists all public functions in the file), and those +public functions are then imported in ``/scipy/<subpackage>/__init__.py``. Any +private functions/classes should have a leading underscore (``_``) in their +name. A more detailed description of what constitutes the public API of SciPy is given +in :ref:`scipy-api`. + +Once you think your code is ready for inclusion in SciPy, you can send a pull +request (PR) on Github. We won't go into the details of how to work with git +here; this is described well in :ref:`git-development` +and on the `Github help pages`_. When you send the PR for a new +feature, be sure to also mention this on the scipy-dev mailing list. This can +prompt interested people to help review your PR. Assuming that you already got +positive feedback before on the general idea of your code/feature, the purpose +of the code review is to ensure that the code is correct and efficient, and meets +the requirements outlined above. In many cases the code review happens +relatively quickly, but it's possible that it stalls. If you have addressed +all feedback already given, it's perfectly fine to ask on the mailing list +again for review (after a reasonable amount of time, say a couple of weeks, has +passed). Once the review is completed, the PR is merged into the "master" +branch of SciPy. + +The above describes the requirements and process for adding code to SciPy. It +doesn't yet answer the question, though, of how exactly decisions are made. The +basic answer is: decisions are made by consensus, by everyone who chooses to +participate in the discussion on the mailing list. This includes developers, +other users and yourself. Aiming for consensus in the discussion is important +-- SciPy is a project by and for the scientific Python community. In those +rare cases where agreement cannot be reached, the maintainers of the module +in question can decide the issue. + +.. _license-considerations: + +License Considerations +---------------------- + +*I based my code on existing Matlab/R/... code I found online, is this OK?* + +It depends. SciPy is distributed under a BSD license, so if the code that you +based your code on is also BSD licensed or has a BSD-compatible license (e.g. +MIT, PSF) then it's OK. Code which is GPL or Apache licensed, has no +clear license, requires citation, or is free for academic use only can't be +included in SciPy. Therefore if you copied existing code with such a license +or made a direct translation to Python of it, your code can't be included. +If you're unsure, please ask on the scipy-dev mailing list (see `mailing lists`_). + +*Why is SciPy under the BSD license and not, say, the GPL?* + +Like Python, SciPy uses a "permissive" open source license, which allows +proprietary re-use. While this allows companies to use and modify the software +without giving anything back, it is felt that the larger user base results in +more contributions overall, and companies often publish their modifications +anyway, without being required to. See John Hunter's `BSD pitch`_. + +For more information about SciPy's license, see :ref:`scipy-licensing`. + + +Maintaining existing code +========================= + +The previous section talked specifically about adding new functionality to +SciPy. A large part of that discussion also applies to maintenance of existing +code.
Maintenance means fixing bugs, improving code quality, documenting +existing functionality better, adding missing unit tests, adding performance +benchmarks, keeping build scripts up-to-date, etc. The SciPy `issue list`_ +contains all reported bugs, build/documentation issues, etc. Fixing issues +helps improve the overall quality of SciPy, and is also a good way +of getting familiar with the project. You may also want to fix a bug because +you ran into it and need the function in question to work correctly. + +The discussion on code style and unit testing above applies equally to bug +fixes. It is usually best to start by writing a unit test that shows the +problem, i.e., one that should pass but currently doesn't (see the sketch further +below). Once you have that, you can fix the +code so that the test does pass. That should be enough to send a PR for this +issue. Unlike when adding new code, discussing this on the mailing list may +not be necessary - if the old behavior of the code is clearly incorrect, no one +will object to having it fixed. It may be necessary to add some warning or +deprecation message for the changed behavior. This should be part of the +review process. + +.. note:: + + Pull requests that *only* change code style, e.g. fixing some PEP8 issues in + a file, are discouraged. Such PRs are often not worth the clutter they add to the git + history, and they take reviewer time that may be better spent in other ways. + Code style cleanups of code that is touched as part of a functional change + are fine, however. + + +Reviewing pull requests +======================= + +Reviewing open pull requests (PRs) is very welcome, and a valuable way to help +increase the speed at which the project moves forward. If you have specific +knowledge/experience in a particular area (say "optimization algorithms" or +"special functions") then reviewing PRs in that area is especially valuable - +sometimes PRs with technical code have to wait for a long time to get merged +due to a shortage of appropriate reviewers. + +We encourage everyone to get involved in the review process; it's also a +great way to get familiar with the code base. Reviewers should ask +themselves some or all of the following questions: + +- Was this change adequately discussed (relevant for new features and changes + in existing behavior)? +- Is the feature scientifically sound? Algorithms may be known to work based on + literature; otherwise, a closer look at correctness is valuable. +- Is the intended behavior clear under all conditions (e.g. unexpected inputs + like empty arrays or nan/inf values)? +- Does the code meet the quality, test, and documentation expectations outlined + under `Contributing new code`_? + +If we do not know you yet, consider introducing yourself. + + +Other ways to contribute +======================== + +There are many ways to contribute other than writing code. + +Triaging issues (investigating bug reports for validity and possible actions to +take) is also a useful activity. SciPy has many hundreds of open issues; +closing invalid ones and correctly labeling valid ones (ideally with some first +thoughts in a comment) allows prioritizing maintenance work and finding related +issues easily when working on an existing function or subpackage. + +Participating in discussions on the scipy-user and scipy-dev `mailing lists`_ is +a contribution in itself. Everyone who writes to those lists with a problem or +an idea would like to get responses, and writing such responses makes the +project and community function better and appear more welcoming.
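To make the test-first bug-fix workflow from `Maintaining existing code`_ concrete, here is a minimal, hypothetical sketch (the module, function, and expected behaviour are invented; only the workflow itself comes from the text above)::

    # scipy/subpackage/tests/test_some_func.py -- hypothetical test file.
    import pytest

    from scipy.subpackage import some_func  # hypothetical buggy function


    def test_some_func_rejects_negative_input():
        # Written *before* the fix: this test encodes the desired
        # behaviour, so it should pass but currently doesn't.  Fix the
        # code until it passes, then send the test and the fix in one PR.
        with pytest.raises(ValueError):
            some_func(-1)

Run with ``python -m pytest`` (or via :ref:`runtests`); it should fail before the fix and pass after it.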
+ +The `scipy.org`_ website contains a lot of information on both SciPy the +project and SciPy the community, and it can always use a new pair of hands. +The sources for the website live in their own separate repo: +https://github.com/scipy/scipy.org + +Getting started +=============== + +Thanks for your interest in contributing to SciPy! If you're interested in +contributing code, we hope you'll continue on to the :ref:`contributor-toc` +for details on how to set up your development environment, implement your +improvements, and submit your first PR! + +.. _scikit-learn: http://scikit-learn.org + +.. _scikit-image: http://scikit-image.org/ + +.. _statsmodels: https://www.statsmodels.org/ + +.. _testing guidelines: https://docs.scipy.org/doc/numpy/reference/testing.html + +.. _formatted correctly: https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message + +.. _bug report: https://scipy.org/bug-report.html + +.. _PEP8: https://www.python.org/dev/peps/pep-0008/ + +.. _pep8 package: https://pypi.python.org/pypi/pep8 + +.. _pyflakes: https://pypi.python.org/pypi/pyflakes + +.. _Github help pages: https://help.github.com/articles/set-up-git/ + +.. _issue list: https://github.com/scipy/scipy/issues + +.. _Github: https://github.com/scipy/scipy + +.. _scipy.org: https://scipy.org/ + +.. _scipy.github.com: https://scipy.github.com/ + +.. _scipy.org-new: https://github.com/scipy/scipy.org-new + +.. _documentation wiki: https://docs.scipy.org/scipy/Front%20Page/ + +.. _SciPy Central: https://web.archive.org/web/20170520065729/http://central.scipy.org/ + +.. _doctest: https://pymotw.com/3/doctest/ + +.. _virtualenv: https://virtualenv.pypa.io/ + +.. _virtualenvwrapper: https://bitbucket.org/dhellmann/virtualenvwrapper/ + +.. _bsd pitch: http://nipy.sourceforge.net/nipy/stable/faq/johns_bsd_pitch.html + +.. _Pytest: https://pytest.org/ + +.. _mailing lists: https://www.scipy.org/scipylib/mailing-lists.html + +.. _Spyder: https://www.spyder-ide.org/ + +.. _Anaconda SciPy Dev Part I (macOS): https://youtu.be/1rPOSNd0ULI + +.. _Anaconda SciPy Dev Part II (macOS): https://youtu.be/Faz29u5xIZc + +.. _SciPy Development Workflow: https://youtu.be/HgU01gJbzMY diff --git a/voice_bridge/scipy/INSTALL.rst.txt b/voice_bridge/scipy/INSTALL.rst.txt new file mode 100644 index 0000000000000000000000000000000000000000..800cf29dbca9795e32668014f2bcea016551d554 --- /dev/null +++ b/voice_bridge/scipy/INSTALL.rst.txt @@ -0,0 +1,255 @@ +Building and installing SciPy ++++++++++++++++++++++++++++++ + +See https://www.scipy.org/install.html + +.. Contents:: + + +INTRODUCTION +============ + +It is *strongly* recommended that you use either a complete scientific Python +distribution or binary packages on your platform if they are available, in +particular on Windows and Mac OS X. You should not attempt to build SciPy if +you are not familiar with compiling software from sources. + +Recommended distributions are: + + - Enthought Canopy (https://www.enthought.com/products/canopy/) + - Anaconda (https://www.anaconda.com) + - Python(x,y) (https://python-xy.github.io/) + - WinPython (https://winpython.github.io/) + +The rest of this install documentation summarizes how to build Scipy. 
Note +that more extensive (and possibly more up-to-date) build instructions are +maintained at https://scipy.github.io/devdocs/building/ + + +PREREQUISITES +============= + +SciPy requires the following software installed for your platform: + +1) Python__ >= 3.7 + +__ https://www.python.org + +2) NumPy__ >= 1.16.5 + +__ https://www.numpy.org/ + +If building from source, SciPy also requires: + +3) setuptools__ + +__ https://github.com/pypa/setuptools + +4) pybind11__ >= 2.4.3 + +__ https://github.com/pybind/pybind11 + +5) If you want to build the documentation: Sphinx__ >= 2.4.0 and < 3.1.0 + +__ http://www.sphinx-doc.org/ + +6) If you want to build SciPy master or another unreleased version from source + (Cython-generated C sources are included in official releases): + Cython__ >= 0.29.18 + +__ http://cython.org/ + +Windows +------- + +Compilers +~~~~~~~~~ + +There are two ways to build SciPy on Windows: + +1. Use Intel MKL, and Intel compilers or ifort + MSVC. This is what Anaconda + and Enthought Canopy use. +2. Use MSVC + GFortran with OpenBLAS. This is how the SciPy Windows wheels are + built. + +Mac OS X +-------- + +It is recommended to use GCC or Clang; both work fine. GCC is available for +free when installing Xcode, the developer toolsuite on Mac OS X. You also +need a Fortran compiler, which is not included with Xcode: you should use a +recent GFortran from an OS X package manager (like Homebrew). + +Please do NOT use GFortran from `hpc.sourceforge.net <http://hpc.sourceforge.net>`_; +it is known to generate buggy SciPy binaries. + +You should also use a BLAS/LAPACK library from an OS X package manager. +ATLAS, OpenBLAS, and MKL all work. + +As of SciPy version 1.2.0, we do not support compiling against the system +Accelerate library for BLAS and LAPACK. It does not support a sufficiently +recent LAPACK interface. + +Linux +----- + +Most common distributions include all the dependencies. You will need to +install a BLAS/LAPACK (all of ATLAS, OpenBLAS, MKL work fine) including +development headers, as well as development headers for Python itself. Those +are typically packaged as python-dev. + + +INSTALLING SCIPY +================ + +For the latest information, see the website: + + https://www.scipy.org + + +Development version from Git +---------------------------- +Use the command:: + + git clone https://github.com/scipy/scipy.git + + cd scipy + git clean -xdf + python setup.py install --user + +Documentation +------------- +Type:: + + cd scipy/doc + make html + +From tarballs +------------- +Unpack ``SciPy-<version>.tar.gz``, change to the ``SciPy-<version>/`` +directory, and run:: + + pip install . -v --user + +This may take several minutes to half an hour depending on the speed of your +computer. + + +TESTING +======= + +To test SciPy after installation (highly recommended), execute in Python:: + + >>> import scipy + >>> scipy.test() + +To run the full test suite use:: + + >>> scipy.test('full') + +If you are upgrading from an older SciPy release, please test your code for any +deprecation warnings before and after upgrading to avoid surprises:: + + $ python -Wd my_code_that_shouldnt_break.py + +Please note that you must have version 1.0 or later of the Pytest test +framework installed in order to run the tests. More information about Pytest is +available on the website__.
__ https://pytest.org/ + +COMPILER NOTES +============== + +You can specify which Fortran compiler to use by using the following +install command:: + + python setup.py config_fc --fcompiler=<Vendor> install + +To see a valid list of names, run:: + + python setup.py config_fc --help-fcompiler + +IMPORTANT: It is highly recommended that all libraries that SciPy uses (e.g. +BLAS and ATLAS libraries) are built with the same Fortran compiler. In most +cases, if you mix compilers, you will not be able to import SciPy at best, and will have +crashes and random results at worst. + +UNINSTALLING +============ + +When installing with ``python setup.py install`` or a variation on that, you do +not get proper uninstall behavior for an older already installed SciPy version. +In many cases that's not a problem, but if it turns out to be an issue, you +need to manually uninstall it first (remove it from e.g. +``/usr/lib/python3.4/site-packages/scipy`` or +``$HOME/lib/python3.4/site-packages/scipy``). + +Alternatively, you can use ``pip install . --user`` instead of ``python +setup.py install --user`` in order to get reliable uninstall behavior. +The downside is that ``pip`` doesn't show you a build log and doesn't support +incremental rebuilds (it copies the whole source tree to a tempdir). + +TROUBLESHOOTING +=============== + +If you experience problems when building/installing/testing SciPy, you +can ask for help on the scipy-user@python.org or scipy-dev@python.org mailing +lists. Please include the following information in your message: + +NOTE: You can generate some of the following information (items 1-6) +in one command:: + + python -c 'from numpy.f2py.diagnose import run; run()' + +1) Platform information:: + + python -c 'import os, sys; print(os.name, sys.platform)' + uname -a + OS, its distribution name and version information + etc. + +2) Information about C, C++, Fortran compilers/linkers as reported by + the compilers when requesting their version information, e.g., + the output of + :: + + gcc -v + g77 --version + +3) Python version:: + + python -c 'import sys; print(sys.version)' + +4) NumPy version:: + + python -c 'import numpy; print(numpy.__version__)' + +5) ATLAS version, the locations of atlas and lapack libraries, building + information if any. If you have ATLAS version 3.3.6 or newer, then + give the output of the last command in + :: + + cd scipy/Lib/linalg + python setup_atlas_version.py build_ext --inplace --force + python -c 'import atlas_version' + +6) The output of the following command + :: + + python INSTALLDIR/numpy/distutils/system_info.py + + where INSTALLDIR is, for example, /usr/lib/python3.4/site-packages/. + +7) Feel free to add any other relevant information. + For example, the full output (both stdout and stderr) of the SciPy + installation command can be very helpful. Since this output can be + rather large, ask before sending it to the mailing list (or + better yet, to one of the developers, if asked). + +8) In case of failing to import extension modules, the output of + :: + + ldd /path/to/ext_module.so + + can be useful. diff --git a/voice_bridge/scipy/LICENSE.txt b/voice_bridge/scipy/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..edc00c0b8fa85ae83353654b146c5758ff5aa93b --- /dev/null +++ b/voice_bridge/scipy/LICENSE.txt @@ -0,0 +1,968 @@ +Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers. +All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +This binary distribution of Scipy also bundles the following software: + + +Name: OpenBLAS +Files: extra-dll\libopenb*.dll +Description: bundled as a dynamically linked library +Availability: https://github.com/xianyi/OpenBLAS/ +License: 3-clause BSD + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: extra-dll\libopenb*.dll +Description: bundled in OpenBLAS +Availability: https://github.com/xianyi/OpenBLAS/ +License: 3-clause BSD + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation.
All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: extra-dll\*.dll +Description: statically linked, in DLL files compiled with gfortran only +Availability: https://gcc.gnu.org/viewcvs/gcc/ +License: GPLv3 + runtime exception + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>.
+ + +Name: Microsoft Visual C++ Runtime Files +Files: extra-dll\msvcp140.dll +License: MSVC + https://www.visualstudio.com/license-terms/distributable-code-microsoft-visual-studio-2015-rc-microsoft-visual-studio-2015-sdk-rc-includes-utilities-buildserver-files/#visual-c-runtime + + Subject to the License Terms for the software, you may copy and + distribute with your program any of the files within the following + folder and its subfolders except as noted below. You may not modify + these files. + + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist + + You may not distribute the contents of the following folders: + + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\debug_nonredist + C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\redist\onecore\debug_nonredist + + Subject to the License Terms for the software, you may copy and + distribute the following files with your program in your program’s + application local folder or by deploying them into the Global + Assembly Cache (GAC): + + VC\atlmfc\lib\mfcmifc80.dll + VC\atlmfc\lib\amd64\mfcmifc80.dll + + +Name: Microsoft Visual C++ Runtime Files +Files: extra-dll\msvc*90.dll, extra-dll\Microsoft.VC90.CRT.manifest +License: MSVC + For your convenience, we have provided the following folders for + use when redistributing VC++ runtime files. Subject to the license + terms for the software, you may redistribute the folder + (unmodified) in the application local folder as a sub-folder with + no change to the folder name. You may also redistribute all the + files (*.dll and *.manifest) within a folder, listed below the + folder for your convenience, as an entire set. + + \VC\redist\x86\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\ia64\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\amd64\Microsoft.VC90.ATL\ + atl90.dll + Microsoft.VC90.ATL.manifest + \VC\redist\x86\Microsoft.VC90.CRT\ + msvcm90.dll + msvcp90.dll + msvcr90.dll + Microsoft.VC90.CRT.manifest + \VC\redist\ia64\Microsoft.VC90.CRT\ + msvcm90.dll + msvcp90.dll + msvcr90.dll + Microsoft.VC90.CRT.manifest + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library.
+ +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC. For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. 
+ + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. 
+ + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. 
+ + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. 
(Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/voice_bridge/scipy/LICENSES_bundled.txt b/voice_bridge/scipy/LICENSES_bundled.txt new file mode 100644 index 0000000000000000000000000000000000000000..06f04a3d1a03a59f87cdc4753cb8c0ac0ca4faef --- /dev/null +++ b/voice_bridge/scipy/LICENSES_bundled.txt @@ -0,0 +1,248 @@ +The SciPy repository and source distributions bundle a number of libraries that +are compatibly licensed. We list these here.
+ +Name: Numpydoc +Files: doc/sphinxext/numpydoc/* +License: 2-clause BSD + For details, see doc/sphinxext/LICENSE.txt + +Name: scipy-sphinx-theme +Files: doc/scipy-sphinx-theme/* +License: 3-clause BSD, PSF and Apache 2.0 + For details, see doc/sphinxext/LICENSE.txt + +Name: Decorator +Files: scipy/_lib/decorator.py +License: 2-clause BSD + For details, see the header inside scipy/_lib/decorator.py + +Name: ID +Files: scipy/linalg/src/id_dist/* +License: 3-clause BSD + For details, see scipy/linalg/src/id_dist/doc/doc.tex + +Name: L-BFGS-B +Files: scipy/optimize/lbfgsb/* +License: BSD license + For details, see scipy/optimize/lbfgsb/README + +Name: LAPJVsp +Files: scipy/sparse/csgraph/_matching.pyx +License: 3-clause BSD +Copyright 1987-, A. Volgenant/Amsterdam School of Economics, + University of Amsterdam + + Distributed under 3-clause BSD license with permission from + University of Amsterdam. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + +Name: SuperLU +Files: scipy/sparse/linalg/dsolve/SuperLU/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt + +Name: ARPACK +Files: scipy/sparse/linalg/eigen/arpack/ARPACK/* +License: 3-clause BSD + For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING + +Name: Qhull +Files: scipy/spatial/qhull/* +License: Qhull license (BSD-like) + For details, see scipy/spatial/qhull/COPYING.txt + +Name: Cephes +Files: scipy/special/cephes/* +License: 3-clause BSD + Distributed under 3-clause BSD license with permission from the author, + see https://lists.debian.org/debian-legal/2004/12/msg00295.html + + Cephes Math Library Release 2.8: June, 2000 + Copyright 1984, 1995, 2000 by Stephen L. Moshier + + This software is derived from the Cephes Math Library and is + incorporated herein by permission of the author. + + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the <organization> nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Name: Faddeeva +Files: scipy/special/Faddeeva.* +License: MIT + Copyright (c) 2012 Massachusetts Institute of Technology + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Name: qd +Files: scipy/special/cephes/dd_*.[ch] +License: modified BSD license ("BSD-LBNL-License.doc") + This work was supported by the Director, Office of Science, Division + of Mathematical, Information, and Computational Sciences of the + U.S. Department of Energy under contract numbers DE-AC03-76SF00098 and + DE-AC02-05CH11231. + + Copyright (c) 2003-2009, The Regents of the University of California, + through Lawrence Berkeley National Laboratory (subject to receipt of + any required approvals from U.S. Dept. of Energy) All rights reserved. + + 1. Redistribution and use in source and binary forms, with or + without modification, are permitted provided that the following + conditions are met: + + (1) Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer.
+ + (2) Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + (3) Neither the name of the University of California, Lawrence + Berkeley National Laboratory, U.S. Dept. of Energy nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 3. You are under no obligation whatsoever to provide any bug fixes, + patches, or upgrades to the features, functionality or performance of + the source code ("Enhancements") to anyone; however, if you choose to + make your Enhancements available either publicly, or directly to + Lawrence Berkeley National Laboratory, without imposing a separate + written license agreement for such Enhancements, then you hereby grant + the following license: a non-exclusive, royalty-free perpetual license + to install, use, modify, prepare derivative works, incorporate into + other computer software, distribute, and sublicense such enhancements + or derivative works thereof, in binary and source code form. + +Name: pypocketfft +Files: scipy/fft/_pocketfft/[pocketfft.h, pypocketfft.cxx] +License: 3-Clause BSD + For details, see scipy/fft/_pocketfft/LICENSE.md + +Name: uarray +Files: scipy/_lib/uarray/* +License: 3-Clause BSD + For details, see scipy/_lib/uarray/LICENSE + +Name: ampgo +Files: benchmarks/benchmarks/go_benchmark_functions/*.py +License: MIT + Functions for testing global optimizers, forked from the AMPGO project, + https://code.google.com/archive/p/ampgo + +Name: pybind11 +Files: no source files are included, however pybind11 binary artifacts are + included with every binary build of SciPy. +License: + Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission.
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Name: HiGHS +Files: scipy/optimize/_highs/* +License: MIT + For details, see scipy/optimize/_highs/LICENCE + +Name: Boost +Files: scipy/_lib/boost/* +License: Boost Software License - Version 1.0 + For details, see scipy/_lib/boost/LICENSE_1_0.txt diff --git a/voice_bridge/scipy/_lib/_ccallback_c.pyd b/voice_bridge/scipy/_lib/_ccallback_c.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9e8a81bfff470ca56ecec34267d8282c9fe32e8a Binary files /dev/null and b/voice_bridge/scipy/_lib/_ccallback_c.pyd differ diff --git a/voice_bridge/scipy/_lib/_uarray/LICENSE b/voice_bridge/scipy/_lib/_uarray/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5f2b90a026aaecbdc090b3d3234954ab29fce8ae --- /dev/null +++ b/voice_bridge/scipy/_lib/_uarray/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Quansight-Labs +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/voice_bridge/scipy/_lib/_uarray/_uarray.pyd b/voice_bridge/scipy/_lib/_uarray/_uarray.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b84c5de2868ce6b533d5b5e5a2862ebe7d9de9a2 Binary files /dev/null and b/voice_bridge/scipy/_lib/_uarray/_uarray.pyd differ diff --git a/voice_bridge/scipy/_lib/messagestream.pyd b/voice_bridge/scipy/_lib/messagestream.pyd new file mode 100644 index 0000000000000000000000000000000000000000..6077ed63baa7a8b81fa8f807df0484365eb92396 Binary files /dev/null and b/voice_bridge/scipy/_lib/messagestream.pyd differ diff --git a/voice_bridge/scipy/doc_requirements.txt b/voice_bridge/scipy/doc_requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..220ee5b2a52c70e1820c8ebd309b680dcd265a9e --- /dev/null +++ b/voice_bridge/scipy/doc_requirements.txt @@ -0,0 +1,6 @@ +# Note: this should disappear at some point. For now, please keep it +# in sync with the doc dependencies in pyproject.toml +Sphinx!=3.1.0, !=4.1.0 +pydata-sphinx-theme>=0.6.1 +sphinx-panels>=0.5.2 +matplotlib>2 diff --git a/voice_bridge/scipy/fft/_pocketfft/LICENSE.md b/voice_bridge/scipy/fft/_pocketfft/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..1b5163d8435976c24988afbd39ded304947178cb --- /dev/null +++ b/voice_bridge/scipy/fft/_pocketfft/LICENSE.md @@ -0,0 +1,25 @@ +Copyright (C) 2010-2019 Max-Planck-Society +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of the copyright holder nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/voice_bridge/scipy/fft/_pocketfft/pypocketfft.pyd b/voice_bridge/scipy/fft/_pocketfft/pypocketfft.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0e22c951a534c48319d17c4424d25ea14c9618e7 Binary files /dev/null and b/voice_bridge/scipy/fft/_pocketfft/pypocketfft.pyd differ diff --git a/voice_bridge/scipy/fftpack/convolve.pyd b/voice_bridge/scipy/fftpack/convolve.pyd new file mode 100644 index 0000000000000000000000000000000000000000..1f2518f5985001a3ae74a732145c0a6e1100356d Binary files /dev/null and b/voice_bridge/scipy/fftpack/convolve.pyd differ diff --git a/voice_bridge/scipy/fftpack/tests/Makefile b/voice_bridge/scipy/fftpack/tests/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..39fdb58e7326669bce6f6fe79d74ed1f6b840b04 --- /dev/null +++ b/voice_bridge/scipy/fftpack/tests/Makefile @@ -0,0 +1,13 @@ +CC = gcc +LD = gcc + +fftw_single: fftw_dct.c + $(CC) -W -Wall -DDCT_TEST_USE_SINGLE $< -o $@ -lfftw3f + +fftw_double: fftw_dct.c + $(CC) -W -Wall $< -o $@ -lfftw3 + +clean: + rm -f fftw_single + rm -f fftw_double + rm -f *.o diff --git a/voice_bridge/scipy/fftpack/tests/fftw_dct.c b/voice_bridge/scipy/fftpack/tests/fftw_dct.c new file mode 100644 index 0000000000000000000000000000000000000000..688eeb531a440266e7cb56d6881b7676cbaad5a7 --- /dev/null +++ b/voice_bridge/scipy/fftpack/tests/fftw_dct.c @@ -0,0 +1,150 @@ +#include <stdlib.h> +#include <stdio.h> + +#include <fftw3.h> + +#if DCT_TEST_PRECISION == 1 +typedef float float_prec; +#define PF "%.7f" +#define FFTW_PLAN fftwf_plan +#define FFTW_MALLOC fftwf_malloc +#define FFTW_FREE fftwf_free +#define FFTW_PLAN_CREATE fftwf_plan_r2r_1d +#define FFTW_EXECUTE fftwf_execute +#define FFTW_DESTROY_PLAN fftwf_destroy_plan +#define FFTW_CLEANUP fftwf_cleanup +#elif DCT_TEST_PRECISION == 2 +typedef double float_prec; +#define PF "%.18f" +#define FFTW_PLAN fftw_plan +#define FFTW_MALLOC fftw_malloc +#define FFTW_FREE fftw_free +#define FFTW_PLAN_CREATE fftw_plan_r2r_1d +#define FFTW_EXECUTE fftw_execute +#define FFTW_DESTROY_PLAN fftw_destroy_plan +#define FFTW_CLEANUP fftw_cleanup +#elif DCT_TEST_PRECISION == 3 +typedef long double float_prec; +#define PF "%.18Lf" +#define FFTW_PLAN fftwl_plan +#define FFTW_MALLOC fftwl_malloc +#define FFTW_FREE fftwl_free +#define FFTW_PLAN_CREATE fftwl_plan_r2r_1d +#define FFTW_EXECUTE fftwl_execute +#define FFTW_DESTROY_PLAN fftwl_destroy_plan +#define FFTW_CLEANUP fftwl_cleanup +#else +#error DCT_TEST_PRECISION must be a number 1-3 +#endif + + +enum type { + DCT_I = 1, + DCT_II = 2, + DCT_III = 3, + DCT_IV = 4, + DST_I = 5, + DST_II = 6, + DST_III = 7, + DST_IV = 8, +}; + +int gen(int type, int sz) +{ + float_prec *a, *b; + FFTW_PLAN p; + int i, tp; + + a = FFTW_MALLOC(sizeof(*a) * sz); + if (a == NULL) { + fprintf(stderr, "failure\n"); + exit(EXIT_FAILURE); + } + b = FFTW_MALLOC(sizeof(*b) * sz); + if (b == NULL) { + fprintf(stderr, "failure\n"); + exit(EXIT_FAILURE); + } + + switch(type) { + case DCT_I: + tp = FFTW_REDFT00; + break; + case DCT_II: + tp = FFTW_REDFT10; + break; + case DCT_III: + tp = FFTW_REDFT01; + break; + case DCT_IV: + tp = FFTW_REDFT11; + break; + case DST_I: + tp = FFTW_RODFT00; + break; + case DST_II: + tp = FFTW_RODFT10; + break; + case DST_III: + tp = FFTW_RODFT01; + break; + case DST_IV: + tp = FFTW_RODFT11; + break; + default: + fprintf(stderr, "unknown type\n"); + exit(EXIT_FAILURE); + } + + switch(type) { + case DCT_I: + case DCT_II: + case DCT_III: + case DCT_IV: + for(i=0; i < sz; ++i) { + a[i] = i; + } + break; + case DST_I: + case DST_II: 
case DST_III: + case DST_IV: +/* TODO: what should we do for dst's?*/ + for(i=0; i < sz; ++i) { + a[i] = i; + } + break; + default: + fprintf(stderr, "unknown type\n"); + exit(EXIT_FAILURE); + } + + p = FFTW_PLAN_CREATE(sz, a, b, tp, FFTW_ESTIMATE); + FFTW_EXECUTE(p); + FFTW_DESTROY_PLAN(p); + + for(i=0; i < sz; ++i) { + printf(PF"\n", b[i]); + } + FFTW_FREE(b); + FFTW_FREE(a); + + return 0; +} + +int main(int argc, char* argv[]) +{ + int n, tp; + + if (argc < 3) { + fprintf(stderr, "missing argument: program type n\n"); + exit(EXIT_FAILURE); + } + tp = atoi(argv[1]); + n = atoi(argv[2]); + + gen(tp, n); + FFTW_CLEANUP(); + + return 0; +} diff --git a/voice_bridge/scipy/fftpack/tests/fftw_double_ref.npz b/voice_bridge/scipy/fftpack/tests/fftw_double_ref.npz new file mode 100644 index 0000000000000000000000000000000000000000..e1e3d620400746177b560b9193efce03c2841e99 --- /dev/null +++ b/voice_bridge/scipy/fftpack/tests/fftw_double_ref.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a60c649415b645223924d8342ccc5c097801c86901287a369e53fc9259f5ec4e +size 162120 diff --git a/voice_bridge/scipy/fftpack/tests/fftw_longdouble_ref.npz b/voice_bridge/scipy/fftpack/tests/fftw_longdouble_ref.npz new file mode 100644 index 0000000000000000000000000000000000000000..b1a646889c9889541e8d368c8c2d96520d183dc4 --- /dev/null +++ b/voice_bridge/scipy/fftpack/tests/fftw_longdouble_ref.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a406cbd4dad04d0c59dd38f54416fb49424c82229c1a074b6a44ec0cde2000e3 +size 296072 diff --git a/voice_bridge/scipy/fftpack/tests/fftw_single_ref.npz b/voice_bridge/scipy/fftpack/tests/fftw_single_ref.npz new file mode 100644 index 0000000000000000000000000000000000000000..a42748dba14b7ff0d2f53ce4cd5a86a4f08e5d93 --- /dev/null +++ b/voice_bridge/scipy/fftpack/tests/fftw_single_ref.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:276a9141318e6fc36e4ab6ff54a61b64054ef8849b660f17359e5f541b43c526 +size 95144 diff --git a/voice_bridge/scipy/fftpack/tests/gendata.m b/voice_bridge/scipy/fftpack/tests/gendata.m new file mode 100644 index 0000000000000000000000000000000000000000..6c231df4d788a2e1a96f877ec5d40509a7fb09cc --- /dev/null +++ b/voice_bridge/scipy/fftpack/tests/gendata.m @@ -0,0 +1,21 @@ +x0 = linspace(0, 10, 11); +x1 = linspace(0, 10, 15); +x2 = linspace(0, 10, 16); +x3 = linspace(0, 10, 17); + +x4 = randn(32, 1); +x5 = randn(64, 1); +x6 = randn(128, 1); +x7 = randn(256, 1); + +y0 = dct(x0); +y1 = dct(x1); +y2 = dct(x2); +y3 = dct(x3); +y4 = dct(x4); +y5 = dct(x5); +y6 = dct(x6); +y7 = dct(x7); + +save('test.mat', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', ... 
+ 'y0', 'y1', 'y2', 'y3', 'y4', 'y5', 'y6', 'y7'); diff --git a/voice_bridge/scipy/fftpack/tests/test.npz b/voice_bridge/scipy/fftpack/tests/test.npz new file mode 100644 index 0000000000000000000000000000000000000000..1e5a4e06615c6bcc58f0feff20f73e83439a937d --- /dev/null +++ b/voice_bridge/scipy/fftpack/tests/test.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36de804a22d8fdea054590ce49ddf3c859838b7d89193c56b3bcb660cbf43797 +size 11968 diff --git a/voice_bridge/scipy/integrate/_dop.pyd b/voice_bridge/scipy/integrate/_dop.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f7ef499d952bfda4ba9d1cf65bd4dd70dfd2f526 Binary files /dev/null and b/voice_bridge/scipy/integrate/_dop.pyd differ diff --git a/voice_bridge/scipy/integrate/_odepack.pyd b/voice_bridge/scipy/integrate/_odepack.pyd new file mode 100644 index 0000000000000000000000000000000000000000..efb7c1e708175244f0d0f9b57098dbf680914451 Binary files /dev/null and b/voice_bridge/scipy/integrate/_odepack.pyd differ diff --git a/voice_bridge/scipy/integrate/_quadpack.pyd b/voice_bridge/scipy/integrate/_quadpack.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0e0a3848d95913b735712ed235f5963657700816 Binary files /dev/null and b/voice_bridge/scipy/integrate/_quadpack.pyd differ diff --git a/voice_bridge/scipy/integrate/lsoda.pyd b/voice_bridge/scipy/integrate/lsoda.pyd new file mode 100644 index 0000000000000000000000000000000000000000..540d3fba4aa27fa9097cebb26d89c6e71fb9a377 Binary files /dev/null and b/voice_bridge/scipy/integrate/lsoda.pyd differ diff --git a/voice_bridge/scipy/integrate/tests/_test_multivariate.c b/voice_bridge/scipy/integrate/tests/_test_multivariate.c new file mode 100644 index 0000000000000000000000000000000000000000..d761fcb7605b3db3fd67f5a07356ed07c8c40aaa --- /dev/null +++ b/voice_bridge/scipy/integrate/tests/_test_multivariate.c @@ -0,0 +1,124 @@ +#include <Python.h> + +#include "math.h" + +const double PI = 3.141592653589793238462643383279502884; + +static double +_multivariate_typical(int n, double *args) +{ + return cos(args[1] * args[0] - args[2] * sin(args[0])) / PI; +} + +static double +_multivariate_indefinite(int n, double *args) +{ + return -exp(-args[0]) * log(args[0]); +} + +static double +_multivariate_sin(int n, double *args) +{ + return sin(args[0]); +} + +static double +_sin_0(double x, void *user_data) +{ + return sin(x); +} + +static double +_sin_1(int ndim, double *x, void *user_data) +{ + return sin(x[0]); +} + +static double +_sin_2(double x) +{ + return sin(x); +} + +static double +_sin_3(int ndim, double *x) +{ + return sin(x[0]); +} + + +typedef struct { + char *name; + void *ptr; +} routine_t; + + +static const routine_t routines[] = { + {"_multivariate_typical", &_multivariate_typical}, + {"_multivariate_indefinite", &_multivariate_indefinite}, + {"_multivariate_sin", &_multivariate_sin}, + {"_sin_0", &_sin_0}, + {"_sin_1", &_sin_1}, + {"_sin_2", &_sin_2}, + {"_sin_3", &_sin_3} +}; + + +static int create_pointers(PyObject *module) +{ + PyObject *d, *obj = NULL; + size_t i; + + d = PyModule_GetDict(module); + if (d == NULL) { + goto fail; + } + + for (i = 0; i < sizeof(routines) / sizeof(routine_t); ++i) { + obj = PyLong_FromVoidPtr(routines[i].ptr); + if (obj == NULL) { + goto fail; + } + + if (PyDict_SetItemString(d, routines[i].name, obj)) { + goto fail; + } + + Py_DECREF(obj); + obj = NULL; + } + + Py_XDECREF(obj); + return 0; + +fail: + Py_XDECREF(obj); + return -1; +} + + +static struct PyModuleDef moduledef = { + 
PyModuleDef_HEAD_INIT, + "_test_multivariate", + NULL, + -1, + NULL, /* Empty methods section */ + NULL, + NULL, + NULL, + NULL +}; + +PyObject *PyInit__test_multivariate(void) +{ + PyObject *m; + m = PyModule_Create(&moduledef); + if (m == NULL) { + return NULL; + } + if (create_pointers(m)) { + Py_DECREF(m); + return NULL; + } + return m; +} diff --git a/voice_bridge/scipy/integrate/tests/banded5x5.f b/voice_bridge/scipy/integrate/tests/banded5x5.f new file mode 100644 index 0000000000000000000000000000000000000000..8a56593d0ef7964d8984fd58b3e70c6ed18bab70 --- /dev/null +++ b/voice_bridge/scipy/integrate/tests/banded5x5.f @@ -0,0 +1,240 @@ +c banded5x5.f +c +c This Fortran library contains implementations of the +c differential equation +c dy/dt = A*y +c where A is a 5x5 banded matrix (see below for the actual +c values). These functions will be used to test +c scipy.integrate.odeint. +c +c The idea is to solve the system two ways: pure Fortran, and +c using odeint. The "pure Fortran" solver is implemented in +c the subroutine banded5x5_solve below. It calls LSODA to +c solve the system. +c +c To solve the same system using odeint, the functions in this +c file are given a python wrapper using f2py. Then the code +c in test_odeint_jac.py uses the wrapper to implement the +c equation and Jacobian functions required by odeint. Because +c those functions ultimately call the Fortran routines defined +c in this file, the two methods (pure Fortran and odeint) should +c produce exactly the same results. (That's assuming floating +c point calculations are deterministic, which can be an +c incorrect assumption.) If we simply re-implemented the +c equation and Jacobian functions using just python and numpy, +c the floating point calculations would not be performed in +c the same sequence as in the Fortran code, and we would obtain +c different answers. The answer for either method would be +c numerically "correct", but the errors would be different, +c and the counts of function and Jacobian evaluations would +c likely be different. +c + block data jacobian + implicit none + + double precision bands + dimension bands(4,5) + common /jac/ bands + +c The data for a banded Jacobian stored in packed banded +c format. The full Jacobian is +c +c -1, 0.25, 0, 0, 0 +c 0.25, -5, 0.25, 0, 0 +c 0.10, 0.25, -25, 0.25, 0 +c 0, 0.10, 0.25, -125, 0.25 +c 0, 0, 0.10, 0.25, -625 +c +c The columns in the following layout of numbers are +c the upper diagonal, main diagonal and two lower diagonals +c (i.e. each row in the layout is a column of the packed +c banded Jacobian). The values 0.00D0 are in the "don't +c care" positions. 
+ + data bands/ + + 0.00D0, -1.0D0, 0.25D0, 0.10D0, + + 0.25D0, -5.0D0, 0.25D0, 0.10D0, + + 0.25D0, -25.0D0, 0.25D0, 0.10D0, + + 0.25D0, -125.0D0, 0.25D0, 0.00D0, + + 0.25D0, -625.0D0, 0.00D0, 0.00D0 + + / + + end + + subroutine getbands(jac) + double precision jac + dimension jac(4, 5) +cf2py intent(out) jac + + double precision bands + dimension bands(4,5) + common /jac/ bands + + integer i, j + do 5 i = 1, 4 + do 5 j = 1, 5 + jac(i, j) = bands(i, j) + 5 continue + + return + end + +c +c Differential equations, right-hand-side +c + subroutine banded5x5(n, t, y, f) + implicit none + integer n + double precision t, y, f + dimension y(n), f(n) + + double precision bands + dimension bands(4,5) + common /jac/ bands + + f(1) = bands(2,1)*y(1) + bands(1,2)*y(2) + f(2) = bands(3,1)*y(1) + bands(2,2)*y(2) + bands(1,3)*y(3) + f(3) = bands(4,1)*y(1) + bands(3,2)*y(2) + bands(2,3)*y(3) + + + bands(1,4)*y(4) + f(4) = bands(4,2)*y(2) + bands(3,3)*y(3) + bands(2,4)*y(4) + + + bands(1,5)*y(5) + f(5) = bands(4,3)*y(3) + bands(3,4)*y(4) + bands(2,5)*y(5) + + return + end + +c +c Jacobian +c +c The subroutine assumes that the full Jacobian is to be computed. +c ml and mu are ignored, and nrowpd is assumed to be n. +c + subroutine banded5x5_jac(n, t, y, ml, mu, jac, nrowpd) + implicit none + integer n, ml, mu, nrowpd + double precision t, y, jac + dimension y(n), jac(nrowpd, n) + + integer i, j + + double precision bands + dimension bands(4,5) + common /jac/ bands + + do 15 i = 1, 4 + do 15 j = 1, 5 + if ((i - j) .gt. 0) then + jac(i - j, j) = bands(i, j) + end if +15 continue + + return + end + +c +c Banded Jacobian +c +c ml = 2, mu = 1 +c + subroutine banded5x5_bjac(n, t, y, ml, mu, bjac, nrowpd) + implicit none + integer n, ml, mu, nrowpd + double precision t, y, bjac + dimension y(5), bjac(nrowpd, n) + + integer i, j + + double precision bands + dimension bands(4,5) + common /jac/ bands + + do 20 i = 1, 4 + do 20 j = 1, 5 + bjac(i, j) = bands(i, j) + 20 continue + + return + end + + + subroutine banded5x5_solve(y, nsteps, dt, jt, nst, nfe, nje) + +c jt is the Jacobian type: +c jt = 1 Use the full Jacobian. +c jt = 4 Use the banded Jacobian. +c nst, nfe and nje are outputs: +c nst: Total number of internal steps +c nfe: Total number of function (i.e. right-hand-side) +c evaluations +c nje: Total number of Jacobian evaluations + + implicit none + + external banded5x5 + external banded5x5_jac + external banded5x5_bjac + external LSODA + +c Arguments... + double precision y, dt + integer nsteps, jt, nst, nfe, nje +cf2py intent(inout) y +cf2py intent(in) nsteps, dt, jt +cf2py intent(out) nst, nfe, nje + +c Local variables... + double precision atol, rtol, t, tout, rwork + integer iwork + dimension y(5), rwork(500), iwork(500) + integer neq, i + integer itol, iopt, itask, istate, lrw, liw + +c Common block... + double precision jacband + dimension jacband(4,5) + common /jac/ jacband + +c --- t range --- + t = 0.0D0 + +c --- Solver tolerances --- + rtol = 1.0D-11 + atol = 1.0D-13 + itol = 1 + +c --- Other LSODA parameters --- + neq = 5 + itask = 1 + istate = 1 + iopt = 0 + iwork(1) = 2 + iwork(2) = 1 + lrw = 500 + liw = 500 + +c --- Call LSODA in a loop to compute the solution --- + do 40 i = 1, nsteps + tout = i*dt + if (jt .eq. 
1) then + call LSODA(banded5x5, neq, y, t, tout, + & itol, rtol, atol, itask, istate, iopt, + & rwork, lrw, iwork, liw, + & banded5x5_jac, jt) + else + call LSODA(banded5x5, neq, y, t, tout, + & itol, rtol, atol, itask, istate, iopt, + & rwork, lrw, iwork, liw, + & banded5x5_bjac, jt) + end if + 40 if (istate .lt. 0) goto 80 + + nst = iwork(11) + nfe = iwork(12) + nje = iwork(13) + + return + + 80 write (6,89) istate + 89 format(1X,"Error: istate=",I3) + return + end diff --git a/voice_bridge/scipy/integrate/vode.pyd b/voice_bridge/scipy/integrate/vode.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e77ffa0724cf710a47e645fd883c2ae55d5257bf Binary files /dev/null and b/voice_bridge/scipy/integrate/vode.pyd differ diff --git a/voice_bridge/scipy/interpolate/_bspl.pyd b/voice_bridge/scipy/interpolate/_bspl.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b5241f3b3529904f9d29b389376d2d91fc1653db Binary files /dev/null and b/voice_bridge/scipy/interpolate/_bspl.pyd differ diff --git a/voice_bridge/scipy/interpolate/_fitpack.pyd b/voice_bridge/scipy/interpolate/_fitpack.pyd new file mode 100644 index 0000000000000000000000000000000000000000..574a1354087a03978000aa3fe7fa55d204d9252f Binary files /dev/null and b/voice_bridge/scipy/interpolate/_fitpack.pyd differ diff --git a/voice_bridge/scipy/interpolate/_ppoly.pyd b/voice_bridge/scipy/interpolate/_ppoly.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0def9afa515f7a4ce7f10bcad0effa7abb844716 Binary files /dev/null and b/voice_bridge/scipy/interpolate/_ppoly.pyd differ diff --git a/voice_bridge/scipy/interpolate/_rbfinterp_pythran.pyd b/voice_bridge/scipy/interpolate/_rbfinterp_pythran.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e68e84c1d1681ace0df31f2c937cb777e59abcf9 Binary files /dev/null and b/voice_bridge/scipy/interpolate/_rbfinterp_pythran.pyd differ diff --git a/voice_bridge/scipy/interpolate/dfitpack.pyd b/voice_bridge/scipy/interpolate/dfitpack.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d5c398f1a026ac3e002ee5553a181808f7a264b5 Binary files /dev/null and b/voice_bridge/scipy/interpolate/dfitpack.pyd differ diff --git a/voice_bridge/scipy/interpolate/interpnd.pyd b/voice_bridge/scipy/interpolate/interpnd.pyd new file mode 100644 index 0000000000000000000000000000000000000000..8aa3562052408669885e0fb9f43293d68f5deb48 Binary files /dev/null and b/voice_bridge/scipy/interpolate/interpnd.pyd differ diff --git a/voice_bridge/scipy/interpolate/tests/data/bug-1310.npz b/voice_bridge/scipy/interpolate/tests/data/bug-1310.npz new file mode 100644 index 0000000000000000000000000000000000000000..8bddf805c36b29dc449556c27a2b489691f841af --- /dev/null +++ b/voice_bridge/scipy/interpolate/tests/data/bug-1310.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d6803c0b398f2704c236f1d1b9e8e5ede06bd165a0abb0f228281abbd455ae9 +size 2648 diff --git a/voice_bridge/scipy/interpolate/tests/data/estimate_gradients_hang.npy b/voice_bridge/scipy/interpolate/tests/data/estimate_gradients_hang.npy new file mode 100644 index 0000000000000000000000000000000000000000..c5ef8f63f263a476823ddeacf2571551c2fe4690 --- /dev/null +++ b/voice_bridge/scipy/interpolate/tests/data/estimate_gradients_hang.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:406c10857417ff5ea98d8cd28945c9d0e4f5c24f92a48ad0e8fab955bf2477f1 +size 35680 diff --git a/voice_bridge/scipy/io/arff/tests/data/iris.arff 
b/voice_bridge/scipy/io/arff/tests/data/iris.arff new file mode 100644 index 0000000000000000000000000000000000000000..780480c7c6b9a68bf71aaf357c7d3f7a5b3b3f57 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/iris.arff @@ -0,0 +1,225 @@ +% 1. Title: Iris Plants Database +% +% 2. Sources: +% (a) Creator: R.A. Fisher +% (b) Donor: Michael Marshall (MARSHALL%PLU@io.arc.nasa.gov) +% (c) Date: July, 1988 +% +% 3. Past Usage: +% - Publications: too many to mention!!! Here are a few. +% 1. Fisher,R.A. "The use of multiple measurements in taxonomic problems" +% Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions +% to Mathematical Statistics" (John Wiley, NY, 1950). +% 2. Duda,R.O., & Hart,P.E. (1973) Pattern Classification and Scene Analysis. +% (Q327.D83) John Wiley & Sons. ISBN 0-471-22361-1. See page 218. +% 3. Dasarathy, B.V. (1980) "Nosing Around the Neighborhood: A New System +% Structure and Classification Rule for Recognition in Partially Exposed +% Environments". IEEE Transactions on Pattern Analysis and Machine +% Intelligence, Vol. PAMI-2, No. 1, 67-71. +% -- Results: +% -- very low misclassification rates (0% for the setosa class) +% 4. Gates, G.W. (1972) "The Reduced Nearest Neighbor Rule". IEEE +% Transactions on Information Theory, May 1972, 431-433. +% -- Results: +% -- very low misclassification rates again +% 5. See also: 1988 MLC Proceedings, 54-64. Cheeseman et al's AUTOCLASS II +% conceptual clustering system finds 3 classes in the data. +% +% 4. Relevant Information: +% --- This is perhaps the best known database to be found in the pattern +% recognition literature. Fisher's paper is a classic in the field +% and is referenced frequently to this day. (See Duda & Hart, for +% example.) The data set contains 3 classes of 50 instances each, +% where each class refers to a type of iris plant. One class is +% linearly separable from the other 2; the latter are NOT linearly +% separable from each other. +% --- Predicted attribute: class of iris plant. +% --- This is an exceedingly simple domain. +% +% 5. Number of Instances: 150 (50 in each of three classes) +% +% 6. Number of Attributes: 4 numeric, predictive attributes and the class +% +% 7. Attribute Information: +% 1. sepal length in cm +% 2. sepal width in cm +% 3. petal length in cm +% 4. petal width in cm +% 5. class: +% -- Iris Setosa +% -- Iris Versicolour +% -- Iris Virginica +% +% 8. Missing Attribute Values: None +% +% Summary Statistics: +% Min Max Mean SD Class Correlation +% sepal length: 4.3 7.9 5.84 0.83 0.7826 +% sepal width: 2.0 4.4 3.05 0.43 -0.4194 +% petal length: 1.0 6.9 3.76 1.76 0.9490 (high!) +% petal width: 0.1 2.5 1.20 0.76 0.9565 (high!) +% +% 9. Class Distribution: 33.3% for each of 3 classes. 
+ +@RELATION iris + +@ATTRIBUTE sepallength REAL +@ATTRIBUTE sepalwidth REAL +@ATTRIBUTE petallength REAL +@ATTRIBUTE petalwidth REAL +@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} + +@DATA +5.1,3.5,1.4,0.2,Iris-setosa +4.9,3.0,1.4,0.2,Iris-setosa +4.7,3.2,1.3,0.2,Iris-setosa +4.6,3.1,1.5,0.2,Iris-setosa +5.0,3.6,1.4,0.2,Iris-setosa +5.4,3.9,1.7,0.4,Iris-setosa +4.6,3.4,1.4,0.3,Iris-setosa +5.0,3.4,1.5,0.2,Iris-setosa +4.4,2.9,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.4,3.7,1.5,0.2,Iris-setosa +4.8,3.4,1.6,0.2,Iris-setosa +4.8,3.0,1.4,0.1,Iris-setosa +4.3,3.0,1.1,0.1,Iris-setosa +5.8,4.0,1.2,0.2,Iris-setosa +5.7,4.4,1.5,0.4,Iris-setosa +5.4,3.9,1.3,0.4,Iris-setosa +5.1,3.5,1.4,0.3,Iris-setosa +5.7,3.8,1.7,0.3,Iris-setosa +5.1,3.8,1.5,0.3,Iris-setosa +5.4,3.4,1.7,0.2,Iris-setosa +5.1,3.7,1.5,0.4,Iris-setosa +4.6,3.6,1.0,0.2,Iris-setosa +5.1,3.3,1.7,0.5,Iris-setosa +4.8,3.4,1.9,0.2,Iris-setosa +5.0,3.0,1.6,0.2,Iris-setosa +5.0,3.4,1.6,0.4,Iris-setosa +5.2,3.5,1.5,0.2,Iris-setosa +5.2,3.4,1.4,0.2,Iris-setosa +4.7,3.2,1.6,0.2,Iris-setosa +4.8,3.1,1.6,0.2,Iris-setosa +5.4,3.4,1.5,0.4,Iris-setosa +5.2,4.1,1.5,0.1,Iris-setosa +5.5,4.2,1.4,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +5.0,3.2,1.2,0.2,Iris-setosa +5.5,3.5,1.3,0.2,Iris-setosa +4.9,3.1,1.5,0.1,Iris-setosa +4.4,3.0,1.3,0.2,Iris-setosa +5.1,3.4,1.5,0.2,Iris-setosa +5.0,3.5,1.3,0.3,Iris-setosa +4.5,2.3,1.3,0.3,Iris-setosa +4.4,3.2,1.3,0.2,Iris-setosa +5.0,3.5,1.6,0.6,Iris-setosa +5.1,3.8,1.9,0.4,Iris-setosa +4.8,3.0,1.4,0.3,Iris-setosa +5.1,3.8,1.6,0.2,Iris-setosa +4.6,3.2,1.4,0.2,Iris-setosa +5.3,3.7,1.5,0.2,Iris-setosa +5.0,3.3,1.4,0.2,Iris-setosa +7.0,3.2,4.7,1.4,Iris-versicolor +6.4,3.2,4.5,1.5,Iris-versicolor +6.9,3.1,4.9,1.5,Iris-versicolor +5.5,2.3,4.0,1.3,Iris-versicolor +6.5,2.8,4.6,1.5,Iris-versicolor +5.7,2.8,4.5,1.3,Iris-versicolor +6.3,3.3,4.7,1.6,Iris-versicolor +4.9,2.4,3.3,1.0,Iris-versicolor +6.6,2.9,4.6,1.3,Iris-versicolor +5.2,2.7,3.9,1.4,Iris-versicolor +5.0,2.0,3.5,1.0,Iris-versicolor +5.9,3.0,4.2,1.5,Iris-versicolor +6.0,2.2,4.0,1.0,Iris-versicolor +6.1,2.9,4.7,1.4,Iris-versicolor +5.6,2.9,3.6,1.3,Iris-versicolor +6.7,3.1,4.4,1.4,Iris-versicolor +5.6,3.0,4.5,1.5,Iris-versicolor +5.8,2.7,4.1,1.0,Iris-versicolor +6.2,2.2,4.5,1.5,Iris-versicolor +5.6,2.5,3.9,1.1,Iris-versicolor +5.9,3.2,4.8,1.8,Iris-versicolor +6.1,2.8,4.0,1.3,Iris-versicolor +6.3,2.5,4.9,1.5,Iris-versicolor +6.1,2.8,4.7,1.2,Iris-versicolor +6.4,2.9,4.3,1.3,Iris-versicolor +6.6,3.0,4.4,1.4,Iris-versicolor +6.8,2.8,4.8,1.4,Iris-versicolor +6.7,3.0,5.0,1.7,Iris-versicolor +6.0,2.9,4.5,1.5,Iris-versicolor +5.7,2.6,3.5,1.0,Iris-versicolor +5.5,2.4,3.8,1.1,Iris-versicolor +5.5,2.4,3.7,1.0,Iris-versicolor +5.8,2.7,3.9,1.2,Iris-versicolor +6.0,2.7,5.1,1.6,Iris-versicolor +5.4,3.0,4.5,1.5,Iris-versicolor +6.0,3.4,4.5,1.6,Iris-versicolor +6.7,3.1,4.7,1.5,Iris-versicolor +6.3,2.3,4.4,1.3,Iris-versicolor +5.6,3.0,4.1,1.3,Iris-versicolor +5.5,2.5,4.0,1.3,Iris-versicolor +5.5,2.6,4.4,1.2,Iris-versicolor +6.1,3.0,4.6,1.4,Iris-versicolor +5.8,2.6,4.0,1.2,Iris-versicolor +5.0,2.3,3.3,1.0,Iris-versicolor +5.6,2.7,4.2,1.3,Iris-versicolor +5.7,3.0,4.2,1.2,Iris-versicolor +5.7,2.9,4.2,1.3,Iris-versicolor +6.2,2.9,4.3,1.3,Iris-versicolor +5.1,2.5,3.0,1.1,Iris-versicolor +5.7,2.8,4.1,1.3,Iris-versicolor +6.3,3.3,6.0,2.5,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +7.1,3.0,5.9,2.1,Iris-virginica +6.3,2.9,5.6,1.8,Iris-virginica +6.5,3.0,5.8,2.2,Iris-virginica +7.6,3.0,6.6,2.1,Iris-virginica +4.9,2.5,4.5,1.7,Iris-virginica 
+7.3,2.9,6.3,1.8,Iris-virginica +6.7,2.5,5.8,1.8,Iris-virginica +7.2,3.6,6.1,2.5,Iris-virginica +6.5,3.2,5.1,2.0,Iris-virginica +6.4,2.7,5.3,1.9,Iris-virginica +6.8,3.0,5.5,2.1,Iris-virginica +5.7,2.5,5.0,2.0,Iris-virginica +5.8,2.8,5.1,2.4,Iris-virginica +6.4,3.2,5.3,2.3,Iris-virginica +6.5,3.0,5.5,1.8,Iris-virginica +7.7,3.8,6.7,2.2,Iris-virginica +7.7,2.6,6.9,2.3,Iris-virginica +6.0,2.2,5.0,1.5,Iris-virginica +6.9,3.2,5.7,2.3,Iris-virginica +5.6,2.8,4.9,2.0,Iris-virginica +7.7,2.8,6.7,2.0,Iris-virginica +6.3,2.7,4.9,1.8,Iris-virginica +6.7,3.3,5.7,2.1,Iris-virginica +7.2,3.2,6.0,1.8,Iris-virginica +6.2,2.8,4.8,1.8,Iris-virginica +6.1,3.0,4.9,1.8,Iris-virginica +6.4,2.8,5.6,2.1,Iris-virginica +7.2,3.0,5.8,1.6,Iris-virginica +7.4,2.8,6.1,1.9,Iris-virginica +7.9,3.8,6.4,2.0,Iris-virginica +6.4,2.8,5.6,2.2,Iris-virginica +6.3,2.8,5.1,1.5,Iris-virginica +6.1,2.6,5.6,1.4,Iris-virginica +7.7,3.0,6.1,2.3,Iris-virginica +6.3,3.4,5.6,2.4,Iris-virginica +6.4,3.1,5.5,1.8,Iris-virginica +6.0,3.0,4.8,1.8,Iris-virginica +6.9,3.1,5.4,2.1,Iris-virginica +6.7,3.1,5.6,2.4,Iris-virginica +6.9,3.1,5.1,2.3,Iris-virginica +5.8,2.7,5.1,1.9,Iris-virginica +6.8,3.2,5.9,2.3,Iris-virginica +6.7,3.3,5.7,2.5,Iris-virginica +6.7,3.0,5.2,2.3,Iris-virginica +6.3,2.5,5.0,1.9,Iris-virginica +6.5,3.0,5.2,2.0,Iris-virginica +6.2,3.4,5.4,2.3,Iris-virginica +5.9,3.0,5.1,1.8,Iris-virginica +% +% +% diff --git a/voice_bridge/scipy/io/arff/tests/data/missing.arff b/voice_bridge/scipy/io/arff/tests/data/missing.arff new file mode 100644 index 0000000000000000000000000000000000000000..dedc64c8fa2fcdc0081b30b7804be85114495ce2 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/missing.arff @@ -0,0 +1,8 @@ +% This arff file contains some missing data +@relation missing +@attribute yop real +@attribute yap real +@data +1,5 +2,4 +?,? 
diff --git a/voice_bridge/scipy/io/arff/tests/data/nodata.arff b/voice_bridge/scipy/io/arff/tests/data/nodata.arff new file mode 100644 index 0000000000000000000000000000000000000000..5766aeb229a1b31378026274c366e8e9e44fd487 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/nodata.arff @@ -0,0 +1,11 @@ +@RELATION iris + +@ATTRIBUTE sepallength REAL +@ATTRIBUTE sepalwidth REAL +@ATTRIBUTE petallength REAL +@ATTRIBUTE petalwidth REAL +@ATTRIBUTE class {Iris-setosa,Iris-versicolor,Iris-virginica} + +@DATA + +% This file has no data diff --git a/voice_bridge/scipy/io/arff/tests/data/quoted_nominal.arff b/voice_bridge/scipy/io/arff/tests/data/quoted_nominal.arff new file mode 100644 index 0000000000000000000000000000000000000000..7cd16d1ef9b50cc1194d034ef4d458ef3cf0d417 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/quoted_nominal.arff @@ -0,0 +1,13 @@ +% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes +% Spaces between elements are stripped by the parser + +@relation SOME_DATA +@attribute age numeric +@attribute smoker {'yes', 'no'} +@data +18, 'no' +24, 'yes' +44, 'no' +56, 'no' +89,'yes' +11, 'no' diff --git a/voice_bridge/scipy/io/arff/tests/data/quoted_nominal_spaces.arff b/voice_bridge/scipy/io/arff/tests/data/quoted_nominal_spaces.arff new file mode 100644 index 0000000000000000000000000000000000000000..c799127862b6060442b29c9a0382836cc9c55537 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/quoted_nominal_spaces.arff @@ -0,0 +1,13 @@ +% Regression test for issue #10232 : Exception in loadarff with quoted nominal attributes +% Spaces inside quotes are NOT stripped by the parser + +@relation SOME_DATA +@attribute age numeric +@attribute smoker {' yes', 'no '} +@data +18,'no ' +24,' yes' +44,'no ' +56,'no ' +89,' yes' +11,'no ' diff --git a/voice_bridge/scipy/io/arff/tests/data/test1.arff b/voice_bridge/scipy/io/arff/tests/data/test1.arff new file mode 100644 index 0000000000000000000000000000000000000000..ccc8e0cc7c43dc66ad7b3a8e4738c3322d3f79d8 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test1.arff @@ -0,0 +1,10 @@ +@RELATION test1 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/voice_bridge/scipy/io/arff/tests/data/test10.arff b/voice_bridge/scipy/io/arff/tests/data/test10.arff new file mode 100644 index 0000000000000000000000000000000000000000..094ac5094a842866666726b358d2c66bf927c9d2 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test10.arff @@ -0,0 +1,8 @@ +@relation test9 + +@attribute attr_relational relational + @attribute attr_number integer +@end attr_relational + +@data 
+'0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n30\n31\n32\n33\n34\n35\n36\n37\n38\n39\n40\n41\n42\n43\n44\n45\n46\n47\n48\n49\n50\n51\n52\n53\n54\n55\n56\n57\n58\n59\n60\n61\n62\n63\n64\n65\n66\n67\n68\n69\n70\n71\n72\n73\n74\n75\n76\n77\n78\n79\n80\n81\n82\n83\n84\n85\n86\n87\n88\n89\n90\n91\n92\n93\n94\n95\n96\n97\n98\n99\n100\n101\n102\n103\n104\n105\n106\n107\n108\n109\n110\n111\n112\n113\n114\n115\n116\n117\n118\n119\n120\n121\n122\n123\n124\n125\n126\n127\n128\n129\n130\n131\n132\n133\n134\n135\n136\n137\n138\n139\n140\n141\n142\n143\n144\n145\n146\n147\n148\n149\n150\n151\n152\n153\n154\n155\n156\n157\n158\n159\n160\n161\n162\n163\n164\n165\n166\n167\n168\n169\n170\n171\n172\n173\n174\n175\n176\n177\n178\n179\n180\n181\n182\n183\n184\n185\n186\n187\n188\n189\n190\n191\n192\n193\n194\n195\n196\n197\n198\n199\n200\n201\n202\n203\n204\n205\n206\n207\n208\n209\n210\n211\n212\n213\n214\n215\n216\n217\n218\n219\n220\n221\n222\n223\n224\n225\n226\n227\n228\n229\n230\n231\n232\n233\n234\n235\n236\n237\n238\n239\n240\n241\n242\n243\n244\n245\n246\n247\n248\n249\n250\n251\n252\n253\n254\n255\n256\n257\n258\n259\n260\n261\n262\n263\n264\n265\n266\n267\n268\n269\n270\n271\n272\n273\n274\n275\n276\n277\n278\n279\n280\n281\n282\n283\n284\n285\n286\n287\n288\n289\n290\n291\n292\n293\n294\n295\n296\n297\n298\n299\n300\n301\n302\n303\n304\n305\n306\n307\n308\n309\n310\n311\n312\n313\n314\n315\n316\n317\n318\n319\n320\n321\n322\n323\n324\n325\n326\n327\n328\n329\n330\n331\n332\n333\n334\n335\n336\n337\n338\n339\n340\n341\n342\n343\n344\n345\n346\n347\n348\n349\n350\n351\n352\n353\n354\n355\n356\n357\n358\n359\n360\n361\n362\n363\n364\n365\n366\n367\n368\n369\n370\n371\n372\n373\n374\n375\n376\n377\n378\n379\n380\n381\n382\n383\n384\n385\n386\n387\n388\n389\n390\n391\n392\n393\n394\n395\n396\n397\n398\n399\n400\n401\n402\n403\n404\n405\n406\n407\n408\n409\n410\n411\n412\n413\n414\n415\n416\n417\n418\n419\n420\n421\n422\n423\n424\n425\n426\n427\n428\n429\n430\n431\n432\n433\n434\n435\n436\n437\n438\n439\n440\n441\n442\n443\n444\n445\n446\n447\n448\n449\n450\n451\n452\n453\n454\n455\n456\n457\n458\n459\n460\n461\n462\n463\n464\n465\n466\n467\n468\n469\n470\n471\n472\n473\n474\n475\n476\n477\n478\n479\n480\n481\n482\n483\n484\n485\n486\n487\n488\n489\n490\n491\n492\n493\n494\n495\n496\n497\n498\n499\n500\n501\n502\n503\n504\n505\n506\n507\n508\n509\n510\n511\n512\n513\n514\n515\n516\n517\n518\n519\n520\n521\n522\n523\n524\n525\n526\n527\n528\n529\n530\n531\n532\n533\n534\n535\n536\n537\n538\n539\n540\n541\n542\n543\n544\n545\n546\n547\n548\n549\n550\n551\n552\n553\n554\n555\n556\n557\n558\n559\n560\n561\n562\n563\n564\n565\n566\n567\n568\n569\n570\n571\n572\n573\n574\n575\n576\n577\n578\n579\n580\n581\n582\n583\n584\n585\n586\n587\n588\n589\n590\n591\n592\n593\n594\n595\n596\n597\n598\n599\n600\n601\n602\n603\n604\n605\n606\n607\n608\n609\n610\n611\n612\n613\n614\n615\n616\n617\n618\n619\n620\n621\n622\n623\n624\n625\n626\n627\n628\n629\n630\n631\n632\n633\n634\n635\n636\n637\n638\n639\n640\n641\n642\n643\n644\n645\n646\n647\n648\n649\n650\n651\n652\n653\n654\n655\n656\n657\n658\n659\n660\n661\n662\n663\n664\n665\n666\n667\n668\n669\n670\n671\n672\n673\n674\n675\n676\n677\n678\n679\n680\n681\n682\n683\n684\n685\n686\n687\n688\n689\n690\n691\n692\n693\n694\n695\n696\n697\n698\n699\n700\n701\n702\n703\n704\n705\n706\n707\n708\n709\n710\n711\n712\n713\n714\n715\n716\n717\n718\n719\n720\n721\n722\n723\n724\n725\n726\n727\n728\n729\n730\n731\n732
\n733\n734\n735\n736\n737\n738\n739\n740\n741\n742\n743\n744\n745\n746\n747\n748\n749\n750\n751\n752\n753\n754\n755\n756\n757\n758\n759\n760\n761\n762\n763\n764\n765\n766\n767\n768\n769\n770\n771\n772\n773\n774\n775\n776\n777\n778\n779\n780\n781\n782\n783\n784\n785\n786\n787\n788\n789\n790\n791\n792\n793\n794\n795\n796\n797\n798\n799\n800\n801\n802\n803\n804\n805\n806\n807\n808\n809\n810\n811\n812\n813\n814\n815\n816\n817\n818\n819\n820\n821\n822\n823\n824\n825\n826\n827\n828\n829\n830\n831\n832\n833\n834\n835\n836\n837\n838\n839\n840\n841\n842\n843\n844\n845\n846\n847\n848\n849\n850\n851\n852\n853\n854\n855\n856\n857\n858\n859\n860\n861\n862\n863\n864\n865\n866\n867\n868\n869\n870\n871\n872\n873\n874\n875\n876\n877\n878\n879\n880\n881\n882\n883\n884\n885\n886\n887\n888\n889\n890\n891\n892\n893\n894\n895\n896\n897\n898\n899\n900\n901\n902\n903\n904\n905\n906\n907\n908\n909\n910\n911\n912\n913\n914\n915\n916\n917\n918\n919\n920\n921\n922\n923\n924\n925\n926\n927\n928\n929\n930\n931\n932\n933\n934\n935\n936\n937\n938\n939\n940\n941\n942\n943\n944\n945\n946\n947\n948\n949\n950\n951\n952\n953\n954\n955\n956\n957\n958\n959\n960\n961\n962\n963\n964\n965\n966\n967\n968\n969\n970\n971\n972\n973\n974\n975\n976\n977\n978\n979\n980\n981\n982\n983\n984\n985\n986\n987\n988\n989\n990\n991\n992\n993\n994\n995\n996\n997\n998\n999\n1000\n1001\n1002\n1003\n1004\n1005\n1006\n1007\n1008\n1009\n1010\n1011\n1012\n1013\n1014\n1015\n1016\n1017\n1018\n1019\n1020\n1021\n1022\n1023\n1024\n1025\n1026\n1027\n1028\n1029\n1030\n1031\n1032\n1033\n1034\n1035\n1036\n1037\n1038\n1039\n1040\n1041\n1042\n1043\n1044\n1045\n1046\n1047\n1048\n1049\n1050\n1051\n1052\n1053\n1054\n1055\n1056\n1057\n1058\n1059\n1060\n1061\n1062\n1063\n1064\n1065\n1066\n1067\n1068\n1069\n1070\n1071\n1072\n1073\n1074\n1075\n1076\n1077\n1078\n1079\n1080\n1081\n1082\n1083\n1084\n1085\n1086\n1087\n1088\n1089\n1090\n1091\n1092\n1093\n1094\n1095\n1096\n1097\n1098\n1099\n1100\n1101\n1102\n1103\n1104\n1105\n1106\n1107\n1108\n1109\n1110\n1111\n1112\n1113\n1114\n1115\n1116\n1117\n1118\n1119\n1120\n1121\n1122\n1123\n1124\n1125\n1126\n1127\n1128\n1129\n1130\n1131\n1132\n1133\n1134\n1135\n1136\n1137\n1138\n1139\n1140\n1141\n1142\n1143\n1144\n1145\n1146\n1147\n1148\n1149\n1150\n1151\n1152\n1153\n1154\n1155\n1156\n1157\n1158\n1159\n1160\n1161\n1162\n1163\n1164\n1165\n1166\n1167\n1168\n1169\n1170\n1171\n1172\n1173\n1174\n1175\n1176\n1177\n1178\n1179\n1180\n1181\n1182\n1183\n1184\n1185\n1186\n1187\n1188\n1189\n1190\n1191\n1192\n1193\n1194\n1195\n1196\n1197\n1198\n1199\n1200\n1201\n1202\n1203\n1204\n1205\n1206\n1207\n1208\n1209\n1210\n1211\n1212\n1213\n1214\n1215\n1216\n1217\n1218\n1219\n1220\n1221\n1222\n1223\n1224\n1225\n1226\n1227\n1228\n1229\n1230\n1231\n1232\n1233\n1234\n1235\n1236\n1237\n1238\n1239\n1240\n1241\n1242\n1243\n1244\n1245\n1246\n1247\n1248\n1249\n1250\n1251\n1252\n1253\n1254\n1255\n1256\n1257\n1258\n1259\n1260\n1261\n1262\n1263\n1264\n1265\n1266\n1267\n1268\n1269\n1270\n1271\n1272\n1273\n1274\n1275\n1276\n1277\n1278\n1279\n1280\n1281\n1282\n1283\n1284\n1285\n1286\n1287\n1288\n1289\n1290\n1291\n1292\n1293\n1294\n1295\n1296\n1297\n1298\n1299\n1300\n1301\n1302\n1303\n1304\n1305\n1306\n1307\n1308\n1309\n1310\n1311\n1312\n1313\n1314\n1315\n1316\n1317\n1318\n1319\n1320\n1321\n1322\n1323\n1324\n1325\n1326\n1327\n1328\n1329\n1330\n1331\n1332\n1333\n1334\n1335\n1336\n1337\n1338\n1339\n1340\n1341\n1342\n1343\n1344\n1345\n1346\n1347\n1348\n1349\n1350\n1351\n1352\n1353\n1354\n1355\n1356\n1357\n1358\n1359\n1360\n1361\n1362\n1363\n1364\n1365\n1366\n1367\n1368\n1369
\n1370\n1371\n1372\n1373\n1374\n1375\n1376\n1377\n1378\n1379\n1380\n1381\n1382\n1383\n1384\n1385\n1386\n1387\n1388\n1389\n1390\n1391\n1392\n1393\n1394\n1395\n1396\n1397\n1398\n1399\n1400\n1401\n1402\n1403\n1404\n1405\n1406\n1407\n1408\n1409\n1410\n1411\n1412\n1413\n1414\n1415\n1416\n1417\n1418\n1419\n1420\n1421\n1422\n1423\n1424\n1425\n1426\n1427\n1428\n1429\n1430\n1431\n1432\n1433\n1434\n1435\n1436\n1437\n1438\n1439\n1440\n1441\n1442\n1443\n1444\n1445\n1446\n1447\n1448\n1449\n1450\n1451\n1452\n1453\n1454\n1455\n1456\n1457\n1458\n1459\n1460\n1461\n1462\n1463\n1464\n1465\n1466\n1467\n1468\n1469\n1470\n1471\n1472\n1473\n1474\n1475\n1476\n1477\n1478\n1479\n1480\n1481\n1482\n1483\n1484\n1485\n1486\n1487\n1488\n1489\n1490\n1491\n1492\n1493\n1494\n1495\n1496\n1497\n1498\n1499\n1500\n1501\n1502\n1503\n1504\n1505\n1506\n1507\n1508\n1509\n1510\n1511\n1512\n1513\n1514\n1515\n1516\n1517\n1518\n1519\n1520\n1521\n1522\n1523\n1524\n1525\n1526\n1527\n1528\n1529\n1530\n1531\n1532\n1533\n1534\n1535\n1536\n1537\n1538\n1539\n1540\n1541\n1542\n1543\n1544\n1545\n1546\n1547\n1548\n1549\n1550\n1551\n1552\n1553\n1554\n1555\n1556\n1557\n1558\n1559\n1560\n1561\n1562\n1563\n1564\n1565\n1566\n1567\n1568\n1569\n1570\n1571\n1572\n1573\n1574\n1575\n1576\n1577\n1578\n1579\n1580\n1581\n1582\n1583\n1584\n1585\n1586\n1587\n1588\n1589\n1590\n1591\n1592\n1593\n1594\n1595\n1596\n1597\n1598\n1599\n1600\n1601\n1602\n1603\n1604\n1605\n1606\n1607\n1608\n1609\n1610\n1611\n1612\n1613\n1614\n1615\n1616\n1617\n1618\n1619\n1620\n1621\n1622\n1623\n1624\n1625\n1626\n1627\n1628\n1629\n1630\n1631\n1632\n1633\n1634\n1635\n1636\n1637\n1638\n1639\n1640\n1641\n1642\n1643\n1644\n1645\n1646\n1647\n1648\n1649\n1650\n1651\n1652\n1653\n1654\n1655\n1656\n1657\n1658\n1659\n1660\n1661\n1662\n1663\n1664\n1665\n1666\n1667\n1668\n1669\n1670\n1671\n1672\n1673\n1674\n1675\n1676\n1677\n1678\n1679\n1680\n1681\n1682\n1683\n1684\n1685\n1686\n1687\n1688\n1689\n1690\n1691\n1692\n1693\n1694\n1695\n1696\n1697\n1698\n1699\n1700\n1701\n1702\n1703\n1704\n1705\n1706\n1707\n1708\n1709\n1710\n1711\n1712\n1713\n1714\n1715\n1716\n1717\n1718\n1719\n1720\n1721\n1722\n1723\n1724\n1725\n1726\n1727\n1728\n1729\n1730\n1731\n1732\n1733\n1734\n1735\n1736\n1737\n1738\n1739\n1740\n1741\n1742\n1743\n1744\n1745\n1746\n1747\n1748\n1749\n1750\n1751\n1752\n1753\n1754\n1755\n1756\n1757\n1758\n1759\n1760\n1761\n1762\n1763\n1764\n1765\n1766\n1767\n1768\n1769\n1770\n1771\n1772\n1773\n1774\n1775\n1776\n1777\n1778\n1779\n1780\n1781\n1782\n1783\n1784\n1785\n1786\n1787\n1788\n1789\n1790\n1791\n1792\n1793\n1794\n1795\n1796\n1797\n1798\n1799\n1800\n1801\n1802\n1803\n1804\n1805\n1806\n1807\n1808\n1809\n1810\n1811\n1812\n1813\n1814\n1815\n1816\n1817\n1818\n1819\n1820\n1821\n1822\n1823\n1824\n1825\n1826\n1827\n1828\n1829\n1830\n1831\n1832\n1833\n1834\n1835\n1836\n1837\n1838\n1839\n1840\n1841\n1842\n1843\n1844\n1845\n1846\n1847\n1848\n1849\n1850\n1851\n1852\n1853\n1854\n1855\n1856\n1857\n1858\n1859\n1860\n1861\n1862\n1863\n1864\n1865\n1866\n1867\n1868\n1869\n1870\n1871\n1872\n1873\n1874\n1875\n1876\n1877\n1878\n1879\n1880\n1881\n1882\n1883\n1884\n1885\n1886\n1887\n1888\n1889\n1890\n1891\n1892\n1893\n1894\n1895\n1896\n1897\n1898\n1899\n1900\n1901\n1902\n1903\n1904\n1905\n1906\n1907\n1908\n1909\n1910\n1911\n1912\n1913\n1914\n1915\n1916\n1917\n1918\n1919\n1920\n1921\n1922\n1923\n1924\n1925\n1926\n1927\n1928\n1929\n1930\n1931\n1932\n1933\n1934\n1935\n1936\n1937\n1938\n1939\n1940\n1941\n1942\n1943\n1944\n1945\n1946\n1947\n1948\n1949\n1950\n1951\n1952\n1953\n1954\n1955\n1956\n1957\n1958\n1959\n1960\n1961\n1
962\n1963\n1964\n1965\n1966\n1967\n1968\n1969\n1970\n1971\n1972\n1973\n1974\n1975\n1976\n1977\n1978\n1979\n1980\n1981\n1982\n1983\n1984\n1985\n1986\n1987\n1988\n1989\n1990\n1991\n1992\n1993\n1994\n1995\n1996\n1997\n1998\n1999\n2000\n2001\n2002\n2003\n2004\n2005\n2006\n2007\n2008\n2009\n2010\n2011\n2012\n2013\n2014\n2015\n2016\n2017\n2018\n2019\n2020\n2021\n2022\n2023\n2024\n2025\n2026\n2027\n2028\n2029\n2030\n2031\n2032\n2033\n2034\n2035\n2036\n2037\n2038\n2039\n2040\n2041\n2042\n2043\n2044\n2045\n2046\n2047\n2048\n2049\n2050\n2051\n2052\n2053\n2054\n2055\n2056\n2057\n2058\n2059\n2060\n2061\n2062\n2063\n2064\n2065\n2066\n2067\n2068\n2069\n2070\n2071\n2072\n2073\n2074\n2075\n2076\n2077\n2078\n2079\n2080\n2081\n2082\n2083\n2084\n2085\n2086\n2087\n2088\n2089\n2090\n2091\n2092\n2093\n2094\n2095\n2096\n2097\n2098\n2099\n2100\n2101\n2102\n2103\n2104\n2105\n2106\n2107\n2108\n2109\n2110\n2111\n2112\n2113\n2114\n2115\n2116\n2117\n2118\n2119\n2120\n2121\n2122\n2123\n2124\n2125\n2126\n2127\n2128\n2129\n2130\n2131\n2132\n2133\n2134\n2135\n2136\n2137\n2138\n2139\n2140\n2141\n2142\n2143\n2144\n2145\n2146\n2147\n2148\n2149\n2150\n2151\n2152\n2153\n2154\n2155\n2156\n2157\n2158\n2159\n2160\n2161\n2162\n2163\n2164\n2165\n2166\n2167\n2168\n2169\n2170\n2171\n2172\n2173\n2174\n2175\n2176\n2177\n2178\n2179\n2180\n2181\n2182\n2183\n2184\n2185\n2186\n2187\n2188\n2189\n2190\n2191\n2192\n2193\n2194\n2195\n2196\n2197\n2198\n2199\n2200\n2201\n2202\n2203\n2204\n2205\n2206\n2207\n2208\n2209\n2210\n2211\n2212\n2213\n2214\n2215\n2216\n2217\n2218\n2219\n2220\n2221\n2222\n2223\n2224\n2225\n2226\n2227\n2228\n2229\n2230\n2231\n2232\n2233\n2234\n2235\n2236\n2237\n2238\n2239\n2240\n2241\n2242\n2243\n2244\n2245\n2246\n2247\n2248\n2249\n2250\n2251\n2252\n2253\n2254\n2255\n2256\n2257\n2258\n2259\n2260\n2261\n2262\n2263\n2264\n2265\n2266\n2267\n2268\n2269\n2270\n2271\n2272\n2273\n2274\n2275\n2276\n2277\n2278\n2279\n2280\n2281\n2282\n2283\n2284\n2285\n2286\n2287\n2288\n2289\n2290\n2291\n2292\n2293\n2294\n2295\n2296\n2297\n2298\n2299\n2300\n2301\n2302\n2303\n2304\n2305\n2306\n2307\n2308\n2309\n2310\n2311\n2312\n2313\n2314\n2315\n2316\n2317\n2318\n2319\n2320\n2321\n2322\n2323\n2324\n2325\n2326\n2327\n2328\n2329\n2330\n2331\n2332\n2333\n2334\n2335\n2336\n2337\n2338\n2339\n2340\n2341\n2342\n2343\n2344\n2345\n2346\n2347\n2348\n2349\n2350\n2351\n2352\n2353\n2354\n2355\n2356\n2357\n2358\n2359\n2360\n2361\n2362\n2363\n2364\n2365\n2366\n2367\n2368\n2369\n2370\n2371\n2372\n2373\n2374\n2375\n2376\n2377\n2378\n2379\n2380\n2381\n2382\n2383\n2384\n2385\n2386\n2387\n2388\n2389\n2390\n2391\n2392\n2393\n2394\n2395\n2396\n2397\n2398\n2399\n2400\n2401\n2402\n2403\n2404\n2405\n2406\n2407\n2408\n2409\n2410\n2411\n2412\n2413\n2414\n2415\n2416\n2417\n2418\n2419\n2420\n2421\n2422\n2423\n2424\n2425\n2426\n2427\n2428\n2429\n2430\n2431\n2432\n2433\n2434\n2435\n2436\n2437\n2438\n2439\n2440\n2441\n2442\n2443\n2444\n2445\n2446\n2447\n2448\n2449\n2450\n2451\n2452\n2453\n2454\n2455\n2456\n2457\n2458\n2459\n2460\n2461\n2462\n2463\n2464\n2465\n2466\n2467\n2468\n2469\n2470\n2471\n2472\n2473\n2474\n2475\n2476\n2477\n2478\n2479\n2480\n2481\n2482\n2483\n2484\n2485\n2486\n2487\n2488\n2489\n2490\n2491\n2492\n2493\n2494\n2495\n2496\n2497\n2498\n2499\n2500\n2501\n2502\n2503\n2504\n2505\n2506\n2507\n2508\n2509\n2510\n2511\n2512\n2513\n2514\n2515\n2516\n2517\n2518\n2519\n2520\n2521\n2522\n2523\n2524\n2525\n2526\n2527\n2528\n2529\n2530\n2531\n2532\n2533\n2534\n2535\n2536\n2537\n2538\n2539\n2540\n2541\n2542\n2543\n2544\n2545\n2546\n2547\n2548\n2549\n2550\n2551\n2552\n2553\n2554
\n2555\n2556\n2557\n2558\n2559\n2560\n2561\n2562\n2563\n2564\n2565\n2566\n2567\n2568\n2569\n2570\n2571\n2572\n2573\n2574\n2575\n2576\n2577\n2578\n2579\n2580\n2581\n2582\n2583\n2584\n2585\n2586\n2587\n2588\n2589\n2590\n2591\n2592\n2593\n2594\n2595\n2596\n2597\n2598\n2599\n2600\n2601\n2602\n2603\n2604\n2605\n2606\n2607\n2608\n2609\n2610\n2611\n2612\n2613\n2614\n2615\n2616\n2617\n2618\n2619\n2620\n2621\n2622\n2623\n2624\n2625\n2626\n2627\n2628\n2629\n2630\n2631\n2632\n2633\n2634\n2635\n2636\n2637\n2638\n2639\n2640\n2641\n2642\n2643\n2644\n2645\n2646\n2647\n2648\n2649\n2650\n2651\n2652\n2653\n2654\n2655\n2656\n2657\n2658\n2659\n2660\n2661\n2662\n2663\n2664\n2665\n2666\n2667\n2668\n2669\n2670\n2671\n2672\n2673\n2674\n2675\n2676\n2677\n2678\n2679\n2680\n2681\n2682\n2683\n2684\n2685\n2686\n2687\n2688\n2689\n2690\n2691\n2692\n2693\n2694\n2695\n2696\n2697\n2698\n2699\n2700\n2701\n2702\n2703\n2704\n2705\n2706\n2707\n2708\n2709\n2710\n2711\n2712\n2713\n2714\n2715\n2716\n2717\n2718\n2719\n2720\n2721\n2722\n2723\n2724\n2725\n2726\n2727\n2728\n2729\n2730\n2731\n2732\n2733\n2734\n2735\n2736\n2737\n2738\n2739\n2740\n2741\n2742\n2743\n2744\n2745\n2746\n2747\n2748\n2749\n2750\n2751\n2752\n2753\n2754\n2755\n2756\n2757\n2758\n2759\n2760\n2761\n2762\n2763\n2764\n2765\n2766\n2767\n2768\n2769\n2770\n2771\n2772\n2773\n2774\n2775\n2776\n2777\n2778\n2779\n2780\n2781\n2782\n2783\n2784\n2785\n2786\n2787\n2788\n2789\n2790\n2791\n2792\n2793\n2794\n2795\n2796\n2797\n2798\n2799\n2800\n2801\n2802\n2803\n2804\n2805\n2806\n2807\n2808\n2809\n2810\n2811\n2812\n2813\n2814\n2815\n2816\n2817\n2818\n2819\n2820\n2821\n2822\n2823\n2824\n2825\n2826\n2827\n2828\n2829\n2830\n2831\n2832\n2833\n2834\n2835\n2836\n2837\n2838\n2839\n2840\n2841\n2842\n2843\n2844\n2845\n2846\n2847\n2848\n2849\n2850\n2851\n2852\n2853\n2854\n2855\n2856\n2857\n2858\n2859\n2860\n2861\n2862\n2863\n2864\n2865\n2866\n2867\n2868\n2869\n2870\n2871\n2872\n2873\n2874\n2875\n2876\n2877\n2878\n2879\n2880\n2881\n2882\n2883\n2884\n2885\n2886\n2887\n2888\n2889\n2890\n2891\n2892\n2893\n2894\n2895\n2896\n2897\n2898\n2899\n2900\n2901\n2902\n2903\n2904\n2905\n2906\n2907\n2908\n2909\n2910\n2911\n2912\n2913\n2914\n2915\n2916\n2917\n2918\n2919\n2920\n2921\n2922\n2923\n2924\n2925\n2926\n2927\n2928\n2929\n2930\n2931\n2932\n2933\n2934\n2935\n2936\n2937\n2938\n2939\n2940\n2941\n2942\n2943\n2944\n2945\n2946\n2947\n2948\n2949\n2950\n2951\n2952\n2953\n2954\n2955\n2956\n2957\n2958\n2959\n2960\n2961\n2962\n2963\n2964\n2965\n2966\n2967\n2968\n2969\n2970\n2971\n2972\n2973\n2974\n2975\n2976\n2977\n2978\n2979\n2980\n2981\n2982\n2983\n2984\n2985\n2986\n2987\n2988\n2989\n2990\n2991\n2992\n2993\n2994\n2995\n2996\n2997\n2998\n2999\n3000\n3001\n3002\n3003\n3004\n3005\n3006\n3007\n3008\n3009\n3010\n3011\n3012\n3013\n3014\n3015\n3016\n3017\n3018\n3019\n3020\n3021\n3022\n3023\n3024\n3025\n3026\n3027\n3028\n3029\n3030\n3031\n3032\n3033\n3034\n3035\n3036\n3037\n3038\n3039\n3040\n3041\n3042\n3043\n3044\n3045\n3046\n3047\n3048\n3049\n3050\n3051\n3052\n3053\n3054\n3055\n3056\n3057\n3058\n3059\n3060\n3061\n3062\n3063\n3064\n3065\n3066\n3067\n3068\n3069\n3070\n3071\n3072\n3073\n3074\n3075\n3076\n3077\n3078\n3079\n3080\n3081\n3082\n3083\n3084\n3085\n3086\n3087\n3088\n3089\n3090\n3091\n3092\n3093\n3094\n3095\n3096\n3097\n3098\n3099\n3100\n3101\n3102\n3103\n3104\n3105\n3106\n3107\n3108\n3109\n3110\n3111\n3112\n3113\n3114\n3115\n3116\n3117\n3118\n3119\n3120\n3121\n3122\n3123\n3124\n3125\n3126\n3127\n3128\n3129\n3130\n3131\n3132\n3133\n3134\n3135\n3136\n3137\n3138\n3139\n3140\n3141\n3142\n3143\n3144\n3145\n3146\n3
147\n3148\n3149\n3150\n3151\n3152\n3153\n3154\n3155\n3156\n3157\n3158\n3159\n3160\n3161\n3162\n3163\n3164\n3165\n3166\n3167\n3168\n3169\n3170\n3171\n3172\n3173\n3174\n3175\n3176\n3177\n3178\n3179\n3180\n3181\n3182\n3183\n3184\n3185\n3186\n3187\n3188\n3189\n3190\n3191\n3192\n3193\n3194\n3195\n3196\n3197\n3198\n3199\n3200\n3201\n3202\n3203\n3204\n3205\n3206\n3207\n3208\n3209\n3210\n3211\n3212\n3213\n3214\n3215\n3216\n3217\n3218\n3219\n3220\n3221\n3222\n3223\n3224\n3225\n3226\n3227\n3228\n3229\n3230\n3231\n3232\n3233\n3234\n3235\n3236\n3237\n3238\n3239\n3240\n3241\n3242\n3243\n3244\n3245\n3246\n3247\n3248\n3249\n3250\n3251\n3252\n3253\n3254\n3255\n3256\n3257\n3258\n3259\n3260\n3261\n3262\n3263\n3264\n3265\n3266\n3267\n3268\n3269\n3270\n3271\n3272\n3273\n3274\n3275\n3276\n3277\n3278\n3279\n3280\n3281\n3282\n3283\n3284\n3285\n3286\n3287\n3288\n3289\n3290\n3291\n3292\n3293\n3294\n3295\n3296\n3297\n3298\n3299\n3300\n3301\n3302\n3303\n3304\n3305\n3306\n3307\n3308\n3309\n3310\n3311\n3312\n3313\n3314\n3315\n3316\n3317\n3318\n3319\n3320\n3321\n3322\n3323\n3324\n3325\n3326\n3327\n3328\n3329\n3330\n3331\n3332\n3333\n3334\n3335\n3336\n3337\n3338\n3339\n3340\n3341\n3342\n3343\n3344\n3345\n3346\n3347\n3348\n3349\n3350\n3351\n3352\n3353\n3354\n3355\n3356\n3357\n3358\n3359\n3360\n3361\n3362\n3363\n3364\n3365\n3366\n3367\n3368\n3369\n3370\n3371\n3372\n3373\n3374\n3375\n3376\n3377\n3378\n3379\n3380\n3381\n3382\n3383\n3384\n3385\n3386\n3387\n3388\n3389\n3390\n3391\n3392\n3393\n3394\n3395\n3396\n3397\n3398\n3399\n3400\n3401\n3402\n3403\n3404\n3405\n3406\n3407\n3408\n3409\n3410\n3411\n3412\n3413\n3414\n3415\n3416\n3417\n3418\n3419\n3420\n3421\n3422\n3423\n3424\n3425\n3426\n3427\n3428\n3429\n3430\n3431\n3432\n3433\n3434\n3435\n3436\n3437\n3438\n3439\n3440\n3441\n3442\n3443\n3444\n3445\n3446\n3447\n3448\n3449\n3450\n3451\n3452\n3453\n3454\n3455\n3456\n3457\n3458\n3459\n3460\n3461\n3462\n3463\n3464\n3465\n3466\n3467\n3468\n3469\n3470\n3471\n3472\n3473\n3474\n3475\n3476\n3477\n3478\n3479\n3480\n3481\n3482\n3483\n3484\n3485\n3486\n3487\n3488\n3489\n3490\n3491\n3492\n3493\n3494\n3495\n3496\n3497\n3498\n3499\n3500\n3501\n3502\n3503\n3504\n3505\n3506\n3507\n3508\n3509\n3510\n3511\n3512\n3513\n3514\n3515\n3516\n3517\n3518\n3519\n3520\n3521\n3522\n3523\n3524\n3525\n3526\n3527\n3528\n3529\n3530\n3531\n3532\n3533\n3534\n3535\n3536\n3537\n3538\n3539\n3540\n3541\n3542\n3543\n3544\n3545\n3546\n3547\n3548\n3549\n3550\n3551\n3552\n3553\n3554\n3555\n3556\n3557\n3558\n3559\n3560\n3561\n3562\n3563\n3564\n3565\n3566\n3567\n3568\n3569\n3570\n3571\n3572\n3573\n3574\n3575\n3576\n3577\n3578\n3579\n3580\n3581\n3582\n3583\n3584\n3585\n3586\n3587\n3588\n3589\n3590\n3591\n3592\n3593\n3594\n3595\n3596\n3597\n3598\n3599\n3600\n3601\n3602\n3603\n3604\n3605\n3606\n3607\n3608\n3609\n3610\n3611\n3612\n3613\n3614\n3615\n3616\n3617\n3618\n3619\n3620\n3621\n3622\n3623\n3624\n3625\n3626\n3627\n3628\n3629\n3630\n3631\n3632\n3633\n3634\n3635\n3636\n3637\n3638\n3639\n3640\n3641\n3642\n3643\n3644\n3645\n3646\n3647\n3648\n3649\n3650\n3651\n3652\n3653\n3654\n3655\n3656\n3657\n3658\n3659\n3660\n3661\n3662\n3663\n3664\n3665\n3666\n3667\n3668\n3669\n3670\n3671\n3672\n3673\n3674\n3675\n3676\n3677\n3678\n3679\n3680\n3681\n3682\n3683\n3684\n3685\n3686\n3687\n3688\n3689\n3690\n3691\n3692\n3693\n3694\n3695\n3696\n3697\n3698\n3699\n3700\n3701\n3702\n3703\n3704\n3705\n3706\n3707\n3708\n3709\n3710\n3711\n3712\n3713\n3714\n3715\n3716\n3717\n3718\n3719\n3720\n3721\n3722\n3723\n3724\n3725\n3726\n3727\n3728\n3729\n3730\n3731\n3732\n3733\n3734\n3735\n3736\n3737\n3738\n3739
\n3740\n3741\n3742\n3743\n3744\n3745\n3746\n3747\n3748\n3749\n3750\n3751\n3752\n3753\n3754\n3755\n3756\n3757\n3758\n3759\n3760\n3761\n3762\n3763\n3764\n3765\n3766\n3767\n3768\n3769\n3770\n3771\n3772\n3773\n3774\n3775\n3776\n3777\n3778\n3779\n3780\n3781\n3782\n3783\n3784\n3785\n3786\n3787\n3788\n3789\n3790\n3791\n3792\n3793\n3794\n3795\n3796\n3797\n3798\n3799\n3800\n3801\n3802\n3803\n3804\n3805\n3806\n3807\n3808\n3809\n3810\n3811\n3812\n3813\n3814\n3815\n3816\n3817\n3818\n3819\n3820\n3821\n3822\n3823\n3824\n3825\n3826\n3827\n3828\n3829\n3830\n3831\n3832\n3833\n3834\n3835\n3836\n3837\n3838\n3839\n3840\n3841\n3842\n3843\n3844\n3845\n3846\n3847\n3848\n3849\n3850\n3851\n3852\n3853\n3854\n3855\n3856\n3857\n3858\n3859\n3860\n3861\n3862\n3863\n3864\n3865\n3866\n3867\n3868\n3869\n3870\n3871\n3872\n3873\n3874\n3875\n3876\n3877\n3878\n3879\n3880\n3881\n3882\n3883\n3884\n3885\n3886\n3887\n3888\n3889\n3890\n3891\n3892\n3893\n3894\n3895\n3896\n3897\n3898\n3899\n3900\n3901\n3902\n3903\n3904\n3905\n3906\n3907\n3908\n3909\n3910\n3911\n3912\n3913\n3914\n3915\n3916\n3917\n3918\n3919\n3920\n3921\n3922\n3923\n3924\n3925\n3926\n3927\n3928\n3929\n3930\n3931\n3932\n3933\n3934\n3935\n3936\n3937\n3938\n3939\n3940\n3941\n3942\n3943\n3944\n3945\n3946\n3947\n3948\n3949\n3950\n3951\n3952\n3953\n3954\n3955\n3956\n3957\n3958\n3959\n3960\n3961\n3962\n3963\n3964\n3965\n3966\n3967\n3968\n3969\n3970\n3971\n3972\n3973\n3974\n3975\n3976\n3977\n3978\n3979\n3980\n3981\n3982\n3983\n3984\n3985\n3986\n3987\n3988\n3989\n3990\n3991\n3992\n3993\n3994\n3995\n3996\n3997\n3998\n3999\n4000\n4001\n4002\n4003\n4004\n4005\n4006\n4007\n4008\n4009\n4010\n4011\n4012\n4013\n4014\n4015\n4016\n4017\n4018\n4019\n4020\n4021\n4022\n4023\n4024\n4025\n4026\n4027\n4028\n4029\n4030\n4031\n4032\n4033\n4034\n4035\n4036\n4037\n4038\n4039\n4040\n4041\n4042\n4043\n4044\n4045\n4046\n4047\n4048\n4049\n4050\n4051\n4052\n4053\n4054\n4055\n4056\n4057\n4058\n4059\n4060\n4061\n4062\n4063\n4064\n4065\n4066\n4067\n4068\n4069\n4070\n4071\n4072\n4073\n4074\n4075\n4076\n4077\n4078\n4079\n4080\n4081\n4082\n4083\n4084\n4085\n4086\n4087\n4088\n4089\n4090\n4091\n4092\n4093\n4094\n4095\n4096\n4097\n4098\n4099\n4100\n4101\n4102\n4103\n4104\n4105\n4106\n4107\n4108\n4109\n4110\n4111\n4112\n4113\n4114\n4115\n4116\n4117\n4118\n4119\n4120\n4121\n4122\n4123\n4124\n4125\n4126\n4127\n4128\n4129\n4130\n4131\n4132\n4133\n4134\n4135\n4136\n4137\n4138\n4139\n4140\n4141\n4142\n4143\n4144\n4145\n4146\n4147\n4148\n4149\n4150\n4151\n4152\n4153\n4154\n4155\n4156\n4157\n4158\n4159\n4160\n4161\n4162\n4163\n4164\n4165\n4166\n4167\n4168\n4169\n4170\n4171\n4172\n4173\n4174\n4175\n4176\n4177\n4178\n4179\n4180\n4181\n4182\n4183\n4184\n4185\n4186\n4187\n4188\n4189\n4190\n4191\n4192\n4193\n4194\n4195\n4196\n4197\n4198\n4199\n4200\n4201\n4202\n4203\n4204\n4205\n4206\n4207\n4208\n4209\n4210\n4211\n4212\n4213\n4214\n4215\n4216\n4217\n4218\n4219\n4220\n4221\n4222\n4223\n4224\n4225\n4226\n4227\n4228\n4229\n4230\n4231\n4232\n4233\n4234\n4235\n4236\n4237\n4238\n4239\n4240\n4241\n4242\n4243\n4244\n4245\n4246\n4247\n4248\n4249\n4250\n4251\n4252\n4253\n4254\n4255\n4256\n4257\n4258\n4259\n4260\n4261\n4262\n4263\n4264\n4265\n4266\n4267\n4268\n4269\n4270\n4271\n4272\n4273\n4274\n4275\n4276\n4277\n4278\n4279\n4280\n4281\n4282\n4283\n4284\n4285\n4286\n4287\n4288\n4289\n4290\n4291\n4292\n4293\n4294\n4295\n4296\n4297\n4298\n4299\n4300\n4301\n4302\n4303\n4304\n4305\n4306\n4307\n4308\n4309\n4310\n4311\n4312\n4313\n4314\n4315\n4316\n4317\n4318\n4319\n4320\n4321\n4322\n4323\n4324\n4325\n4326\n4327\n4328\n4329\n4330\n4331\n4
332\n4333\n4334\n4335\n4336\n4337\n4338\n4339\n4340\n4341\n4342\n4343\n4344\n4345\n4346\n4347\n4348\n4349\n4350\n4351\n4352\n4353\n4354\n4355\n4356\n4357\n4358\n4359\n4360\n4361\n4362\n4363\n4364\n4365\n4366\n4367\n4368\n4369\n4370\n4371\n4372\n4373\n4374\n4375\n4376\n4377\n4378\n4379\n4380\n4381\n4382\n4383\n4384\n4385\n4386\n4387\n4388\n4389\n4390\n4391\n4392\n4393\n4394\n4395\n4396\n4397\n4398\n4399\n4400\n4401\n4402\n4403\n4404\n4405\n4406\n4407\n4408\n4409\n4410\n4411\n4412\n4413\n4414\n4415\n4416\n4417\n4418\n4419\n4420\n4421\n4422\n4423\n4424\n4425\n4426\n4427\n4428\n4429\n4430\n4431\n4432\n4433\n4434\n4435\n4436\n4437\n4438\n4439\n4440\n4441\n4442\n4443\n4444\n4445\n4446\n4447\n4448\n4449\n4450\n4451\n4452\n4453\n4454\n4455\n4456\n4457\n4458\n4459\n4460\n4461\n4462\n4463\n4464\n4465\n4466\n4467\n4468\n4469\n4470\n4471\n4472\n4473\n4474\n4475\n4476\n4477\n4478\n4479\n4480\n4481\n4482\n4483\n4484\n4485\n4486\n4487\n4488\n4489\n4490\n4491\n4492\n4493\n4494\n4495\n4496\n4497\n4498\n4499\n4500\n4501\n4502\n4503\n4504\n4505\n4506\n4507\n4508\n4509\n4510\n4511\n4512\n4513\n4514\n4515\n4516\n4517\n4518\n4519\n4520\n4521\n4522\n4523\n4524\n4525\n4526\n4527\n4528\n4529\n4530\n4531\n4532\n4533\n4534\n4535\n4536\n4537\n4538\n4539\n4540\n4541\n4542\n4543\n4544\n4545\n4546\n4547\n4548\n4549\n4550\n4551\n4552\n4553\n4554\n4555\n4556\n4557\n4558\n4559\n4560\n4561\n4562\n4563\n4564\n4565\n4566\n4567\n4568\n4569\n4570\n4571\n4572\n4573\n4574\n4575\n4576\n4577\n4578\n4579\n4580\n4581\n4582\n4583\n4584\n4585\n4586\n4587\n4588\n4589\n4590\n4591\n4592\n4593\n4594\n4595\n4596\n4597\n4598\n4599\n4600\n4601\n4602\n4603\n4604\n4605\n4606\n4607\n4608\n4609\n4610\n4611\n4612\n4613\n4614\n4615\n4616\n4617\n4618\n4619\n4620\n4621\n4622\n4623\n4624\n4625\n4626\n4627\n4628\n4629\n4630\n4631\n4632\n4633\n4634\n4635\n4636\n4637\n4638\n4639\n4640\n4641\n4642\n4643\n4644\n4645\n4646\n4647\n4648\n4649\n4650\n4651\n4652\n4653\n4654\n4655\n4656\n4657\n4658\n4659\n4660\n4661\n4662\n4663\n4664\n4665\n4666\n4667\n4668\n4669\n4670\n4671\n4672\n4673\n4674\n4675\n4676\n4677\n4678\n4679\n4680\n4681\n4682\n4683\n4684\n4685\n4686\n4687\n4688\n4689\n4690\n4691\n4692\n4693\n4694\n4695\n4696\n4697\n4698\n4699\n4700\n4701\n4702\n4703\n4704\n4705\n4706\n4707\n4708\n4709\n4710\n4711\n4712\n4713\n4714\n4715\n4716\n4717\n4718\n4719\n4720\n4721\n4722\n4723\n4724\n4725\n4726\n4727\n4728\n4729\n4730\n4731\n4732\n4733\n4734\n4735\n4736\n4737\n4738\n4739\n4740\n4741\n4742\n4743\n4744\n4745\n4746\n4747\n4748\n4749\n4750\n4751\n4752\n4753\n4754\n4755\n4756\n4757\n4758\n4759\n4760\n4761\n4762\n4763\n4764\n4765\n4766\n4767\n4768\n4769\n4770\n4771\n4772\n4773\n4774\n4775\n4776\n4777\n4778\n4779\n4780\n4781\n4782\n4783\n4784\n4785\n4786\n4787\n4788\n4789\n4790\n4791\n4792\n4793\n4794\n4795\n4796\n4797\n4798\n4799\n4800\n4801\n4802\n4803\n4804\n4805\n4806\n4807\n4808\n4809\n4810\n4811\n4812\n4813\n4814\n4815\n4816\n4817\n4818\n4819\n4820\n4821\n4822\n4823\n4824\n4825\n4826\n4827\n4828\n4829\n4830\n4831\n4832\n4833\n4834\n4835\n4836\n4837\n4838\n4839\n4840\n4841\n4842\n4843\n4844\n4845\n4846\n4847\n4848\n4849\n4850\n4851\n4852\n4853\n4854\n4855\n4856\n4857\n4858\n4859\n4860\n4861\n4862\n4863\n4864\n4865\n4866\n4867\n4868\n4869\n4870\n4871\n4872\n4873\n4874\n4875\n4876\n4877\n4878\n4879\n4880\n4881\n4882\n4883\n4884\n4885\n4886\n4887\n4888\n4889\n4890\n4891\n4892\n4893\n4894\n4895\n4896\n4897\n4898\n4899\n4900\n4901\n4902\n4903\n4904\n4905\n4906\n4907\n4908\n4909\n4910\n4911\n4912\n4913\n4914\n4915\n4916\n4917\n4918\n4919\n4920\n4921\n4922\n4923\n4924
\n4925\n4926\n4927\n4928\n4929\n4930\n4931\n4932\n4933\n4934\n4935\n4936\n4937\n4938\n4939\n4940\n4941\n4942\n4943\n4944\n4945\n4946\n4947\n4948\n4949\n4950\n4951\n4952\n4953\n4954\n4955\n4956\n4957\n4958\n4959\n4960\n4961\n4962\n4963\n4964\n4965\n4966\n4967\n4968\n4969\n4970\n4971\n4972\n4973\n4974\n4975\n4976\n4977\n4978\n4979\n4980\n4981\n4982\n4983\n4984\n4985\n4986\n4987\n4988\n4989\n4990\n4991\n4992\n4993\n4994\n4995\n4996\n4997\n4998\n4999\n5000\n5001\n5002\n5003\n5004\n5005\n5006\n5007\n5008\n5009\n5010\n5011\n5012\n5013\n5014\n5015\n5016\n5017\n5018\n5019\n5020\n5021\n5022\n5023\n5024\n5025\n5026\n5027\n5028\n5029\n5030\n5031\n5032\n5033\n5034\n5035\n5036\n5037\n5038\n5039\n5040\n5041\n5042\n5043\n5044\n5045\n5046\n5047\n5048\n5049\n5050\n5051\n5052\n5053\n5054\n5055\n5056\n5057\n5058\n5059\n5060\n5061\n5062\n5063\n5064\n5065\n5066\n5067\n5068\n5069\n5070\n5071\n5072\n5073\n5074\n5075\n5076\n5077\n5078\n5079\n5080\n5081\n5082\n5083\n5084\n5085\n5086\n5087\n5088\n5089\n5090\n5091\n5092\n5093\n5094\n5095\n5096\n5097\n5098\n5099\n5100\n5101\n5102\n5103\n5104\n5105\n5106\n5107\n5108\n5109\n5110\n5111\n5112\n5113\n5114\n5115\n5116\n5117\n5118\n5119\n5120\n5121\n5122\n5123\n5124\n5125\n5126\n5127\n5128\n5129\n5130\n5131\n5132\n5133\n5134\n5135\n5136\n5137\n5138\n5139\n5140\n5141\n5142\n5143\n5144\n5145\n5146\n5147\n5148\n5149\n5150\n5151\n5152\n5153\n5154\n5155\n5156\n5157\n5158\n5159\n5160\n5161\n5162\n5163\n5164\n5165\n5166\n5167\n5168\n5169\n5170\n5171\n5172\n5173\n5174\n5175\n5176\n5177\n5178\n5179\n5180\n5181\n5182\n5183\n5184\n5185\n5186\n5187\n5188\n5189\n5190\n5191\n5192\n5193\n5194\n5195\n5196\n5197\n5198\n5199\n5200\n5201\n5202\n5203\n5204\n5205\n5206\n5207\n5208\n5209\n5210\n5211\n5212\n5213\n5214\n5215\n5216\n5217\n5218\n5219\n5220\n5221\n5222\n5223\n5224\n5225\n5226\n5227\n5228\n5229\n5230\n5231\n5232\n5233\n5234\n5235\n5236\n5237\n5238\n5239\n5240\n5241\n5242\n5243\n5244\n5245\n5246\n5247\n5248\n5249\n5250\n5251\n5252\n5253\n5254\n5255\n5256\n5257\n5258\n5259\n5260\n5261\n5262\n5263\n5264\n5265\n5266\n5267\n5268\n5269\n5270\n5271\n5272\n5273\n5274\n5275\n5276\n5277\n5278\n5279\n5280\n5281\n5282\n5283\n5284\n5285\n5286\n5287\n5288\n5289\n5290\n5291\n5292\n5293\n5294\n5295\n5296\n5297\n5298\n5299\n5300\n5301\n5302\n5303\n5304\n5305\n5306\n5307\n5308\n5309\n5310\n5311\n5312\n5313\n5314\n5315\n5316\n5317\n5318\n5319\n5320\n5321\n5322\n5323\n5324\n5325\n5326\n5327\n5328\n5329\n5330\n5331\n5332\n5333\n5334\n5335\n5336\n5337\n5338\n5339\n5340\n5341\n5342\n5343\n5344\n5345\n5346\n5347\n5348\n5349\n5350\n5351\n5352\n5353\n5354\n5355\n5356\n5357\n5358\n5359\n5360\n5361\n5362\n5363\n5364\n5365\n5366\n5367\n5368\n5369\n5370\n5371\n5372\n5373\n5374\n5375\n5376\n5377\n5378\n5379\n5380\n5381\n5382\n5383\n5384\n5385\n5386\n5387\n5388\n5389\n5390\n5391\n5392\n5393\n5394\n5395\n5396\n5397\n5398\n5399\n5400\n5401\n5402\n5403\n5404\n5405\n5406\n5407\n5408\n5409\n5410\n5411\n5412\n5413\n5414\n5415\n5416\n5417\n5418\n5419\n5420\n5421\n5422\n5423\n5424\n5425\n5426\n5427\n5428\n5429\n5430\n5431\n5432\n5433\n5434\n5435\n5436\n5437\n5438\n5439\n5440\n5441\n5442\n5443\n5444\n5445\n5446\n5447\n5448\n5449\n5450\n5451\n5452\n5453\n5454\n5455\n5456\n5457\n5458\n5459\n5460\n5461\n5462\n5463\n5464\n5465\n5466\n5467\n5468\n5469\n5470\n5471\n5472\n5473\n5474\n5475\n5476\n5477\n5478\n5479\n5480\n5481\n5482\n5483\n5484\n5485\n5486\n5487\n5488\n5489\n5490\n5491\n5492\n5493\n5494\n5495\n5496\n5497\n5498\n5499\n5500\n5501\n5502\n5503\n5504\n5505\n5506\n5507\n5508\n5509\n5510\n5511\n5512\n5513\n5514\n5515\n5516\n5
517\n5518\n5519\n5520\n5521\n5522\n5523\n5524\n5525\n5526\n5527\n5528\n5529\n5530\n5531\n5532\n5533\n5534\n5535\n5536\n5537\n5538\n5539\n5540\n5541\n5542\n5543\n5544\n5545\n5546\n5547\n5548\n5549\n5550\n5551\n5552\n5553\n5554\n5555\n5556\n5557\n5558\n5559\n5560\n5561\n5562\n5563\n5564\n5565\n5566\n5567\n5568\n5569\n5570\n5571\n5572\n5573\n5574\n5575\n5576\n5577\n5578\n5579\n5580\n5581\n5582\n5583\n5584\n5585\n5586\n5587\n5588\n5589\n5590\n5591\n5592\n5593\n5594\n5595\n5596\n5597\n5598\n5599\n5600\n5601\n5602\n5603\n5604\n5605\n5606\n5607\n5608\n5609\n5610\n5611\n5612\n5613\n5614\n5615\n5616\n5617\n5618\n5619\n5620\n5621\n5622\n5623\n5624\n5625\n5626\n5627\n5628\n5629\n5630\n5631\n5632\n5633\n5634\n5635\n5636\n5637\n5638\n5639\n5640\n5641\n5642\n5643\n5644\n5645\n5646\n5647\n5648\n5649\n5650\n5651\n5652\n5653\n5654\n5655\n5656\n5657\n5658\n5659\n5660\n5661\n5662\n5663\n5664\n5665\n5666\n5667\n5668\n5669\n5670\n5671\n5672\n5673\n5674\n5675\n5676\n5677\n5678\n5679\n5680\n5681\n5682\n5683\n5684\n5685\n5686\n5687\n5688\n5689\n5690\n5691\n5692\n5693\n5694\n5695\n5696\n5697\n5698\n5699\n5700\n5701\n5702\n5703\n5704\n5705\n5706\n5707\n5708\n5709\n5710\n5711\n5712\n5713\n5714\n5715\n5716\n5717\n5718\n5719\n5720\n5721\n5722\n5723\n5724\n5725\n5726\n5727\n5728\n5729\n5730\n5731\n5732\n5733\n5734\n5735\n5736\n5737\n5738\n5739\n5740\n5741\n5742\n5743\n5744\n5745\n5746\n5747\n5748\n5749\n5750\n5751\n5752\n5753\n5754\n5755\n5756\n5757\n5758\n5759\n5760\n5761\n5762\n5763\n5764\n5765\n5766\n5767\n5768\n5769\n5770\n5771\n5772\n5773\n5774\n5775\n5776\n5777\n5778\n5779\n5780\n5781\n5782\n5783\n5784\n5785\n5786\n5787\n5788\n5789\n5790\n5791\n5792\n5793\n5794\n5795\n5796\n5797\n5798\n5799\n5800\n5801\n5802\n5803\n5804\n5805\n5806\n5807\n5808\n5809\n5810\n5811\n5812\n5813\n5814\n5815\n5816\n5817\n5818\n5819\n5820\n5821\n5822\n5823\n5824\n5825\n5826\n5827\n5828\n5829\n5830\n5831\n5832\n5833\n5834\n5835\n5836\n5837\n5838\n5839\n5840\n5841\n5842\n5843\n5844\n5845\n5846\n5847\n5848\n5849\n5850\n5851\n5852\n5853\n5854\n5855\n5856\n5857\n5858\n5859\n5860\n5861\n5862\n5863\n5864\n5865\n5866\n5867\n5868\n5869\n5870\n5871\n5872\n5873\n5874\n5875\n5876\n5877\n5878\n5879\n5880\n5881\n5882\n5883\n5884\n5885\n5886\n5887\n5888\n5889\n5890\n5891\n5892\n5893\n5894\n5895\n5896\n5897\n5898\n5899\n5900\n5901\n5902\n5903\n5904\n5905\n5906\n5907\n5908\n5909\n5910\n5911\n5912\n5913\n5914\n5915\n5916\n5917\n5918\n5919\n5920\n5921\n5922\n5923\n5924\n5925\n5926\n5927\n5928\n5929\n5930\n5931\n5932\n5933\n5934\n5935\n5936\n5937\n5938\n5939\n5940\n5941\n5942\n5943\n5944\n5945\n5946\n5947\n5948\n5949\n5950\n5951\n5952\n5953\n5954\n5955\n5956\n5957\n5958\n5959\n5960\n5961\n5962\n5963\n5964\n5965\n5966\n5967\n5968\n5969\n5970\n5971\n5972\n5973\n5974\n5975\n5976\n5977\n5978\n5979\n5980\n5981\n5982\n5983\n5984\n5985\n5986\n5987\n5988\n5989\n5990\n5991\n5992\n5993\n5994\n5995\n5996\n5997\n5998\n5999\n6000\n6001\n6002\n6003\n6004\n6005\n6006\n6007\n6008\n6009\n6010\n6011\n6012\n6013\n6014\n6015\n6016\n6017\n6018\n6019\n6020\n6021\n6022\n6023\n6024\n6025\n6026\n6027\n6028\n6029\n6030\n6031\n6032\n6033\n6034\n6035\n6036\n6037\n6038\n6039\n6040\n6041\n6042\n6043\n6044\n6045\n6046\n6047\n6048\n6049\n6050\n6051\n6052\n6053\n6054\n6055\n6056\n6057\n6058\n6059\n6060\n6061\n6062\n6063\n6064\n6065\n6066\n6067\n6068\n6069\n6070\n6071\n6072\n6073\n6074\n6075\n6076\n6077\n6078\n6079\n6080\n6081\n6082\n6083\n6084\n6085\n6086\n6087\n6088\n6089\n6090\n6091\n6092\n6093\n6094\n6095\n6096\n6097\n6098\n6099\n6100\n6101\n6102\n6103\n6104\n6105\n6106\n6107\n6108\n6109
\n6110\n6111\n6112\n6113\n6114\n6115\n6116\n6117\n6118\n6119\n6120\n6121\n6122\n6123\n6124\n6125\n6126\n6127\n6128\n6129\n6130\n6131\n6132\n6133\n6134\n6135\n6136\n6137\n6138\n6139\n6140\n6141\n6142\n6143\n6144\n6145\n6146\n6147\n6148\n6149\n6150\n6151\n6152\n6153\n6154\n6155\n6156\n6157\n6158\n6159\n6160\n6161\n6162\n6163\n6164\n6165\n6166\n6167\n6168\n6169\n6170\n6171\n6172\n6173\n6174\n6175\n6176\n6177\n6178\n6179\n6180\n6181\n6182\n6183\n6184\n6185\n6186\n6187\n6188\n6189\n6190\n6191\n6192\n6193\n6194\n6195\n6196\n6197\n6198\n6199\n6200\n6201\n6202\n6203\n6204\n6205\n6206\n6207\n6208\n6209\n6210\n6211\n6212\n6213\n6214\n6215\n6216\n6217\n6218\n6219\n6220\n6221\n6222\n6223\n6224\n6225\n6226\n6227\n6228\n6229\n6230\n6231\n6232\n6233\n6234\n6235\n6236\n6237\n6238\n6239\n6240\n6241\n6242\n6243\n6244\n6245\n6246\n6247\n6248\n6249\n6250\n6251\n6252\n6253\n6254\n6255\n6256\n6257\n6258\n6259\n6260\n6261\n6262\n6263\n6264\n6265\n6266\n6267\n6268\n6269\n6270\n6271\n6272\n6273\n6274\n6275\n6276\n6277\n6278\n6279\n6280\n6281\n6282\n6283\n6284\n6285\n6286\n6287\n6288\n6289\n6290\n6291\n6292\n6293\n6294\n6295\n6296\n6297\n6298\n6299\n6300\n6301\n6302\n6303\n6304\n6305\n6306\n6307\n6308\n6309\n6310\n6311\n6312\n6313\n6314\n6315\n6316\n6317\n6318\n6319\n6320\n6321\n6322\n6323\n6324\n6325\n6326\n6327\n6328\n6329\n6330\n6331\n6332\n6333\n6334\n6335\n6336\n6337\n6338\n6339\n6340\n6341\n6342\n6343\n6344\n6345\n6346\n6347\n6348\n6349\n6350\n6351\n6352\n6353\n6354\n6355\n6356\n6357\n6358\n6359\n6360\n6361\n6362\n6363\n6364\n6365\n6366\n6367\n6368\n6369\n6370\n6371\n6372\n6373\n6374\n6375\n6376\n6377\n6378\n6379\n6380\n6381\n6382\n6383\n6384\n6385\n6386\n6387\n6388\n6389\n6390\n6391\n6392\n6393\n6394\n6395\n6396\n6397\n6398\n6399\n6400\n6401\n6402\n6403\n6404\n6405\n6406\n6407\n6408\n6409\n6410\n6411\n6412\n6413\n6414\n6415\n6416\n6417\n6418\n6419\n6420\n6421\n6422\n6423\n6424\n6425\n6426\n6427\n6428\n6429\n6430\n6431\n6432\n6433\n6434\n6435\n6436\n6437\n6438\n6439\n6440\n6441\n6442\n6443\n6444\n6445\n6446\n6447\n6448\n6449\n6450\n6451\n6452\n6453\n6454\n6455\n6456\n6457\n6458\n6459\n6460\n6461\n6462\n6463\n6464\n6465\n6466\n6467\n6468\n6469\n6470\n6471\n6472\n6473\n6474\n6475\n6476\n6477\n6478\n6479\n6480\n6481\n6482\n6483\n6484\n6485\n6486\n6487\n6488\n6489\n6490\n6491\n6492\n6493\n6494\n6495\n6496\n6497\n6498\n6499\n6500\n6501\n6502\n6503\n6504\n6505\n6506\n6507\n6508\n6509\n6510\n6511\n6512\n6513\n6514\n6515\n6516\n6517\n6518\n6519\n6520\n6521\n6522\n6523\n6524\n6525\n6526\n6527\n6528\n6529\n6530\n6531\n6532\n6533\n6534\n6535\n6536\n6537\n6538\n6539\n6540\n6541\n6542\n6543\n6544\n6545\n6546\n6547\n6548\n6549\n6550\n6551\n6552\n6553\n6554\n6555\n6556\n6557\n6558\n6559\n6560\n6561\n6562\n6563\n6564\n6565\n6566\n6567\n6568\n6569\n6570\n6571\n6572\n6573\n6574\n6575\n6576\n6577\n6578\n6579\n6580\n6581\n6582\n6583\n6584\n6585\n6586\n6587\n6588\n6589\n6590\n6591\n6592\n6593\n6594\n6595\n6596\n6597\n6598\n6599\n6600\n6601\n6602\n6603\n6604\n6605\n6606\n6607\n6608\n6609\n6610\n6611\n6612\n6613\n6614\n6615\n6616\n6617\n6618\n6619\n6620\n6621\n6622\n6623\n6624\n6625\n6626\n6627\n6628\n6629\n6630\n6631\n6632\n6633\n6634\n6635\n6636\n6637\n6638\n6639\n6640\n6641\n6642\n6643\n6644\n6645\n6646\n6647\n6648\n6649\n6650\n6651\n6652\n6653\n6654\n6655\n6656\n6657\n6658\n6659\n6660\n6661\n6662\n6663\n6664\n6665\n6666\n6667\n6668\n6669\n6670\n6671\n6672\n6673\n6674\n6675\n6676\n6677\n6678\n6679\n6680\n6681\n6682\n6683\n6684\n6685\n6686\n6687\n6688\n6689\n6690\n6691\n6692\n6693\n6694\n6695\n6696\n6697\n6698\n6699\n6700\n6701\n6
702\n6703\n6704\n6705\n6706\n6707\n6708\n6709\n6710\n6711\n6712\n6713\n6714\n6715\n6716\n6717\n6718\n6719\n6720\n6721\n6722\n6723\n6724\n6725\n6726\n6727\n6728\n6729\n6730\n6731\n6732\n6733\n6734\n6735\n6736\n6737\n6738\n6739\n6740\n6741\n6742\n6743\n6744\n6745\n6746\n6747\n6748\n6749\n6750\n6751\n6752\n6753\n6754\n6755\n6756\n6757\n6758\n6759\n6760\n6761\n6762\n6763\n6764\n6765\n6766\n6767\n6768\n6769\n6770\n6771\n6772\n6773\n6774\n6775\n6776\n6777\n6778\n6779\n6780\n6781\n6782\n6783\n6784\n6785\n6786\n6787\n6788\n6789\n6790\n6791\n6792\n6793\n6794\n6795\n6796\n6797\n6798\n6799\n6800\n6801\n6802\n6803\n6804\n6805\n6806\n6807\n6808\n6809\n6810\n6811\n6812\n6813\n6814\n6815\n6816\n6817\n6818\n6819\n6820\n6821\n6822\n6823\n6824\n6825\n6826\n6827\n6828\n6829\n6830\n6831\n6832\n6833\n6834\n6835\n6836\n6837\n6838\n6839\n6840\n6841\n6842\n6843\n6844\n6845\n6846\n6847\n6848\n6849\n6850\n6851\n6852\n6853\n6854\n6855\n6856\n6857\n6858\n6859\n6860\n6861\n6862\n6863\n6864\n6865\n6866\n6867\n6868\n6869\n6870\n6871\n6872\n6873\n6874\n6875\n6876\n6877\n6878\n6879\n6880\n6881\n6882\n6883\n6884\n6885\n6886\n6887\n6888\n6889\n6890\n6891\n6892\n6893\n6894\n6895\n6896\n6897\n6898\n6899\n6900\n6901\n6902\n6903\n6904\n6905\n6906\n6907\n6908\n6909\n6910\n6911\n6912\n6913\n6914\n6915\n6916\n6917\n6918\n6919\n6920\n6921\n6922\n6923\n6924\n6925\n6926\n6927\n6928\n6929\n6930\n6931\n6932\n6933\n6934\n6935\n6936\n6937\n6938\n6939\n6940\n6941\n6942\n6943\n6944\n6945\n6946\n6947\n6948\n6949\n6950\n6951\n6952\n6953\n6954\n6955\n6956\n6957\n6958\n6959\n6960\n6961\n6962\n6963\n6964\n6965\n6966\n6967\n6968\n6969\n6970\n6971\n6972\n6973\n6974\n6975\n6976\n6977\n6978\n6979\n6980\n6981\n6982\n6983\n6984\n6985\n6986\n6987\n6988\n6989\n6990\n6991\n6992\n6993\n6994\n6995\n6996\n6997\n6998\n6999\n7000\n7001\n7002\n7003\n7004\n7005\n7006\n7007\n7008\n7009\n7010\n7011\n7012\n7013\n7014\n7015\n7016\n7017\n7018\n7019\n7020\n7021\n7022\n7023\n7024\n7025\n7026\n7027\n7028\n7029\n7030\n7031\n7032\n7033\n7034\n7035\n7036\n7037\n7038\n7039\n7040\n7041\n7042\n7043\n7044\n7045\n7046\n7047\n7048\n7049\n7050\n7051\n7052\n7053\n7054\n7055\n7056\n7057\n7058\n7059\n7060\n7061\n7062\n7063\n7064\n7065\n7066\n7067\n7068\n7069\n7070\n7071\n7072\n7073\n7074\n7075\n7076\n7077\n7078\n7079\n7080\n7081\n7082\n7083\n7084\n7085\n7086\n7087\n7088\n7089\n7090\n7091\n7092\n7093\n7094\n7095\n7096\n7097\n7098\n7099\n7100\n7101\n7102\n7103\n7104\n7105\n7106\n7107\n7108\n7109\n7110\n7111\n7112\n7113\n7114\n7115\n7116\n7117\n7118\n7119\n7120\n7121\n7122\n7123\n7124\n7125\n7126\n7127\n7128\n7129\n7130\n7131\n7132\n7133\n7134\n7135\n7136\n7137\n7138\n7139\n7140\n7141\n7142\n7143\n7144\n7145\n7146\n7147\n7148\n7149\n7150\n7151\n7152\n7153\n7154\n7155\n7156\n7157\n7158\n7159\n7160\n7161\n7162\n7163\n7164\n7165\n7166\n7167\n7168\n7169\n7170\n7171\n7172\n7173\n7174\n7175\n7176\n7177\n7178\n7179\n7180\n7181\n7182\n7183\n7184\n7185\n7186\n7187\n7188\n7189\n7190\n7191\n7192\n7193\n7194\n7195\n7196\n7197\n7198\n7199\n7200\n7201\n7202\n7203\n7204\n7205\n7206\n7207\n7208\n7209\n7210\n7211\n7212\n7213\n7214\n7215\n7216\n7217\n7218\n7219\n7220\n7221\n7222\n7223\n7224\n7225\n7226\n7227\n7228\n7229\n7230\n7231\n7232\n7233\n7234\n7235\n7236\n7237\n7238\n7239\n7240\n7241\n7242\n7243\n7244\n7245\n7246\n7247\n7248\n7249\n7250\n7251\n7252\n7253\n7254\n7255\n7256\n7257\n7258\n7259\n7260\n7261\n7262\n7263\n7264\n7265\n7266\n7267\n7268\n7269\n7270\n7271\n7272\n7273\n7274\n7275\n7276\n7277\n7278\n7279\n7280\n7281\n7282\n7283\n7284\n7285\n7286\n7287\n7288\n7289\n7290\n7291\n7292\n7293\n7294
\n7295\n7296\n7297\n7298\n7299\n7300\n7301\n7302\n7303\n7304\n7305\n7306\n7307\n7308\n7309\n7310\n7311\n7312\n7313\n7314\n7315\n7316\n7317\n7318\n7319\n7320\n7321\n7322\n7323\n7324\n7325\n7326\n7327\n7328\n7329\n7330\n7331\n7332\n7333\n7334\n7335\n7336\n7337\n7338\n7339\n7340\n7341\n7342\n7343\n7344\n7345\n7346\n7347\n7348\n7349\n7350\n7351\n7352\n7353\n7354\n7355\n7356\n7357\n7358\n7359\n7360\n7361\n7362\n7363\n7364\n7365\n7366\n7367\n7368\n7369\n7370\n7371\n7372\n7373\n7374\n7375\n7376\n7377\n7378\n7379\n7380\n7381\n7382\n7383\n7384\n7385\n7386\n7387\n7388\n7389\n7390\n7391\n7392\n7393\n7394\n7395\n7396\n7397\n7398\n7399\n7400\n7401\n7402\n7403\n7404\n7405\n7406\n7407\n7408\n7409\n7410\n7411\n7412\n7413\n7414\n7415\n7416\n7417\n7418\n7419\n7420\n7421\n7422\n7423\n7424\n7425\n7426\n7427\n7428\n7429\n7430\n7431\n7432\n7433\n7434\n7435\n7436\n7437\n7438\n7439\n7440\n7441\n7442\n7443\n7444\n7445\n7446\n7447\n7448\n7449\n7450\n7451\n7452\n7453\n7454\n7455\n7456\n7457\n7458\n7459\n7460\n7461\n7462\n7463\n7464\n7465\n7466\n7467\n7468\n7469\n7470\n7471\n7472\n7473\n7474\n7475\n7476\n7477\n7478\n7479\n7480\n7481\n7482\n7483\n7484\n7485\n7486\n7487\n7488\n7489\n7490\n7491\n7492\n7493\n7494\n7495\n7496\n7497\n7498\n7499\n7500\n7501\n7502\n7503\n7504\n7505\n7506\n7507\n7508\n7509\n7510\n7511\n7512\n7513\n7514\n7515\n7516\n7517\n7518\n7519\n7520\n7521\n7522\n7523\n7524\n7525\n7526\n7527\n7528\n7529\n7530\n7531\n7532\n7533\n7534\n7535\n7536\n7537\n7538\n7539\n7540\n7541\n7542\n7543\n7544\n7545\n7546\n7547\n7548\n7549\n7550\n7551\n7552\n7553\n7554\n7555\n7556\n7557\n7558\n7559\n7560\n7561\n7562\n7563\n7564\n7565\n7566\n7567\n7568\n7569\n7570\n7571\n7572\n7573\n7574\n7575\n7576\n7577\n7578\n7579\n7580\n7581\n7582\n7583\n7584\n7585\n7586\n7587\n7588\n7589\n7590\n7591\n7592\n7593\n7594\n7595\n7596\n7597\n7598\n7599\n7600\n7601\n7602\n7603\n7604\n7605\n7606\n7607\n7608\n7609\n7610\n7611\n7612\n7613\n7614\n7615\n7616\n7617\n7618\n7619\n7620\n7621\n7622\n7623\n7624\n7625\n7626\n7627\n7628\n7629\n7630\n7631\n7632\n7633\n7634\n7635\n7636\n7637\n7638\n7639\n7640\n7641\n7642\n7643\n7644\n7645\n7646\n7647\n7648\n7649\n7650\n7651\n7652\n7653\n7654\n7655\n7656\n7657\n7658\n7659\n7660\n7661\n7662\n7663\n7664\n7665\n7666\n7667\n7668\n7669\n7670\n7671\n7672\n7673\n7674\n7675\n7676\n7677\n7678\n7679\n7680\n7681\n7682\n7683\n7684\n7685\n7686\n7687\n7688\n7689\n7690\n7691\n7692\n7693\n7694\n7695\n7696\n7697\n7698\n7699\n7700\n7701\n7702\n7703\n7704\n7705\n7706\n7707\n7708\n7709\n7710\n7711\n7712\n7713\n7714\n7715\n7716\n7717\n7718\n7719\n7720\n7721\n7722\n7723\n7724\n7725\n7726\n7727\n7728\n7729\n7730\n7731\n7732\n7733\n7734\n7735\n7736\n7737\n7738\n7739\n7740\n7741\n7742\n7743\n7744\n7745\n7746\n7747\n7748\n7749\n7750\n7751\n7752\n7753\n7754\n7755\n7756\n7757\n7758\n7759\n7760\n7761\n7762\n7763\n7764\n7765\n7766\n7767\n7768\n7769\n7770\n7771\n7772\n7773\n7774\n7775\n7776\n7777\n7778\n7779\n7780\n7781\n7782\n7783\n7784\n7785\n7786\n7787\n7788\n7789\n7790\n7791\n7792\n7793\n7794\n7795\n7796\n7797\n7798\n7799\n7800\n7801\n7802\n7803\n7804\n7805\n7806\n7807\n7808\n7809\n7810\n7811\n7812\n7813\n7814\n7815\n7816\n7817\n7818\n7819\n7820\n7821\n7822\n7823\n7824\n7825\n7826\n7827\n7828\n7829\n7830\n7831\n7832\n7833\n7834\n7835\n7836\n7837\n7838\n7839\n7840\n7841\n7842\n7843\n7844\n7845\n7846\n7847\n7848\n7849\n7850\n7851\n7852\n7853\n7854\n7855\n7856\n7857\n7858\n7859\n7860\n7861\n7862\n7863\n7864\n7865\n7866\n7867\n7868\n7869\n7870\n7871\n7872\n7873\n7874\n7875\n7876\n7877\n7878\n7879\n7880\n7881\n7882\n7883\n7884\n7885\n7886\n7
887\n7888\n7889\n7890\n7891\n7892\n7893\n7894\n7895\n7896\n7897\n7898\n7899\n7900\n7901\n7902\n7903\n7904\n7905\n7906\n7907\n7908\n7909\n7910\n7911\n7912\n7913\n7914\n7915\n7916\n7917\n7918\n7919\n7920\n7921\n7922\n7923\n7924\n7925\n7926\n7927\n7928\n7929\n7930\n7931\n7932\n7933\n7934\n7935\n7936\n7937\n7938\n7939\n7940\n7941\n7942\n7943\n7944\n7945\n7946\n7947\n7948\n7949\n7950\n7951\n7952\n7953\n7954\n7955\n7956\n7957\n7958\n7959\n7960\n7961\n7962\n7963\n7964\n7965\n7966\n7967\n7968\n7969\n7970\n7971\n7972\n7973\n7974\n7975\n7976\n7977\n7978\n7979\n7980\n7981\n7982\n7983\n7984\n7985\n7986\n7987\n7988\n7989\n7990\n7991\n7992\n7993\n7994\n7995\n7996\n7997\n7998\n7999\n8000\n8001\n8002\n8003\n8004\n8005\n8006\n8007\n8008\n8009\n8010\n8011\n8012\n8013\n8014\n8015\n8016\n8017\n8018\n8019\n8020\n8021\n8022\n8023\n8024\n8025\n8026\n8027\n8028\n8029\n8030\n8031\n8032\n8033\n8034\n8035\n8036\n8037\n8038\n8039\n8040\n8041\n8042\n8043\n8044\n8045\n8046\n8047\n8048\n8049\n8050\n8051\n8052\n8053\n8054\n8055\n8056\n8057\n8058\n8059\n8060\n8061\n8062\n8063\n8064\n8065\n8066\n8067\n8068\n8069\n8070\n8071\n8072\n8073\n8074\n8075\n8076\n8077\n8078\n8079\n8080\n8081\n8082\n8083\n8084\n8085\n8086\n8087\n8088\n8089\n8090\n8091\n8092\n8093\n8094\n8095\n8096\n8097\n8098\n8099\n8100\n8101\n8102\n8103\n8104\n8105\n8106\n8107\n8108\n8109\n8110\n8111\n8112\n8113\n8114\n8115\n8116\n8117\n8118\n8119\n8120\n8121\n8122\n8123\n8124\n8125\n8126\n8127\n8128\n8129\n8130\n8131\n8132\n8133\n8134\n8135\n8136\n8137\n8138\n8139\n8140\n8141\n8142\n8143\n8144\n8145\n8146\n8147\n8148\n8149\n8150\n8151\n8152\n8153\n8154\n8155\n8156\n8157\n8158\n8159\n8160\n8161\n8162\n8163\n8164\n8165\n8166\n8167\n8168\n8169\n8170\n8171\n8172\n8173\n8174\n8175\n8176\n8177\n8178\n8179\n8180\n8181\n8182\n8183\n8184\n8185\n8186\n8187\n8188\n8189\n8190\n8191\n8192\n8193\n8194\n8195\n8196\n8197\n8198\n8199\n8200\n8201\n8202\n8203\n8204\n8205\n8206\n8207\n8208\n8209\n8210\n8211\n8212\n8213\n8214\n8215\n8216\n8217\n8218\n8219\n8220\n8221\n8222\n8223\n8224\n8225\n8226\n8227\n8228\n8229\n8230\n8231\n8232\n8233\n8234\n8235\n8236\n8237\n8238\n8239\n8240\n8241\n8242\n8243\n8244\n8245\n8246\n8247\n8248\n8249\n8250\n8251\n8252\n8253\n8254\n8255\n8256\n8257\n8258\n8259\n8260\n8261\n8262\n8263\n8264\n8265\n8266\n8267\n8268\n8269\n8270\n8271\n8272\n8273\n8274\n8275\n8276\n8277\n8278\n8279\n8280\n8281\n8282\n8283\n8284\n8285\n8286\n8287\n8288\n8289\n8290\n8291\n8292\n8293\n8294\n8295\n8296\n8297\n8298\n8299\n8300\n8301\n8302\n8303\n8304\n8305\n8306\n8307\n8308\n8309\n8310\n8311\n8312\n8313\n8314\n8315\n8316\n8317\n8318\n8319\n8320\n8321\n8322\n8323\n8324\n8325\n8326\n8327\n8328\n8329\n8330\n8331\n8332\n8333\n8334\n8335\n8336\n8337\n8338\n8339\n8340\n8341\n8342\n8343\n8344\n8345\n8346\n8347\n8348\n8349\n8350\n8351\n8352\n8353\n8354\n8355\n8356\n8357\n8358\n8359\n8360\n8361\n8362\n8363\n8364\n8365\n8366\n8367\n8368\n8369\n8370\n8371\n8372\n8373\n8374\n8375\n8376\n8377\n8378\n8379\n8380\n8381\n8382\n8383\n8384\n8385\n8386\n8387\n8388\n8389\n8390\n8391\n8392\n8393\n8394\n8395\n8396\n8397\n8398\n8399\n8400\n8401\n8402\n8403\n8404\n8405\n8406\n8407\n8408\n8409\n8410\n8411\n8412\n8413\n8414\n8415\n8416\n8417\n8418\n8419\n8420\n8421\n8422\n8423\n8424\n8425\n8426\n8427\n8428\n8429\n8430\n8431\n8432\n8433\n8434\n8435\n8436\n8437\n8438\n8439\n8440\n8441\n8442\n8443\n8444\n8445\n8446\n8447\n8448\n8449\n8450\n8451\n8452\n8453\n8454\n8455\n8456\n8457\n8458\n8459\n8460\n8461\n8462\n8463\n8464\n8465\n8466\n8467\n8468\n8469\n8470\n8471\n8472\n8473\n8474\n8475\n8476\n8477\n8478\n8479
\n8480\n8481\n8482\n8483\n8484\n8485\n8486\n8487\n8488\n8489\n8490\n8491\n8492\n8493\n8494\n8495\n8496\n8497\n8498\n8499\n8500\n8501\n8502\n8503\n8504\n8505\n8506\n8507\n8508\n8509\n8510\n8511\n8512\n8513\n8514\n8515\n8516\n8517\n8518\n8519\n8520\n8521\n8522\n8523\n8524\n8525\n8526\n8527\n8528\n8529\n8530\n8531\n8532\n8533\n8534\n8535\n8536\n8537\n8538\n8539\n8540\n8541\n8542\n8543\n8544\n8545\n8546\n8547\n8548\n8549\n8550\n8551\n8552\n8553\n8554\n8555\n8556\n8557\n8558\n8559\n8560\n8561\n8562\n8563\n8564\n8565\n8566\n8567\n8568\n8569\n8570\n8571\n8572\n8573\n8574\n8575\n8576\n8577\n8578\n8579\n8580\n8581\n8582\n8583\n8584\n8585\n8586\n8587\n8588\n8589\n8590\n8591\n8592\n8593\n8594\n8595\n8596\n8597\n8598\n8599\n8600\n8601\n8602\n8603\n8604\n8605\n8606\n8607\n8608\n8609\n8610\n8611\n8612\n8613\n8614\n8615\n8616\n8617\n8618\n8619\n8620\n8621\n8622\n8623\n8624\n8625\n8626\n8627\n8628\n8629\n8630\n8631\n8632\n8633\n8634\n8635\n8636\n8637\n8638\n8639\n8640\n8641\n8642\n8643\n8644\n8645\n8646\n8647\n8648\n8649\n8650\n8651\n8652\n8653\n8654\n8655\n8656\n8657\n8658\n8659\n8660\n8661\n8662\n8663\n8664\n8665\n8666\n8667\n8668\n8669\n8670\n8671\n8672\n8673\n8674\n8675\n8676\n8677\n8678\n8679\n8680\n8681\n8682\n8683\n8684\n8685\n8686\n8687\n8688\n8689\n8690\n8691\n8692\n8693\n8694\n8695\n8696\n8697\n8698\n8699\n8700\n8701\n8702\n8703\n8704\n8705\n8706\n8707\n8708\n8709\n8710\n8711\n8712\n8713\n8714\n8715\n8716\n8717\n8718\n8719\n8720\n8721\n8722\n8723\n8724\n8725\n8726\n8727\n8728\n8729\n8730\n8731\n8732\n8733\n8734\n8735\n8736\n8737\n8738\n8739\n8740\n8741\n8742\n8743\n8744\n8745\n8746\n8747\n8748\n8749\n8750\n8751\n8752\n8753\n8754\n8755\n8756\n8757\n8758\n8759\n8760\n8761\n8762\n8763\n8764\n8765\n8766\n8767\n8768\n8769\n8770\n8771\n8772\n8773\n8774\n8775\n8776\n8777\n8778\n8779\n8780\n8781\n8782\n8783\n8784\n8785\n8786\n8787\n8788\n8789\n8790\n8791\n8792\n8793\n8794\n8795\n8796\n8797\n8798\n8799\n8800\n8801\n8802\n8803\n8804\n8805\n8806\n8807\n8808\n8809\n8810\n8811\n8812\n8813\n8814\n8815\n8816\n8817\n8818\n8819\n8820\n8821\n8822\n8823\n8824\n8825\n8826\n8827\n8828\n8829\n8830\n8831\n8832\n8833\n8834\n8835\n8836\n8837\n8838\n8839\n8840\n8841\n8842\n8843\n8844\n8845\n8846\n8847\n8848\n8849\n8850\n8851\n8852\n8853\n8854\n8855\n8856\n8857\n8858\n8859\n8860\n8861\n8862\n8863\n8864\n8865\n8866\n8867\n8868\n8869\n8870\n8871\n8872\n8873\n8874\n8875\n8876\n8877\n8878\n8879\n8880\n8881\n8882\n8883\n8884\n8885\n8886\n8887\n8888\n8889\n8890\n8891\n8892\n8893\n8894\n8895\n8896\n8897\n8898\n8899\n8900\n8901\n8902\n8903\n8904\n8905\n8906\n8907\n8908\n8909\n8910\n8911\n8912\n8913\n8914\n8915\n8916\n8917\n8918\n8919\n8920\n8921\n8922\n8923\n8924\n8925\n8926\n8927\n8928\n8929\n8930\n8931\n8932\n8933\n8934\n8935\n8936\n8937\n8938\n8939\n8940\n8941\n8942\n8943\n8944\n8945\n8946\n8947\n8948\n8949\n8950\n8951\n8952\n8953\n8954\n8955\n8956\n8957\n8958\n8959\n8960\n8961\n8962\n8963\n8964\n8965\n8966\n8967\n8968\n8969\n8970\n8971\n8972\n8973\n8974\n8975\n8976\n8977\n8978\n8979\n8980\n8981\n8982\n8983\n8984\n8985\n8986\n8987\n8988\n8989\n8990\n8991\n8992\n8993\n8994\n8995\n8996\n8997\n8998\n8999\n9000\n9001\n9002\n9003\n9004\n9005\n9006\n9007\n9008\n9009\n9010\n9011\n9012\n9013\n9014\n9015\n9016\n9017\n9018\n9019\n9020\n9021\n9022\n9023\n9024\n9025\n9026\n9027\n9028\n9029\n9030\n9031\n9032\n9033\n9034\n9035\n9036\n9037\n9038\n9039\n9040\n9041\n9042\n9043\n9044\n9045\n9046\n9047\n9048\n9049\n9050\n9051\n9052\n9053\n9054\n9055\n9056\n9057\n9058\n9059\n9060\n9061\n9062\n9063\n9064\n9065\n9066\n9067\n9068\n9069\n9070\n9071\n9
[added data file content elided: a single run of consecutive integers, one per line, continuing here from 9072 through 18345 and onward]
8346\n18347\n18348\n18349\n18350\n18351\n18352\n18353\n18354\n18355\n18356\n18357\n18358\n18359\n18360\n18361\n18362\n18363\n18364\n18365\n18366\n18367\n18368\n18369\n18370\n18371\n18372\n18373\n18374\n18375\n18376\n18377\n18378\n18379\n18380\n18381\n18382\n18383\n18384\n18385\n18386\n18387\n18388\n18389\n18390\n18391\n18392\n18393\n18394\n18395\n18396\n18397\n18398\n18399\n18400\n18401\n18402\n18403\n18404\n18405\n18406\n18407\n18408\n18409\n18410\n18411\n18412\n18413\n18414\n18415\n18416\n18417\n18418\n18419\n18420\n18421\n18422\n18423\n18424\n18425\n18426\n18427\n18428\n18429\n18430\n18431\n18432\n18433\n18434\n18435\n18436\n18437\n18438\n18439\n18440\n18441\n18442\n18443\n18444\n18445\n18446\n18447\n18448\n18449\n18450\n18451\n18452\n18453\n18454\n18455\n18456\n18457\n18458\n18459\n18460\n18461\n18462\n18463\n18464\n18465\n18466\n18467\n18468\n18469\n18470\n18471\n18472\n18473\n18474\n18475\n18476\n18477\n18478\n18479\n18480\n18481\n18482\n18483\n18484\n18485\n18486\n18487\n18488\n18489\n18490\n18491\n18492\n18493\n18494\n18495\n18496\n18497\n18498\n18499\n18500\n18501\n18502\n18503\n18504\n18505\n18506\n18507\n18508\n18509\n18510\n18511\n18512\n18513\n18514\n18515\n18516\n18517\n18518\n18519\n18520\n18521\n18522\n18523\n18524\n18525\n18526\n18527\n18528\n18529\n18530\n18531\n18532\n18533\n18534\n18535\n18536\n18537\n18538\n18539\n18540\n18541\n18542\n18543\n18544\n18545\n18546\n18547\n18548\n18549\n18550\n18551\n18552\n18553\n18554\n18555\n18556\n18557\n18558\n18559\n18560\n18561\n18562\n18563\n18564\n18565\n18566\n18567\n18568\n18569\n18570\n18571\n18572\n18573\n18574\n18575\n18576\n18577\n18578\n18579\n18580\n18581\n18582\n18583\n18584\n18585\n18586\n18587\n18588\n18589\n18590\n18591\n18592\n18593\n18594\n18595\n18596\n18597\n18598\n18599\n18600\n18601\n18602\n18603\n18604\n18605\n18606\n18607\n18608\n18609\n18610\n18611\n18612\n18613\n18614\n18615\n18616\n18617\n18618\n18619\n18620\n18621\n18622\n18623\n18624\n18625\n18626\n18627\n18628\n18629\n18630\n18631\n18632\n18633\n18634\n18635\n18636\n18637\n18638\n18639\n18640\n18641\n18642\n18643\n18644\n18645\n18646\n18647\n18648\n18649\n18650\n18651\n18652\n18653\n18654\n18655\n18656\n18657\n18658\n18659\n18660\n18661\n18662\n18663\n18664\n18665\n18666\n18667\n18668\n18669\n18670\n18671\n18672\n18673\n18674\n18675\n18676\n18677\n18678\n18679\n18680\n18681\n18682\n18683\n18684\n18685\n18686\n18687\n18688\n18689\n18690\n18691\n18692\n18693\n18694\n18695\n18696\n18697\n18698\n18699\n18700\n18701\n18702\n18703\n18704\n18705\n18706\n18707\n18708\n18709\n18710\n18711\n18712\n18713\n18714\n18715\n18716\n18717\n18718\n18719\n18720\n18721\n18722\n18723\n18724\n18725\n18726\n18727\n18728\n18729\n18730\n18731\n18732\n18733\n18734\n18735\n18736\n18737\n18738\n18739\n18740\n18741\n18742\n18743\n18744\n18745\n18746\n18747\n18748\n18749\n18750\n18751\n18752\n18753\n18754\n18755\n18756\n18757\n18758\n18759\n18760\n18761\n18762\n18763\n18764\n18765\n18766\n18767\n18768\n18769\n18770\n18771\n18772\n18773\n18774\n18775\n18776\n18777\n18778\n18779\n18780\n18781\n18782\n18783\n18784\n18785\n18786\n18787\n18788\n18789\n18790\n18791\n18792\n18793\n18794\n18795\n18796\n18797\n18798\n18799\n18800\n18801\n18802\n18803\n18804\n18805\n18806\n18807\n18808\n18809\n18810\n18811\n18812\n18813\n18814\n18815\n18816\n18817\n18818\n18819\n18820\n18821\n18822\n18823\n18824\n18825\n18826\n18827\n18828\n18829\n18830\n18831\n18832\n18833\n18834\n18835\n18836\n18837\n18838\n18839\n18840\n18841\n18842\n18843\n18844\n18845\n18846\n18847\n18848\n18849\n18850\n18851\n18852\n18853\n
18854\n18855\n18856\n18857\n18858\n18859\n18860\n18861\n18862\n18863\n18864\n18865\n18866\n18867\n18868\n18869\n18870\n18871\n18872\n18873\n18874\n18875\n18876\n18877\n18878\n18879\n18880\n18881\n18882\n18883\n18884\n18885\n18886\n18887\n18888\n18889\n18890\n18891\n18892\n18893\n18894\n18895\n18896\n18897\n18898\n18899\n18900\n18901\n18902\n18903\n18904\n18905\n18906\n18907\n18908\n18909\n18910\n18911\n18912\n18913\n18914\n18915\n18916\n18917\n18918\n18919\n18920\n18921\n18922\n18923\n18924\n18925\n18926\n18927\n18928\n18929\n18930\n18931\n18932\n18933\n18934\n18935\n18936\n18937\n18938\n18939\n18940\n18941\n18942\n18943\n18944\n18945\n18946\n18947\n18948\n18949\n18950\n18951\n18952\n18953\n18954\n18955\n18956\n18957\n18958\n18959\n18960\n18961\n18962\n18963\n18964\n18965\n18966\n18967\n18968\n18969\n18970\n18971\n18972\n18973\n18974\n18975\n18976\n18977\n18978\n18979\n18980\n18981\n18982\n18983\n18984\n18985\n18986\n18987\n18988\n18989\n18990\n18991\n18992\n18993\n18994\n18995\n18996\n18997\n18998\n18999\n19000\n19001\n19002\n19003\n19004\n19005\n19006\n19007\n19008\n19009\n19010\n19011\n19012\n19013\n19014\n19015\n19016\n19017\n19018\n19019\n19020\n19021\n19022\n19023\n19024\n19025\n19026\n19027\n19028\n19029\n19030\n19031\n19032\n19033\n19034\n19035\n19036\n19037\n19038\n19039\n19040\n19041\n19042\n19043\n19044\n19045\n19046\n19047\n19048\n19049\n19050\n19051\n19052\n19053\n19054\n19055\n19056\n19057\n19058\n19059\n19060\n19061\n19062\n19063\n19064\n19065\n19066\n19067\n19068\n19069\n19070\n19071\n19072\n19073\n19074\n19075\n19076\n19077\n19078\n19079\n19080\n19081\n19082\n19083\n19084\n19085\n19086\n19087\n19088\n19089\n19090\n19091\n19092\n19093\n19094\n19095\n19096\n19097\n19098\n19099\n19100\n19101\n19102\n19103\n19104\n19105\n19106\n19107\n19108\n19109\n19110\n19111\n19112\n19113\n19114\n19115\n19116\n19117\n19118\n19119\n19120\n19121\n19122\n19123\n19124\n19125\n19126\n19127\n19128\n19129\n19130\n19131\n19132\n19133\n19134\n19135\n19136\n19137\n19138\n19139\n19140\n19141\n19142\n19143\n19144\n19145\n19146\n19147\n19148\n19149\n19150\n19151\n19152\n19153\n19154\n19155\n19156\n19157\n19158\n19159\n19160\n19161\n19162\n19163\n19164\n19165\n19166\n19167\n19168\n19169\n19170\n19171\n19172\n19173\n19174\n19175\n19176\n19177\n19178\n19179\n19180\n19181\n19182\n19183\n19184\n19185\n19186\n19187\n19188\n19189\n19190\n19191\n19192\n19193\n19194\n19195\n19196\n19197\n19198\n19199\n19200\n19201\n19202\n19203\n19204\n19205\n19206\n19207\n19208\n19209\n19210\n19211\n19212\n19213\n19214\n19215\n19216\n19217\n19218\n19219\n19220\n19221\n19222\n19223\n19224\n19225\n19226\n19227\n19228\n19229\n19230\n19231\n19232\n19233\n19234\n19235\n19236\n19237\n19238\n19239\n19240\n19241\n19242\n19243\n19244\n19245\n19246\n19247\n19248\n19249\n19250\n19251\n19252\n19253\n19254\n19255\n19256\n19257\n19258\n19259\n19260\n19261\n19262\n19263\n19264\n19265\n19266\n19267\n19268\n19269\n19270\n19271\n19272\n19273\n19274\n19275\n19276\n19277\n19278\n19279\n19280\n19281\n19282\n19283\n19284\n19285\n19286\n19287\n19288\n19289\n19290\n19291\n19292\n19293\n19294\n19295\n19296\n19297\n19298\n19299\n19300\n19301\n19302\n19303\n19304\n19305\n19306\n19307\n19308\n19309\n19310\n19311\n19312\n19313\n19314\n19315\n19316\n19317\n19318\n19319\n19320\n19321\n19322\n19323\n19324\n19325\n19326\n19327\n19328\n19329\n19330\n19331\n19332\n19333\n19334\n19335\n19336\n19337\n19338\n19339\n19340\n19341\n19342\n19343\n19344\n19345\n19346\n19347\n19348\n19349\n19350\n19351\n19352\n19353\n19354\n19355\n19356\n19357\n19358\n19359\n19360\n19361\
n19362\n19363\n19364\n19365\n19366\n19367\n19368\n19369\n19370\n19371\n19372\n19373\n19374\n19375\n19376\n19377\n19378\n19379\n19380\n19381\n19382\n19383\n19384\n19385\n19386\n19387\n19388\n19389\n19390\n19391\n19392\n19393\n19394\n19395\n19396\n19397\n19398\n19399\n19400\n19401\n19402\n19403\n19404\n19405\n19406\n19407\n19408\n19409\n19410\n19411\n19412\n19413\n19414\n19415\n19416\n19417\n19418\n19419\n19420\n19421\n19422\n19423\n19424\n19425\n19426\n19427\n19428\n19429\n19430\n19431\n19432\n19433\n19434\n19435\n19436\n19437\n19438\n19439\n19440\n19441\n19442\n19443\n19444\n19445\n19446\n19447\n19448\n19449\n19450\n19451\n19452\n19453\n19454\n19455\n19456\n19457\n19458\n19459\n19460\n19461\n19462\n19463\n19464\n19465\n19466\n19467\n19468\n19469\n19470\n19471\n19472\n19473\n19474\n19475\n19476\n19477\n19478\n19479\n19480\n19481\n19482\n19483\n19484\n19485\n19486\n19487\n19488\n19489\n19490\n19491\n19492\n19493\n19494\n19495\n19496\n19497\n19498\n19499\n19500\n19501\n19502\n19503\n19504\n19505\n19506\n19507\n19508\n19509\n19510\n19511\n19512\n19513\n19514\n19515\n19516\n19517\n19518\n19519\n19520\n19521\n19522\n19523\n19524\n19525\n19526\n19527\n19528\n19529\n19530\n19531\n19532\n19533\n19534\n19535\n19536\n19537\n19538\n19539\n19540\n19541\n19542\n19543\n19544\n19545\n19546\n19547\n19548\n19549\n19550\n19551\n19552\n19553\n19554\n19555\n19556\n19557\n19558\n19559\n19560\n19561\n19562\n19563\n19564\n19565\n19566\n19567\n19568\n19569\n19570\n19571\n19572\n19573\n19574\n19575\n19576\n19577\n19578\n19579\n19580\n19581\n19582\n19583\n19584\n19585\n19586\n19587\n19588\n19589\n19590\n19591\n19592\n19593\n19594\n19595\n19596\n19597\n19598\n19599\n19600\n19601\n19602\n19603\n19604\n19605\n19606\n19607\n19608\n19609\n19610\n19611\n19612\n19613\n19614\n19615\n19616\n19617\n19618\n19619\n19620\n19621\n19622\n19623\n19624\n19625\n19626\n19627\n19628\n19629\n19630\n19631\n19632\n19633\n19634\n19635\n19636\n19637\n19638\n19639\n19640\n19641\n19642\n19643\n19644\n19645\n19646\n19647\n19648\n19649\n19650\n19651\n19652\n19653\n19654\n19655\n19656\n19657\n19658\n19659\n19660\n19661\n19662\n19663\n19664\n19665\n19666\n19667\n19668\n19669\n19670\n19671\n19672\n19673\n19674\n19675\n19676\n19677\n19678\n19679\n19680\n19681\n19682\n19683\n19684\n19685\n19686\n19687\n19688\n19689\n19690\n19691\n19692\n19693\n19694\n19695\n19696\n19697\n19698\n19699\n19700\n19701\n19702\n19703\n19704\n19705\n19706\n19707\n19708\n19709\n19710\n19711\n19712\n19713\n19714\n19715\n19716\n19717\n19718\n19719\n19720\n19721\n19722\n19723\n19724\n19725\n19726\n19727\n19728\n19729\n19730\n19731\n19732\n19733\n19734\n19735\n19736\n19737\n19738\n19739\n19740\n19741\n19742\n19743\n19744\n19745\n19746\n19747\n19748\n19749\n19750\n19751\n19752\n19753\n19754\n19755\n19756\n19757\n19758\n19759\n19760\n19761\n19762\n19763\n19764\n19765\n19766\n19767\n19768\n19769\n19770\n19771\n19772\n19773\n19774\n19775\n19776\n19777\n19778\n19779\n19780\n19781\n19782\n19783\n19784\n19785\n19786\n19787\n19788\n19789\n19790\n19791\n19792\n19793\n19794\n19795\n19796\n19797\n19798\n19799\n19800\n19801\n19802\n19803\n19804\n19805\n19806\n19807\n19808\n19809\n19810\n19811\n19812\n19813\n19814\n19815\n19816\n19817\n19818\n19819\n19820\n19821\n19822\n19823\n19824\n19825\n19826\n19827\n19828\n19829\n19830\n19831\n19832\n19833\n19834\n19835\n19836\n19837\n19838\n19839\n19840\n19841\n19842\n19843\n19844\n19845\n19846\n19847\n19848\n19849\n19850\n19851\n19852\n19853\n19854\n19855\n19856\n19857\n19858\n19859\n19860\n19861\n19862\n19863\n19864\n19865\n19866\n19867\n19868\n19869
\n19870\n19871\n19872\n19873\n19874\n19875\n19876\n19877\n19878\n19879\n19880\n19881\n19882\n19883\n19884\n19885\n19886\n19887\n19888\n19889\n19890\n19891\n19892\n19893\n19894\n19895\n19896\n19897\n19898\n19899\n19900\n19901\n19902\n19903\n19904\n19905\n19906\n19907\n19908\n19909\n19910\n19911\n19912\n19913\n19914\n19915\n19916\n19917\n19918\n19919\n19920\n19921\n19922\n19923\n19924\n19925\n19926\n19927\n19928\n19929\n19930\n19931\n19932\n19933\n19934\n19935\n19936\n19937\n19938\n19939\n19940\n19941\n19942\n19943\n19944\n19945\n19946\n19947\n19948\n19949\n19950\n19951\n19952\n19953\n19954\n19955\n19956\n19957\n19958\n19959\n19960\n19961\n19962\n19963\n19964\n19965\n19966\n19967\n19968\n19969\n19970\n19971\n19972\n19973\n19974\n19975\n19976\n19977\n19978\n19979\n19980\n19981\n19982\n19983\n19984\n19985\n19986\n19987\n19988\n19989\n19990\n19991\n19992\n19993\n19994\n19995\n19996\n19997\n19998\n19999\n20000\n20001\n20002\n20003\n20004\n20005\n20006\n20007\n20008\n20009\n20010\n20011\n20012\n20013\n20014\n20015\n20016\n20017\n20018\n20019\n20020\n20021\n20022\n20023\n20024\n20025\n20026\n20027\n20028\n20029\n20030\n20031\n20032\n20033\n20034\n20035\n20036\n20037\n20038\n20039\n20040\n20041\n20042\n20043\n20044\n20045\n20046\n20047\n20048\n20049\n20050\n20051\n20052\n20053\n20054\n20055\n20056\n20057\n20058\n20059\n20060\n20061\n20062\n20063\n20064\n20065\n20066\n20067\n20068\n20069\n20070\n20071\n20072\n20073\n20074\n20075\n20076\n20077\n20078\n20079\n20080\n20081\n20082\n20083\n20084\n20085\n20086\n20087\n20088\n20089\n20090\n20091\n20092\n20093\n20094\n20095\n20096\n20097\n20098\n20099\n20100\n20101\n20102\n20103\n20104\n20105\n20106\n20107\n20108\n20109\n20110\n20111\n20112\n20113\n20114\n20115\n20116\n20117\n20118\n20119\n20120\n20121\n20122\n20123\n20124\n20125\n20126\n20127\n20128\n20129\n20130\n20131\n20132\n20133\n20134\n20135\n20136\n20137\n20138\n20139\n20140\n20141\n20142\n20143\n20144\n20145\n20146\n20147\n20148\n20149\n20150\n20151\n20152\n20153\n20154\n20155\n20156\n20157\n20158\n20159\n20160\n20161\n20162\n20163\n20164\n20165\n20166\n20167\n20168\n20169\n20170\n20171\n20172\n20173\n20174\n20175\n20176\n20177\n20178\n20179\n20180\n20181\n20182\n20183\n20184\n20185\n20186\n20187\n20188\n20189\n20190\n20191\n20192\n20193\n20194\n20195\n20196\n20197\n20198\n20199\n20200\n20201\n20202\n20203\n20204\n20205\n20206\n20207\n20208\n20209\n20210\n20211\n20212\n20213\n20214\n20215\n20216\n20217\n20218\n20219\n20220\n20221\n20222\n20223\n20224\n20225\n20226\n20227\n20228\n20229\n20230\n20231\n20232\n20233\n20234\n20235\n20236\n20237\n20238\n20239\n20240\n20241\n20242\n20243\n20244\n20245\n20246\n20247\n20248\n20249\n20250\n20251\n20252\n20253\n20254\n20255\n20256\n20257\n20258\n20259\n20260\n20261\n20262\n20263\n20264\n20265\n20266\n20267\n20268\n20269\n20270\n20271\n20272\n20273\n20274\n20275\n20276\n20277\n20278\n20279\n20280\n20281\n20282\n20283\n20284\n20285\n20286\n20287\n20288\n20289\n20290\n20291\n20292\n20293\n20294\n20295\n20296\n20297\n20298\n20299\n20300\n20301\n20302\n20303\n20304\n20305\n20306\n20307\n20308\n20309\n20310\n20311\n20312\n20313\n20314\n20315\n20316\n20317\n20318\n20319\n20320\n20321\n20322\n20323\n20324\n20325\n20326\n20327\n20328\n20329\n20330\n20331\n20332\n20333\n20334\n20335\n20336\n20337\n20338\n20339\n20340\n20341\n20342\n20343\n20344\n20345\n20346\n20347\n20348\n20349\n20350\n20351\n20352\n20353\n20354\n20355\n20356\n20357\n20358\n20359\n20360\n20361\n20362\n20363\n20364\n20365\n20366\n20367\n20368\n20369\n20370\n20371\n20372\n20373\n20374\n20375\n20376\n2037
7\n20378\n20379\n20380\n20381\n20382\n20383\n20384\n20385\n20386\n20387\n20388\n20389\n20390\n20391\n20392\n20393\n20394\n20395\n20396\n20397\n20398\n20399\n20400\n20401\n20402\n20403\n20404\n20405\n20406\n20407\n20408\n20409\n20410\n20411\n20412\n20413\n20414\n20415\n20416\n20417\n20418\n20419\n20420\n20421\n20422\n20423\n20424\n20425\n20426\n20427\n20428\n20429\n20430\n20431\n20432\n20433\n20434\n20435\n20436\n20437\n20438\n20439\n20440\n20441\n20442\n20443\n20444\n20445\n20446\n20447\n20448\n20449\n20450\n20451\n20452\n20453\n20454\n20455\n20456\n20457\n20458\n20459\n20460\n20461\n20462\n20463\n20464\n20465\n20466\n20467\n20468\n20469\n20470\n20471\n20472\n20473\n20474\n20475\n20476\n20477\n20478\n20479\n20480\n20481\n20482\n20483\n20484\n20485\n20486\n20487\n20488\n20489\n20490\n20491\n20492\n20493\n20494\n20495\n20496\n20497\n20498\n20499\n20500\n20501\n20502\n20503\n20504\n20505\n20506\n20507\n20508\n20509\n20510\n20511\n20512\n20513\n20514\n20515\n20516\n20517\n20518\n20519\n20520\n20521\n20522\n20523\n20524\n20525\n20526\n20527\n20528\n20529\n20530\n20531\n20532\n20533\n20534\n20535\n20536\n20537\n20538\n20539\n20540\n20541\n20542\n20543\n20544\n20545\n20546\n20547\n20548\n20549\n20550\n20551\n20552\n20553\n20554\n20555\n20556\n20557\n20558\n20559\n20560\n20561\n20562\n20563\n20564\n20565\n20566\n20567\n20568\n20569\n20570\n20571\n20572\n20573\n20574\n20575\n20576\n20577\n20578\n20579\n20580\n20581\n20582\n20583\n20584\n20585\n20586\n20587\n20588\n20589\n20590\n20591\n20592\n20593\n20594\n20595\n20596\n20597\n20598\n20599\n20600\n20601\n20602\n20603\n20604\n20605\n20606\n20607\n20608\n20609\n20610\n20611\n20612\n20613\n20614\n20615\n20616\n20617\n20618\n20619\n20620\n20621\n20622\n20623\n20624\n20625\n20626\n20627\n20628\n20629\n20630\n20631\n20632\n20633\n20634\n20635\n20636\n20637\n20638\n20639\n20640\n20641\n20642\n20643\n20644\n20645\n20646\n20647\n20648\n20649\n20650\n20651\n20652\n20653\n20654\n20655\n20656\n20657\n20658\n20659\n20660\n20661\n20662\n20663\n20664\n20665\n20666\n20667\n20668\n20669\n20670\n20671\n20672\n20673\n20674\n20675\n20676\n20677\n20678\n20679\n20680\n20681\n20682\n20683\n20684\n20685\n20686\n20687\n20688\n20689\n20690\n20691\n20692\n20693\n20694\n20695\n20696\n20697\n20698\n20699\n20700\n20701\n20702\n20703\n20704\n20705\n20706\n20707\n20708\n20709\n20710\n20711\n20712\n20713\n20714\n20715\n20716\n20717\n20718\n20719\n20720\n20721\n20722\n20723\n20724\n20725\n20726\n20727\n20728\n20729\n20730\n20731\n20732\n20733\n20734\n20735\n20736\n20737\n20738\n20739\n20740\n20741\n20742\n20743\n20744\n20745\n20746\n20747\n20748\n20749\n20750\n20751\n20752\n20753\n20754\n20755\n20756\n20757\n20758\n20759\n20760\n20761\n20762\n20763\n20764\n20765\n20766\n20767\n20768\n20769\n20770\n20771\n20772\n20773\n20774\n20775\n20776\n20777\n20778\n20779\n20780\n20781\n20782\n20783\n20784\n20785\n20786\n20787\n20788\n20789\n20790\n20791\n20792\n20793\n20794\n20795\n20796\n20797\n20798\n20799\n20800\n20801\n20802\n20803\n20804\n20805\n20806\n20807\n20808\n20809\n20810\n20811\n20812\n20813\n20814\n20815\n20816\n20817\n20818\n20819\n20820\n20821\n20822\n20823\n20824\n20825\n20826\n20827\n20828\n20829\n20830\n20831\n20832\n20833\n20834\n20835\n20836\n20837\n20838\n20839\n20840\n20841\n20842\n20843\n20844\n20845\n20846\n20847\n20848\n20849\n20850\n20851\n20852\n20853\n20854\n20855\n20856\n20857\n20858\n20859\n20860\n20861\n20862\n20863\n20864\n20865\n20866\n20867\n20868\n20869\n20870\n20871\n20872\n20873\n20874\n20875\n20876\n20877\n20878\n20879\n20880\n20881\n20882\n20883\n20884\n208
85\n20886\n20887\n20888\n20889\n20890\n20891\n20892\n20893\n20894\n20895\n20896\n20897\n20898\n20899\n20900\n20901\n20902\n20903\n20904\n20905\n20906\n20907\n20908\n20909\n20910\n20911\n20912\n20913\n20914\n20915\n20916\n20917\n20918\n20919\n20920\n20921\n20922\n20923\n20924\n20925\n20926\n20927\n20928\n20929\n20930\n20931\n20932\n20933\n20934\n20935\n20936\n20937\n20938\n20939\n20940\n20941\n20942\n20943\n20944\n20945\n20946\n20947\n20948\n20949\n20950\n20951\n20952\n20953\n20954\n20955\n20956\n20957\n20958\n20959\n20960\n20961\n20962\n20963\n20964\n20965\n20966\n20967\n20968\n20969\n20970\n20971\n20972\n20973\n20974\n20975\n20976\n20977\n20978\n20979\n20980\n20981\n20982\n20983\n20984\n20985\n20986\n20987\n20988\n20989\n20990\n20991\n20992\n20993\n20994\n20995\n20996\n20997\n20998\n20999\n21000\n21001\n21002\n21003\n21004\n21005\n21006\n21007\n21008\n21009\n21010\n21011\n21012\n21013\n21014\n21015\n21016\n21017\n21018\n21019\n21020\n21021\n21022\n21023\n21024\n21025\n21026\n21027\n21028\n21029\n21030\n21031\n21032\n21033\n21034\n21035\n21036\n21037\n21038\n21039\n21040\n21041\n21042\n21043\n21044\n21045\n21046\n21047\n21048\n21049\n21050\n21051\n21052\n21053\n21054\n21055\n21056\n21057\n21058\n21059\n21060\n21061\n21062\n21063\n21064\n21065\n21066\n21067\n21068\n21069\n21070\n21071\n21072\n21073\n21074\n21075\n21076\n21077\n21078\n21079\n21080\n21081\n21082\n21083\n21084\n21085\n21086\n21087\n21088\n21089\n21090\n21091\n21092\n21093\n21094\n21095\n21096\n21097\n21098\n21099\n21100\n21101\n21102\n21103\n21104\n21105\n21106\n21107\n21108\n21109\n21110\n21111\n21112\n21113\n21114\n21115\n21116\n21117\n21118\n21119\n21120\n21121\n21122\n21123\n21124\n21125\n21126\n21127\n21128\n21129\n21130\n21131\n21132\n21133\n21134\n21135\n21136\n21137\n21138\n21139\n21140\n21141\n21142\n21143\n21144\n21145\n21146\n21147\n21148\n21149\n21150\n21151\n21152\n21153\n21154\n21155\n21156\n21157\n21158\n21159\n21160\n21161\n21162\n21163\n21164\n21165\n21166\n21167\n21168\n21169\n21170\n21171\n21172\n21173\n21174\n21175\n21176\n21177\n21178\n21179\n21180\n21181\n21182\n21183\n21184\n21185\n21186\n21187\n21188\n21189\n21190\n21191\n21192\n21193\n21194\n21195\n21196\n21197\n21198\n21199\n21200\n21201\n21202\n21203\n21204\n21205\n21206\n21207\n21208\n21209\n21210\n21211\n21212\n21213\n21214\n21215\n21216\n21217\n21218\n21219\n21220\n21221\n21222\n21223\n21224\n21225\n21226\n21227\n21228\n21229\n21230\n21231\n21232\n21233\n21234\n21235\n21236\n21237\n21238\n21239\n21240\n21241\n21242\n21243\n21244\n21245\n21246\n21247\n21248\n21249\n21250\n21251\n21252\n21253\n21254\n21255\n21256\n21257\n21258\n21259\n21260\n21261\n21262\n21263\n21264\n21265\n21266\n21267\n21268\n21269\n21270\n21271\n21272\n21273\n21274\n21275\n21276\n21277\n21278\n21279\n21280\n21281\n21282\n21283\n21284\n21285\n21286\n21287\n21288\n21289\n21290\n21291\n21292\n21293\n21294\n21295\n21296\n21297\n21298\n21299\n21300\n21301\n21302\n21303\n21304\n21305\n21306\n21307\n21308\n21309\n21310\n21311\n21312\n21313\n21314\n21315\n21316\n21317\n21318\n21319\n21320\n21321\n21322\n21323\n21324\n21325\n21326\n21327\n21328\n21329\n21330\n21331\n21332\n21333\n21334\n21335\n21336\n21337\n21338\n21339\n21340\n21341\n21342\n21343\n21344\n21345\n21346\n21347\n21348\n21349\n21350\n21351\n21352\n21353\n21354\n21355\n21356\n21357\n21358\n21359\n21360\n21361\n21362\n21363\n21364\n21365\n21366\n21367\n21368\n21369\n21370\n21371\n21372\n21373\n21374\n21375\n21376\n21377\n21378\n21379\n21380\n21381\n21382\n21383\n21384\n21385\n21386\n21387\n21388\n21389\n21390\n21391\n21392\n21
393\n21394\n21395\n21396\n21397\n21398\n21399\n21400\n21401\n21402\n21403\n21404\n21405\n21406\n21407\n21408\n21409\n21410\n21411\n21412\n21413\n21414\n21415\n21416\n21417\n21418\n21419\n21420\n21421\n21422\n21423\n21424\n21425\n21426\n21427\n21428\n21429\n21430\n21431\n21432\n21433\n21434\n21435\n21436\n21437\n21438\n21439\n21440\n21441\n21442\n21443\n21444\n21445\n21446\n21447\n21448\n21449\n21450\n21451\n21452\n21453\n21454\n21455\n21456\n21457\n21458\n21459\n21460\n21461\n21462\n21463\n21464\n21465\n21466\n21467\n21468\n21469\n21470\n21471\n21472\n21473\n21474\n21475\n21476\n21477\n21478\n21479\n21480\n21481\n21482\n21483\n21484\n21485\n21486\n21487\n21488\n21489\n21490\n21491\n21492\n21493\n21494\n21495\n21496\n21497\n21498\n21499\n21500\n21501\n21502\n21503\n21504\n21505\n21506\n21507\n21508\n21509\n21510\n21511\n21512\n21513\n21514\n21515\n21516\n21517\n21518\n21519\n21520\n21521\n21522\n21523\n21524\n21525\n21526\n21527\n21528\n21529\n21530\n21531\n21532\n21533\n21534\n21535\n21536\n21537\n21538\n21539\n21540\n21541\n21542\n21543\n21544\n21545\n21546\n21547\n21548\n21549\n21550\n21551\n21552\n21553\n21554\n21555\n21556\n21557\n21558\n21559\n21560\n21561\n21562\n21563\n21564\n21565\n21566\n21567\n21568\n21569\n21570\n21571\n21572\n21573\n21574\n21575\n21576\n21577\n21578\n21579\n21580\n21581\n21582\n21583\n21584\n21585\n21586\n21587\n21588\n21589\n21590\n21591\n21592\n21593\n21594\n21595\n21596\n21597\n21598\n21599\n21600\n21601\n21602\n21603\n21604\n21605\n21606\n21607\n21608\n21609\n21610\n21611\n21612\n21613\n21614\n21615\n21616\n21617\n21618\n21619\n21620\n21621\n21622\n21623\n21624\n21625\n21626\n21627\n21628\n21629\n21630\n21631\n21632\n21633\n21634\n21635\n21636\n21637\n21638\n21639\n21640\n21641\n21642\n21643\n21644\n21645\n21646\n21647\n21648\n21649\n21650\n21651\n21652\n21653\n21654\n21655\n21656\n21657\n21658\n21659\n21660\n21661\n21662\n21663\n21664\n21665\n21666\n21667\n21668\n21669\n21670\n21671\n21672\n21673\n21674\n21675\n21676\n21677\n21678\n21679\n21680\n21681\n21682\n21683\n21684\n21685\n21686\n21687\n21688\n21689\n21690\n21691\n21692\n21693\n21694\n21695\n21696\n21697\n21698\n21699\n21700\n21701\n21702\n21703\n21704\n21705\n21706\n21707\n21708\n21709\n21710\n21711\n21712\n21713\n21714\n21715\n21716\n21717\n21718\n21719\n21720\n21721\n21722\n21723\n21724\n21725\n21726\n21727\n21728\n21729\n21730\n21731\n21732\n21733\n21734\n21735\n21736\n21737\n21738\n21739\n21740\n21741\n21742\n21743\n21744\n21745\n21746\n21747\n21748\n21749\n21750\n21751\n21752\n21753\n21754\n21755\n21756\n21757\n21758\n21759\n21760\n21761\n21762\n21763\n21764\n21765\n21766\n21767\n21768\n21769\n21770\n21771\n21772\n21773\n21774\n21775\n21776\n21777\n21778\n21779\n21780\n21781\n21782\n21783\n21784\n21785\n21786\n21787\n21788\n21789\n21790\n21791\n21792\n21793\n21794\n21795\n21796\n21797\n21798\n21799\n21800\n21801\n21802\n21803\n21804\n21805\n21806\n21807\n21808\n21809\n21810\n21811\n21812\n21813\n21814\n21815\n21816\n21817\n21818\n21819\n21820\n21821\n21822\n21823\n21824\n21825\n21826\n21827\n21828\n21829\n21830\n21831\n21832\n21833\n21834\n21835\n21836\n21837\n21838\n21839\n21840\n21841\n21842\n21843\n21844\n21845\n21846\n21847\n21848\n21849\n21850\n21851\n21852\n21853\n21854\n21855\n21856\n21857\n21858\n21859\n21860\n21861\n21862\n21863\n21864\n21865\n21866\n21867\n21868\n21869\n21870\n21871\n21872\n21873\n21874\n21875\n21876\n21877\n21878\n21879\n21880\n21881\n21882\n21883\n21884\n21885\n21886\n21887\n21888\n21889\n21890\n21891\n21892\n21893\n21894\n21895\n21896\n21897\n21898\n21899\n21900\n2
1901\n21902\n21903\n21904\n21905\n21906\n21907\n21908\n21909\n21910\n21911\n21912\n21913\n21914\n21915\n21916\n21917\n21918\n21919\n21920\n21921\n21922\n21923\n21924\n21925\n21926\n21927\n21928\n21929\n21930\n21931\n21932\n21933\n21934\n21935\n21936\n21937\n21938\n21939\n21940\n21941\n21942\n21943\n21944\n21945\n21946\n21947\n21948\n21949\n21950\n21951\n21952\n21953\n21954\n21955\n21956\n21957\n21958\n21959\n21960\n21961\n21962\n21963\n21964\n21965\n21966\n21967\n21968\n21969\n21970\n21971\n21972\n21973\n21974\n21975\n21976\n21977\n21978\n21979\n21980\n21981\n21982\n21983\n21984\n21985\n21986\n21987\n21988\n21989\n21990\n21991\n21992\n21993\n21994\n21995\n21996\n21997\n21998\n21999\n22000\n22001\n22002\n22003\n22004\n22005\n22006\n22007\n22008\n22009\n22010\n22011\n22012\n22013\n22014\n22015\n22016\n22017\n22018\n22019\n22020\n22021\n22022\n22023\n22024\n22025\n22026\n22027\n22028\n22029\n22030\n22031\n22032\n22033\n22034\n22035\n22036\n22037\n22038\n22039\n22040\n22041\n22042\n22043\n22044\n22045\n22046\n22047\n22048\n22049\n22050\n22051\n22052\n22053\n22054\n22055\n22056\n22057\n22058\n22059\n22060\n22061\n22062\n22063\n22064\n22065\n22066\n22067\n22068\n22069\n22070\n22071\n22072\n22073\n22074\n22075\n22076\n22077\n22078\n22079\n22080\n22081\n22082\n22083\n22084\n22085\n22086\n22087\n22088\n22089\n22090\n22091\n22092\n22093\n22094\n22095\n22096\n22097\n22098\n22099\n22100\n22101\n22102\n22103\n22104\n22105\n22106\n22107\n22108\n22109\n22110\n22111\n22112\n22113\n22114\n22115\n22116\n22117\n22118\n22119\n22120\n22121\n22122\n22123\n22124\n22125\n22126\n22127\n22128\n22129\n22130\n22131\n22132\n22133\n22134\n22135\n22136\n22137\n22138\n22139\n22140\n22141\n22142\n22143\n22144\n22145\n22146\n22147\n22148\n22149\n22150\n22151\n22152\n22153\n22154\n22155\n22156\n22157\n22158\n22159\n22160\n22161\n22162\n22163\n22164\n22165\n22166\n22167\n22168\n22169\n22170\n22171\n22172\n22173\n22174\n22175\n22176\n22177\n22178\n22179\n22180\n22181\n22182\n22183\n22184\n22185\n22186\n22187\n22188\n22189\n22190\n22191\n22192\n22193\n22194\n22195\n22196\n22197\n22198\n22199\n22200\n22201\n22202\n22203\n22204\n22205\n22206\n22207\n22208\n22209\n22210\n22211\n22212\n22213\n22214\n22215\n22216\n22217\n22218\n22219\n22220\n22221\n22222\n22223\n22224\n22225\n22226\n22227\n22228\n22229\n22230\n22231\n22232\n22233\n22234\n22235\n22236\n22237\n22238\n22239\n22240\n22241\n22242\n22243\n22244\n22245\n22246\n22247\n22248\n22249\n22250\n22251\n22252\n22253\n22254\n22255\n22256\n22257\n22258\n22259\n22260\n22261\n22262\n22263\n22264\n22265\n22266\n22267\n22268\n22269\n22270\n22271\n22272\n22273\n22274\n22275\n22276\n22277\n22278\n22279\n22280\n22281\n22282\n22283\n22284\n22285\n22286\n22287\n22288\n22289\n22290\n22291\n22292\n22293\n22294\n22295\n22296\n22297\n22298\n22299\n22300\n22301\n22302\n22303\n22304\n22305\n22306\n22307\n22308\n22309\n22310\n22311\n22312\n22313\n22314\n22315\n22316\n22317\n22318\n22319\n22320\n22321\n22322\n22323\n22324\n22325\n22326\n22327\n22328\n22329\n22330\n22331\n22332\n22333\n22334\n22335\n22336\n22337\n22338\n22339\n22340\n22341\n22342\n22343\n22344\n22345\n22346\n22347\n22348\n22349\n22350\n22351\n22352\n22353\n22354\n22355\n22356\n22357\n22358\n22359\n22360\n22361\n22362\n22363\n22364\n22365\n22366\n22367\n22368\n22369\n22370\n22371\n22372\n22373\n22374\n22375\n22376\n22377\n22378\n22379\n22380\n22381\n22382\n22383\n22384\n22385\n22386\n22387\n22388\n22389\n22390\n22391\n22392\n22393\n22394\n22395\n22396\n22397\n22398\n22399\n22400\n22401\n22402\n22403\n22404\n22405\n22406\n22407\n22408\n
22409\n22410\n22411\n22412\n22413\n22414\n22415\n22416\n22417\n22418\n22419\n22420\n22421\n22422\n22423\n22424\n22425\n22426\n22427\n22428\n22429\n22430\n22431\n22432\n22433\n22434\n22435\n22436\n22437\n22438\n22439\n22440\n22441\n22442\n22443\n22444\n22445\n22446\n22447\n22448\n22449\n22450\n22451\n22452\n22453\n22454\n22455\n22456\n22457\n22458\n22459\n22460\n22461\n22462\n22463\n22464\n22465\n22466\n22467\n22468\n22469\n22470\n22471\n22472\n22473\n22474\n22475\n22476\n22477\n22478\n22479\n22480\n22481\n22482\n22483\n22484\n22485\n22486\n22487\n22488\n22489\n22490\n22491\n22492\n22493\n22494\n22495\n22496\n22497\n22498\n22499\n22500\n22501\n22502\n22503\n22504\n22505\n22506\n22507\n22508\n22509\n22510\n22511\n22512\n22513\n22514\n22515\n22516\n22517\n22518\n22519\n22520\n22521\n22522\n22523\n22524\n22525\n22526\n22527\n22528\n22529\n22530\n22531\n22532\n22533\n22534\n22535\n22536\n22537\n22538\n22539\n22540\n22541\n22542\n22543\n22544\n22545\n22546\n22547\n22548\n22549\n22550\n22551\n22552\n22553\n22554\n22555\n22556\n22557\n22558\n22559\n22560\n22561\n22562\n22563\n22564\n22565\n22566\n22567\n22568\n22569\n22570\n22571\n22572\n22573\n22574\n22575\n22576\n22577\n22578\n22579\n22580\n22581\n22582\n22583\n22584\n22585\n22586\n22587\n22588\n22589\n22590\n22591\n22592\n22593\n22594\n22595\n22596\n22597\n22598\n22599\n22600\n22601\n22602\n22603\n22604\n22605\n22606\n22607\n22608\n22609\n22610\n22611\n22612\n22613\n22614\n22615\n22616\n22617\n22618\n22619\n22620\n22621\n22622\n22623\n22624\n22625\n22626\n22627\n22628\n22629\n22630\n22631\n22632\n22633\n22634\n22635\n22636\n22637\n22638\n22639\n22640\n22641\n22642\n22643\n22644\n22645\n22646\n22647\n22648\n22649\n22650\n22651\n22652\n22653\n22654\n22655\n22656\n22657\n22658\n22659\n22660\n22661\n22662\n22663\n22664\n22665\n22666\n22667\n22668\n22669\n22670\n22671\n22672\n22673\n22674\n22675\n22676\n22677\n22678\n22679\n22680\n22681\n22682\n22683\n22684\n22685\n22686\n22687\n22688\n22689\n22690\n22691\n22692\n22693\n22694\n22695\n22696\n22697\n22698\n22699\n22700\n22701\n22702\n22703\n22704\n22705\n22706\n22707\n22708\n22709\n22710\n22711\n22712\n22713\n22714\n22715\n22716\n22717\n22718\n22719\n22720\n22721\n22722\n22723\n22724\n22725\n22726\n22727\n22728\n22729\n22730\n22731\n22732\n22733\n22734\n22735\n22736\n22737\n22738\n22739\n22740\n22741\n22742\n22743\n22744\n22745\n22746\n22747\n22748\n22749\n22750\n22751\n22752\n22753\n22754\n22755\n22756\n22757\n22758\n22759\n22760\n22761\n22762\n22763\n22764\n22765\n22766\n22767\n22768\n22769\n22770\n22771\n22772\n22773\n22774\n22775\n22776\n22777\n22778\n22779\n22780\n22781\n22782\n22783\n22784\n22785\n22786\n22787\n22788\n22789\n22790\n22791\n22792\n22793\n22794\n22795\n22796\n22797\n22798\n22799\n22800\n22801\n22802\n22803\n22804\n22805\n22806\n22807\n22808\n22809\n22810\n22811\n22812\n22813\n22814\n22815\n22816\n22817\n22818\n22819\n22820\n22821\n22822\n22823\n22824\n22825\n22826\n22827\n22828\n22829\n22830\n22831\n22832\n22833\n22834\n22835\n22836\n22837\n22838\n22839\n22840\n22841\n22842\n22843\n22844\n22845\n22846\n22847\n22848\n22849\n22850\n22851\n22852\n22853\n22854\n22855\n22856\n22857\n22858\n22859\n22860\n22861\n22862\n22863\n22864\n22865\n22866\n22867\n22868\n22869\n22870\n22871\n22872\n22873\n22874\n22875\n22876\n22877\n22878\n22879\n22880\n22881\n22882\n22883\n22884\n22885\n22886\n22887\n22888\n22889\n22890\n22891\n22892\n22893\n22894\n22895\n22896\n22897\n22898\n22899\n22900\n22901\n22902\n22903\n22904\n22905\n22906\n22907\n22908\n22909\n22910\n22911\n22912\n22913\n22914\n22915\n22916\
n22917\n22918\n22919\n22920\n22921\n22922\n22923\n22924\n22925\n22926\n22927\n22928\n22929\n22930\n22931\n22932\n22933\n22934\n22935\n22936\n22937\n22938\n22939\n22940\n22941\n22942\n22943\n22944\n22945\n22946\n22947\n22948\n22949\n22950\n22951\n22952\n22953\n22954\n22955\n22956\n22957\n22958\n22959\n22960\n22961\n22962\n22963\n22964\n22965\n22966\n22967\n22968\n22969\n22970\n22971\n22972\n22973\n22974\n22975\n22976\n22977\n22978\n22979\n22980\n22981\n22982\n22983\n22984\n22985\n22986\n22987\n22988\n22989\n22990\n22991\n22992\n22993\n22994\n22995\n22996\n22997\n22998\n22999\n23000\n23001\n23002\n23003\n23004\n23005\n23006\n23007\n23008\n23009\n23010\n23011\n23012\n23013\n23014\n23015\n23016\n23017\n23018\n23019\n23020\n23021\n23022\n23023\n23024\n23025\n23026\n23027\n23028\n23029\n23030\n23031\n23032\n23033\n23034\n23035\n23036\n23037\n23038\n23039\n23040\n23041\n23042\n23043\n23044\n23045\n23046\n23047\n23048\n23049\n23050\n23051\n23052\n23053\n23054\n23055\n23056\n23057\n23058\n23059\n23060\n23061\n23062\n23063\n23064\n23065\n23066\n23067\n23068\n23069\n23070\n23071\n23072\n23073\n23074\n23075\n23076\n23077\n23078\n23079\n23080\n23081\n23082\n23083\n23084\n23085\n23086\n23087\n23088\n23089\n23090\n23091\n23092\n23093\n23094\n23095\n23096\n23097\n23098\n23099\n23100\n23101\n23102\n23103\n23104\n23105\n23106\n23107\n23108\n23109\n23110\n23111\n23112\n23113\n23114\n23115\n23116\n23117\n23118\n23119\n23120\n23121\n23122\n23123\n23124\n23125\n23126\n23127\n23128\n23129\n23130\n23131\n23132\n23133\n23134\n23135\n23136\n23137\n23138\n23139\n23140\n23141\n23142\n23143\n23144\n23145\n23146\n23147\n23148\n23149\n23150\n23151\n23152\n23153\n23154\n23155\n23156\n23157\n23158\n23159\n23160\n23161\n23162\n23163\n23164\n23165\n23166\n23167\n23168\n23169\n23170\n23171\n23172\n23173\n23174\n23175\n23176\n23177\n23178\n23179\n23180\n23181\n23182\n23183\n23184\n23185\n23186\n23187\n23188\n23189\n23190\n23191\n23192\n23193\n23194\n23195\n23196\n23197\n23198\n23199\n23200\n23201\n23202\n23203\n23204\n23205\n23206\n23207\n23208\n23209\n23210\n23211\n23212\n23213\n23214\n23215\n23216\n23217\n23218\n23219\n23220\n23221\n23222\n23223\n23224\n23225\n23226\n23227\n23228\n23229\n23230\n23231\n23232\n23233\n23234\n23235\n23236\n23237\n23238\n23239\n23240\n23241\n23242\n23243\n23244\n23245\n23246\n23247\n23248\n23249\n23250\n23251\n23252\n23253\n23254\n23255\n23256\n23257\n23258\n23259\n23260\n23261\n23262\n23263\n23264\n23265\n23266\n23267\n23268\n23269\n23270\n23271\n23272\n23273\n23274\n23275\n23276\n23277\n23278\n23279\n23280\n23281\n23282\n23283\n23284\n23285\n23286\n23287\n23288\n23289\n23290\n23291\n23292\n23293\n23294\n23295\n23296\n23297\n23298\n23299\n23300\n23301\n23302\n23303\n23304\n23305\n23306\n23307\n23308\n23309\n23310\n23311\n23312\n23313\n23314\n23315\n23316\n23317\n23318\n23319\n23320\n23321\n23322\n23323\n23324\n23325\n23326\n23327\n23328\n23329\n23330\n23331\n23332\n23333\n23334\n23335\n23336\n23337\n23338\n23339\n23340\n23341\n23342\n23343\n23344\n23345\n23346\n23347\n23348\n23349\n23350\n23351\n23352\n23353\n23354\n23355\n23356\n23357\n23358\n23359\n23360\n23361\n23362\n23363\n23364\n23365\n23366\n23367\n23368\n23369\n23370\n23371\n23372\n23373\n23374\n23375\n23376\n23377\n23378\n23379\n23380\n23381\n23382\n23383\n23384\n23385\n23386\n23387\n23388\n23389\n23390\n23391\n23392\n23393\n23394\n23395\n23396\n23397\n23398\n23399\n23400\n23401\n23402\n23403\n23404\n23405\n23406\n23407\n23408\n23409\n23410\n23411\n23412\n23413\n23414\n23415\n23416\n23417\n23418\n23419\n23420\n23421\n23422\n23423\n23424
\n23425\n23426\n23427\n23428\n23429\n23430\n23431\n23432\n23433\n23434\n23435\n23436\n23437\n23438\n23439\n23440\n23441\n23442\n23443\n23444\n23445\n23446\n23447\n23448\n23449\n23450\n23451\n23452\n23453\n23454\n23455\n23456\n23457\n23458\n23459\n23460\n23461\n23462\n23463\n23464\n23465\n23466\n23467\n23468\n23469\n23470\n23471\n23472\n23473\n23474\n23475\n23476\n23477\n23478\n23479\n23480\n23481\n23482\n23483\n23484\n23485\n23486\n23487\n23488\n23489\n23490\n23491\n23492\n23493\n23494\n23495\n23496\n23497\n23498\n23499\n23500\n23501\n23502\n23503\n23504\n23505\n23506\n23507\n23508\n23509\n23510\n23511\n23512\n23513\n23514\n23515\n23516\n23517\n23518\n23519\n23520\n23521\n23522\n23523\n23524\n23525\n23526\n23527\n23528\n23529\n23530\n23531\n23532\n23533\n23534\n23535\n23536\n23537\n23538\n23539\n23540\n23541\n23542\n23543\n23544\n23545\n23546\n23547\n23548\n23549\n23550\n23551\n23552\n23553\n23554\n23555\n23556\n23557\n23558\n23559\n23560\n23561\n23562\n23563\n23564\n23565\n23566\n23567\n23568\n23569\n23570\n23571\n23572\n23573\n23574\n23575\n23576\n23577\n23578\n23579\n23580\n23581\n23582\n23583\n23584\n23585\n23586\n23587\n23588\n23589\n23590\n23591\n23592\n23593\n23594\n23595\n23596\n23597\n23598\n23599\n23600\n23601\n23602\n23603\n23604\n23605\n23606\n23607\n23608\n23609\n23610\n23611\n23612\n23613\n23614\n23615\n23616\n23617\n23618\n23619\n23620\n23621\n23622\n23623\n23624\n23625\n23626\n23627\n23628\n23629\n23630\n23631\n23632\n23633\n23634\n23635\n23636\n23637\n23638\n23639\n23640\n23641\n23642\n23643\n23644\n23645\n23646\n23647\n23648\n23649\n23650\n23651\n23652\n23653\n23654\n23655\n23656\n23657\n23658\n23659\n23660\n23661\n23662\n23663\n23664\n23665\n23666\n23667\n23668\n23669\n23670\n23671\n23672\n23673\n23674\n23675\n23676\n23677\n23678\n23679\n23680\n23681\n23682\n23683\n23684\n23685\n23686\n23687\n23688\n23689\n23690\n23691\n23692\n23693\n23694\n23695\n23696\n23697\n23698\n23699\n23700\n23701\n23702\n23703\n23704\n23705\n23706\n23707\n23708\n23709\n23710\n23711\n23712\n23713\n23714\n23715\n23716\n23717\n23718\n23719\n23720\n23721\n23722\n23723\n23724\n23725\n23726\n23727\n23728\n23729\n23730\n23731\n23732\n23733\n23734\n23735\n23736\n23737\n23738\n23739\n23740\n23741\n23742\n23743\n23744\n23745\n23746\n23747\n23748\n23749\n23750\n23751\n23752\n23753\n23754\n23755\n23756\n23757\n23758\n23759\n23760\n23761\n23762\n23763\n23764\n23765\n23766\n23767\n23768\n23769\n23770\n23771\n23772\n23773\n23774\n23775\n23776\n23777\n23778\n23779\n23780\n23781\n23782\n23783\n23784\n23785\n23786\n23787\n23788\n23789\n23790\n23791\n23792\n23793\n23794\n23795\n23796\n23797\n23798\n23799\n23800\n23801\n23802\n23803\n23804\n23805\n23806\n23807\n23808\n23809\n23810\n23811\n23812\n23813\n23814\n23815\n23816\n23817\n23818\n23819\n23820\n23821\n23822\n23823\n23824\n23825\n23826\n23827\n23828\n23829\n23830\n23831\n23832\n23833\n23834\n23835\n23836\n23837\n23838\n23839\n23840\n23841\n23842\n23843\n23844\n23845\n23846\n23847\n23848\n23849\n23850\n23851\n23852\n23853\n23854\n23855\n23856\n23857\n23858\n23859\n23860\n23861\n23862\n23863\n23864\n23865\n23866\n23867\n23868\n23869\n23870\n23871\n23872\n23873\n23874\n23875\n23876\n23877\n23878\n23879\n23880\n23881\n23882\n23883\n23884\n23885\n23886\n23887\n23888\n23889\n23890\n23891\n23892\n23893\n23894\n23895\n23896\n23897\n23898\n23899\n23900\n23901\n23902\n23903\n23904\n23905\n23906\n23907\n23908\n23909\n23910\n23911\n23912\n23913\n23914\n23915\n23916\n23917\n23918\n23919\n23920\n23921\n23922\n23923\n23924\n23925\n23926\n23927\n23928\n23929\n23930\n23931\n2393
2\n23933\n23934\n23935\n23936\n23937\n23938\n23939\n23940\n23941\n23942\n23943\n23944\n23945\n23946\n23947\n23948\n23949\n23950\n23951\n23952\n23953\n23954\n23955\n23956\n23957\n23958\n23959\n23960\n23961\n23962\n23963\n23964\n23965\n23966\n23967\n23968\n23969\n23970\n23971\n23972\n23973\n23974\n23975\n23976\n23977\n23978\n23979\n23980\n23981\n23982\n23983\n23984\n23985\n23986\n23987\n23988\n23989\n23990\n23991\n23992\n23993\n23994\n23995\n23996\n23997\n23998\n23999\n24000\n24001\n24002\n24003\n24004\n24005\n24006\n24007\n24008\n24009\n24010\n24011\n24012\n24013\n24014\n24015\n24016\n24017\n24018\n24019\n24020\n24021\n24022\n24023\n24024\n24025\n24026\n24027\n24028\n24029\n24030\n24031\n24032\n24033\n24034\n24035\n24036\n24037\n24038\n24039\n24040\n24041\n24042\n24043\n24044\n24045\n24046\n24047\n24048\n24049\n24050\n24051\n24052\n24053\n24054\n24055\n24056\n24057\n24058\n24059\n24060\n24061\n24062\n24063\n24064\n24065\n24066\n24067\n24068\n24069\n24070\n24071\n24072\n24073\n24074\n24075\n24076\n24077\n24078\n24079\n24080\n24081\n24082\n24083\n24084\n24085\n24086\n24087\n24088\n24089\n24090\n24091\n24092\n24093\n24094\n24095\n24096\n24097\n24098\n24099\n24100\n24101\n24102\n24103\n24104\n24105\n24106\n24107\n24108\n24109\n24110\n24111\n24112\n24113\n24114\n24115\n24116\n24117\n24118\n24119\n24120\n24121\n24122\n24123\n24124\n24125\n24126\n24127\n24128\n24129\n24130\n24131\n24132\n24133\n24134\n24135\n24136\n24137\n24138\n24139\n24140\n24141\n24142\n24143\n24144\n24145\n24146\n24147\n24148\n24149\n24150\n24151\n24152\n24153\n24154\n24155\n24156\n24157\n24158\n24159\n24160\n24161\n24162\n24163\n24164\n24165\n24166\n24167\n24168\n24169\n24170\n24171\n24172\n24173\n24174\n24175\n24176\n24177\n24178\n24179\n24180\n24181\n24182\n24183\n24184\n24185\n24186\n24187\n24188\n24189\n24190\n24191\n24192\n24193\n24194\n24195\n24196\n24197\n24198\n24199\n24200\n24201\n24202\n24203\n24204\n24205\n24206\n24207\n24208\n24209\n24210\n24211\n24212\n24213\n24214\n24215\n24216\n24217\n24218\n24219\n24220\n24221\n24222\n24223\n24224\n24225\n24226\n24227\n24228\n24229\n24230\n24231\n24232\n24233\n24234\n24235\n24236\n24237\n24238\n24239\n24240\n24241\n24242\n24243\n24244\n24245\n24246\n24247\n24248\n24249\n24250\n24251\n24252\n24253\n24254\n24255\n24256\n24257\n24258\n24259\n24260\n24261\n24262\n24263\n24264\n24265\n24266\n24267\n24268\n24269\n24270\n24271\n24272\n24273\n24274\n24275\n24276\n24277\n24278\n24279\n24280\n24281\n24282\n24283\n24284\n24285\n24286\n24287\n24288\n24289\n24290\n24291\n24292\n24293\n24294\n24295\n24296\n24297\n24298\n24299\n24300\n24301\n24302\n24303\n24304\n24305\n24306\n24307\n24308\n24309\n24310\n24311\n24312\n24313\n24314\n24315\n24316\n24317\n24318\n24319\n24320\n24321\n24322\n24323\n24324\n24325\n24326\n24327\n24328\n24329\n24330\n24331\n24332\n24333\n24334\n24335\n24336\n24337\n24338\n24339\n24340\n24341\n24342\n24343\n24344\n24345\n24346\n24347\n24348\n24349\n24350\n24351\n24352\n24353\n24354\n24355\n24356\n24357\n24358\n24359\n24360\n24361\n24362\n24363\n24364\n24365\n24366\n24367\n24368\n24369\n24370\n24371\n24372\n24373\n24374\n24375\n24376\n24377\n24378\n24379\n24380\n24381\n24382\n24383\n24384\n24385\n24386\n24387\n24388\n24389\n24390\n24391\n24392\n24393\n24394\n24395\n24396\n24397\n24398\n24399\n24400\n24401\n24402\n24403\n24404\n24405\n24406\n24407\n24408\n24409\n24410\n24411\n24412\n24413\n24414\n24415\n24416\n24417\n24418\n24419\n24420\n24421\n24422\n24423\n24424\n24425\n24426\n24427\n24428\n24429\n24430\n24431\n24432\n24433\n24434\n24435\n24436\n24437\n24438\n24439\n244
40\n24441\n24442\n24443\n24444\n24445\n24446\n24447\n24448\n24449\n24450\n24451\n24452\n24453\n24454\n24455\n24456\n24457\n24458\n24459\n24460\n24461\n24462\n24463\n24464\n24465\n24466\n24467\n24468\n24469\n24470\n24471\n24472\n24473\n24474\n24475\n24476\n24477\n24478\n24479\n24480\n24481\n24482\n24483\n24484\n24485\n24486\n24487\n24488\n24489\n24490\n24491\n24492\n24493\n24494\n24495\n24496\n24497\n24498\n24499\n24500\n24501\n24502\n24503\n24504\n24505\n24506\n24507\n24508\n24509\n24510\n24511\n24512\n24513\n24514\n24515\n24516\n24517\n24518\n24519\n24520\n24521\n24522\n24523\n24524\n24525\n24526\n24527\n24528\n24529\n24530\n24531\n24532\n24533\n24534\n24535\n24536\n24537\n24538\n24539\n24540\n24541\n24542\n24543\n24544\n24545\n24546\n24547\n24548\n24549\n24550\n24551\n24552\n24553\n24554\n24555\n24556\n24557\n24558\n24559\n24560\n24561\n24562\n24563\n24564\n24565\n24566\n24567\n24568\n24569\n24570\n24571\n24572\n24573\n24574\n24575\n24576\n24577\n24578\n24579\n24580\n24581\n24582\n24583\n24584\n24585\n24586\n24587\n24588\n24589\n24590\n24591\n24592\n24593\n24594\n24595\n24596\n24597\n24598\n24599\n24600\n24601\n24602\n24603\n24604\n24605\n24606\n24607\n24608\n24609\n24610\n24611\n24612\n24613\n24614\n24615\n24616\n24617\n24618\n24619\n24620\n24621\n24622\n24623\n24624\n24625\n24626\n24627\n24628\n24629\n24630\n24631\n24632\n24633\n24634\n24635\n24636\n24637\n24638\n24639\n24640\n24641\n24642\n24643\n24644\n24645\n24646\n24647\n24648\n24649\n24650\n24651\n24652\n24653\n24654\n24655\n24656\n24657\n24658\n24659\n24660\n24661\n24662\n24663\n24664\n24665\n24666\n24667\n24668\n24669\n24670\n24671\n24672\n24673\n24674\n24675\n24676\n24677\n24678\n24679\n24680\n24681\n24682\n24683\n24684\n24685\n24686\n24687\n24688\n24689\n24690\n24691\n24692\n24693\n24694\n24695\n24696\n24697\n24698\n24699\n24700\n24701\n24702\n24703\n24704\n24705\n24706\n24707\n24708\n24709\n24710\n24711\n24712\n24713\n24714\n24715\n24716\n24717\n24718\n24719\n24720\n24721\n24722\n24723\n24724\n24725\n24726\n24727\n24728\n24729\n24730\n24731\n24732\n24733\n24734\n24735\n24736\n24737\n24738\n24739\n24740\n24741\n24742\n24743\n24744\n24745\n24746\n24747\n24748\n24749\n24750\n24751\n24752\n24753\n24754\n24755\n24756\n24757\n24758\n24759\n24760\n24761\n24762\n24763\n24764\n24765\n24766\n24767\n24768\n24769\n24770\n24771\n24772\n24773\n24774\n24775\n24776\n24777\n24778\n24779\n24780\n24781\n24782\n24783\n24784\n24785\n24786\n24787\n24788\n24789\n24790\n24791\n24792\n24793\n24794\n24795\n24796\n24797\n24798\n24799\n24800\n24801\n24802\n24803\n24804\n24805\n24806\n24807\n24808\n24809\n24810\n24811\n24812\n24813\n24814\n24815\n24816\n24817\n24818\n24819\n24820\n24821\n24822\n24823\n24824\n24825\n24826\n24827\n24828\n24829\n24830\n24831\n24832\n24833\n24834\n24835\n24836\n24837\n24838\n24839\n24840\n24841\n24842\n24843\n24844\n24845\n24846\n24847\n24848\n24849\n24850\n24851\n24852\n24853\n24854\n24855\n24856\n24857\n24858\n24859\n24860\n24861\n24862\n24863\n24864\n24865\n24866\n24867\n24868\n24869\n24870\n24871\n24872\n24873\n24874\n24875\n24876\n24877\n24878\n24879\n24880\n24881\n24882\n24883\n24884\n24885\n24886\n24887\n24888\n24889\n24890\n24891\n24892\n24893\n24894\n24895\n24896\n24897\n24898\n24899\n24900\n24901\n24902\n24903\n24904\n24905\n24906\n24907\n24908\n24909\n24910\n24911\n24912\n24913\n24914\n24915\n24916\n24917\n24918\n24919\n24920\n24921\n24922\n24923\n24924\n24925\n24926\n24927\n24928\n24929\n24930\n24931\n24932\n24933\n24934\n24935\n24936\n24937\n24938\n24939\n24940\n24941\n24942\n24943\n24944\n24945\n24946\n24947\n24
948\n24949\n24950\n24951\n24952\n24953\n24954\n24955\n24956\n24957\n24958\n24959\n24960\n24961\n24962\n24963\n24964\n24965\n24966\n24967\n24968\n24969\n24970\n24971\n24972\n24973\n24974\n24975\n24976\n24977\n24978\n24979\n24980\n24981\n24982\n24983\n24984\n24985\n24986\n24987\n24988\n24989\n24990\n24991\n24992\n24993\n24994\n24995\n24996\n24997\n24998\n24999\n25000\n25001\n25002\n25003\n25004\n25005\n25006\n25007\n25008\n25009\n25010\n25011\n25012\n25013\n25014\n25015\n25016\n25017\n25018\n25019\n25020\n25021\n25022\n25023\n25024\n25025\n25026\n25027\n25028\n25029\n25030\n25031\n25032\n25033\n25034\n25035\n25036\n25037\n25038\n25039\n25040\n25041\n25042\n25043\n25044\n25045\n25046\n25047\n25048\n25049\n25050\n25051\n25052\n25053\n25054\n25055\n25056\n25057\n25058\n25059\n25060\n25061\n25062\n25063\n25064\n25065\n25066\n25067\n25068\n25069\n25070\n25071\n25072\n25073\n25074\n25075\n25076\n25077\n25078\n25079\n25080\n25081\n25082\n25083\n25084\n25085\n25086\n25087\n25088\n25089\n25090\n25091\n25092\n25093\n25094\n25095\n25096\n25097\n25098\n25099\n25100\n25101\n25102\n25103\n25104\n25105\n25106\n25107\n25108\n25109\n25110\n25111\n25112\n25113\n25114\n25115\n25116\n25117\n25118\n25119\n25120\n25121\n25122\n25123\n25124\n25125\n25126\n25127\n25128\n25129\n25130\n25131\n25132\n25133\n25134\n25135\n25136\n25137\n25138\n25139\n25140\n25141\n25142\n25143\n25144\n25145\n25146\n25147\n25148\n25149\n25150\n25151\n25152\n25153\n25154\n25155\n25156\n25157\n25158\n25159\n25160\n25161\n25162\n25163\n25164\n25165\n25166\n25167\n25168\n25169\n25170\n25171\n25172\n25173\n25174\n25175\n25176\n25177\n25178\n25179\n25180\n25181\n25182\n25183\n25184\n25185\n25186\n25187\n25188\n25189\n25190\n25191\n25192\n25193\n25194\n25195\n25196\n25197\n25198\n25199\n25200\n25201\n25202\n25203\n25204\n25205\n25206\n25207\n25208\n25209\n25210\n25211\n25212\n25213\n25214\n25215\n25216\n25217\n25218\n25219\n25220\n25221\n25222\n25223\n25224\n25225\n25226\n25227\n25228\n25229\n25230\n25231\n25232\n25233\n25234\n25235\n25236\n25237\n25238\n25239\n25240\n25241\n25242\n25243\n25244\n25245\n25246\n25247\n25248\n25249\n25250\n25251\n25252\n25253\n25254\n25255\n25256\n25257\n25258\n25259\n25260\n25261\n25262\n25263\n25264\n25265\n25266\n25267\n25268\n25269\n25270\n25271\n25272\n25273\n25274\n25275\n25276\n25277\n25278\n25279\n25280\n25281\n25282\n25283\n25284\n25285\n25286\n25287\n25288\n25289\n25290\n25291\n25292\n25293\n25294\n25295\n25296\n25297\n25298\n25299\n25300\n25301\n25302\n25303\n25304\n25305\n25306\n25307\n25308\n25309\n25310\n25311\n25312\n25313\n25314\n25315\n25316\n25317\n25318\n25319\n25320\n25321\n25322\n25323\n25324\n25325\n25326\n25327\n25328\n25329\n25330\n25331\n25332\n25333\n25334\n25335\n25336\n25337\n25338\n25339\n25340\n25341\n25342\n25343\n25344\n25345\n25346\n25347\n25348\n25349\n25350\n25351\n25352\n25353\n25354\n25355\n25356\n25357\n25358\n25359\n25360\n25361\n25362\n25363\n25364\n25365\n25366\n25367\n25368\n25369\n25370\n25371\n25372\n25373\n25374\n25375\n25376\n25377\n25378\n25379\n25380\n25381\n25382\n25383\n25384\n25385\n25386\n25387\n25388\n25389\n25390\n25391\n25392\n25393\n25394\n25395\n25396\n25397\n25398\n25399\n25400\n25401\n25402\n25403\n25404\n25405\n25406\n25407\n25408\n25409\n25410\n25411\n25412\n25413\n25414\n25415\n25416\n25417\n25418\n25419\n25420\n25421\n25422\n25423\n25424\n25425\n25426\n25427\n25428\n25429\n25430\n25431\n25432\n25433\n25434\n25435\n25436\n25437\n25438\n25439\n25440\n25441\n25442\n25443\n25444\n25445\n25446\n25447\n25448\n25449\n25450\n25451\n25452\n25453\n25454\n25455\n2
5456\n25457\n25458\n[sequential integers 25459–29998 elided]\n29999' \ No newline at end of file diff --git a/voice_bridge/scipy/io/arff/tests/data/test11.arff b/voice_bridge/scipy/io/arff/tests/data/test11.arff new file mode 100644 index
0000000000000000000000000000000000000000..fadfaee884e3e91cd59f691afd954a6a6d4042da --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test11.arff @@ -0,0 +1,11 @@ +@RELATION test11 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class { class0, class1, class2, class3 } +@DATA +0.1, 0.2, 0.3, 0.4,class1 +-0.1, -0.2, -0.3, -0.4,class2 +1, 2, 3, 4,class3 diff --git a/voice_bridge/scipy/io/arff/tests/data/test2.arff b/voice_bridge/scipy/io/arff/tests/data/test2.arff new file mode 100644 index 0000000000000000000000000000000000000000..30f0dbf91b078ef670868d5e7321f956a6a7a506 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test2.arff @@ -0,0 +1,15 @@ +@RELATION test2 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 real +@ATTRIBUTE attr2 integer +@ATTRIBUTE attr3 Integer +@ATTRIBUTE attr4 Numeric +@ATTRIBUTE attr5 numeric +@ATTRIBUTE attr6 string +@ATTRIBUTE attr7 STRING +@ATTRIBUTE attr8 {bla} +@ATTRIBUTE attr9 {bla, bla} + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/voice_bridge/scipy/io/arff/tests/data/test3.arff b/voice_bridge/scipy/io/arff/tests/data/test3.arff new file mode 100644 index 0000000000000000000000000000000000000000..23da3b30967fcc95d70883f70be9ef6e39d577fa --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test3.arff @@ -0,0 +1,6 @@ +@RELATION test3 + +@ATTRIBUTE attr0 crap + +@DATA +0.1, 0.2, 0.3, 0.4,class1 diff --git a/voice_bridge/scipy/io/arff/tests/data/test4.arff b/voice_bridge/scipy/io/arff/tests/data/test4.arff new file mode 100644 index 0000000000000000000000000000000000000000..bf5f99ca89375fbd980185fd25711901f23ff844 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test4.arff @@ -0,0 +1,11 @@ +@RELATION test5 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} +@DATA +0.1, 0.2, 0.3, 0.4,class1 +-0.1, -0.2, -0.3, -0.4,class2 +1, 2, 3, 4,class3 diff --git a/voice_bridge/scipy/io/arff/tests/data/test5.arff b/voice_bridge/scipy/io/arff/tests/data/test5.arff new file mode 100644 index 0000000000000000000000000000000000000000..0075daf05e7792e80dcd565e791ce40e4dd49e85 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test5.arff @@ -0,0 +1,26 @@ +@RELATION test4 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {class0, class1, class2, class3} + +@DATA + +% lsdflkjhaksjdhf + +% lsdflkjhaksjdhf + +0.1, 0.2, 0.3, 0.4,class1 +% laksjdhf + +% lsdflkjhaksjdhf +-0.1, -0.2, -0.3, -0.4,class2 + +% lsdflkjhaksjdhf +% lsdflkjhaksjdhf + +% lsdflkjhaksjdhf + +1, 2, 3, 4,class3 diff --git a/voice_bridge/scipy/io/arff/tests/data/test6.arff b/voice_bridge/scipy/io/arff/tests/data/test6.arff new file mode 100644 index 0000000000000000000000000000000000000000..b63280b03aef8e0553a83fbf96692d280a3f86b7 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test6.arff @@ -0,0 +1,12 @@ +@RELATION test6 + +@ATTRIBUTE attr0 REAL +@ATTRIBUTE attr1 REAL +@ATTRIBUTE attr2 REAL +@ATTRIBUTE attr3 REAL +@ATTRIBUTE class {C} + +@DATA +0.1, 0.2, 0.3, 0.4,C +-0.1, -0.2, -0.3, -0.4,C +1, 2, 3, 4,C diff --git a/voice_bridge/scipy/io/arff/tests/data/test7.arff b/voice_bridge/scipy/io/arff/tests/data/test7.arff new file mode 100644 index 0000000000000000000000000000000000000000..38ef6c9a7a10afb10caa5913687ea3636ab1d38e --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test7.arff @@ -0,0 +1,15 @@ +@RELATION test7 + +@ATTRIBUTE attr_year DATE yyyy 
+@ATTRIBUTE attr_month DATE yyyy-MM +@ATTRIBUTE attr_date DATE yyyy-MM-dd +@ATTRIBUTE attr_datetime_local DATE "yyyy-MM-dd HH:mm" +@ATTRIBUTE attr_datetime_missing DATE "yyyy-MM-dd HH:mm" + +@DATA +1999,1999-01,1999-01-31,"1999-01-31 00:01",? +2004,2004-12,2004-12-01,"2004-12-01 23:59","2004-12-01 23:59" +1817,1817-04,1817-04-28,"1817-04-28 13:00",? +2100,2100-09,2100-09-10,"2100-09-10 12:00",? +2013,2013-11,2013-11-30,"2013-11-30 04:55","2013-11-30 04:55" +1631,1631-10,1631-10-15,"1631-10-15 20:04","1631-10-15 20:04" \ No newline at end of file diff --git a/voice_bridge/scipy/io/arff/tests/data/test8.arff b/voice_bridge/scipy/io/arff/tests/data/test8.arff new file mode 100644 index 0000000000000000000000000000000000000000..776deb4c9e7550eafdb26d16826f5651da37ef12 --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test8.arff @@ -0,0 +1,12 @@ +@RELATION test8 + +@ATTRIBUTE attr_datetime_utc DATE "yyyy-MM-dd HH:mm Z" +@ATTRIBUTE attr_datetime_full DATE "yy-MM-dd HH:mm:ss z" + +@DATA +"1999-01-31 00:01 UTC","99-01-31 00:01:08 +0430" +"2004-12-01 23:59 UTC","04-12-01 23:59:59 -0800" +"1817-04-28 13:00 UTC","17-04-28 13:00:33 +1000" +"2100-09-10 12:00 UTC","21-09-10 12:00:21 -0300" +"2013-11-30 04:55 UTC","13-11-30 04:55:48 -1100" +"1631-10-15 20:04 UTC","31-10-15 20:04:10 +0000" \ No newline at end of file diff --git a/voice_bridge/scipy/io/arff/tests/data/test9.arff b/voice_bridge/scipy/io/arff/tests/data/test9.arff new file mode 100644 index 0000000000000000000000000000000000000000..b3f97e32a3fd4909a3f9cbf8d5d2e8d250f8dbad --- /dev/null +++ b/voice_bridge/scipy/io/arff/tests/data/test9.arff @@ -0,0 +1,14 @@ +@RELATION test9 + +@ATTRIBUTE attr_date_number RELATIONAL + @ATTRIBUTE attr_date DATE "yyyy-MM-dd" + @ATTRIBUTE attr_number INTEGER +@END attr_date_number + +@DATA +"1999-01-31 1\n1935-11-27 10" +"2004-12-01 2\n1942-08-13 20" +"1817-04-28 3" +"2100-09-10 4\n1957-04-17 40\n1721-01-14 400" +"2013-11-30 5" +"1631-10-15 6" \ No newline at end of file diff --git a/voice_bridge/scipy/io/matlab/mio5_utils.pyd b/voice_bridge/scipy/io/matlab/mio5_utils.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d826ba461711fb9ba17b15820d0d8bb43978113e Binary files /dev/null and b/voice_bridge/scipy/io/matlab/mio5_utils.pyd differ diff --git a/voice_bridge/scipy/io/matlab/mio_utils.pyd b/voice_bridge/scipy/io/matlab/mio_utils.pyd new file mode 100644 index 0000000000000000000000000000000000000000..63448ee2d325c133575e0eb451d03d357908ac34 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/mio_utils.pyd differ diff --git a/voice_bridge/scipy/io/matlab/streams.pyd b/voice_bridge/scipy/io/matlab/streams.pyd new file mode 100644 index 0000000000000000000000000000000000000000..0c31ad90d11b8133c9c4bef07df1438ca7e1c174 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/streams.pyd differ diff --git a/voice_bridge/scipy/io/matlab/tests/afunc.m b/voice_bridge/scipy/io/matlab/tests/afunc.m new file mode 100644 index 0000000000000000000000000000000000000000..5cbf628f1a4abe398185f5ae5aab006f6e48fed5 --- /dev/null +++ b/voice_bridge/scipy/io/matlab/tests/afunc.m @@ -0,0 +1,4 @@ +function [a, b] = afunc(c, d) +% A function +a = c + 1; +b = d + 10; diff --git a/voice_bridge/scipy/io/matlab/tests/data/bad_miuint32.mat b/voice_bridge/scipy/io/matlab/tests/data/bad_miuint32.mat new file mode 100644 index 0000000000000000000000000000000000000000..c9ab357ec85972cf0014752a1e0ccb08ff284af9 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/bad_miuint32.mat differ diff 
--git a/voice_bridge/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat b/voice_bridge/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat new file mode 100644 index 0000000000000000000000000000000000000000..a17203fbb2a7628db644b953ac7723b866a2a0a4 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/bad_miutf8_array_name.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/big_endian.mat b/voice_bridge/scipy/io/matlab/tests/data/big_endian.mat new file mode 100644 index 0000000000000000000000000000000000000000..2a0c982c298fba9df96fd5a927a9c08ee12b09df Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/big_endian.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/broken_utf8.mat b/voice_bridge/scipy/io/matlab/tests/data/broken_utf8.mat new file mode 100644 index 0000000000000000000000000000000000000000..4f6323870368cd97a6294e108ffea9067cf5e69b Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/broken_utf8.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat b/voice_bridge/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat new file mode 100644 index 0000000000000000000000000000000000000000..c88cbb6f54b70d4e795de7cf43f7b46ff6d4d5ef Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/corrupted_zlib_checksum.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/corrupted_zlib_data.mat b/voice_bridge/scipy/io/matlab/tests/data/corrupted_zlib_data.mat new file mode 100644 index 0000000000000000000000000000000000000000..45a2ef4e39755ea1f41aab045f18a035af58ea07 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/corrupted_zlib_data.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/japanese_utf8.txt b/voice_bridge/scipy/io/matlab/tests/data/japanese_utf8.txt new file mode 100644 index 0000000000000000000000000000000000000000..1459b6b6ea635b17b5eb04c941e197f98cf04bf1 --- /dev/null +++ b/voice_bridge/scipy/io/matlab/tests/data/japanese_utf8.txt @@ -0,0 +1,5 @@ +Japanese: +すべての人間は、生まれながらにして自由であり、 +かつ、尊厳と権利と について平等である。 +人間は、理性と良心とを授けられており、 +互いに同胞の精神をもって行動しなければならない。 \ No newline at end of file diff --git a/voice_bridge/scipy/io/matlab/tests/data/little_endian.mat b/voice_bridge/scipy/io/matlab/tests/data/little_endian.mat new file mode 100644 index 0000000000000000000000000000000000000000..df6db666dcf2b98d66e04933bd4011f649dcbe30 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/little_endian.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/logical_sparse.mat b/voice_bridge/scipy/io/matlab/tests/data/logical_sparse.mat new file mode 100644 index 0000000000000000000000000000000000000000..a60ad5b605a9dc6b0d85eb0a0e3e655c4955dd34 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/logical_sparse.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/malformed1.mat b/voice_bridge/scipy/io/matlab/tests/data/malformed1.mat new file mode 100644 index 0000000000000000000000000000000000000000..54462e27d663770bc33ef73ed70baae65767719d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/malformed1.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/miuint32_for_miint32.mat b/voice_bridge/scipy/io/matlab/tests/data/miuint32_for_miint32.mat new file mode 100644 index
0000000000000000000000000000000000000000..fd2c4994578edbf31431902ecfcb601b11f60b0b Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/miuint32_for_miint32.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/miutf8_array_name.mat b/voice_bridge/scipy/io/matlab/tests/data/miutf8_array_name.mat new file mode 100644 index 0000000000000000000000000000000000000000..ccfdaa8adb7879ba852eab9ce55b602e11dad06d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/miutf8_array_name.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat b/voice_bridge/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat new file mode 100644 index 0000000000000000000000000000000000000000..35dcb715bca4cb7f4b0dca287648ef8ee797cd73 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/nasty_duplicate_fieldnames.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/one_by_zero_char.mat b/voice_bridge/scipy/io/matlab/tests/data/one_by_zero_char.mat new file mode 100644 index 0000000000000000000000000000000000000000..07e7dca456843004dcfd9023a800ea91d309814d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/one_by_zero_char.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/parabola.mat b/voice_bridge/scipy/io/matlab/tests/data/parabola.mat new file mode 100644 index 0000000000000000000000000000000000000000..66350532a7737c475a3ae6ef1b1d8406543d890e Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/parabola.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/single_empty_string.mat b/voice_bridge/scipy/io/matlab/tests/data/single_empty_string.mat new file mode 100644 index 0000000000000000000000000000000000000000..293f387719e8bdcacb075e0de5737894e5dafed3 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/single_empty_string.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/some_functions.mat b/voice_bridge/scipy/io/matlab/tests/data/some_functions.mat new file mode 100644 index 0000000000000000000000000000000000000000..cc818593b48dd8d29a40a827210b54373e5acf50 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/some_functions.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/sqr.mat b/voice_bridge/scipy/io/matlab/tests/data/sqr.mat new file mode 100644 index 0000000000000000000000000000000000000000..2436d87cc5dfb6d558b841c2367bfe2363bd1b3c Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/sqr.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..453712610bf46501d8dd3667ff72d8033f49d81c Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..e04d27d30378655ed14634330c7a8ddcd0b98c10 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat new file mode 100644 index 
0000000000000000000000000000000000000000..4c0303039826af6f6caa928e505cec10ebb3fa81 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..232a051c774105176c28c9718c2cd46f1a1ee1af Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/test3dmatrix_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testbool_8_WIN64.mat b/voice_bridge/scipy/io/matlab/tests/data/testbool_8_WIN64.mat new file mode 100644 index 0000000000000000000000000000000000000000..faa30b10bc61ea4889bd9e776c0a1a079e2c2a90 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testbool_8_WIN64.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..512f7d889420a016094a903585f27acaa50bc658 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcell_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a7633104c1e4f32fe30fd43f389d7559527c8211 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcell_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..2ac1da15873c5edac27758b6f91563d2b8aaace0 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcell_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..fc893f331c985cf17b7ce9b7b8c179eaf2103659 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcell_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..4198a4f2aeb8effcccf94a9c0114539f98124179 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..2c7826eeacdb456e5290cafba343703c7596d191 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b3b086cc31dce2de1e300a1d018b0bf5661b69f3 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat 
new file mode 100644 index 0000000000000000000000000000000000000000..316f8894c5ecc88468cfa0908c277f730e3163e8 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcellnest_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..36621b25c08f18e4545100c6eaec015123c3bf9f Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..32fcd2a93c91eff478a3ab3076e5c78e31f09bf1 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..f3ecd203376c17b09d97a24aceab824dae0f91c1 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..c0c083855f38e62e3a29460b745f198c9c79313d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6a187edb1828256362617d3fe24d26cf58e7ca3b Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testcomplex_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..5dbfcf17dd0e01dc0325dd009340291158906e8d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testdouble_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..8e36c0c8ce62d7559b60fde454a96e8eefcbcb92 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testdouble_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a003b6d866f77a25d3b8b236bc95e343221e3019 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testdouble_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..3106712e1099345b48dc4e4125d5e739c24b5341 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testdouble_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat 
b/voice_bridge/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..9097bb08712d5bfccf172b0366573f503136228d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testdouble_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..e7dec3b81abdae8769e0ae0329948548f4038adf Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_5.3_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a1c93483597f364443158132b31b86693891b02a Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..f29d4f9327aa906729234a38caa05ebfc50cfc30 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..8b244044cf3028df9a019a259d8fc533b80f7fb7 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testemptycell_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..adb6c28ee95d1cf8bf3bfeb72295d1a7848020f8 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testfunc_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6066c1e30f69b76afdb8d251ecefd8cd9e1acde5 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testhdf5_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..3698c8853b46d4a42194002523b57fddfb225908 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..164be1109d977cf7681b1ea00a5df80d5e8f8e71 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a8735e9a23558ce86a528ceafa8f3475b053e43b Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_6.5.1_GLNX86.mat differ diff --git 
a/voice_bridge/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b6fb05bb7564c863d5bb6c145fe8b06928d3805a Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..eb537ab1042b0f989d49711b1a36cc508946fe55 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmatrix_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..cc207ed9f32095f39b7690e2dc1e2dc0d55ee8e0 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testminus_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..c2f0ba2ae4c8a1750cace6eae0267e9736272fc0 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testminus_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..b4dbd152d6e9f3d289b3c4a9792729d2735a4c5c Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testminus_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..fadcd2366b1867239782f073291ff327c2af3001 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testminus_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..9ce65f91116f68332d1c16e21319e965541d0d73 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testminus_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..9c6ba793cf41bf36447ab7a1890447fe5e939614 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmulti_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..0c4729c56b6ab1e8945249a4d3144c79d8538e9e Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmulti_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6d3e068977edfe6407f29404f0a7d1737f7d3eba Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testmulti_7.4_GLNX86.mat differ diff 
--git a/voice_bridge/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..fc13642263a64874f6c2ac602be9cdcb9b788996 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testobject_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..f68323b0c8eb7fc999dead349ea3bd3a6da66bd4 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testobject_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..83dcad34249afa543bf66dae9b836276246aab4a Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testobject_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..59d243c4de4fbb3fa653753e40651a6d0a4f4967 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testobject_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..cdb4191c7d2eb0ac66d4f6add250e1f6a604d892 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testonechar_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..3b5a428501a53ae7308c7b6edc42f4881820664d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testonechar_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..8cef2dd7ea6df8aac26ed067a9427935b81c7ac7 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testonechar_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..5ba4810ac67756c17b0ef3163a496e913c0b5e57 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testonechar_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..8964765f7bd207bfab63b4d16569cb1c3763bda7 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testonechar_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..1dcd72e51a51abdcf48bd37f68b9927421c17cb0 Binary files /dev/null and 
b/voice_bridge/scipy/io/matlab/tests/data/testscalarcell_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsimplecell.mat b/voice_bridge/scipy/io/matlab/tests/data/testsimplecell.mat new file mode 100644 index 0000000000000000000000000000000000000000..2a98f48917f8f275e541eeac5ef1fe741c40bb0b Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsimplecell.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..55cbd3c1b3d65630beae47832ffbcc7a6fd43354 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparse_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..194ca4d7d4d4d22be5669041a25c3ca24ae6edcb Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparse_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..3e1e9a1ec916040e94c231f428725add10a2709c Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparse_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..55b510762ee9b0ac04776e38f6b4bb46b0d10021 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparse_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..bdb6ce66ce79b808f044124156db4b803dab155e Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparse_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..81c536d0b067b92cae1b7a2ee71824e2c5e730d9 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..520e1cedb3823b859666b1fa8872e073904fd4c6 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..969b7143dfff3bb817dbf70c54af8303c3b5822e Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat new file mode 100644 index 
0000000000000000000000000000000000000000..9117dce3092e3e6a39b67da9a7ad1dcfc3ded385 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..a8a615a320f9c8db068a9120c1ceb2e49bb0ea6d Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparsecomplex_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..15424266a3bd4aa1e7525a8fdc4945b51d2b5ad6 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/testsparsefloat_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..137561e1f636d7b08959e43e969a6984eb7a3b37 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststring_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..2ad75f2e17d8b3fda285490d52b426d1f27d0d95 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststring_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..6fd12d884d19df65f1534c13944e988e636166f1 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststring_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..ab93994f7befe7d1505c84c238d6409bcb3d438a Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststring_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..63059b84476749119f44ebefda795f85f6ab27d7 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststring_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..fa687ee988ce530bca87f46235667baa30ac038b Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_4.2c_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..11afb412056ad803f0d8ac1d9dcb188d42285fdf Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_6.1_SOL2.mat differ diff --git 
a/voice_bridge/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..75e07a0b55e008b070f41dabba7480a4e463b67a Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..7d76f63643737834053f80539188c9dad75ed0cb Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..954e39beb8156b460ca904ff66261d8f2fc338cb Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststringarray_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..da57365926afe1e8d7dd424a6fcd5b52bc3233ac Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..d1c97a7a2e1edf9683959ec36e899ef8e355073c Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..c7ca09594106a765e815a55e942019d17c181270 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..8716f7e3db67d1fd479f913d12286715029ed1a4 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructarr_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat new file mode 100644 index 0000000000000000000000000000000000000000..2c34c4d8c1477bc4859880a8d2f800073825dcd1 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_6.1_SOL2.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..c6dccc00289f61787b235f4299aa5a14ab4f6d07 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_6.5.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat new file mode 100644 index 
0000000000000000000000000000000000000000..0f6f5444b0c1e4bcd80dc0f63b28523d655b05d0 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_7.1_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat new file mode 100644 index 0000000000000000000000000000000000000000..faf9221b776eee67cd5d2971da5ba77732ef8016 Binary files /dev/null and b/voice_bridge/scipy/io/matlab/tests/data/teststructnest_7.4_GLNX86.mat differ diff --git a/voice_bridge/scipy/io/matlab/tests/gen_mat4files.m b/voice_bridge/scipy/io/matlab/tests/gen_mat4files.m new file mode 100644 index 0000000000000000000000000000000000000000..a67cc2057de6f10e16cd2aa3d16e4f21aa34edc7 --- /dev/null +++ b/voice_bridge/scipy/io/matlab/tests/gen_mat4files.m @@ -0,0 +1,50 @@ +% Generates mat files for loadmat unit tests +% Uses save_matfile.m function +% This is the version for matlab 4 + +% work out matlab version and file suffix for test files +global FILEPREFIX FILESUFFIX +sepchar = '/'; +if strcmp(computer, 'PCWIN'), sepchar = '\'; end +FILEPREFIX = [pwd sepchar 'data' sepchar]; +mlv = version; +FILESUFFIX = ['_' mlv '_' computer '.mat']; + +% basic double array +theta = 0:pi/4:2*pi; +save_matfile('testdouble', theta); + +% string +save_matfile('teststring', '"Do nine men interpret?" "Nine men," I nod.') + +% complex +save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); + +% asymmetric array to check indexing +a = zeros(3, 5); +a(:,1) = [1:3]'; +a(1,:) = 1:5; + +% 2D matrix +save_matfile('testmatrix', a); + +% minus number - tests signed int +save_matfile('testminus', -1); + +% single character +save_matfile('testonechar', 'r'); + +% string array +save_matfile('teststringarray', ['one '; 'two '; 'three']); + +% sparse array +save_matfile('testsparse', sparse(a)); + +% sparse complex array +b = sparse(a); +b(1,1) = b(1,1) + j; +save_matfile('testsparsecomplex', b); + +% Two variables in same file +save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') + diff --git a/voice_bridge/scipy/io/matlab/tests/gen_mat5files.m b/voice_bridge/scipy/io/matlab/tests/gen_mat5files.m new file mode 100644 index 0000000000000000000000000000000000000000..9351127d15cbfe0c426094ce585817eb5265caf6 --- /dev/null +++ b/voice_bridge/scipy/io/matlab/tests/gen_mat5files.m @@ -0,0 +1,100 @@ +% Generates mat files for loadmat unit tests +% This is the version for matlab 5 and higher +% Uses save_matfile.m function + +% work out matlab version and file suffix for test files +global FILEPREFIX FILESUFFIX +FILEPREFIX = [fullfile(pwd, 'data') filesep]; +temp = ver('MATLAB'); +mlv = temp.Version; +FILESUFFIX = ['_' mlv '_' computer '.mat']; + +% basic double array +theta = 0:pi/4:2*pi; +save_matfile('testdouble', theta); + +% string +save_matfile('teststring', '"Do nine men interpret?" 
"Nine men," I nod.') + +% complex +save_matfile('testcomplex', cos(theta) + 1j*sin(theta)); + +% asymmetric array to check indexing +a = zeros(3, 5); +a(:,1) = [1:3]'; +a(1,:) = 1:5; + +% 2D matrix +save_matfile('testmatrix', a); + +% minus number - tests signed int +save_matfile('testminus', -1); + +% single character +save_matfile('testonechar', 'r'); + +% string array +save_matfile('teststringarray', ['one '; 'two '; 'three']); + +% sparse array +save_matfile('testsparse', sparse(a)); + +% sparse complex array +b = sparse(a); +b(1,1) = b(1,1) + j; +save_matfile('testsparsecomplex', b); + +% Two variables in same file +save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') + + +% struct +save_matfile('teststruct', ... + struct('stringfield','Rats live on no evil star.',... + 'doublefield',[sqrt(2) exp(1) pi],... + 'complexfield',(1+1j)*[sqrt(2) exp(1) pi])); + +% cell +save_matfile('testcell', ... + {['This cell contains this string and 3 arrays of increasing' ... + ' length'], 1., 1.:2., 1.:3.}); + +% scalar cell +save_matfile('testscalarcell', {1}) + +% Empty cells in two cell matrices +save_matfile('testemptycell', {1, 2, [], [], 3}); + +% 3D matrix +save_matfile('test3dmatrix', reshape(1:24,[2 3 4])) + +% nested cell array +save_matfile('testcellnest', {1, {2, 3, {4, 5}}}); + +% nested struct +save_matfile('teststructnest', struct('one', 1, 'two', ... + struct('three', 'number 3'))); + +% array of struct +save_matfile('teststructarr', [struct('one', 1, 'two', 2) ... + struct('one', 'number 1', 'two', 'number 2')]); + +% matlab object +save_matfile('testobject', inline('x')) + +% array of matlab objects +%save_matfile('testobjarr', [inline('x') inline('x')]) + +% unicode test +if str2num(mlv) > 7 % function added 7.0.1 + fid = fopen([FILEPREFIX 'japanese_utf8.txt']); + from_japan = fread(fid, 'uint8')'; + fclose(fid); + save_matfile('testunicode', native2unicode(from_japan, 'utf-8')); +end + +% func +if str2num(mlv) > 7 % function pointers added recently + func = @afunc; + save_matfile('testfunc', func); +end \ No newline at end of file diff --git a/voice_bridge/scipy/io/matlab/tests/save_matfile.m b/voice_bridge/scipy/io/matlab/tests/save_matfile.m new file mode 100644 index 0000000000000000000000000000000000000000..a6ff677476aee74f0e6155377ee1aa88e74dceaf --- /dev/null +++ b/voice_bridge/scipy/io/matlab/tests/save_matfile.m @@ -0,0 +1,6 @@ +function save_matfile(test_name, v) +% saves variable passed in m with filename from prefix + +global FILEPREFIX FILESUFFIX +eval([test_name ' = v;']); +save([FILEPREFIX test_name FILESUFFIX], test_name) \ No newline at end of file diff --git a/voice_bridge/scipy/io/tests/data/Transparent Busy.ani b/voice_bridge/scipy/io/tests/data/Transparent Busy.ani new file mode 100644 index 0000000000000000000000000000000000000000..3be500032786398c3efdbd9f873f705b6c1636bd Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/Transparent Busy.ani differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_1d.sav b/voice_bridge/scipy/io/tests/data/array_float32_1d.sav new file mode 100644 index 0000000000000000000000000000000000000000..619a1259670a361ac76ffa86c481a813dbaec07a Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_1d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_2d.sav b/voice_bridge/scipy/io/tests/data/array_float32_2d.sav new file mode 100644 index 0000000000000000000000000000000000000000..804d8b1a8a90636c880e974b6f85bd385033306b Binary files /dev/null and 
b/voice_bridge/scipy/io/tests/data/array_float32_2d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_3d.sav b/voice_bridge/scipy/io/tests/data/array_float32_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..3fa56c450eaa916d9c91b492ba17e7e843df2d53 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_3d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_4d.sav b/voice_bridge/scipy/io/tests/data/array_float32_4d.sav new file mode 100644 index 0000000000000000000000000000000000000000..4bb951e274a399f091ff70b639d6e3b55ee1e122 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_4d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_5d.sav b/voice_bridge/scipy/io/tests/data/array_float32_5d.sav new file mode 100644 index 0000000000000000000000000000000000000000..2854dbc8b1e53f298ac3b135eac1f06e73940152 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_5d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_6d.sav b/voice_bridge/scipy/io/tests/data/array_float32_6d.sav new file mode 100644 index 0000000000000000000000000000000000000000..91588d348d5f89af354209840062202d5b28c1df Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_6d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_7d.sav b/voice_bridge/scipy/io/tests/data/array_float32_7d.sav new file mode 100644 index 0000000000000000000000000000000000000000..3e978fad540a8979435d4561de151573696affd8 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_7d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_8d.sav b/voice_bridge/scipy/io/tests/data/array_float32_8d.sav new file mode 100644 index 0000000000000000000000000000000000000000..f699fe2427dfe876283de0fcade2c2325a262061 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_8d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_1d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_1d.sav new file mode 100644 index 0000000000000000000000000000000000000000..8e3a402c60a515149811e2ca21628e97180c4956 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_1d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_2d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_2d.sav new file mode 100644 index 0000000000000000000000000000000000000000..dd3504f0ecfaed178ace02e1a8a84650111c3936 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_2d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_3d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..285da7f78ffbbf2155fd2e4e648f19a1d3a42ac3 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_3d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_4d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_4d.sav new file mode 100644 index 0000000000000000000000000000000000000000..d99fa48f0a43ec06c3101560f9cade829c8b1940 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_4d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_5d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_5d.sav new file mode 100644 index 
0000000000000000000000000000000000000000..de5e984e49f507ae550b1ae2fd54b799e742a195 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_5d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_6d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_6d.sav new file mode 100644 index 0000000000000000000000000000000000000000..bb76671a65be41fd2a426146c6c366f1e7fb07c3 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_6d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_7d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_7d.sav new file mode 100644 index 0000000000000000000000000000000000000000..995d23c6ed05b095442b6247b09191126f797f23 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_7d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/array_float32_pointer_8d.sav b/voice_bridge/scipy/io/tests/data/array_float32_pointer_8d.sav new file mode 100644 index 0000000000000000000000000000000000000000..4249ec62119e264d55a81d3faf9c87dcaed1c7c8 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/array_float32_pointer_8d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/example_1.nc b/voice_bridge/scipy/io/tests/data/example_1.nc new file mode 100644 index 0000000000000000000000000000000000000000..5775622d0ef85828b436dffcd21366f7538fc55c Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/example_1.nc differ diff --git a/voice_bridge/scipy/io/tests/data/example_2.nc b/voice_bridge/scipy/io/tests/data/example_2.nc new file mode 100644 index 0000000000000000000000000000000000000000..07db1cd986a4c3b9929c01c1f22bcc3f562b1c16 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/example_2.nc differ diff --git a/voice_bridge/scipy/io/tests/data/example_3_maskedvals.nc b/voice_bridge/scipy/io/tests/data/example_3_maskedvals.nc new file mode 100644 index 0000000000000000000000000000000000000000..57f8bf9da3bca295c15508963c77a870222af0bc Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/example_3_maskedvals.nc differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-3x3d-2i.dat b/voice_bridge/scipy/io/tests/data/fortran-3x3d-2i.dat new file mode 100644 index 0000000000000000000000000000000000000000..87731eb9d4b1f2ac827a212436fe6de175431e11 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-3x3d-2i.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-mixed.dat b/voice_bridge/scipy/io/tests/data/fortran-mixed.dat new file mode 100644 index 0000000000000000000000000000000000000000..a165a7a30424b20af9a3a0636c5e655239ea6fa5 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-mixed.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-sf8-11x1x10.dat b/voice_bridge/scipy/io/tests/data/fortran-sf8-11x1x10.dat new file mode 100644 index 0000000000000000000000000000000000000000..c3bb9dcbe50ef784ce3282b28e53f4c40beb48ce Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-sf8-11x1x10.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-sf8-15x10x22.dat b/voice_bridge/scipy/io/tests/data/fortran-sf8-15x10x22.dat new file mode 100644 index 0000000000000000000000000000000000000000..351801fd47a2e3e48d9b63034fbae28f8318c9f9 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-sf8-15x10x22.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x1.dat 
b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x1.dat new file mode 100644 index 0000000000000000000000000000000000000000..64bf92f74a457d2f4bc42798493db15cc3ab1008 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x1.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x5.dat b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..3d3f27f88eef4e02451d18204cdcfd51f96f6d15 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x5.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x7.dat b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x7.dat new file mode 100644 index 0000000000000000000000000000000000000000..0bd683096f18eadceb7168f811c75bf072baecfe Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x1x7.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-sf8-1x3x5.dat b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x3x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..25269ff9ea4f6dd3f8a9ca0c8ad27d399e4248f5 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-sf8-1x3x5.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-si4-11x1x10.dat b/voice_bridge/scipy/io/tests/data/fortran-si4-11x1x10.dat new file mode 100644 index 0000000000000000000000000000000000000000..9850de37cf86af622b759625c15e6b1a9477ce47 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-si4-11x1x10.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-si4-15x10x22.dat b/voice_bridge/scipy/io/tests/data/fortran-si4-15x10x22.dat new file mode 100644 index 0000000000000000000000000000000000000000..98c09c2dff6e1ef605e25ed1d00afe94597abddc Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-si4-15x10x22.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x1.dat b/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x1.dat new file mode 100644 index 0000000000000000000000000000000000000000..959098d2a9cdd6140758843e059d4ca529b14279 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x1.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x5.dat b/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..49c0ec1d18d9f08111fe2d2a269ed407da71b158 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x5.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x7.dat b/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x7.dat new file mode 100644 index 0000000000000000000000000000000000000000..bb936b8789920ce18281fa754a5c048b31e59ba8 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-si4-1x1x7.dat differ diff --git a/voice_bridge/scipy/io/tests/data/fortran-si4-1x3x5.dat b/voice_bridge/scipy/io/tests/data/fortran-si4-1x3x5.dat new file mode 100644 index 0000000000000000000000000000000000000000..cb3e9e4876249f42924a43232b74f05b91123815 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/fortran-si4-1x3x5.dat differ diff --git a/voice_bridge/scipy/io/tests/data/invalid_pointer.sav b/voice_bridge/scipy/io/tests/data/invalid_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..d53893c6c734e6c7771e08042c16874623dc6f0e Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/invalid_pointer.sav differ diff --git 
a/voice_bridge/scipy/io/tests/data/null_pointer.sav b/voice_bridge/scipy/io/tests/data/null_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..8cee5ebecc3bef248ed37c438e0731160b31a310 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/null_pointer.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_byte.sav b/voice_bridge/scipy/io/tests/data/scalar_byte.sav new file mode 100644 index 0000000000000000000000000000000000000000..e4027b3cf302b8610b87d9ef8b0aac39d5a40ef9 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_byte.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_byte_descr.sav b/voice_bridge/scipy/io/tests/data/scalar_byte_descr.sav new file mode 100644 index 0000000000000000000000000000000000000000..182e29bc57dc05154388553a71876820025bca8d Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_byte_descr.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_complex32.sav b/voice_bridge/scipy/io/tests/data/scalar_complex32.sav new file mode 100644 index 0000000000000000000000000000000000000000..593e8c6208ab0bf3aa869de89e213b8aa9f8c071 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_complex32.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_complex64.sav b/voice_bridge/scipy/io/tests/data/scalar_complex64.sav new file mode 100644 index 0000000000000000000000000000000000000000..edb19d388afbaff44e5f0883978e6a74e9755613 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_complex64.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_float32.sav b/voice_bridge/scipy/io/tests/data/scalar_float32.sav new file mode 100644 index 0000000000000000000000000000000000000000..be9e3877ea845da76d9466c14d70c4cce882368c Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_float32.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_float64.sav b/voice_bridge/scipy/io/tests/data/scalar_float64.sav new file mode 100644 index 0000000000000000000000000000000000000000..9680b2878c6008a27c8fc9ae6966903ff936cc4a Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_float64.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_heap_pointer.sav b/voice_bridge/scipy/io/tests/data/scalar_heap_pointer.sav new file mode 100644 index 0000000000000000000000000000000000000000..d02b1756ac043a4ba6119acb28ef34c40359a4dd Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_heap_pointer.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_int16.sav b/voice_bridge/scipy/io/tests/data/scalar_int16.sav new file mode 100644 index 0000000000000000000000000000000000000000..603525694cc307d47412717c4c2f85ddc960897b Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_int16.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_int32.sav b/voice_bridge/scipy/io/tests/data/scalar_int32.sav new file mode 100644 index 0000000000000000000000000000000000000000..40210b889402c0f27562296ab39ce1a714f0d0ef Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_int32.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_int64.sav b/voice_bridge/scipy/io/tests/data/scalar_int64.sav new file mode 100644 index 0000000000000000000000000000000000000000..c91cd0a561e011a2f18c86119e45392fbc0be825 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_int64.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_string.sav 
b/voice_bridge/scipy/io/tests/data/scalar_string.sav new file mode 100644 index 0000000000000000000000000000000000000000..ee6e69fe8461edfa580f682761118c8afe2add3a Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_string.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_uint16.sav b/voice_bridge/scipy/io/tests/data/scalar_uint16.sav new file mode 100644 index 0000000000000000000000000000000000000000..759c2e64fa034c6ddbdbe6181efae1e699a0c314 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_uint16.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_uint32.sav b/voice_bridge/scipy/io/tests/data/scalar_uint32.sav new file mode 100644 index 0000000000000000000000000000000000000000..74dec7b8933418d30d17c83d617443a73ceef0c6 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_uint32.sav differ diff --git a/voice_bridge/scipy/io/tests/data/scalar_uint64.sav b/voice_bridge/scipy/io/tests/data/scalar_uint64.sav new file mode 100644 index 0000000000000000000000000000000000000000..fc9da5796eab6ce9fb59488b836ba2f567de7b25 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/scalar_uint64.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_arrays.sav b/voice_bridge/scipy/io/tests/data/struct_arrays.sav new file mode 100644 index 0000000000000000000000000000000000000000..40c9cd330e0c731968d71dbbfeae9bd8c4a745a2 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_arrays.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_arrays_byte_idl80.sav b/voice_bridge/scipy/io/tests/data/struct_arrays_byte_idl80.sav new file mode 100644 index 0000000000000000000000000000000000000000..f1aa416f8e661893be282a490005536953d4b7af Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_arrays_byte_idl80.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_arrays_replicated.sav b/voice_bridge/scipy/io/tests/data/struct_arrays_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..6f01fbfd109e76c94b6e6e9bfd9eb388f39d99ee Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_arrays_replicated.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_arrays_replicated_3d.sav b/voice_bridge/scipy/io/tests/data/struct_arrays_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..bac9b207488eb9712ec27fb3567155f0dd773f34 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_arrays_replicated_3d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_inherit.sav b/voice_bridge/scipy/io/tests/data/struct_inherit.sav new file mode 100644 index 0000000000000000000000000000000000000000..8babd56306f09fa612f731ce593ae13c75f84f4c Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_inherit.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_pointer_arrays.sav b/voice_bridge/scipy/io/tests/data/struct_pointer_arrays.sav new file mode 100644 index 0000000000000000000000000000000000000000..a3c678162911426702a9a6e932761385a01f247e Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_pointer_arrays.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_pointer_arrays_replicated.sav b/voice_bridge/scipy/io/tests/data/struct_pointer_arrays_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..38b812261125e6aabef8618955b234f6c7b04955 Binary files /dev/null and 
b/voice_bridge/scipy/io/tests/data/struct_pointer_arrays_replicated.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav b/voice_bridge/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..db1c256c85a707f0a0d78c28241b78d1eddcab1e Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_pointer_arrays_replicated_3d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_pointers.sav b/voice_bridge/scipy/io/tests/data/struct_pointers.sav new file mode 100644 index 0000000000000000000000000000000000000000..acbb058a307090f6c9e2d8402c7badf6bb48144c Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_pointers.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_pointers_replicated.sav b/voice_bridge/scipy/io/tests/data/struct_pointers_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..d16f4655cc20318db2b0d629cd5ed6d7be01b518 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_pointers_replicated.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_pointers_replicated_3d.sav b/voice_bridge/scipy/io/tests/data/struct_pointers_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..732dd2cbfa9c7fd029bb59b4cfcb630cc1077f54 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_pointers_replicated_3d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_scalars.sav b/voice_bridge/scipy/io/tests/data/struct_scalars.sav new file mode 100644 index 0000000000000000000000000000000000000000..69d7eaf4ecf8747c21d07e14edcf65b4e394974c Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_scalars.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_scalars_replicated.sav b/voice_bridge/scipy/io/tests/data/struct_scalars_replicated.sav new file mode 100644 index 0000000000000000000000000000000000000000..2222391ae5b93ba34c1fdb982c02eb97d9658b58 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_scalars_replicated.sav differ diff --git a/voice_bridge/scipy/io/tests/data/struct_scalars_replicated_3d.sav b/voice_bridge/scipy/io/tests/data/struct_scalars_replicated_3d.sav new file mode 100644 index 0000000000000000000000000000000000000000..a35f1acfb4cb93ecb637310bbfa7fc1a2151d483 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/struct_scalars_replicated_3d.sav differ diff --git a/voice_bridge/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav b/voice_bridge/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav new file mode 100644 index 0000000000000000000000000000000000000000..056333e713fb53b71d762b3623232a63830fb5aa Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-be.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav b/voice_bridge/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav new file mode 100644 index 0000000000000000000000000000000000000000..57e6f17898dcd84aa0f446dab8e5d4435e9b11e2 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-44100Hz-2ch-32bit-float-le.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav b/voice_bridge/scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav new file mode 100644 index 0000000000000000000000000000000000000000..1825dfcf4c4689e7fa27e2d61120aff3ae158111 Binary files /dev/null and 
b/voice_bridge/scipy/io/tests/data/test-44100Hz-be-1ch-4bytes.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav new file mode 100644 index 0000000000000000000000000000000000000000..bb86f2f3c80bcaa0e0e1af8e1ef7e1cf47cf5ca7 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav new file mode 100644 index 0000000000000000000000000000000000000000..d1b7065caabe860f1bc1c3f0775cb16c3217f5dd Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav new file mode 100644 index 0000000000000000000000000000000000000000..7271fdd2e62c10bdc2e5c0d95efac9d9fc2131c0 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-incomplete-chunk.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav new file mode 100644 index 0000000000000000000000000000000000000000..8aae8e2c6aba3ce8ccceb9bea7efa5d23223c06b Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav b/voice_bridge/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav new file mode 100644 index 0000000000000000000000000000000000000000..31221b2ad7e4fe25cd8010fb1aadb4ddffd43c8f Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..db596cc521e4e7b1610a1171abc4a0f65cb10f45 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-be-3ch-5S-24bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav new file mode 100644 index 0000000000000000000000000000000000000000..13f131e399996bb28ce3df0e3f17d1dcd79df1c8 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-1ch-10S-20bit-extra.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav new file mode 100644 index 0000000000000000000000000000000000000000..c4fed62668e80a436f2e3d4268604ff0a2ebe91f Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-1ch-1byte-ulaw.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav new file mode 100644 index 0000000000000000000000000000000000000000..709008194a30544056b82971df02b275bd04c570 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-2ch-1byteu.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav 
b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav new file mode 100644 index 0000000000000000000000000000000000000000..8e79d54dee97945acbb35198f228c5cefdba80e5 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..9c4312bce7408f2a39577903527892e4ed1527ed Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..5c28ed81b1b49ccad1aab0d739ff11c901a8a95b Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..2d4eea22db22512462368a32047b1bba5f081d94 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..68437dad75a2691985bc532b740c11830dd06765 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-53bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..ef478def8a98686257753d485b9d838f97142305 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-3ch-5S-64bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..9c93e1323ed5554777541328408ad1d019c63f80 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav new file mode 100644 index 0000000000000000000000000000000000000000..b95bcdf33448f246828aafe7985e77db40befcbe Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/test-8000Hz-le-5ch-9S-5bit.wav differ diff --git a/voice_bridge/scipy/io/tests/data/various_compressed.sav b/voice_bridge/scipy/io/tests/data/various_compressed.sav new file mode 100644 index 0000000000000000000000000000000000000000..dcdb0b0d433939d6a240c86e5060214cd8875732 Binary files /dev/null and b/voice_bridge/scipy/io/tests/data/various_compressed.sav differ diff --git a/voice_bridge/scipy/linalg.pxd b/voice_bridge/scipy/linalg.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1f656b870387171cbd1ec810f9ed407bf91f87fb --- /dev/null +++ b/voice_bridge/scipy/linalg.pxd @@ -0,0 +1 @@ +from .linalg cimport cython_blas, cython_lapack diff --git a/voice_bridge/scipy/linalg/_decomp_update.pyd b/voice_bridge/scipy/linalg/_decomp_update.pyd new file mode 100644 index 
0000000000000000000000000000000000000000..7d041519fd75756cc6448c9b23951bd5016acfe7 Binary files /dev/null and b/voice_bridge/scipy/linalg/_decomp_update.pyd differ diff --git a/voice_bridge/scipy/linalg/_fblas.pyd b/voice_bridge/scipy/linalg/_fblas.pyd new file mode 100644 index 0000000000000000000000000000000000000000..bb5e575ef3c9bad9bcd17dc4fdd24d7a8066b64b Binary files /dev/null and b/voice_bridge/scipy/linalg/_fblas.pyd differ diff --git a/voice_bridge/scipy/linalg/_flapack.pyd b/voice_bridge/scipy/linalg/_flapack.pyd new file mode 100644 index 0000000000000000000000000000000000000000..fb8938a5e3951786b876aea652f145299984a92b --- /dev/null +++ b/voice_bridge/scipy/linalg/_flapack.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:717e0866b8805efed57e08b93fc4d889b721bc2ba15ed9fbbb892151b3535799 +size 1863680 diff --git a/voice_bridge/scipy/linalg/_flinalg.pyd b/voice_bridge/scipy/linalg/_flinalg.pyd new file mode 100644 index 0000000000000000000000000000000000000000..8a9c055305db29720883005a3c08d781fc41eed5 Binary files /dev/null and b/voice_bridge/scipy/linalg/_flinalg.pyd differ diff --git a/voice_bridge/scipy/linalg/_interpolative.pyd b/voice_bridge/scipy/linalg/_interpolative.pyd new file mode 100644 index 0000000000000000000000000000000000000000..255039a0373ab065d6dfd39890754f7f9eb03671 Binary files /dev/null and b/voice_bridge/scipy/linalg/_interpolative.pyd differ diff --git a/voice_bridge/scipy/linalg/_matfuncs_sqrtm_triu.pyd b/voice_bridge/scipy/linalg/_matfuncs_sqrtm_triu.pyd new file mode 100644 index 0000000000000000000000000000000000000000..c1eb209af1acb90a5951cac6ec931d60998a6cb2 Binary files /dev/null and b/voice_bridge/scipy/linalg/_matfuncs_sqrtm_triu.pyd differ diff --git a/voice_bridge/scipy/linalg/_solve_toeplitz.pyd b/voice_bridge/scipy/linalg/_solve_toeplitz.pyd new file mode 100644 index 0000000000000000000000000000000000000000..2af08e5211de909da190d188460b3b27e5ef4313 Binary files /dev/null and b/voice_bridge/scipy/linalg/_solve_toeplitz.pyd differ diff --git a/voice_bridge/scipy/linalg/cython_blas.pxd b/voice_bridge/scipy/linalg/cython_blas.pxd new file mode 100644 index 0000000000000000000000000000000000000000..5ddaa0b7b7214d96551695a4a1a60b7c7e9bab74 --- /dev/null +++ b/voice_bridge/scipy/linalg/cython_blas.pxd @@ -0,0 +1,314 @@ +# This file was generated by _generate_pyx.py. +# Do not edit this file directly. + +# Within scipy, these wrappers can be used via relative or absolute cimport. +# Examples: +# from ..linalg cimport cython_blas +# from scipy.linalg cimport cython_blas +# cimport scipy.linalg.cython_blas as cython_blas +# cimport ..linalg.cython_blas as cython_blas + +# Within SciPy, if BLAS functions are needed in C/C++/Fortran, +# these wrappers should not be used. +# The original libraries should be linked directly. 
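+# ---------------------------------------------------------------
+# Editorial usage sketch (not part of the generated file): from a
+# .pyx module, the wrappers declared below can be cimported and
+# called with Fortran-style pointer arguments. Minimal example,
+# assuming a standard Cython build with SciPy installed:
+#
+#     cimport scipy.linalg.cython_blas as cython_blas
+#
+#     def dot3():
+#         cdef int n = 3, inc = 1
+#         cdef double x[3]
+#         x[0] = 1.0; x[1] = 2.0; x[2] = 3.0
+#         # ddot(n, dx, incx, dy, incy): double-precision dot product
+#         return cython_blas.ddot(&n, x, &inc, x, &inc)
+# ---------------------------------------------------------------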
+ +ctypedef float s +ctypedef double d +ctypedef float complex c +ctypedef double complex z + +cdef void caxpy(int *n, c *ca, c *cx, int *incx, c *cy, int *incy) nogil + +cdef void ccopy(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef c cdotc(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef c cdotu(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef void cgbmv(char *trans, int *m, int *n, int *kl, int *ku, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cgemm(char *transa, char *transb, int *m, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void cgemv(char *trans, int *m, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cgerc(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil + +cdef void cgeru(int *m, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil + +cdef void chbmv(char *uplo, int *n, int *k, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void chemm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void chemv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cher(char *uplo, int *n, s *alpha, c *x, int *incx, c *a, int *lda) nogil + +cdef void cher2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *a, int *lda) nogil + +cdef void cher2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, s *beta, c *c, int *ldc) nogil + +cdef void cherk(char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c, int *ldc) nogil + +cdef void chpmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void chpr(char *uplo, int *n, s *alpha, c *x, int *incx, c *ap) nogil + +cdef void chpr2(char *uplo, int *n, c *alpha, c *x, int *incx, c *y, int *incy, c *ap) nogil + +cdef void crotg(c *ca, c *cb, s *c, c *s) nogil + +cdef void cscal(int *n, c *ca, c *cx, int *incx) nogil + +cdef void csrot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, s *s) nogil + +cdef void csscal(int *n, s *sa, c *cx, int *incx) nogil + +cdef void cswap(int *n, c *cx, int *incx, c *cy, int *incy) nogil + +cdef void csymm(char *side, char *uplo, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void csyr2k(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *b, int *ldb, c *beta, c *c, int *ldc) nogil + +cdef void csyrk(char *uplo, char *trans, int *n, int *k, c *alpha, c *a, int *lda, c *beta, c *c, int *ldc) nogil + +cdef void ctbmv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil + +cdef void ctbsv(char *uplo, char *trans, char *diag, int *n, int *k, c *a, int *lda, c *x, int *incx) nogil + +cdef void ctpmv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil + +cdef void ctpsv(char *uplo, char *trans, char *diag, int *n, c *ap, c *x, int *incx) nogil + +cdef void ctrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil + +cdef void ctrmv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil + +cdef void ctrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, c *alpha, c *a, int *lda, c *b, int *ldb) nogil + 
+cdef void ctrsv(char *uplo, char *trans, char *diag, int *n, c *a, int *lda, c *x, int *incx) nogil + +cdef d dasum(int *n, d *dx, int *incx) nogil + +cdef void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy) nogil + +cdef d dcabs1(z *z) nogil + +cdef void dcopy(int *n, d *dx, int *incx, d *dy, int *incy) nogil + +cdef d ddot(int *n, d *dx, int *incx, d *dy, int *incy) nogil + +cdef void dgbmv(char *trans, int *m, int *n, int *kl, int *ku, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dgemm(char *transa, char *transb, int *m, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil + +cdef void dgemv(char *trans, int *m, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dger(int *m, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil + +cdef d dnrm2(int *n, d *x, int *incx) nogil + +cdef void drot(int *n, d *dx, int *incx, d *dy, int *incy, d *c, d *s) nogil + +cdef void drotg(d *da, d *db, d *c, d *s) nogil + +cdef void drotm(int *n, d *dx, int *incx, d *dy, int *incy, d *dparam) nogil + +cdef void drotmg(d *dd1, d *dd2, d *dx1, d *dy1, d *dparam) nogil + +cdef void dsbmv(char *uplo, int *n, int *k, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dscal(int *n, d *da, d *dx, int *incx) nogil + +cdef d dsdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef void dspmv(char *uplo, int *n, d *alpha, d *ap, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dspr(char *uplo, int *n, d *alpha, d *x, int *incx, d *ap) nogil + +cdef void dspr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *ap) nogil + +cdef void dswap(int *n, d *dx, int *incx, d *dy, int *incy) nogil + +cdef void dsymm(char *side, char *uplo, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil + +cdef void dsymv(char *uplo, int *n, d *alpha, d *a, int *lda, d *x, int *incx, d *beta, d *y, int *incy) nogil + +cdef void dsyr(char *uplo, int *n, d *alpha, d *x, int *incx, d *a, int *lda) nogil + +cdef void dsyr2(char *uplo, int *n, d *alpha, d *x, int *incx, d *y, int *incy, d *a, int *lda) nogil + +cdef void dsyr2k(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *b, int *ldb, d *beta, d *c, int *ldc) nogil + +cdef void dsyrk(char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c, int *ldc) nogil + +cdef void dtbmv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil + +cdef void dtbsv(char *uplo, char *trans, char *diag, int *n, int *k, d *a, int *lda, d *x, int *incx) nogil + +cdef void dtpmv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil + +cdef void dtpsv(char *uplo, char *trans, char *diag, int *n, d *ap, d *x, int *incx) nogil + +cdef void dtrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil + +cdef void dtrmv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil + +cdef void dtrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, d *alpha, d *a, int *lda, d *b, int *ldb) nogil + +cdef void dtrsv(char *uplo, char *trans, char *diag, int *n, d *a, int *lda, d *x, int *incx) nogil + +cdef d dzasum(int *n, z *zx, int *incx) nogil + +cdef d dznrm2(int *n, z *x, int *incx) nogil + +cdef int icamax(int *n, c *cx, int *incx) nogil + +cdef int 
idamax(int *n, d *dx, int *incx) nogil + +cdef int isamax(int *n, s *sx, int *incx) nogil + +cdef int izamax(int *n, z *zx, int *incx) nogil + +cdef bint lsame(char *ca, char *cb) nogil + +cdef s sasum(int *n, s *sx, int *incx) nogil + +cdef void saxpy(int *n, s *sa, s *sx, int *incx, s *sy, int *incy) nogil + +cdef s scasum(int *n, c *cx, int *incx) nogil + +cdef s scnrm2(int *n, c *x, int *incx) nogil + +cdef void scopy(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef s sdot(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef s sdsdot(int *n, s *sb, s *sx, int *incx, s *sy, int *incy) nogil + +cdef void sgbmv(char *trans, int *m, int *n, int *kl, int *ku, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sgemm(char *transa, char *transb, int *m, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil + +cdef void sgemv(char *trans, int *m, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sger(int *m, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil + +cdef s snrm2(int *n, s *x, int *incx) nogil + +cdef void srot(int *n, s *sx, int *incx, s *sy, int *incy, s *c, s *s) nogil + +cdef void srotg(s *sa, s *sb, s *c, s *s) nogil + +cdef void srotm(int *n, s *sx, int *incx, s *sy, int *incy, s *sparam) nogil + +cdef void srotmg(s *sd1, s *sd2, s *sx1, s *sy1, s *sparam) nogil + +cdef void ssbmv(char *uplo, int *n, int *k, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sscal(int *n, s *sa, s *sx, int *incx) nogil + +cdef void sspmv(char *uplo, int *n, s *alpha, s *ap, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void sspr(char *uplo, int *n, s *alpha, s *x, int *incx, s *ap) nogil + +cdef void sspr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *ap) nogil + +cdef void sswap(int *n, s *sx, int *incx, s *sy, int *incy) nogil + +cdef void ssymm(char *side, char *uplo, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil + +cdef void ssymv(char *uplo, int *n, s *alpha, s *a, int *lda, s *x, int *incx, s *beta, s *y, int *incy) nogil + +cdef void ssyr(char *uplo, int *n, s *alpha, s *x, int *incx, s *a, int *lda) nogil + +cdef void ssyr2(char *uplo, int *n, s *alpha, s *x, int *incx, s *y, int *incy, s *a, int *lda) nogil + +cdef void ssyr2k(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *b, int *ldb, s *beta, s *c, int *ldc) nogil + +cdef void ssyrk(char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c, int *ldc) nogil + +cdef void stbmv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil + +cdef void stbsv(char *uplo, char *trans, char *diag, int *n, int *k, s *a, int *lda, s *x, int *incx) nogil + +cdef void stpmv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil + +cdef void stpsv(char *uplo, char *trans, char *diag, int *n, s *ap, s *x, int *incx) nogil + +cdef void strmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil + +cdef void strmv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil + +cdef void strsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, s *alpha, s *a, int *lda, s *b, int *ldb) nogil + +cdef void strsv(char *uplo, char *trans, char *diag, int *n, s *a, int *lda, s *x, int *incx) nogil + 
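+# ---------------------------------------------------------------
+# Editorial note (not part of the generated file): these routines
+# assume Fortran (column-major) layout. For C-contiguous arrays a
+# common idiom computes C = A @ B (A m-by-k, B k-by-n) as
+# C^T = B^T A^T, with no explicit transposes. Sketch for a .pyx,
+# where A, B, C are C-contiguous double[:, ::1] memoryviews and
+# m, n, k are cdef ints holding the shapes:
+#
+#     cdef char tn = b'n'
+#     cdef double alpha = 1.0, beta = 0.0
+#     cython_blas.dgemm(&tn, &tn, &n, &m, &k, &alpha,
+#                       &B[0, 0], &n, &A[0, 0], &k,
+#                       &beta, &C[0, 0], &n)
+# ---------------------------------------------------------------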
+cdef void zaxpy(int *n, z *za, z *zx, int *incx, z *zy, int *incy) nogil + +cdef void zcopy(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef z zdotc(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef z zdotu(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef void zdrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, d *s) nogil + +cdef void zdscal(int *n, d *da, z *zx, int *incx) nogil + +cdef void zgbmv(char *trans, int *m, int *n, int *kl, int *ku, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zgemm(char *transa, char *transb, int *m, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zgemv(char *trans, int *m, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zgerc(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil + +cdef void zgeru(int *m, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil + +cdef void zhbmv(char *uplo, int *n, int *k, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zhemm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zhemv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zher(char *uplo, int *n, d *alpha, z *x, int *incx, z *a, int *lda) nogil + +cdef void zher2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *a, int *lda) nogil + +cdef void zher2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, d *beta, z *c, int *ldc) nogil + +cdef void zherk(char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c, int *ldc) nogil + +cdef void zhpmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zhpr(char *uplo, int *n, d *alpha, z *x, int *incx, z *ap) nogil + +cdef void zhpr2(char *uplo, int *n, z *alpha, z *x, int *incx, z *y, int *incy, z *ap) nogil + +cdef void zrotg(z *ca, z *cb, d *c, z *s) nogil + +cdef void zscal(int *n, z *za, z *zx, int *incx) nogil + +cdef void zswap(int *n, z *zx, int *incx, z *zy, int *incy) nogil + +cdef void zsymm(char *side, char *uplo, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zsyr2k(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *b, int *ldb, z *beta, z *c, int *ldc) nogil + +cdef void zsyrk(char *uplo, char *trans, int *n, int *k, z *alpha, z *a, int *lda, z *beta, z *c, int *ldc) nogil + +cdef void ztbmv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil + +cdef void ztbsv(char *uplo, char *trans, char *diag, int *n, int *k, z *a, int *lda, z *x, int *incx) nogil + +cdef void ztpmv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil + +cdef void ztpsv(char *uplo, char *trans, char *diag, int *n, z *ap, z *x, int *incx) nogil + +cdef void ztrmm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil + +cdef void ztrmv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) nogil + +cdef void ztrsm(char *side, char *uplo, char *transa, char *diag, int *m, int *n, z *alpha, z *a, int *lda, z *b, int *ldb) nogil + +cdef void ztrsv(char *uplo, char *trans, char *diag, int *n, z *a, int *lda, z *x, int *incx) 
nogil diff --git a/voice_bridge/scipy/linalg/cython_blas.pyd b/voice_bridge/scipy/linalg/cython_blas.pyd new file mode 100644 index 0000000000000000000000000000000000000000..a4872f93c356ad6a8296666572859870ce7d13b3 Binary files /dev/null and b/voice_bridge/scipy/linalg/cython_blas.pyd differ diff --git a/voice_bridge/scipy/linalg/cython_lapack.pxd b/voice_bridge/scipy/linalg/cython_lapack.pxd new file mode 100644 index 0000000000000000000000000000000000000000..7c36189dcea014ff1f0fd92bccdc2b0b25062e98 --- /dev/null +++ b/voice_bridge/scipy/linalg/cython_lapack.pxd @@ -0,0 +1,3021 @@ +# This file was generated by _generate_pyx.py. +# Do not edit this file directly. + +# Within SciPy, these wrappers can be used via relative or absolute cimport. +# Examples: +# from ..linalg cimport cython_lapack +# from scipy.linalg cimport cython_lapack +# cimport scipy.linalg.cython_lapack as cython_lapack +# cimport ..linalg.cython_lapack as cython_lapack + +# Within SciPy, if LAPACK functions are needed in C/C++/Fortran, +# these wrappers should not be used. +# The original libraries should be linked directly. + +ctypedef float s +ctypedef double d +ctypedef float complex c +ctypedef double complex z + +# Function pointer type declarations for +# gees and gges families of functions. +ctypedef bint cselect1(c*) +ctypedef bint cselect2(c*, c*) +ctypedef bint dselect2(d*, d*) +ctypedef bint dselect3(d*, d*, d*) +ctypedef bint sselect2(s*, s*) +ctypedef bint sselect3(s*, s*, s*) +ctypedef bint zselect1(z*) +ctypedef bint zselect2(z*, z*) + +cdef void cbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *rwork, int *lrwork, int *info) nogil + +cdef void cbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, c *vt, int *ldvt, c *u, int *ldu, c *c, int *ldc, s *rwork, int *info) nogil + +cdef void cgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *pt, int *ldpt, c *c, int *ldc, c *work, s *rwork, int *info) nogil + +cdef void cgbcon(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cgbequ(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgbequb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgbsv(int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgbtf2(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void cgbtrf(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void cgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, c *ab, int *ldab, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgebak(char 
*job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, c *v, int *ldv, int *info) nogil + +cdef void cgebal(char *job, int *n, c *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil + +cdef void cgebd2(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *info) nogil + +cdef void cgebrd(int *m, int *n, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *work, int *lwork, int *info) nogil + +cdef void cgecon(char *norm, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cgeequ(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgeequb(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil + +cdef void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil + +cdef void cgeev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgehd2(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgehrd(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgelq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgelqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgels(char *trans, int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *work, int *lwork, int *info) nogil + +cdef void cgelsd(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil + +cdef void cgelss(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, s *s, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgelsy(int *m, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *jpvt, s *rcond, int *rank, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *info) nogil + +cdef void cgeql2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgeqlf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgeqp3(int *m, int *n, c *a, int *lda, int *jpvt, c *tau, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgeqr2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgeqr2p(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgeqrf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgeqrfp(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgeqrt(int *m, int *n, int *nb, c *a, int *lda, c *t, int *ldt, c *work, int *info) nogil + +cdef void cgeqrt2(int *m, int *n, c *a, int *lda, 
c *t, int *ldt, int *info) nogil + +cdef void cgeqrt3(int *m, int *n, c *a, int *lda, c *t, int *ldt, int *info) nogil + +cdef void cgerfs(char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgerq2(int *m, int *n, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cgerqf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cgesc2(int *n, c *a, int *lda, c *rhs, int *ipiv, int *jpiv, s *scale) nogil + +cdef void cgesdd(char *jobz, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *iwork, int *info) nogil + +cdef void cgesv(int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgesvd(char *jobu, char *jobvt, int *m, int *n, c *a, int *lda, s *s, c *u, int *ldu, c *vt, int *ldvt, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cgesvx(char *fact, char *trans, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgetc2(int *n, c *a, int *lda, int *ipiv, int *jpiv, int *info) nogil + +cdef void cgetf2(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void cgetrf(int *m, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void cgetri(int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void cgetrs(char *trans, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, c *v, int *ldv, int *info) nogil + +cdef void cggbal(char *job, int *n, c *a, int *lda, c *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil + +cdef void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info) nogil + +cdef void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void cggev(char *jobvl, char *jobvr, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *vl, int *ldvl, c *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, bint *bwork, int *info) nogil + +cdef void cggglm(int *n, int *m, int *p, c *a, int *lda, c *b, int *ldb, c *d, c *x, c *y, c *work, int *lwork, int *info) nogil + +cdef void cgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *info) nogil + +cdef void cgglse(int *m, int *n, int *p, c *a, int *lda, c *b, int *ldb, c *c, c *d, c *x, c *work, int *lwork, int *info) nogil + +cdef void cggqrf(int *n, int *m, int *p, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int 
*lwork, int *info) nogil + +cdef void cggrqf(int *m, int *p, int *n, c *a, int *lda, c *taua, c *b, int *ldb, c *taub, c *work, int *lwork, int *info) nogil + +cdef void cgtcon(char *norm, int *n, c *dl, c *d, c *du, c *du2, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void cgtrfs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgtsv(int *n, int *nrhs, c *dl, c *d, c *du, c *b, int *ldb, int *info) nogil + +cdef void cgtsvx(char *fact, char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *dlf, c *df, c *duf, c *du2, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cgttrf(int *n, c *dl, c *d, c *du, c *du2, int *ipiv, int *info) nogil + +cdef void cgttrs(char *trans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cgtts2(int *itrans, int *n, int *nrhs, c *dl, c *d, c *du, c *du2, int *ipiv, c *b, int *ldb) nogil + +cdef void chbev(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil + +cdef void chbevd(char *jobz, char *uplo, int *n, int *kd, c *ab, int *ldab, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chbevx(char *jobz, char *range, char *uplo, int *n, int *kd, c *ab, int *ldab, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chbgst(char *vect, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *x, int *ldx, c *work, s *rwork, int *info) nogil + +cdef void chbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil + +cdef void chbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, c *ab, int *ldab, c *bb, int *ldbb, c *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chbtrd(char *vect, char *uplo, int *n, int *kd, c *ab, int *ldab, s *d, s *e, c *q, int *ldq, c *work, int *info) nogil + +cdef void checon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void cheequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil + +cdef void cheev(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cheevd(char *jobz, char *uplo, int *n, c *a, int *lda, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void cheevr(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void cheevx(char *jobz, char *range, char *uplo, int *n, c *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s 
*rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chegs2(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void chegst(int *itype, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void chegv(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void chegvd(int *itype, char *jobz, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *w, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chegvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *a, int *lda, c *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void cherfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void chesv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil + +cdef void chesvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void cheswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil + +cdef void chetd2(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, int *info) nogil + +cdef void chetf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void chetrd(char *uplo, int *n, c *a, int *lda, s *d, s *e, c *tau, c *work, int *lwork, int *info) nogil + +cdef void chetrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void chetri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil + +cdef void chetri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void chetri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil + +cdef void chetrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void chetrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil + +cdef void chfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, c *a, int *lda, s *beta, c *c) nogil + +cdef void chgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *t, int *ldt, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, c *work, int *lwork, s *rwork, int *info) nogil + +cdef char chla_transtype(int *trans) nogil + +cdef void chpcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void chpev(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, s *rwork, int *info) nogil + +cdef void chpevd(char *jobz, char *uplo, int *n, c *ap, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chpevx(char *jobz, char *range, char *uplo, int *n, c *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chpgst(int *itype, char *uplo, int *n, c *ap, c *bp, int *info) nogil + +cdef void chpgv(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, s 
*rwork, int *info) nogil + +cdef void chpgvd(int *itype, char *jobz, char *uplo, int *n, c *ap, c *bp, s *w, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void chpgvx(int *itype, char *jobz, char *range, char *uplo, int *n, c *ap, c *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, c *work, s *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void chprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void chpsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void chpsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void chptrd(char *uplo, int *n, c *ap, s *d, s *e, c *tau, int *info) nogil + +cdef void chptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil + +cdef void chptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil + +cdef void chptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void chsein(char *side, char *eigsrc, char *initv, bint *select, int *n, c *h, int *ldh, c *w, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *ifaill, int *ifailr, int *info) nogil + +cdef void chseqr(char *job, char *compz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, c *z, int *ldz, c *work, int *lwork, int *info) nogil + +cdef void clabrd(int *m, int *n, int *nb, c *a, int *lda, s *d, s *e, c *tauq, c *taup, c *x, int *ldx, c *y, int *ldy) nogil + +cdef void clacgv(int *n, c *x, int *incx) nogil + +cdef void clacn2(int *n, c *v, c *x, s *est, int *kase, int *isave) nogil + +cdef void clacon(int *n, c *v, c *x, s *est, int *kase) nogil + +cdef void clacp2(char *uplo, int *m, int *n, s *a, int *lda, c *b, int *ldb) nogil + +cdef void clacpy(char *uplo, int *m, int *n, c *a, int *lda, c *b, int *ldb) nogil + +cdef void clacrm(int *m, int *n, c *a, int *lda, s *b, int *ldb, c *c, int *ldc, s *rwork) nogil + +cdef void clacrt(int *n, c *cx, int *incx, c *cy, int *incy, c *c, c *s) nogil + +cdef c cladiv(c *x, c *y) nogil + +cdef void claed0(int *qsiz, int *n, s *d, s *e, c *q, int *ldq, c *qstore, int *ldqs, s *rwork, int *iwork, int *info) nogil + +cdef void claed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, c *q, int *ldq, s *rho, int *indxq, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, c *work, s *rwork, int *iwork, int *info) nogil + +cdef void claed8(int *k, int *n, int *qsiz, c *q, int *ldq, s *d, s *rho, int *cutpnt, s *z, s *dlamda, c *q2, int *ldq2, s *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, s *givnum, int *info) nogil + +cdef void claein(bint *rightv, bint *noinit, int *n, c *h, int *ldh, c *w, c *v, c *b, int *ldb, s *rwork, s *eps3, s *smlnum, int *info) nogil + +cdef void claesy(c *a, c *b, c *c, c *rt1, c *rt2, c *evscal, c *cs1, c *sn1) nogil + +cdef void claev2(c *a, c *b, c *c, s *rt1, s *rt2, s *cs1, c *sn1) nogil + +cdef void clag2z(int *m, int *n, c *sa, int *ldsa, z *a, int *lda, int *info) nogil + +cdef void clags2(bint *upper, s *a1, c *a2, s *a3, s *b1, c *b2, s *b3, s *csu, c *snu, s *csv, c *snv, s *csq, c *snq) nogil + +cdef void clagtm(char *trans, int *n, int *nrhs, s *alpha, c *dl, c *d, c *du, c *x, int *ldx, s 
*beta, c *b, int *ldb) nogil + +cdef void clahef(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil + +cdef void clahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, int *info) nogil + +cdef void clahr2(int *n, int *k, int *nb, c *a, int *lda, c *tau, c *t, int *ldt, c *y, int *ldy) nogil + +cdef void claic1(int *job, int *j, c *x, s *sest, c *w, c *gamma, s *sestpr, c *s, c *c) nogil + +cdef void clals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *rwork, int *info) nogil + +cdef void clalsa(int *icompq, int *smlsiz, int *n, int *nrhs, c *b, int *ldb, c *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *rwork, int *iwork, int *info) nogil + +cdef void clalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, c *b, int *ldb, s *rcond, int *rank, c *work, s *rwork, int *iwork, int *info) nogil + +cdef s clangb(char *norm, int *n, int *kl, int *ku, c *ab, int *ldab, s *work) nogil + +cdef s clange(char *norm, int *m, int *n, c *a, int *lda, s *work) nogil + +cdef s clangt(char *norm, int *n, c *dl, c *d, c *du) nogil + +cdef s clanhb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil + +cdef s clanhe(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil + +cdef s clanhf(char *norm, char *transr, char *uplo, int *n, c *a, s *work) nogil + +cdef s clanhp(char *norm, char *uplo, int *n, c *ap, s *work) nogil + +cdef s clanhs(char *norm, int *n, c *a, int *lda, s *work) nogil + +cdef s clanht(char *norm, int *n, s *d, c *e) nogil + +cdef s clansb(char *norm, char *uplo, int *n, int *k, c *ab, int *ldab, s *work) nogil + +cdef s clansp(char *norm, char *uplo, int *n, c *ap, s *work) nogil + +cdef s clansy(char *norm, char *uplo, int *n, c *a, int *lda, s *work) nogil + +cdef s clantb(char *norm, char *uplo, char *diag, int *n, int *k, c *ab, int *ldab, s *work) nogil + +cdef s clantp(char *norm, char *uplo, char *diag, int *n, c *ap, s *work) nogil + +cdef s clantr(char *norm, char *uplo, char *diag, int *m, int *n, c *a, int *lda, s *work) nogil + +cdef void clapll(int *n, c *x, int *incx, c *y, int *incy, s *ssmin) nogil + +cdef void clapmr(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil + +cdef void clapmt(bint *forwrd, int *m, int *n, c *x, int *ldx, int *k) nogil + +cdef void claqgb(int *m, int *n, int *kl, int *ku, c *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void claqge(int *m, int *n, c *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void claqhb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqhe(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqhp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqp2(int *m, int *n, int *offset, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *work) nogil + +cdef void claqps(int *m, int *n, int *offset, int *nb, int *kb, c *a, int *lda, int *jpvt, c *tau, s *vn1, s *vn2, c *auxv, c *f, int *ldf) nogil + +cdef void claqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int 
*iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil + +cdef void claqr1(int *n, c *h, int *ldh, c *s1, c *s2, c *v) nogil + +cdef void claqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil + +cdef void claqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, int *ns, int *nd, c *sh, c *v, int *ldv, int *nh, c *t, int *ldt, int *nv, c *wv, int *ldwv, c *work, int *lwork) nogil + +cdef void claqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, c *h, int *ldh, c *w, int *iloz, int *ihiz, c *z, int *ldz, c *work, int *lwork, int *info) nogil + +cdef void claqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, c *s, c *h, int *ldh, int *iloz, int *ihiz, c *z, int *ldz, c *v, int *ldv, c *u, int *ldu, int *nv, c *wv, int *ldwv, int *nh, c *wh, int *ldwh) nogil + +cdef void claqsb(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqsp(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, char *equed) nogil + +cdef void claqsy(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil + +cdef void clar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, c *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil + +cdef void clar2v(int *n, c *x, c *y, c *z, int *incx, s *c, c *s, int *incc) nogil + +cdef void clarcm(int *m, int *n, s *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *rwork) nogil + +cdef void clarf(char *side, int *m, int *n, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil + +cdef void clarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil + +cdef void clarfg(int *n, c *alpha, c *x, int *incx, c *tau) nogil + +cdef void clarfgp(int *n, c *alpha, c *x, int *incx, c *tau) nogil + +cdef void clarft(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil + +cdef void clarfx(char *side, int *m, int *n, c *v, c *tau, c *c, int *ldc, c *work) nogil + +cdef void clargv(int *n, c *x, int *incx, c *y, int *incy, s *c, int *incc) nogil + +cdef void clarnv(int *idist, int *iseed, int *n, c *x) nogil + +cdef void clarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, c *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil + +cdef void clartg(c *f, c *g, s *cs, c *sn, c *r) nogil + +cdef void clartv(int *n, c *x, int *incx, c *y, int *incy, s *c, c *s, int *incc) nogil + +cdef void clarz(char *side, int *m, int *n, int *l, c *v, int *incv, c *tau, c *c, int *ldc, c *work) nogil + +cdef void clarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *c, int *ldc, c *work, int *ldwork) nogil + +cdef void clarzt(char *direct, char *storev, int *n, int *k, c *v, int *ldv, c *tau, c *t, int *ldt) nogil + +cdef void clascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, c *a, int *lda, int *info) nogil + +cdef void claset(char *uplo, int *m, int *n, c 
*alpha, c *beta, c *a, int *lda) nogil + +cdef void clasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, c *a, int *lda) nogil + +cdef void classq(int *n, c *x, int *incx, s *scale, s *sumsq) nogil + +cdef void claswp(int *n, c *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil + +cdef void clasyf(char *uplo, int *n, int *nb, int *kb, c *a, int *lda, int *ipiv, c *w, int *ldw, int *info) nogil + +cdef void clatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, c *ab, int *ldab, c *x, s *scale, s *cnorm, int *info) nogil + +cdef void clatdf(int *ijob, int *n, c *z, int *ldz, c *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil + +cdef void clatps(char *uplo, char *trans, char *diag, char *normin, int *n, c *ap, c *x, s *scale, s *cnorm, int *info) nogil + +cdef void clatrd(char *uplo, int *n, int *nb, c *a, int *lda, s *e, c *tau, c *w, int *ldw) nogil + +cdef void clatrs(char *uplo, char *trans, char *diag, char *normin, int *n, c *a, int *lda, c *x, s *scale, s *cnorm, int *info) nogil + +cdef void clatrz(int *m, int *n, int *l, c *a, int *lda, c *tau, c *work) nogil + +cdef void clauu2(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void clauum(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void cpbcon(char *uplo, int *n, int *kd, c *ab, int *ldab, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cpbequ(char *uplo, int *n, int *kd, c *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil + +cdef void cpbrfs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpbstf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil + +cdef void cpbsv(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil + +cdef void cpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *afb, int *ldafb, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpbtf2(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil + +cdef void cpbtrf(char *uplo, int *n, int *kd, c *ab, int *ldab, int *info) nogil + +cdef void cpbtrs(char *uplo, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil + +cdef void cpftrf(char *transr, char *uplo, int *n, c *a, int *info) nogil + +cdef void cpftri(char *transr, char *uplo, int *n, c *a, int *info) nogil + +cdef void cpftrs(char *transr, char *uplo, int *n, int *nrhs, c *a, c *b, int *ldb, int *info) nogil + +cdef void cpocon(char *uplo, int *n, c *a, int *lda, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cpoequ(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void cpoequb(int *n, c *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void cporfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cposv(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void cposvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpotf2(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void cpotrf(char *uplo, int *n, c *a, int *lda, 
int *info) nogil + +cdef void cpotri(char *uplo, int *n, c *a, int *lda, int *info) nogil + +cdef void cpotrs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void cppcon(char *uplo, int *n, c *ap, s *anorm, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void cppequ(char *uplo, int *n, c *ap, s *s, s *scond, s *amax, int *info) nogil + +cdef void cpprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cppsv(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil + +cdef void cppsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, char *equed, s *s, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpptrf(char *uplo, int *n, c *ap, int *info) nogil + +cdef void cpptri(char *uplo, int *n, c *ap, int *info) nogil + +cdef void cpptrs(char *uplo, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil + +cdef void cpstf2(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void cpstrf(char *uplo, int *n, c *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void cptcon(int *n, s *d, c *e, s *anorm, s *rcond, s *rwork, int *info) nogil + +cdef void cpteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil + +cdef void cptrfs(char *uplo, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cptsv(int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil + +cdef void cptsvx(char *fact, int *n, int *nrhs, s *d, c *e, s *df, c *ef, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cpttrf(int *n, s *d, c *e, int *info) nogil + +cdef void cpttrs(char *uplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb, int *info) nogil + +cdef void cptts2(int *iuplo, int *n, int *nrhs, s *d, c *e, c *b, int *ldb) nogil + +cdef void crot(int *n, c *cx, int *incx, c *cy, int *incy, s *c, c *s) nogil + +cdef void cspcon(char *uplo, int *n, c *ap, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void cspmv(char *uplo, int *n, c *alpha, c *ap, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void cspr(char *uplo, int *n, c *alpha, c *x, int *incx, c *ap) nogil + +cdef void csprfs(char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void cspsv(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void cspsvx(char *fact, char *uplo, int *n, int *nrhs, c *ap, c *afp, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void csptrf(char *uplo, int *n, c *ap, int *ipiv, int *info) nogil + +cdef void csptri(char *uplo, int *n, c *ap, int *ipiv, c *work, int *info) nogil + +cdef void csptrs(char *uplo, int *n, int *nrhs, c *ap, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void csrscl(int *n, s *sa, c *sx, int *incx) nogil + +cdef void cstedc(char *compz, int *n, s *d, s *e, c *z, int *ldz, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void cstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, c *z, int *ldz, int *isuppz, s *work, int 
*lwork, int *iwork, int *liwork, int *info) nogil + +cdef void cstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, c *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void cstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, c *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void csteqr(char *compz, int *n, s *d, s *e, c *z, int *ldz, s *work, int *info) nogil + +cdef void csycon(char *uplo, int *n, c *a, int *lda, int *ipiv, s *anorm, s *rcond, c *work, int *info) nogil + +cdef void csyconv(char *uplo, char *way, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil + +cdef void csyequb(char *uplo, int *n, c *a, int *lda, s *s, s *scond, s *amax, c *work, int *info) nogil + +cdef void csymv(char *uplo, int *n, c *alpha, c *a, int *lda, c *x, int *incx, c *beta, c *y, int *incy) nogil + +cdef void csyr(char *uplo, int *n, c *alpha, c *x, int *incx, c *a, int *lda) nogil + +cdef void csyrfs(char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void csysv(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *lwork, int *info) nogil + +cdef void csysvx(char *fact, char *uplo, int *n, int *nrhs, c *a, int *lda, c *af, int *ldaf, int *ipiv, c *b, int *ldb, c *x, int *ldx, s *rcond, s *ferr, s *berr, c *work, int *lwork, s *rwork, int *info) nogil + +cdef void csyswapr(char *uplo, int *n, c *a, int *lda, int *i1, int *i2) nogil + +cdef void csytf2(char *uplo, int *n, c *a, int *lda, int *ipiv, int *info) nogil + +cdef void csytrf(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void csytri(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *info) nogil + +cdef void csytri2(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *lwork, int *info) nogil + +cdef void csytri2x(char *uplo, int *n, c *a, int *lda, int *ipiv, c *work, int *nb, int *info) nogil + +cdef void csytrs(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, int *info) nogil + +cdef void csytrs2(char *uplo, int *n, int *nrhs, c *a, int *lda, int *ipiv, c *b, int *ldb, c *work, int *info) nogil + +cdef void ctbcon(char *norm, char *uplo, char *diag, int *n, int *kd, c *ab, int *ldab, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void ctbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void ctbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, c *ab, int *ldab, c *b, int *ldb, int *info) nogil + +cdef void ctfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, c *alpha, c *a, c *b, int *ldb) nogil + +cdef void ctftri(char *transr, char *uplo, char *diag, int *n, c *a, int *info) nogil + +cdef void ctfttp(char *transr, char *uplo, int *n, c *arf, c *ap, int *info) nogil + +cdef void ctfttr(char *transr, char *uplo, int *n, c *arf, c *a, int *lda, int *info) nogil + +cdef void ctgevc(char *side, char *howmny, bint *select, int *n, c *s, int *lds, c *p, int *ldp, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil + +cdef void ctgex2(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, 
int *j1, int *info) nogil + +cdef void ctgexc(bint *wantq, bint *wantz, int *n, c *a, int *lda, c *b, int *ldb, c *q, int *ldq, c *z, int *ldz, int *ifst, int *ilst, int *info) nogil + +cdef void ctgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *alpha, c *beta, c *q, int *ldq, c *z, int *ldz, int *m, s *pl, s *pr, s *dif, c *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ctgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, c *a, int *lda, c *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, c *u, int *ldu, c *v, int *ldv, c *q, int *ldq, c *work, int *ncycle, int *info) nogil + +cdef void ctgsna(char *job, char *howmny, bint *select, int *n, c *a, int *lda, c *b, int *ldb, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *dif, int *mm, int *m, c *work, int *lwork, int *iwork, int *info) nogil + +cdef void ctgsy2(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *info) nogil + +cdef void ctgsyl(char *trans, int *ijob, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, c *d, int *ldd, c *e, int *lde, c *f, int *ldf, s *scale, s *dif, c *work, int *lwork, int *iwork, int *info) nogil + +cdef void ctpcon(char *norm, char *uplo, char *diag, int *n, c *ap, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void ctpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *info) nogil + +cdef void ctpqrt(int *m, int *n, int *l, int *nb, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, c *work, int *info) nogil + +cdef void ctpqrt2(int *m, int *n, int *l, c *a, int *lda, c *b, int *ldb, c *t, int *ldt, int *info) nogil + +cdef void ctprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, c *v, int *ldv, c *t, int *ldt, c *a, int *lda, c *b, int *ldb, c *work, int *ldwork) nogil + +cdef void ctprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void ctptri(char *uplo, char *diag, int *n, c *ap, int *info) nogil + +cdef void ctptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *ap, c *b, int *ldb, int *info) nogil + +cdef void ctpttf(char *transr, char *uplo, int *n, c *ap, c *arf, int *info) nogil + +cdef void ctpttr(char *uplo, int *n, c *ap, c *a, int *lda, int *info) nogil + +cdef void ctrcon(char *norm, char *uplo, char *diag, int *n, c *a, int *lda, s *rcond, c *work, s *rwork, int *info) nogil + +cdef void ctrevc(char *side, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, int *mm, int *m, c *work, s *rwork, int *info) nogil + +cdef void ctrexc(char *compq, int *n, c *t, int *ldt, c *q, int *ldq, int *ifst, int *ilst, int *info) nogil + +cdef void ctrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, c *x, int *ldx, s *ferr, s *berr, c *work, s *rwork, int *info) nogil + +cdef void ctrsen(char *job, char *compq, bint *select, int *n, c *t, int *ldt, c *q, int *ldq, c *w, int *m, s *s, s *sep, c *work, int *lwork, int *info) nogil + +cdef void ctrsna(char *job, char *howmny, bint *select, int *n, c *t, int *ldt, c *vl, int *ldvl, c *vr, int *ldvr, s *s, s *sep, int *mm, int *m, c *work, int *ldwork, s *rwork, int *info) nogil + +cdef void 
ctrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, c *a, int *lda, c *b, int *ldb, c *c, int *ldc, s *scale, int *info) nogil + +cdef void ctrti2(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil + +cdef void ctrtri(char *uplo, char *diag, int *n, c *a, int *lda, int *info) nogil + +cdef void ctrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, c *a, int *lda, c *b, int *ldb, int *info) nogil + +cdef void ctrttf(char *transr, char *uplo, int *n, c *a, int *lda, c *arf, int *info) nogil + +cdef void ctrttp(char *uplo, int *n, c *a, int *lda, c *ap, int *info) nogil + +cdef void ctzrzf(int *m, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cunbdb(char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, s *phi, c *taup1, c *taup2, c *tauq1, c *tauq2, c *work, int *lwork, int *info) nogil + +cdef void cuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, c *x11, int *ldx11, c *x12, int *ldx12, c *x21, int *ldx21, c *x22, int *ldx22, s *theta, c *u1, int *ldu1, c *u2, int *ldu2, c *v1t, int *ldv1t, c *v2t, int *ldv2t, c *work, int *lwork, s *rwork, int *lrwork, int *iwork, int *info) nogil + +cdef void cung2l(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cung2r(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cungbr(char *vect, int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cunghr(int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cungl2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cunglq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cungql(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cungqr(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cungr2(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *info) nogil + +cdef void cungrq(int *m, int *n, int *k, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cungtr(char *uplo, int *n, c *a, int *lda, c *tau, c *work, int *lwork, int *info) nogil + +cdef void cunm2l(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil + +cdef void cunm2r(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil + +cdef void cunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cunml2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil + +cdef void cunmlq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cunmql(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cunmqr(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c 
*c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cunmr2(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil + +cdef void cunmr3(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *info) nogil + +cdef void cunmrq(char *side, char *trans, int *m, int *n, int *k, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cunmtr(char *side, char *uplo, char *trans, int *m, int *n, c *a, int *lda, c *tau, c *c, int *ldc, c *work, int *lwork, int *info) nogil + +cdef void cupgtr(char *uplo, int *n, c *ap, c *tau, c *q, int *ldq, c *work, int *info) nogil + +cdef void cupmtr(char *side, char *uplo, char *trans, int *m, int *n, c *ap, c *tau, c *c, int *ldc, c *work, int *info) nogil + +cdef void dbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *work, int *lwork, int *info) nogil + +cdef void dbdsdc(char *uplo, char *compq, int *n, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, d *q, int *iq, d *work, int *iwork, int *info) nogil + +cdef void dbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil + +cdef void ddisna(char *job, int *m, int *n, d *d, d *sep, int *info) nogil + +cdef void dgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *pt, int *ldpt, d *c, int *ldc, d *work, int *info) nogil + +cdef void dgbcon(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dgbequ(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void dgbequb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void dgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dgbsv(int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dgbtf2(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void dgbtrf(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void dgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, d *ab, int *ldab, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, d *v, int *ldv, int *info) nogil + +cdef void dgebal(char *job, int *n, d *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil + +cdef void dgebd2(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *info) nogil + 
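+# Editor's note: a minimal, hedged sketch of driving one of these LAPACK
+# wrappers (dgesv, declared below) from a hypothetical user .pyx module;
+# `solve_inplace` is an illustrative name, not SciPy API. Commented out
+# because a .pxd file may only hold declarations:
+#
+#     import numpy as np
+#     from scipy.linalg.cython_lapack cimport dgesv
+#
+#     def solve_inplace(double[::1, :] a, double[::1, :] b):
+#         # LAPACK expects column-major (Fortran-ordered) storage.
+#         cdef int n = a.shape[0], nrhs = b.shape[1], info = 0
+#         cdef int[::1] ipiv = np.zeros(n, dtype=np.intc)
+#         dgesv(&n, &nrhs, &a[0, 0], &n, &ipiv[0], &b[0, 0], &n, &info)
+#         return info  # 0 on success, >0 if U is exactly singular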
+cdef void dgebrd(int *m, int *n, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *work, int *lwork, int *info) nogil + +cdef void dgecon(char *norm, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dgeequ(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void dgeequb(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info) nogil + +cdef void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void dgeev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil + +cdef void dgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dgehd2(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dgehrd(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, d *a, int *lda, d *sva, d *u, int *ldu, d *v, int *ldv, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dgelq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dgelqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dgels(char *trans, int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *work, int *lwork, int *info) nogil + +cdef void dgelsd(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dgelss(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *s, d *rcond, int *rank, d *work, int *lwork, int *info) nogil + +cdef void dgelsy(int *m, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *jpvt, d *rcond, int *rank, d *work, int *lwork, int *info) nogil + +cdef void dgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *info) nogil + +cdef void dgeql2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dgeqlf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dgeqp3(int *m, int *n, d *a, int *lda, int *jpvt, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dgeqr2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dgeqr2p(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dgeqrf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dgeqrfp(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dgeqrt(int *m, int *n, int *nb, d *a, int *lda, d *t, int *ldt, d *work, int *info) nogil + +cdef void dgeqrt2(int *m, int *n, d *a, int *lda, d *t, int *ldt, int *info) nogil + +cdef void dgeqrt3(int *m, int *n, d *a, int *lda, d *t, int *ldt, 
int *info) nogil + +cdef void dgerfs(char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dgerq2(int *m, int *n, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dgerqf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dgesc2(int *n, d *a, int *lda, d *rhs, int *ipiv, int *jpiv, d *scale) nogil + +cdef void dgesdd(char *jobz, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dgesvd(char *jobu, char *jobvt, int *m, int *n, d *a, int *lda, d *s, d *u, int *ldu, d *vt, int *ldvt, d *work, int *lwork, int *info) nogil + +cdef void dgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, d *a, int *lda, d *sva, int *mv, d *v, int *ldv, d *work, int *lwork, int *info) nogil + +cdef void dgesvx(char *fact, char *trans, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dgetc2(int *n, d *a, int *lda, int *ipiv, int *jpiv, int *info) nogil + +cdef void dgetf2(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil + +cdef void dgetrf(int *m, int *n, d *a, int *lda, int *ipiv, int *info) nogil + +cdef void dgetri(int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil + +cdef void dgetrs(char *trans, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, d *v, int *ldv, int *info) nogil + +cdef void dggbal(char *job, int *n, d *a, int *lda, d *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil + +cdef void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info) nogil + +cdef void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void dggev(char *jobvl, char *jobvr, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, d *work, int *lwork, int *info) nogil + +cdef void dggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *vl, int *ldvl, d *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, bint *bwork, int *info) nogil + +cdef void dggglm(int *n, int *m, int *p, d *a, int *lda, d *b, int *ldb, d *d, d *x, d *y, d *work, int *lwork, int *info) nogil + +cdef void dgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *info) nogil + +cdef void dgglse(int *m, int *n, int *p, d *a, int *lda, d *b, int *ldb, d *c, d *d, d *x, d *work, int *lwork, int *info) nogil + +cdef void dggqrf(int *n, int *m, int *p, d *a, int *lda, d 
*taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil + +cdef void dggrqf(int *m, int *p, int *n, d *a, int *lda, d *taua, d *b, int *ldb, d *taub, d *work, int *lwork, int *info) nogil + +cdef void dgsvj0(char *jobv, int *m, int *n, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil + +cdef void dgsvj1(char *jobv, int *m, int *n, int *n1, d *a, int *lda, d *d, d *sva, int *mv, d *v, int *ldv, d *eps, d *sfmin, d *tol, int *nsweep, d *work, int *lwork, int *info) nogil + +cdef void dgtcon(char *norm, int *n, d *dl, d *d, d *du, d *du2, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dgtrfs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dgtsv(int *n, int *nrhs, d *dl, d *d, d *du, d *b, int *ldb, int *info) nogil + +cdef void dgtsvx(char *fact, char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *dlf, d *df, d *duf, d *du2, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dgttrf(int *n, d *dl, d *d, d *du, d *du2, int *ipiv, int *info) nogil + +cdef void dgttrs(char *trans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dgtts2(int *itrans, int *n, int *nrhs, d *dl, d *d, d *du, d *du2, int *ipiv, d *b, int *ldb) nogil + +cdef void dhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *t, int *ldt, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, d *work, int *lwork, int *info) nogil + +cdef void dhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, d *h, int *ldh, d *wr, d *wi, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *ifaill, int *ifailr, int *info) nogil + +cdef void dhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, d *z, int *ldz, d *work, int *lwork, int *info) nogil + +cdef bint disnan(d *din) nogil + +cdef void dlabad(d *small, d *large) nogil + +cdef void dlabrd(int *m, int *n, int *nb, d *a, int *lda, d *d, d *e, d *tauq, d *taup, d *x, int *ldx, d *y, int *ldy) nogil + +cdef void dlacn2(int *n, d *v, d *x, int *isgn, d *est, int *kase, int *isave) nogil + +cdef void dlacon(int *n, d *v, d *x, int *isgn, d *est, int *kase) nogil + +cdef void dlacpy(char *uplo, int *m, int *n, d *a, int *lda, d *b, int *ldb) nogil + +cdef void dladiv(d *a, d *b, d *c, d *d, d *p, d *q) nogil + +cdef void dlae2(d *a, d *b, d *c, d *rt1, d *rt2) nogil + +cdef void dlaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, d *abstol, d *reltol, d *pivmin, d *d, d *e, d *e2, int *nval, d *ab, d *c, int *mout, int *nab, d *work, int *iwork, int *info) nogil + +cdef void dlaed0(int *icompq, int *qsiz, int *n, d *d, d *e, d *q, int *ldq, d *qstore, int *ldqs, d *work, int *iwork, int *info) nogil + +cdef void dlaed1(int *n, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *work, int *iwork, int *info) nogil + +cdef void dlaed2(int *k, int *n, int *n1, d *d, d *q, int *ldq, int *indxq, d *rho, d *z, d *dlamda, d *w, d *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil + +cdef void dlaed3(int *k, int *n, int *n1, d *d, d *q, int *ldq, d *rho, d *dlamda, d *q2, int *indx, int *ctot, d *w, d *s, int *info) nogil + +cdef void dlaed4(int *n, 
int *i, d *d, d *z, d *delta, d *rho, d *dlam, int *info) nogil + +cdef void dlaed5(int *i, d *d, d *z, d *delta, d *rho, d *dlam) nogil + +cdef void dlaed6(int *kniter, bint *orgati, d *rho, d *d, d *z, d *finit, d *tau, int *info) nogil + +cdef void dlaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *work, int *iwork, int *info) nogil + +cdef void dlaed8(int *icompq, int *k, int *n, int *qsiz, d *d, d *q, int *ldq, int *indxq, d *rho, int *cutpnt, d *z, d *dlamda, d *q2, int *ldq2, d *w, int *perm, int *givptr, int *givcol, d *givnum, int *indxp, int *indx, int *info) nogil + +cdef void dlaed9(int *k, int *kstart, int *kstop, int *n, d *d, d *q, int *ldq, d *rho, d *dlamda, d *w, d *s, int *lds, int *info) nogil + +cdef void dlaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, d *q, int *qptr, d *z, d *ztemp, int *info) nogil + +cdef void dlaein(bint *rightv, bint *noinit, int *n, d *h, int *ldh, d *wr, d *wi, d *vr, d *vi, d *b, int *ldb, d *work, d *eps3, d *smlnum, d *bignum, int *info) nogil + +cdef void dlaev2(d *a, d *b, d *c, d *rt1, d *rt2, d *cs1, d *sn1) nogil + +cdef void dlaexc(bint *wantq, int *n, d *t, int *ldt, d *q, int *ldq, int *j1, int *n1, int *n2, d *work, int *info) nogil + +cdef void dlag2(d *a, int *lda, d *b, int *ldb, d *safmin, d *scale1, d *scale2, d *wr1, d *wr2, d *wi) nogil + +cdef void dlag2s(int *m, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil + +cdef void dlags2(bint *upper, d *a1, d *a2, d *a3, d *b1, d *b2, d *b3, d *csu, d *snu, d *csv, d *snv, d *csq, d *snq) nogil + +cdef void dlagtf(int *n, d *a, d *lambda_, d *b, d *c, d *tol, d *d, int *in_, int *info) nogil + +cdef void dlagtm(char *trans, int *n, int *nrhs, d *alpha, d *dl, d *d, d *du, d *x, int *ldx, d *beta, d *b, int *ldb) nogil + +cdef void dlagts(int *job, int *n, d *a, d *b, d *c, d *d, int *in_, d *y, d *tol, int *info) nogil + +cdef void dlagv2(d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *csl, d *snl, d *csr, d *snr) nogil + +cdef void dlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, int *info) nogil + +cdef void dlahr2(int *n, int *k, int *nb, d *a, int *lda, d *tau, d *t, int *ldt, d *y, int *ldy) nogil + +cdef void dlaic1(int *job, int *j, d *x, d *sest, d *w, d *gamma, d *sestpr, d *s, d *c) nogil + +cdef void dlaln2(bint *ltrans, int *na, int *nw, d *smin, d *ca, d *a, int *lda, d *d1, d *d2, d *b, int *ldb, d *wr, d *wi, d *x, int *ldx, d *scale, d *xnorm, int *info) nogil + +cdef void dlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *info) nogil + +cdef void dlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, d *b, int *ldb, d *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil + +cdef void dlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, d *b, int *ldb, d *rcond, int *rank, d *work, int *iwork, int *info) nogil + +cdef d dlamch(char *cmach) nogil + +cdef void dlamrg(int *n1, int *n2, d *a, int *dtrd1, int 
*dtrd2, int *index_bn) nogil + +cdef int dlaneg(int *n, d *d, d *lld, d *sigma, d *pivmin, int *r) nogil + +cdef d dlangb(char *norm, int *n, int *kl, int *ku, d *ab, int *ldab, d *work) nogil + +cdef d dlange(char *norm, int *m, int *n, d *a, int *lda, d *work) nogil + +cdef d dlangt(char *norm, int *n, d *dl, d *d, d *du) nogil + +cdef d dlanhs(char *norm, int *n, d *a, int *lda, d *work) nogil + +cdef d dlansb(char *norm, char *uplo, int *n, int *k, d *ab, int *ldab, d *work) nogil + +cdef d dlansf(char *norm, char *transr, char *uplo, int *n, d *a, d *work) nogil + +cdef d dlansp(char *norm, char *uplo, int *n, d *ap, d *work) nogil + +cdef d dlanst(char *norm, int *n, d *d, d *e) nogil + +cdef d dlansy(char *norm, char *uplo, int *n, d *a, int *lda, d *work) nogil + +cdef d dlantb(char *norm, char *uplo, char *diag, int *n, int *k, d *ab, int *ldab, d *work) nogil + +cdef d dlantp(char *norm, char *uplo, char *diag, int *n, d *ap, d *work) nogil + +cdef d dlantr(char *norm, char *uplo, char *diag, int *m, int *n, d *a, int *lda, d *work) nogil + +cdef void dlanv2(d *a, d *b, d *c, d *d, d *rt1r, d *rt1i, d *rt2r, d *rt2i, d *cs, d *sn) nogil + +cdef void dlapll(int *n, d *x, int *incx, d *y, int *incy, d *ssmin) nogil + +cdef void dlapmr(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil + +cdef void dlapmt(bint *forwrd, int *m, int *n, d *x, int *ldx, int *k) nogil + +cdef d dlapy2(d *x, d *y) nogil + +cdef d dlapy3(d *x, d *y, d *z) nogil + +cdef void dlaqgb(int *m, int *n, int *kl, int *ku, d *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil + +cdef void dlaqge(int *m, int *n, d *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil + +cdef void dlaqp2(int *m, int *n, int *offset, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *work) nogil + +cdef void dlaqps(int *m, int *n, int *offset, int *nb, int *kb, d *a, int *lda, int *jpvt, d *tau, d *vn1, d *vn2, d *auxv, d *f, int *ldf) nogil + +cdef void dlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil + +cdef void dlaqr1(int *n, d *h, int *ldh, d *sr1, d *si1, d *sr2, d *si2, d *v) nogil + +cdef void dlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil + +cdef void dlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, int *ns, int *nd, d *sr, d *si, d *v, int *ldv, int *nh, d *t, int *ldt, int *nv, d *wv, int *ldwv, d *work, int *lwork) nogil + +cdef void dlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, d *h, int *ldh, d *wr, d *wi, int *iloz, int *ihiz, d *z, int *ldz, d *work, int *lwork, int *info) nogil + +cdef void dlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, d *sr, d *si, d *h, int *ldh, int *iloz, int *ihiz, d *z, int *ldz, d *v, int *ldv, d *u, int *ldu, int *nv, d *wv, int *ldwv, int *nh, d *wh, int *ldwh) nogil + +cdef void dlaqsb(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil + +cdef void dlaqsp(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, char *equed) nogil + +cdef void dlaqsy(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil + +cdef void 
dlaqtr(bint *ltran, bint *lreal, int *n, d *t, int *ldt, d *b, d *w, d *scale, d *x, d *work, int *info) nogil + +cdef void dlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, d *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil + +cdef void dlar2v(int *n, d *x, d *y, d *z, int *incx, d *c, d *s, int *incc) nogil + +cdef void dlarf(char *side, int *m, int *n, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil + +cdef void dlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int *ldwork) nogil + +cdef void dlarfg(int *n, d *alpha, d *x, int *incx, d *tau) nogil + +cdef void dlarfgp(int *n, d *alpha, d *x, int *incx, d *tau) nogil + +cdef void dlarft(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil + +cdef void dlarfx(char *side, int *m, int *n, d *v, d *tau, d *c, int *ldc, d *work) nogil + +cdef void dlargv(int *n, d *x, int *incx, d *y, int *incy, d *c, int *incc) nogil + +cdef void dlarnv(int *idist, int *iseed, int *n, d *x) nogil + +cdef void dlarra(int *n, d *d, d *e, d *e2, d *spltol, d *tnrm, int *nsplit, int *isplit, int *info) nogil + +cdef void dlarrb(int *n, d *d, d *lld, int *ifirst, int *ilast, d *rtol1, d *rtol2, int *offset, d *w, d *wgap, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *twist, int *info) nogil + +cdef void dlarrc(char *jobt, int *n, d *vl, d *vu, d *d, d *e, d *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil + +cdef void dlarrd(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *gers, d *reltol, d *d, d *e, d *e2, d *pivmin, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wl, d *wu, int *iblock, int *indexw, d *work, int *iwork, int *info) nogil + +cdef void dlarre(char *range, int *n, d *vl, d *vu, int *il, int *iu, d *d, d *e, d *e2, d *rtol1, d *rtol2, d *spltol, int *nsplit, int *isplit, int *m, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *pivmin, d *work, int *iwork, int *info) nogil + +cdef void dlarrf(int *n, d *d, d *l, d *ld, int *clstrt, int *clend, d *w, d *wgap, d *werr, d *spdiam, d *clgapl, d *clgapr, d *pivmin, d *sigma, d *dplus, d *lplus, d *work, int *info) nogil + +cdef void dlarrj(int *n, d *d, d *e2, int *ifirst, int *ilast, d *rtol, int *offset, d *w, d *werr, d *work, int *iwork, d *pivmin, d *spdiam, int *info) nogil + +cdef void dlarrk(int *n, int *iw, d *gl, d *gu, d *d, d *e2, d *pivmin, d *reltol, d *w, d *werr, int *info) nogil + +cdef void dlarrr(int *n, d *d, d *e, int *info) nogil + +cdef void dlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, d *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil + +cdef void dlartg(d *f, d *g, d *cs, d *sn, d *r) nogil + +cdef void dlartgp(d *f, d *g, d *cs, d *sn, d *r) nogil + +cdef void dlartgs(d *x, d *y, d *sigma, d *cs, d *sn) nogil + +cdef void dlartv(int *n, d *x, int *incx, d *y, int *incy, d *c, d *s, int *incc) nogil + +cdef void dlaruv(int *iseed, int *n, d *x) nogil + +cdef void dlarz(char *side, int *m, int *n, int *l, d *v, int *incv, d *tau, d *c, int *ldc, d *work) nogil + +cdef void dlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *c, int *ldc, d *work, int 
*ldwork) nogil + +cdef void dlarzt(char *direct, char *storev, int *n, int *k, d *v, int *ldv, d *tau, d *t, int *ldt) nogil + +cdef void dlas2(d *f, d *g, d *h, d *ssmin, d *ssmax) nogil + +cdef void dlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, d *a, int *lda, int *info) nogil + +cdef void dlasd0(int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *ldvt, int *smlsiz, int *iwork, d *work, int *info) nogil + +cdef void dlasd1(int *nl, int *nr, int *sqre, d *d, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, int *idxq, int *iwork, d *work, int *info) nogil + +cdef void dlasd2(int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *alpha, d *beta, d *u, int *ldu, d *vt, int *ldvt, d *dsigma, d *u2, int *ldu2, d *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil + +cdef void dlasd3(int *nl, int *nr, int *sqre, int *k, d *d, d *q, int *ldq, d *dsigma, d *u, int *ldu, d *u2, int *ldu2, d *vt, int *ldvt, d *vt2, int *ldvt2, int *idxc, int *ctot, d *z, int *info) nogil + +cdef void dlasd4(int *n, int *i, d *d, d *z, d *delta, d *rho, d *sigma, d *work, int *info) nogil + +cdef void dlasd5(int *i, d *d, d *z, d *delta, d *rho, d *dsigma, d *work) nogil + +cdef void dlasd6(int *icompq, int *nl, int *nr, int *sqre, d *d, d *vf, d *vl, d *alpha, d *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *work, int *iwork, int *info) nogil + +cdef void dlasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, d *d, d *z, d *zw, d *vf, d *vfw, d *vl, d *vlw, d *alpha, d *beta, d *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *c, d *s, int *info) nogil + +cdef void dlasd8(int *icompq, int *k, d *d, d *z, d *vf, d *vl, d *difl, d *difr, int *lddifr, d *dsigma, d *work, int *info) nogil + +cdef void dlasda(int *icompq, int *smlsiz, int *n, int *sqre, d *d, d *e, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *work, int *iwork, int *info) nogil + +cdef void dlasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, d *vt, int *ldvt, d *u, int *ldu, d *c, int *ldc, d *work, int *info) nogil + +cdef void dlasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil + +cdef void dlaset(char *uplo, int *m, int *n, d *alpha, d *beta, d *a, int *lda) nogil + +cdef void dlasq1(int *n, d *d, d *e, d *work, int *info) nogil + +cdef void dlasq2(int *n, d *z, int *info) nogil + +cdef void dlasq3(int *i0, int *n0, d *z, int *pp, d *dmin, d *sigma, d *desig, d *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *g, d *tau) nogil + +cdef void dlasq4(int *i0, int *n0, d *z, int *pp, int *n0in, d *dmin, d *dmin1, d *dmin2, d *dn, d *dn1, d *dn2, d *tau, int *ttype, d *g) nogil + +cdef void dlasq6(int *i0, int *n0, d *z, int *pp, d *dmin, d *dmin1, d *dmin2, d *dn, d *dnm1, d *dnm2) nogil + +cdef void dlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, d *a, int *lda) nogil + +cdef void dlasrt(char *id, int *n, d *d, int *info) nogil + +cdef void dlassq(int *n, d *x, int *incx, d *scale, d *sumsq) nogil + +cdef void dlasv2(d *f, d *g, d *h, d *ssmin, d *ssmax, d *snr, d *csr, d *snl, d *csl) nogil + +cdef void dlaswp(int *n, d *a, int *lda, int *k1, int *k2, int *ipiv, int 
*incx) nogil + +cdef void dlasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, d *tl, int *ldtl, d *tr, int *ldtr, d *b, int *ldb, d *scale, d *x, int *ldx, d *xnorm, int *info) nogil + +cdef void dlasyf(char *uplo, int *n, int *nb, int *kb, d *a, int *lda, int *ipiv, d *w, int *ldw, int *info) nogil + +cdef void dlat2s(char *uplo, int *n, d *a, int *lda, s *sa, int *ldsa, int *info) nogil + +cdef void dlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, d *ab, int *ldab, d *x, d *scale, d *cnorm, int *info) nogil + +cdef void dlatdf(int *ijob, int *n, d *z, int *ldz, d *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil + +cdef void dlatps(char *uplo, char *trans, char *diag, char *normin, int *n, d *ap, d *x, d *scale, d *cnorm, int *info) nogil + +cdef void dlatrd(char *uplo, int *n, int *nb, d *a, int *lda, d *e, d *tau, d *w, int *ldw) nogil + +cdef void dlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, d *a, int *lda, d *x, d *scale, d *cnorm, int *info) nogil + +cdef void dlatrz(int *m, int *n, int *l, d *a, int *lda, d *tau, d *work) nogil + +cdef void dlauu2(char *uplo, int *n, d *a, int *lda, int *info) nogil + +cdef void dlauum(char *uplo, int *n, d *a, int *lda, int *info) nogil + +cdef void dopgtr(char *uplo, int *n, d *ap, d *tau, d *q, int *ldq, d *work, int *info) nogil + +cdef void dopmtr(char *side, char *uplo, char *trans, int *m, int *n, d *ap, d *tau, d *c, int *ldc, d *work, int *info) nogil + +cdef void dorbdb(char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *phi, d *taup1, d *taup2, d *tauq1, d *tauq2, d *work, int *lwork, int *info) nogil + +cdef void dorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, d *x11, int *ldx11, d *x12, int *ldx12, d *x21, int *ldx21, d *x22, int *ldx22, d *theta, d *u1, int *ldu1, d *u2, int *ldu2, d *v1t, int *ldv1t, d *v2t, int *ldv2t, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dorg2l(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dorg2r(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dorgbr(char *vect, int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dorghr(int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dorgl2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dorglq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dorgql(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dorgqr(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dorgr2(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *info) nogil + +cdef void dorgrq(int *m, int *n, int *k, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dorgtr(char *uplo, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dorm2l(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil + +cdef void dorm2r(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil + +cdef void dormbr(char *vect, char *side, char *trans, int *m, int *n, int 
*k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dorml2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil + +cdef void dormlq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dormql(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dormqr(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dormr2(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil + +cdef void dormr3(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *info) nogil + +cdef void dormrq(char *side, char *trans, int *m, int *n, int *k, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dormrz(char *side, char *trans, int *m, int *n, int *k, int *l, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dormtr(char *side, char *uplo, char *trans, int *m, int *n, d *a, int *lda, d *tau, d *c, int *ldc, d *work, int *lwork, int *info) nogil + +cdef void dpbcon(char *uplo, int *n, int *kd, d *ab, int *ldab, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dpbequ(char *uplo, int *n, int *kd, d *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil + +cdef void dpbrfs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dpbstf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil + +cdef void dpbsv(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil + +cdef void dpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *afb, int *ldafb, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dpbtf2(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil + +cdef void dpbtrf(char *uplo, int *n, int *kd, d *ab, int *ldab, int *info) nogil + +cdef void dpbtrs(char *uplo, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil + +cdef void dpftrf(char *transr, char *uplo, int *n, d *a, int *info) nogil + +cdef void dpftri(char *transr, char *uplo, int *n, d *a, int *info) nogil + +cdef void dpftrs(char *transr, char *uplo, int *n, int *nrhs, d *a, d *b, int *ldb, int *info) nogil + +cdef void dpocon(char *uplo, int *n, d *a, int *lda, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dpoequ(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil + +cdef void dpoequb(int *n, d *a, int *lda, d *s, d *scond, d *amax, int *info) nogil + +cdef void dporfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil + +cdef void dposvx(char *fact, char *uplo, int *n, int *nrhs, d *a, 
int *lda, d *af, int *ldaf, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dpotf2(char *uplo, int *n, d *a, int *lda, int *info) nogil + +cdef void dpotrf(char *uplo, int *n, d *a, int *lda, int *info) nogil + +cdef void dpotri(char *uplo, int *n, d *a, int *lda, int *info) nogil + +cdef void dpotrs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil + +cdef void dppcon(char *uplo, int *n, d *ap, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dppequ(char *uplo, int *n, d *ap, d *s, d *scond, d *amax, int *info) nogil + +cdef void dpprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dppsv(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil + +cdef void dppsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, char *equed, d *s, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dpptrf(char *uplo, int *n, d *ap, int *info) nogil + +cdef void dpptri(char *uplo, int *n, d *ap, int *info) nogil + +cdef void dpptrs(char *uplo, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil + +cdef void dpstf2(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil + +cdef void dpstrf(char *uplo, int *n, d *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil + +cdef void dptcon(int *n, d *d, d *e, d *anorm, d *rcond, d *work, int *info) nogil + +cdef void dpteqr(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil + +cdef void dptrfs(int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *info) nogil + +cdef void dptsv(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil + +cdef void dptsvx(char *fact, int *n, int *nrhs, d *d, d *e, d *df, d *ef, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *info) nogil + +cdef void dpttrf(int *n, d *d, d *e, int *info) nogil + +cdef void dpttrs(int *n, int *nrhs, d *d, d *e, d *b, int *ldb, int *info) nogil + +cdef void dptts2(int *n, int *nrhs, d *d, d *e, d *b, int *ldb) nogil + +cdef void drscl(int *n, d *sa, d *sx, int *incx) nogil + +cdef void dsbev(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *info) nogil + +cdef void dsbevd(char *jobz, char *uplo, int *n, int *kd, d *ab, int *ldab, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dsbevx(char *jobz, char *range, char *uplo, int *n, int *kd, d *ab, int *ldab, d *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void dsbgst(char *vect, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *x, int *ldx, d *work, int *info) nogil + +cdef void dsbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *info) nogil + +cdef void dsbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dsbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, d *ab, int *ldab, d *bb, int *ldbb, d *q, int *ldq, d *vl, d *vu, int *il, int 
*iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void dsbtrd(char *vect, char *uplo, int *n, int *kd, d *ab, int *ldab, d *d, d *e, d *q, int *ldq, d *work, int *info) nogil + +cdef void dsfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, d *a, int *lda, d *beta, d *c) nogil + +cdef void dsgesv(int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil + +cdef void dspcon(char *uplo, int *n, d *ap, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dspev(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *info) nogil + +cdef void dspevd(char *jobz, char *uplo, int *n, d *ap, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dspevx(char *jobz, char *range, char *uplo, int *n, d *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void dspgst(int *itype, char *uplo, int *n, d *ap, d *bp, int *info) nogil + +cdef void dspgv(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *info) nogil + +cdef void dspgvd(int *itype, char *jobz, char *uplo, int *n, d *ap, d *bp, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dspgvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *ap, d *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void dsposv(char *uplo, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *work, s *swork, int *iter, int *info) nogil + +cdef void dsprfs(char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dspsv(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dspsvx(char *fact, char *uplo, int *n, int *nrhs, d *ap, d *afp, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dsptrd(char *uplo, int *n, d *ap, d *d, d *e, d *tau, int *info) nogil + +cdef void dsptrf(char *uplo, int *n, d *ap, int *ipiv, int *info) nogil + +cdef void dsptri(char *uplo, int *n, d *ap, int *ipiv, d *work, int *info) nogil + +cdef void dsptrs(char *uplo, int *n, int *nrhs, d *ap, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dstebz(char *range, char *order, int *n, d *vl, d *vu, int *il, int *iu, d *abstol, d *d, d *e, int *m, int *nsplit, d *w, int *iblock, int *isplit, d *work, int *iwork, int *info) nogil + +cdef void dstedc(char *compz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void dstemr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, d *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dsteqr(char *compz, int *n, d *d, d 
*e, d *z, int *ldz, d *work, int *info) nogil + +cdef void dsterf(int *n, d *d, d *e, int *info) nogil + +cdef void dstev(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *info) nogil + +cdef void dstevd(char *jobz, int *n, d *d, d *e, d *z, int *ldz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dstevr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dstevx(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void dsycon(char *uplo, int *n, d *a, int *lda, int *ipiv, d *anorm, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dsyconv(char *uplo, char *way, int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil + +cdef void dsyequb(char *uplo, int *n, d *a, int *lda, d *s, d *scond, d *amax, d *work, int *info) nogil + +cdef void dsyev(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *info) nogil + +cdef void dsyevd(char *jobz, char *uplo, int *n, d *a, int *lda, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dsyevr(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dsyevx(char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil + +cdef void dsygs2(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil + +cdef void dsygst(int *itype, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, int *info) nogil + +cdef void dsygv(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *info) nogil + +cdef void dsygvd(int *itype, char *jobz, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *w, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dsygvx(int *itype, char *jobz, char *range, char *uplo, int *n, d *a, int *lda, d *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, d *z, int *ldz, d *work, int *lwork, int *iwork, int *ifail, int *info) nogil + +cdef void dsyrfs(char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dsysv(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *lwork, int *info) nogil + +cdef void dsysvx(char *fact, char *uplo, int *n, int *nrhs, d *a, int *lda, d *af, int *ldaf, int *ipiv, d *b, int *ldb, d *x, int *ldx, d *rcond, d *ferr, d *berr, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dsyswapr(char *uplo, int *n, d *a, int *lda, int *i1, int *i2) nogil + +cdef void dsytd2(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, int *info) nogil + +cdef void dsytf2(char *uplo, int *n, d *a, int *lda, int *ipiv, int *info) nogil + +cdef void dsytrd(char *uplo, int *n, d *a, int *lda, d *d, d *e, d *tau, d *work, int *lwork, int *info) nogil + +cdef void dsytrf(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil + +cdef void dsytri(char *uplo, 
int *n, d *a, int *lda, int *ipiv, d *work, int *info) nogil + +cdef void dsytri2(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *lwork, int *info) nogil + +cdef void dsytri2x(char *uplo, int *n, d *a, int *lda, int *ipiv, d *work, int *nb, int *info) nogil + +cdef void dsytrs(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, int *info) nogil + +cdef void dsytrs2(char *uplo, int *n, int *nrhs, d *a, int *lda, int *ipiv, d *b, int *ldb, d *work, int *info) nogil + +cdef void dtbcon(char *norm, char *uplo, char *diag, int *n, int *kd, d *ab, int *ldab, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dtbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dtbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, d *ab, int *ldab, d *b, int *ldb, int *info) nogil + +cdef void dtfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, d *alpha, d *a, d *b, int *ldb) nogil + +cdef void dtftri(char *transr, char *uplo, char *diag, int *n, d *a, int *info) nogil + +cdef void dtfttp(char *transr, char *uplo, int *n, d *arf, d *ap, int *info) nogil + +cdef void dtfttr(char *transr, char *uplo, int *n, d *arf, d *a, int *lda, int *info) nogil + +cdef void dtgevc(char *side, char *howmny, bint *select, int *n, d *s, int *lds, d *p, int *ldp, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil + +cdef void dtgex2(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *j1, int *n1, int *n2, d *work, int *lwork, int *info) nogil + +cdef void dtgexc(bint *wantq, bint *wantz, int *n, d *a, int *lda, d *b, int *ldb, d *q, int *ldq, d *z, int *ldz, int *ifst, int *ilst, d *work, int *lwork, int *info) nogil + +cdef void dtgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *alphar, d *alphai, d *beta, d *q, int *ldq, d *z, int *ldz, int *m, d *pl, d *pr, d *dif, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dtgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, d *a, int *lda, d *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, d *u, int *ldu, d *v, int *ldv, d *q, int *ldq, d *work, int *ncycle, int *info) nogil + +cdef void dtgsna(char *job, char *howmny, bint *select, int *n, d *a, int *lda, d *b, int *ldb, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *dif, int *mm, int *m, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dtgsy2(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *iwork, int *pq, int *info) nogil + +cdef void dtgsyl(char *trans, int *ijob, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *d, int *ldd, d *e, int *lde, d *f, int *ldf, d *scale, d *dif, d *work, int *lwork, int *iwork, int *info) nogil + +cdef void dtpcon(char *norm, char *uplo, char *diag, int *n, d *ap, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dtpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *info) nogil + +cdef void dtpqrt(int *m, int *n, int *l, int *nb, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, d *work, int *info) nogil + +cdef void dtpqrt2(int *m, int 
*n, int *l, d *a, int *lda, d *b, int *ldb, d *t, int *ldt, int *info) nogil + +cdef void dtprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, d *v, int *ldv, d *t, int *ldt, d *a, int *lda, d *b, int *ldb, d *work, int *ldwork) nogil + +cdef void dtprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dtptri(char *uplo, char *diag, int *n, d *ap, int *info) nogil + +cdef void dtptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *ap, d *b, int *ldb, int *info) nogil + +cdef void dtpttf(char *transr, char *uplo, int *n, d *ap, d *arf, int *info) nogil + +cdef void dtpttr(char *uplo, int *n, d *ap, d *a, int *lda, int *info) nogil + +cdef void dtrcon(char *norm, char *uplo, char *diag, int *n, d *a, int *lda, d *rcond, d *work, int *iwork, int *info) nogil + +cdef void dtrevc(char *side, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, int *mm, int *m, d *work, int *info) nogil + +cdef void dtrexc(char *compq, int *n, d *t, int *ldt, d *q, int *ldq, int *ifst, int *ilst, d *work, int *info) nogil + +cdef void dtrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, d *x, int *ldx, d *ferr, d *berr, d *work, int *iwork, int *info) nogil + +cdef void dtrsen(char *job, char *compq, bint *select, int *n, d *t, int *ldt, d *q, int *ldq, d *wr, d *wi, int *m, d *s, d *sep, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void dtrsna(char *job, char *howmny, bint *select, int *n, d *t, int *ldt, d *vl, int *ldvl, d *vr, int *ldvr, d *s, d *sep, int *mm, int *m, d *work, int *ldwork, int *iwork, int *info) nogil + +cdef void dtrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, d *a, int *lda, d *b, int *ldb, d *c, int *ldc, d *scale, int *info) nogil + +cdef void dtrti2(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil + +cdef void dtrtri(char *uplo, char *diag, int *n, d *a, int *lda, int *info) nogil + +cdef void dtrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, d *a, int *lda, d *b, int *ldb, int *info) nogil + +cdef void dtrttf(char *transr, char *uplo, int *n, d *a, int *lda, d *arf, int *info) nogil + +cdef void dtrttp(char *uplo, int *n, d *a, int *lda, d *ap, int *info) nogil + +cdef void dtzrzf(int *m, int *n, d *a, int *lda, d *tau, d *work, int *lwork, int *info) nogil + +cdef d dzsum1(int *n, z *cx, int *incx) nogil + +cdef int icmax1(int *n, c *cx, int *incx) nogil + +cdef int ieeeck(int *ispec, s *zero, s *one) nogil + +cdef int ilaclc(int *m, int *n, c *a, int *lda) nogil + +cdef int ilaclr(int *m, int *n, c *a, int *lda) nogil + +cdef int iladiag(char *diag) nogil + +cdef int iladlc(int *m, int *n, d *a, int *lda) nogil + +cdef int iladlr(int *m, int *n, d *a, int *lda) nogil + +cdef int ilaprec(char *prec) nogil + +cdef int ilaslc(int *m, int *n, s *a, int *lda) nogil + +cdef int ilaslr(int *m, int *n, s *a, int *lda) nogil + +cdef int ilatrans(char *trans) nogil + +cdef int ilauplo(char *uplo) nogil + +cdef void ilaver(int *vers_major, int *vers_minor, int *vers_patch) nogil + +cdef int ilazlc(int *m, int *n, z *a, int *lda) nogil + +cdef int ilazlr(int *m, int *n, z *a, int *lda) nogil + +cdef int izmax1(int *n, z *cx, int *incx) nogil + +cdef void sbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, s *theta, s *phi, s *u1, int *ldu1, s *u2, 
int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *b11d, s *b11e, s *b12d, s *b12e, s *b21d, s *b21e, s *b22d, s *b22e, s *work, int *lwork, int *info) nogil + +cdef void sbdsdc(char *uplo, char *compq, int *n, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, s *q, int *iq, s *work, int *iwork, int *info) nogil + +cdef void sbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil + +cdef s scsum1(int *n, c *cx, int *incx) nogil + +cdef void sdisna(char *job, int *m, int *n, s *d, s *sep, int *info) nogil + +cdef void sgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *pt, int *ldpt, s *c, int *ldc, s *work, int *info) nogil + +cdef void sgbcon(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void sgbequ(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void sgbequb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void sgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sgbsv(int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sgbtf2(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void sgbtrf(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void sgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, s *ab, int *ldab, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sgebak(char *job, char *side, int *n, int *ilo, int *ihi, s *scale, int *m, s *v, int *ldv, int *info) nogil + +cdef void sgebal(char *job, int *n, s *a, int *lda, int *ilo, int *ihi, s *scale, int *info) nogil + +cdef void sgebd2(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *info) nogil + +cdef void sgebrd(int *m, int *n, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *work, int *lwork, int *info) nogil + +cdef void sgecon(char *norm, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void sgeequ(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void sgeequb(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, int *info) nogil + +cdef void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info) nogil + +cdef void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void sgeev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil + +cdef void sgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s 
*a, int *lda, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *scale, s *abnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void sgehd2(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sgehrd(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sgejsv(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, s *a, int *lda, s *sva, s *u, int *ldu, s *v, int *ldv, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void sgelq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sgelqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sgels(char *trans, int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *work, int *lwork, int *info) nogil + +cdef void sgelsd(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void sgelss(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *s, s *rcond, int *rank, s *work, int *lwork, int *info) nogil + +cdef void sgelsy(int *m, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *jpvt, s *rcond, int *rank, s *work, int *lwork, int *info) nogil + +cdef void sgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *info) nogil + +cdef void sgeql2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sgeqlf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sgeqp3(int *m, int *n, s *a, int *lda, int *jpvt, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sgeqr2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sgeqr2p(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sgeqrf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sgeqrfp(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sgeqrt(int *m, int *n, int *nb, s *a, int *lda, s *t, int *ldt, s *work, int *info) nogil + +cdef void sgeqrt2(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil + +cdef void sgeqrt3(int *m, int *n, s *a, int *lda, s *t, int *ldt, int *info) nogil + +cdef void sgerfs(char *trans, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sgerq2(int *m, int *n, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sgerqf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sgesc2(int *n, s *a, int *lda, s *rhs, int *ipiv, int *jpiv, s *scale) nogil + +cdef void sgesdd(char *jobz, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void sgesv(int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sgesvd(char *jobu, char *jobvt, int *m, int *n, s *a, int *lda, s *s, s *u, int *ldu, s *vt, int *ldvt, s *work, int *lwork, int *info) nogil + +cdef void sgesvj(char *joba, char *jobu, char *jobv, int *m, int *n, s *a, int *lda, s *sva, int *mv, s *v, int *ldv, s *work, int *lwork, int *info) nogil + +cdef void sgesvx(char *fact, char *trans, int *n, int *nrhs, s *a, int 
*lda, s *af, int *ldaf, int *ipiv, char *equed, s *r, s *c, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sgetc2(int *n, s *a, int *lda, int *ipiv, int *jpiv, int *info) nogil + +cdef void sgetf2(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil + +cdef void sgetrf(int *m, int *n, s *a, int *lda, int *ipiv, int *info) nogil + +cdef void sgetri(int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil + +cdef void sgetrs(char *trans, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sggbak(char *job, char *side, int *n, int *ilo, int *ihi, s *lscale, s *rscale, int *m, s *v, int *ldv, int *info) nogil + +cdef void sggbal(char *job, int *n, s *a, int *lda, s *b, int *ldb, int *ilo, int *ihi, s *lscale, s *rscale, s *work, int *info) nogil + +cdef void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info) nogil + +cdef void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void sggev(char *jobvl, char *jobvr, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, s *work, int *lwork, int *info) nogil + +cdef void sggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *vl, int *ldvl, s *vr, int *ldvr, int *ilo, int *ihi, s *lscale, s *rscale, s *abnrm, s *bbnrm, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, bint *bwork, int *info) nogil + +cdef void sggglm(int *n, int *m, int *p, s *a, int *lda, s *b, int *ldb, s *d, s *x, s *y, s *work, int *lwork, int *info) nogil + +cdef void sgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *info) nogil + +cdef void sgglse(int *m, int *n, int *p, s *a, int *lda, s *b, int *ldb, s *c, s *d, s *x, s *work, int *lwork, int *info) nogil + +cdef void sggqrf(int *n, int *m, int *p, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil + +cdef void sggrqf(int *m, int *p, int *n, s *a, int *lda, s *taua, s *b, int *ldb, s *taub, s *work, int *lwork, int *info) nogil + +cdef void sgsvj0(char *jobv, int *m, int *n, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil + +cdef void sgsvj1(char *jobv, int *m, int *n, int *n1, s *a, int *lda, s *d, s *sva, int *mv, s *v, int *ldv, s *eps, s *sfmin, s *tol, int *nsweep, s *work, int *lwork, int *info) nogil + +cdef void sgtcon(char *norm, int *n, s *dl, s *d, s *du, s *du2, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void sgtrfs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sgtsv(int *n, int *nrhs, s *dl, s *d, s *du, s *b, int *ldb, int *info) nogil + +cdef void sgtsvx(char *fact, char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *dlf, s *df, s *duf, s *du2, 
int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sgttrf(int *n, s *dl, s *d, s *du, s *du2, int *ipiv, int *info) nogil + +cdef void sgttrs(char *trans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sgtts2(int *itrans, int *n, int *nrhs, s *dl, s *d, s *du, s *du2, int *ipiv, s *b, int *ldb) nogil + +cdef void shgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *t, int *ldt, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, s *work, int *lwork, int *info) nogil + +cdef void shsein(char *side, char *eigsrc, char *initv, bint *select, int *n, s *h, int *ldh, s *wr, s *wi, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *ifaill, int *ifailr, int *info) nogil + +cdef void shseqr(char *job, char *compz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, s *z, int *ldz, s *work, int *lwork, int *info) nogil + +cdef void slabad(s *small, s *large) nogil + +cdef void slabrd(int *m, int *n, int *nb, s *a, int *lda, s *d, s *e, s *tauq, s *taup, s *x, int *ldx, s *y, int *ldy) nogil + +cdef void slacn2(int *n, s *v, s *x, int *isgn, s *est, int *kase, int *isave) nogil + +cdef void slacon(int *n, s *v, s *x, int *isgn, s *est, int *kase) nogil + +cdef void slacpy(char *uplo, int *m, int *n, s *a, int *lda, s *b, int *ldb) nogil + +cdef void sladiv(s *a, s *b, s *c, s *d, s *p, s *q) nogil + +cdef void slae2(s *a, s *b, s *c, s *rt1, s *rt2) nogil + +cdef void slaebz(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, s *abstol, s *reltol, s *pivmin, s *d, s *e, s *e2, int *nval, s *ab, s *c, int *mout, int *nab, s *work, int *iwork, int *info) nogil + +cdef void slaed0(int *icompq, int *qsiz, int *n, s *d, s *e, s *q, int *ldq, s *qstore, int *ldqs, s *work, int *iwork, int *info) nogil + +cdef void slaed1(int *n, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *work, int *iwork, int *info) nogil + +cdef void slaed2(int *k, int *n, int *n1, s *d, s *q, int *ldq, int *indxq, s *rho, s *z, s *dlamda, s *w, s *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info) nogil + +cdef void slaed3(int *k, int *n, int *n1, s *d, s *q, int *ldq, s *rho, s *dlamda, s *q2, int *indx, int *ctot, s *w, s *s, int *info) nogil + +cdef void slaed4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *dlam, int *info) nogil + +cdef void slaed5(int *i, s *d, s *z, s *delta, s *rho, s *dlam) nogil + +cdef void slaed6(int *kniter, bint *orgati, s *rho, s *d, s *z, s *finit, s *tau, int *info) nogil + +cdef void slaed7(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *work, int *iwork, int *info) nogil + +cdef void slaed8(int *icompq, int *k, int *n, int *qsiz, s *d, s *q, int *ldq, int *indxq, s *rho, int *cutpnt, s *z, s *dlamda, s *q2, int *ldq2, s *w, int *perm, int *givptr, int *givcol, s *givnum, int *indxp, int *indx, int *info) nogil + +cdef void slaed9(int *k, int *kstart, int *kstop, int *n, s *d, s *q, int *ldq, s *rho, s *dlamda, s *w, s *s, int *lds, int *info) nogil + +cdef void slaeda(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, s *givnum, s *q, int *qptr, s *z, s *ztemp, int *info) nogil + +cdef void slaein(bint *rightv, bint *noinit, int *n, s *h, int *ldh, s *wr, s *wi, 
s *vr, s *vi, s *b, int *ldb, s *work, s *eps3, s *smlnum, s *bignum, int *info) nogil + +cdef void slaev2(s *a, s *b, s *c, s *rt1, s *rt2, s *cs1, s *sn1) nogil + +cdef void slaexc(bint *wantq, int *n, s *t, int *ldt, s *q, int *ldq, int *j1, int *n1, int *n2, s *work, int *info) nogil + +cdef void slag2(s *a, int *lda, s *b, int *ldb, s *safmin, s *scale1, s *scale2, s *wr1, s *wr2, s *wi) nogil + +cdef void slag2d(int *m, int *n, s *sa, int *ldsa, d *a, int *lda, int *info) nogil + +cdef void slags2(bint *upper, s *a1, s *a2, s *a3, s *b1, s *b2, s *b3, s *csu, s *snu, s *csv, s *snv, s *csq, s *snq) nogil + +cdef void slagtf(int *n, s *a, s *lambda_, s *b, s *c, s *tol, s *d, int *in_, int *info) nogil + +cdef void slagtm(char *trans, int *n, int *nrhs, s *alpha, s *dl, s *d, s *du, s *x, int *ldx, s *beta, s *b, int *ldb) nogil + +cdef void slagts(int *job, int *n, s *a, s *b, s *c, s *d, int *in_, s *y, s *tol, int *info) nogil + +cdef void slagv2(s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *csl, s *snl, s *csr, s *snr) nogil + +cdef void slahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, int *info) nogil + +cdef void slahr2(int *n, int *k, int *nb, s *a, int *lda, s *tau, s *t, int *ldt, s *y, int *ldy) nogil + +cdef void slaic1(int *job, int *j, s *x, s *sest, s *w, s *gamma, s *sestpr, s *s, s *c) nogil + +cdef void slaln2(bint *ltrans, int *na, int *nw, s *smin, s *ca, s *a, int *lda, s *d1, s *d2, s *b, int *ldb, s *wr, s *wi, s *x, int *ldx, s *scale, s *xnorm, int *info) nogil + +cdef void slals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *info) nogil + +cdef void slalsa(int *icompq, int *smlsiz, int *n, int *nrhs, s *b, int *ldb, s *bx, int *ldbx, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil + +cdef void slalsd(char *uplo, int *smlsiz, int *n, int *nrhs, s *d, s *e, s *b, int *ldb, s *rcond, int *rank, s *work, int *iwork, int *info) nogil + +cdef s slamch(char *cmach) nogil + +cdef void slamrg(int *n1, int *n2, s *a, int *strd1, int *strd2, int *index_bn) nogil + +cdef s slangb(char *norm, int *n, int *kl, int *ku, s *ab, int *ldab, s *work) nogil + +cdef s slange(char *norm, int *m, int *n, s *a, int *lda, s *work) nogil + +cdef s slangt(char *norm, int *n, s *dl, s *d, s *du) nogil + +cdef s slanhs(char *norm, int *n, s *a, int *lda, s *work) nogil + +cdef s slansb(char *norm, char *uplo, int *n, int *k, s *ab, int *ldab, s *work) nogil + +cdef s slansf(char *norm, char *transr, char *uplo, int *n, s *a, s *work) nogil + +cdef s slansp(char *norm, char *uplo, int *n, s *ap, s *work) nogil + +cdef s slanst(char *norm, int *n, s *d, s *e) nogil + +cdef s slansy(char *norm, char *uplo, int *n, s *a, int *lda, s *work) nogil + +cdef s slantb(char *norm, char *uplo, char *diag, int *n, int *k, s *ab, int *ldab, s *work) nogil + +cdef s slantp(char *norm, char *uplo, char *diag, int *n, s *ap, s *work) nogil + +cdef s slantr(char *norm, char *uplo, char *diag, int *m, int *n, s *a, int *lda, s *work) nogil + +cdef void slanv2(s *a, s *b, s *c, s *d, s *rt1r, s *rt1i, s *rt2r, s *rt2i, s *cs, s *sn) nogil + +cdef void slapll(int *n, s *x, int *incx, s *y, int *incy, s 
*ssmin) nogil + +cdef void slapmr(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil + +cdef void slapmt(bint *forwrd, int *m, int *n, s *x, int *ldx, int *k) nogil + +cdef s slapy2(s *x, s *y) nogil + +cdef s slapy3(s *x, s *y, s *z) nogil + +cdef void slaqgb(int *m, int *n, int *kl, int *ku, s *ab, int *ldab, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void slaqge(int *m, int *n, s *a, int *lda, s *r, s *c, s *rowcnd, s *colcnd, s *amax, char *equed) nogil + +cdef void slaqp2(int *m, int *n, int *offset, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *work) nogil + +cdef void slaqps(int *m, int *n, int *offset, int *nb, int *kb, s *a, int *lda, int *jpvt, s *tau, s *vn1, s *vn2, s *auxv, s *f, int *ldf) nogil + +cdef void slaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil + +cdef void slaqr1(int *n, s *h, int *ldh, s *sr1, s *si1, s *sr2, s *si2, s *v) nogil + +cdef void slaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil + +cdef void slaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, int *ns, int *nd, s *sr, s *si, s *v, int *ldv, int *nh, s *t, int *ldt, int *nv, s *wv, int *ldwv, s *work, int *lwork) nogil + +cdef void slaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, s *h, int *ldh, s *wr, s *wi, int *iloz, int *ihiz, s *z, int *ldz, s *work, int *lwork, int *info) nogil + +cdef void slaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, s *sr, s *si, s *h, int *ldh, int *iloz, int *ihiz, s *z, int *ldz, s *v, int *ldv, s *u, int *ldu, int *nv, s *wv, int *ldwv, int *nh, s *wh, int *ldwh) nogil + +cdef void slaqsb(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, char *equed) nogil + +cdef void slaqsp(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, char *equed) nogil + +cdef void slaqsy(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, char *equed) nogil + +cdef void slaqtr(bint *ltran, bint *lreal, int *n, s *t, int *ldt, s *b, s *w, s *scale, s *x, s *work, int *info) nogil + +cdef void slar1v(int *n, int *b1, int *bn, s *lambda_, s *d, s *l, s *ld, s *lld, s *pivmin, s *gaptol, s *z, bint *wantnc, int *negcnt, s *ztz, s *mingma, int *r, int *isuppz, s *nrminv, s *resid, s *rqcorr, s *work) nogil + +cdef void slar2v(int *n, s *x, s *y, s *z, int *incx, s *c, s *s, int *incc) nogil + +cdef void slarf(char *side, int *m, int *n, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil + +cdef void slarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil + +cdef void slarfg(int *n, s *alpha, s *x, int *incx, s *tau) nogil + +cdef void slarfgp(int *n, s *alpha, s *x, int *incx, s *tau) nogil + +cdef void slarft(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil + +cdef void slarfx(char *side, int *m, int *n, s *v, s *tau, s *c, int *ldc, s *work) nogil + +cdef void slargv(int *n, s *x, int *incx, s *y, int *incy, s *c, int *incc) nogil + +cdef void slarnv(int *idist, int *iseed, int *n, s *x) nogil + +cdef void slarra(int *n, s *d, s *e, s 
*e2, s *spltol, s *tnrm, int *nsplit, int *isplit, int *info) nogil + +cdef void slarrb(int *n, s *d, s *lld, int *ifirst, int *ilast, s *rtol1, s *rtol2, int *offset, s *w, s *wgap, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *twist, int *info) nogil + +cdef void slarrc(char *jobt, int *n, s *vl, s *vu, s *d, s *e, s *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info) nogil + +cdef void slarrd(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *gers, s *reltol, s *d, s *e, s *e2, s *pivmin, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wl, s *wu, int *iblock, int *indexw, s *work, int *iwork, int *info) nogil + +cdef void slarre(char *range, int *n, s *vl, s *vu, int *il, int *iu, s *d, s *e, s *e2, s *rtol1, s *rtol2, s *spltol, int *nsplit, int *isplit, int *m, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *pivmin, s *work, int *iwork, int *info) nogil + +cdef void slarrf(int *n, s *d, s *l, s *ld, int *clstrt, int *clend, s *w, s *wgap, s *werr, s *spdiam, s *clgapl, s *clgapr, s *pivmin, s *sigma, s *dplus, s *lplus, s *work, int *info) nogil + +cdef void slarrj(int *n, s *d, s *e2, int *ifirst, int *ilast, s *rtol, int *offset, s *w, s *werr, s *work, int *iwork, s *pivmin, s *spdiam, int *info) nogil + +cdef void slarrk(int *n, int *iw, s *gl, s *gu, s *d, s *e2, s *pivmin, s *reltol, s *w, s *werr, int *info) nogil + +cdef void slarrr(int *n, s *d, s *e, int *info) nogil + +cdef void slarrv(int *n, s *vl, s *vu, s *d, s *l, s *pivmin, int *isplit, int *m, int *dol, int *dou, s *minrgp, s *rtol1, s *rtol2, s *w, s *werr, s *wgap, int *iblock, int *indexw, s *gers, s *z, int *ldz, int *isuppz, s *work, int *iwork, int *info) nogil + +cdef void slartg(s *f, s *g, s *cs, s *sn, s *r) nogil + +cdef void slartgp(s *f, s *g, s *cs, s *sn, s *r) nogil + +cdef void slartgs(s *x, s *y, s *sigma, s *cs, s *sn) nogil + +cdef void slartv(int *n, s *x, int *incx, s *y, int *incy, s *c, s *s, int *incc) nogil + +cdef void slaruv(int *iseed, int *n, s *x) nogil + +cdef void slarz(char *side, int *m, int *n, int *l, s *v, int *incv, s *tau, s *c, int *ldc, s *work) nogil + +cdef void slarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *c, int *ldc, s *work, int *ldwork) nogil + +cdef void slarzt(char *direct, char *storev, int *n, int *k, s *v, int *ldv, s *tau, s *t, int *ldt) nogil + +cdef void slas2(s *f, s *g, s *h, s *ssmin, s *ssmax) nogil + +cdef void slascl(char *type_bn, int *kl, int *ku, s *cfrom, s *cto, int *m, int *n, s *a, int *lda, int *info) nogil + +cdef void slasd0(int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *ldvt, int *smlsiz, int *iwork, s *work, int *info) nogil + +cdef void slasd1(int *nl, int *nr, int *sqre, s *d, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, int *idxq, int *iwork, s *work, int *info) nogil + +cdef void slasd2(int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *alpha, s *beta, s *u, int *ldu, s *vt, int *ldvt, s *dsigma, s *u2, int *ldu2, s *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info) nogil + +cdef void slasd3(int *nl, int *nr, int *sqre, int *k, s *d, s *q, int *ldq, s *dsigma, s *u, int *ldu, s *u2, int *ldu2, s *vt, int *ldvt, s *vt2, int *ldvt2, int *idxc, int *ctot, s *z, int *info) nogil + +cdef void slasd4(int *n, int *i, s *d, s *z, s *delta, s *rho, s *sigma, s *work, int *info) nogil + +cdef void slasd5(int *i, s *d, s *z, s *delta, s *rho, s *dsigma, s *work) 
nogil + +cdef void slasd6(int *icompq, int *nl, int *nr, int *sqre, s *d, s *vf, s *vl, s *alpha, s *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *poles, s *difl, s *difr, s *z, int *k, s *c, s *s, s *work, int *iwork, int *info) nogil + +cdef void slasd7(int *icompq, int *nl, int *nr, int *sqre, int *k, s *d, s *z, s *zw, s *vf, s *vfw, s *vl, s *vlw, s *alpha, s *beta, s *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, s *givnum, int *ldgnum, s *c, s *s, int *info) nogil + +cdef void slasd8(int *icompq, int *k, s *d, s *z, s *vf, s *vl, s *difl, s *difr, int *lddifr, s *dsigma, s *work, int *info) nogil + +cdef void slasda(int *icompq, int *smlsiz, int *n, int *sqre, s *d, s *e, s *u, int *ldu, s *vt, int *k, s *difl, s *difr, s *z, s *poles, int *givptr, int *givcol, int *ldgcol, int *perm, s *givnum, s *c, s *s, s *work, int *iwork, int *info) nogil + +cdef void slasdq(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, s *d, s *e, s *vt, int *ldvt, s *u, int *ldu, s *c, int *ldc, s *work, int *info) nogil + +cdef void slasdt(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub) nogil + +cdef void slaset(char *uplo, int *m, int *n, s *alpha, s *beta, s *a, int *lda) nogil + +cdef void slasq1(int *n, s *d, s *e, s *work, int *info) nogil + +cdef void slasq2(int *n, s *z, int *info) nogil + +cdef void slasq3(int *i0, int *n0, s *z, int *pp, s *dmin, s *sigma, s *desig, s *qmax, int *nfail, int *iter, int *ndiv, bint *ieee, int *ttype, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *g, s *tau) nogil + +cdef void slasq4(int *i0, int *n0, s *z, int *pp, int *n0in, s *dmin, s *dmin1, s *dmin2, s *dn, s *dn1, s *dn2, s *tau, int *ttype, s *g) nogil + +cdef void slasq6(int *i0, int *n0, s *z, int *pp, s *dmin, s *dmin1, s *dmin2, s *dn, s *dnm1, s *dnm2) nogil + +cdef void slasr(char *side, char *pivot, char *direct, int *m, int *n, s *c, s *s, s *a, int *lda) nogil + +cdef void slasrt(char *id, int *n, s *d, int *info) nogil + +cdef void slassq(int *n, s *x, int *incx, s *scale, s *sumsq) nogil + +cdef void slasv2(s *f, s *g, s *h, s *ssmin, s *ssmax, s *snr, s *csr, s *snl, s *csl) nogil + +cdef void slaswp(int *n, s *a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil + +cdef void slasy2(bint *ltranl, bint *ltranr, int *isgn, int *n1, int *n2, s *tl, int *ldtl, s *tr, int *ldtr, s *b, int *ldb, s *scale, s *x, int *ldx, s *xnorm, int *info) nogil + +cdef void slasyf(char *uplo, int *n, int *nb, int *kb, s *a, int *lda, int *ipiv, s *w, int *ldw, int *info) nogil + +cdef void slatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, s *ab, int *ldab, s *x, s *scale, s *cnorm, int *info) nogil + +cdef void slatdf(int *ijob, int *n, s *z, int *ldz, s *rhs, s *rdsum, s *rdscal, int *ipiv, int *jpiv) nogil + +cdef void slatps(char *uplo, char *trans, char *diag, char *normin, int *n, s *ap, s *x, s *scale, s *cnorm, int *info) nogil + +cdef void slatrd(char *uplo, int *n, int *nb, s *a, int *lda, s *e, s *tau, s *w, int *ldw) nogil + +cdef void slatrs(char *uplo, char *trans, char *diag, char *normin, int *n, s *a, int *lda, s *x, s *scale, s *cnorm, int *info) nogil + +cdef void slatrz(int *m, int *n, int *l, s *a, int *lda, s *tau, s *work) nogil + +cdef void slauu2(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void slauum(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void sopgtr(char *uplo, int *n, s *ap, s *tau, s *q, int 
*ldq, s *work, int *info) nogil + +cdef void sopmtr(char *side, char *uplo, char *trans, int *m, int *n, s *ap, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sorbdb(char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *phi, s *taup1, s *taup2, s *tauq1, s *tauq2, s *work, int *lwork, int *info) nogil + +cdef void sorcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, s *x11, int *ldx11, s *x12, int *ldx12, s *x21, int *ldx21, s *x22, int *ldx22, s *theta, s *u1, int *ldu1, s *u2, int *ldu2, s *v1t, int *ldv1t, s *v2t, int *ldv2t, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void sorg2l(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorg2r(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorgbr(char *vect, int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorghr(int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgl2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorglq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgql(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgqr(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgr2(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *info) nogil + +cdef void sorgrq(int *m, int *n, int *k, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorgtr(char *uplo, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void sorm2l(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sorm2r(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormbr(char *vect, char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sorml2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormlq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormql(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormqr(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormr2(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormr3(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *info) nogil + +cdef void sormrq(char *side, char *trans, int *m, int *n, int *k, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormrz(char *side, char *trans, int *m, int *n, int *k, int *l, s *a, int 
*lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void sormtr(char *side, char *uplo, char *trans, int *m, int *n, s *a, int *lda, s *tau, s *c, int *ldc, s *work, int *lwork, int *info) nogil + +cdef void spbcon(char *uplo, int *n, int *kd, s *ab, int *ldab, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void spbequ(char *uplo, int *n, int *kd, s *ab, int *ldab, s *s, s *scond, s *amax, int *info) nogil + +cdef void spbrfs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spbstf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil + +cdef void spbsv(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil + +cdef void spbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *afb, int *ldafb, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spbtf2(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil + +cdef void spbtrf(char *uplo, int *n, int *kd, s *ab, int *ldab, int *info) nogil + +cdef void spbtrs(char *uplo, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil + +cdef void spftrf(char *transr, char *uplo, int *n, s *a, int *info) nogil + +cdef void spftri(char *transr, char *uplo, int *n, s *a, int *info) nogil + +cdef void spftrs(char *transr, char *uplo, int *n, int *nrhs, s *a, s *b, int *ldb, int *info) nogil + +cdef void spocon(char *uplo, int *n, s *a, int *lda, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void spoequ(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void spoequb(int *n, s *a, int *lda, s *s, s *scond, s *amax, int *info) nogil + +cdef void sporfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sposv(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void sposvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spotf2(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void spotrf(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void spotri(char *uplo, int *n, s *a, int *lda, int *info) nogil + +cdef void spotrs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void sppcon(char *uplo, int *n, s *ap, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void sppequ(char *uplo, int *n, s *ap, s *s, s *scond, s *amax, int *info) nogil + +cdef void spprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sppsv(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil + +cdef void sppsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, char *equed, s *s, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void spptrf(char *uplo, int *n, s *ap, int *info) nogil + +cdef void spptri(char *uplo, int *n, s *ap, int *info) nogil + +cdef void spptrs(char *uplo, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil + +cdef void 
spstf2(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void spstrf(char *uplo, int *n, s *a, int *lda, int *piv, int *rank, s *tol, s *work, int *info) nogil + +cdef void sptcon(int *n, s *d, s *e, s *anorm, s *rcond, s *work, int *info) nogil + +cdef void spteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil + +cdef void sptrfs(int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *info) nogil + +cdef void sptsv(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil + +cdef void sptsvx(char *fact, int *n, int *nrhs, s *d, s *e, s *df, s *ef, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *info) nogil + +cdef void spttrf(int *n, s *d, s *e, int *info) nogil + +cdef void spttrs(int *n, int *nrhs, s *d, s *e, s *b, int *ldb, int *info) nogil + +cdef void sptts2(int *n, int *nrhs, s *d, s *e, s *b, int *ldb) nogil + +cdef void srscl(int *n, s *sa, s *sx, int *incx) nogil + +cdef void ssbev(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void ssbevd(char *jobz, char *uplo, int *n, int *kd, s *ab, int *ldab, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssbevx(char *jobz, char *range, char *uplo, int *n, int *kd, s *ab, int *ldab, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssbgst(char *vect, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *x, int *ldx, s *work, int *info) nogil + +cdef void ssbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void ssbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, s *ab, int *ldab, s *bb, int *ldbb, s *q, int *ldq, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssbtrd(char *vect, char *uplo, int *n, int *kd, s *ab, int *ldab, s *d, s *e, s *q, int *ldq, s *work, int *info) nogil + +cdef void ssfrk(char *transr, char *uplo, char *trans, int *n, int *k, s *alpha, s *a, int *lda, s *beta, s *c) nogil + +cdef void sspcon(char *uplo, int *n, s *ap, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void sspev(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void sspevd(char *jobz, char *uplo, int *n, s *ap, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sspevx(char *jobz, char *range, char *uplo, int *n, s *ap, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void sspgst(int *itype, char *uplo, int *n, s *ap, s *bp, int *info) nogil + +cdef void sspgv(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *info) nogil + +cdef void sspgvd(int *itype, char *jobz, char *uplo, int *n, s *ap, s *bp, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sspgvx(int *itype, char *jobz, char 
*range, char *uplo, int *n, s *ap, s *bp, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssprfs(char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void sspsv(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sspsvx(char *fact, char *uplo, int *n, int *nrhs, s *ap, s *afp, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void ssptrd(char *uplo, int *n, s *ap, s *d, s *e, s *tau, int *info) nogil + +cdef void ssptrf(char *uplo, int *n, s *ap, int *ipiv, int *info) nogil + +cdef void ssptri(char *uplo, int *n, s *ap, int *ipiv, s *work, int *info) nogil + +cdef void ssptrs(char *uplo, int *n, int *nrhs, s *ap, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void sstebz(char *range, char *order, int *n, s *vl, s *vu, int *il, int *iu, s *abstol, s *d, s *e, int *m, int *nsplit, s *w, int *iblock, int *isplit, s *work, int *iwork, int *info) nogil + +cdef void sstedc(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sstegr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sstein(int *n, s *d, s *e, int *m, s *w, int *iblock, int *isplit, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void sstemr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, int *m, s *w, s *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssteqr(char *compz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil + +cdef void ssterf(int *n, s *d, s *e, int *info) nogil + +cdef void sstev(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *info) nogil + +cdef void sstevd(char *jobz, int *n, s *d, s *e, s *z, int *ldz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sstevr(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void sstevx(char *jobz, char *range, int *n, s *d, s *e, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *iwork, int *ifail, int *info) nogil + +cdef void ssycon(char *uplo, int *n, s *a, int *lda, int *ipiv, s *anorm, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void ssyconv(char *uplo, char *way, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil + +cdef void ssyequb(char *uplo, int *n, s *a, int *lda, s *s, s *scond, s *amax, s *work, int *info) nogil + +cdef void ssyev(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *info) nogil + +cdef void ssyevd(char *jobz, char *uplo, int *n, s *a, int *lda, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssyevr(char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, int *isuppz, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssyevx(char *jobz, char *range, char *uplo, int *n, s *a, int 
*lda, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil + +cdef void ssygs2(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void ssygst(int *itype, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void ssygv(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *info) nogil + +cdef void ssygvd(int *itype, char *jobz, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *w, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ssygvx(int *itype, char *jobz, char *range, char *uplo, int *n, s *a, int *lda, s *b, int *ldb, s *vl, s *vu, int *il, int *iu, s *abstol, int *m, s *w, s *z, int *ldz, s *work, int *lwork, int *iwork, int *ifail, int *info) nogil + +cdef void ssyrfs(char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void ssysv(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *lwork, int *info) nogil + +cdef void ssysvx(char *fact, char *uplo, int *n, int *nrhs, s *a, int *lda, s *af, int *ldaf, int *ipiv, s *b, int *ldb, s *x, int *ldx, s *rcond, s *ferr, s *berr, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void ssyswapr(char *uplo, int *n, s *a, int *lda, int *i1, int *i2) nogil + +cdef void ssytd2(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, int *info) nogil + +cdef void ssytf2(char *uplo, int *n, s *a, int *lda, int *ipiv, int *info) nogil + +cdef void ssytrd(char *uplo, int *n, s *a, int *lda, s *d, s *e, s *tau, s *work, int *lwork, int *info) nogil + +cdef void ssytrf(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil + +cdef void ssytri(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *info) nogil + +cdef void ssytri2(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *lwork, int *info) nogil + +cdef void ssytri2x(char *uplo, int *n, s *a, int *lda, int *ipiv, s *work, int *nb, int *info) nogil + +cdef void ssytrs(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, int *info) nogil + +cdef void ssytrs2(char *uplo, int *n, int *nrhs, s *a, int *lda, int *ipiv, s *b, int *ldb, s *work, int *info) nogil + +cdef void stbcon(char *norm, char *uplo, char *diag, int *n, int *kd, s *ab, int *ldab, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void stbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void stbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, s *ab, int *ldab, s *b, int *ldb, int *info) nogil + +cdef void stfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, s *alpha, s *a, s *b, int *ldb) nogil + +cdef void stftri(char *transr, char *uplo, char *diag, int *n, s *a, int *info) nogil + +cdef void stfttp(char *transr, char *uplo, int *n, s *arf, s *ap, int *info) nogil + +cdef void stfttr(char *transr, char *uplo, int *n, s *arf, s *a, int *lda, int *info) nogil + +cdef void stgevc(char *side, char *howmny, bint *select, int *n, s *s, int *lds, s *p, int *ldp, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil + +cdef void stgex2(bint *wantq, bint *wantz, int *n, s *a, int 
*lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *j1, int *n1, int *n2, s *work, int *lwork, int *info) nogil + +cdef void stgexc(bint *wantq, bint *wantz, int *n, s *a, int *lda, s *b, int *ldb, s *q, int *ldq, s *z, int *ldz, int *ifst, int *ilst, s *work, int *lwork, int *info) nogil + +cdef void stgsen(int *ijob, bint *wantq, bint *wantz, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *alphar, s *alphai, s *beta, s *q, int *ldq, s *z, int *ldz, int *m, s *pl, s *pr, s *dif, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void stgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, s *a, int *lda, s *b, int *ldb, s *tola, s *tolb, s *alpha, s *beta, s *u, int *ldu, s *v, int *ldv, s *q, int *ldq, s *work, int *ncycle, int *info) nogil + +cdef void stgsna(char *job, char *howmny, bint *select, int *n, s *a, int *lda, s *b, int *ldb, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *dif, int *mm, int *m, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void stgsy2(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *rdsum, s *rdscal, int *iwork, int *pq, int *info) nogil + +cdef void stgsyl(char *trans, int *ijob, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *d, int *ldd, s *e, int *lde, s *f, int *ldf, s *scale, s *dif, s *work, int *lwork, int *iwork, int *info) nogil + +cdef void stpcon(char *norm, char *uplo, char *diag, int *n, s *ap, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void stpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *info) nogil + +cdef void stpqrt(int *m, int *n, int *l, int *nb, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, s *work, int *info) nogil + +cdef void stpqrt2(int *m, int *n, int *l, s *a, int *lda, s *b, int *ldb, s *t, int *ldt, int *info) nogil + +cdef void stprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, s *v, int *ldv, s *t, int *ldt, s *a, int *lda, s *b, int *ldb, s *work, int *ldwork) nogil + +cdef void stprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void stptri(char *uplo, char *diag, int *n, s *ap, int *info) nogil + +cdef void stptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *ap, s *b, int *ldb, int *info) nogil + +cdef void stpttf(char *transr, char *uplo, int *n, s *ap, s *arf, int *info) nogil + +cdef void stpttr(char *uplo, int *n, s *ap, s *a, int *lda, int *info) nogil + +cdef void strcon(char *norm, char *uplo, char *diag, int *n, s *a, int *lda, s *rcond, s *work, int *iwork, int *info) nogil + +cdef void strevc(char *side, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, int *mm, int *m, s *work, int *info) nogil + +cdef void strexc(char *compq, int *n, s *t, int *ldt, s *q, int *ldq, int *ifst, int *ilst, s *work, int *info) nogil + +cdef void strrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, s *x, int *ldx, s *ferr, s *berr, s *work, int *iwork, int *info) nogil + +cdef void strsen(char *job, char *compq, bint *select, int *n, s *t, int *ldt, s *q, int *ldq, s *wr, s *wi, int *m, s *s, s *sep, s *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void strsna(char 
*job, char *howmny, bint *select, int *n, s *t, int *ldt, s *vl, int *ldvl, s *vr, int *ldvr, s *s, s *sep, int *mm, int *m, s *work, int *ldwork, int *iwork, int *info) nogil + +cdef void strsyl(char *trana, char *tranb, int *isgn, int *m, int *n, s *a, int *lda, s *b, int *ldb, s *c, int *ldc, s *scale, int *info) nogil + +cdef void strti2(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil + +cdef void strtri(char *uplo, char *diag, int *n, s *a, int *lda, int *info) nogil + +cdef void strtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, s *a, int *lda, s *b, int *ldb, int *info) nogil + +cdef void strttf(char *transr, char *uplo, int *n, s *a, int *lda, s *arf, int *info) nogil + +cdef void strttp(char *uplo, int *n, s *a, int *lda, s *ap, int *info) nogil + +cdef void stzrzf(int *m, int *n, s *a, int *lda, s *tau, s *work, int *lwork, int *info) nogil + +cdef void xerbla_array(char *srname_array, int *srname_len, int *info) nogil + +cdef void zbbcsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, d *theta, d *phi, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, d *b11d, d *b11e, d *b12d, d *b12e, d *b21d, d *b21e, d *b22d, d *b22e, d *rwork, int *lrwork, int *info) nogil + +cdef void zbdsqr(char *uplo, int *n, int *ncvt, int *nru, int *ncc, d *d, d *e, z *vt, int *ldvt, z *u, int *ldu, z *c, int *ldc, d *rwork, int *info) nogil + +cdef void zcgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil + +cdef void zcposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, z *work, c *swork, d *rwork, int *iter, int *info) nogil + +cdef void zdrscl(int *n, d *sa, z *sx, int *incx) nogil + +cdef void zgbbrd(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *pt, int *ldpt, z *c, int *ldc, z *work, d *rwork, int *info) nogil + +cdef void zgbcon(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zgbequ(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgbequb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgbrfs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgbsv(int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgbsvx(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgbtf2(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void zgbtrf(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, int *ipiv, int *info) nogil + +cdef void zgbtrs(char *trans, int *n, int *kl, int *ku, int *nrhs, z *ab, int *ldab, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgebak(char *job, char *side, int *n, int *ilo, int *ihi, d *scale, int *m, z *v, int *ldv, int *info) nogil + +cdef void zgebal(char *job, int *n, z *a, int *lda, int *ilo, int *ihi, d *scale, int *info) nogil + 
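The prototypes in this file track SciPy's public scipy.linalg.cython_lapack interface: the single-character type aliases follow the usual LAPACK convention (s = float, d = double, c = float complex, z = double complex), every argument is passed by pointer in Fortran style, and each routine is declared nogil so it can run without the interpreter lock. As a minimal sketch of how Cython code shipped alongside this bridge could consume one of these declarations (the wrapper name and module layout here are illustrative, not part of the patch):

# cython: language_level=3
# Minimal sketch (illustrative, not from the vendored file): calling the
# spotrf prototype declared above through SciPy's public Cython bindings.
# `s` is LAPACK's single-precision real type, i.e. a C float.
from scipy.linalg.cython_lapack cimport spotrf

def cholesky_inplace(float[::1, :] a):
    """Overwrite the Fortran-ordered float32 matrix ``a`` with its lower
    Cholesky factor and return the LAPACK ``info`` code (0 means success,
    i > 0 means the leading minor of order i is not positive definite)."""
    cdef int n = <int>a.shape[0]
    cdef int lda = n
    cdef int info = 0
    cdef char uplo = b'L'
    spotrf(&uplo, &n, &a[0, 0], &lda, &info)
    return info

Because spotrf factors in place and reports failure through info rather than by raising, the caller must check the return value itself; that is the usual trade-off for bypassing the Python-level scipy.linalg wrappers in tight loops.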
+cdef void zgebd2(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *info) nogil + +cdef void zgebrd(int *m, int *n, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *work, int *lwork, int *info) nogil + +cdef void zgecon(char *norm, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zgeequ(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgeequb(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, int *info) nogil + +cdef void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil + +cdef void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil + +cdef void zgeev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgeevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *scale, d *abnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgehd2(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgehrd(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgelq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgelqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgels(char *trans, int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *work, int *lwork, int *info) nogil + +cdef void zgelsd(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil + +cdef void zgelss(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, d *s, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgelsy(int *m, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *jpvt, d *rcond, int *rank, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgemqrt(char *side, char *trans, int *m, int *n, int *k, int *nb, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *info) nogil + +cdef void zgeql2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgeqlf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgeqp3(int *m, int *n, z *a, int *lda, int *jpvt, z *tau, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgeqr2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgeqr2p(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgeqrf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgeqrfp(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgeqrt(int *m, int *n, int *nb, z *a, int *lda, z *t, int *ldt, z *work, int *info) nogil + +cdef void zgeqrt2(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil + +cdef void zgeqrt3(int *m, int *n, z *a, int *lda, z *t, int *ldt, int *info) nogil + +cdef void zgerfs(char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int 
*ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgerq2(int *m, int *n, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zgerqf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zgesc2(int *n, z *a, int *lda, z *rhs, int *ipiv, int *jpiv, d *scale) nogil + +cdef void zgesdd(char *jobz, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *iwork, int *info) nogil + +cdef void zgesv(int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgesvd(char *jobu, char *jobvt, int *m, int *n, z *a, int *lda, d *s, z *u, int *ldu, z *vt, int *ldvt, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zgesvx(char *fact, char *trans, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, char *equed, d *r, d *c, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgetc2(int *n, z *a, int *lda, int *ipiv, int *jpiv, int *info) nogil + +cdef void zgetf2(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void zgetrf(int *m, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void zgetri(int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zgetrs(char *trans, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zggbak(char *job, char *side, int *n, int *ilo, int *ihi, d *lscale, d *rscale, int *m, z *v, int *ldv, int *info) nogil + +cdef void zggbal(char *job, int *n, z *a, int *lda, z *b, int *ldb, int *ilo, int *ihi, d *lscale, d *rscale, d *work, int *info) nogil + +cdef void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info) nogil + +cdef void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info) nogil + +cdef void zggev(char *jobvl, char *jobvr, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zggevx(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *vl, int *ldvl, z *vr, int *ldvr, int *ilo, int *ihi, d *lscale, d *rscale, d *abnrm, d *bbnrm, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, bint *bwork, int *info) nogil + +cdef void zggglm(int *n, int *m, int *p, z *a, int *lda, z *b, int *ldb, z *d, z *x, z *y, z *work, int *lwork, int *info) nogil + +cdef void zgghrd(char *compq, char *compz, int *n, int *ilo, int *ihi, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *info) nogil + +cdef void zgglse(int *m, int *n, int *p, z *a, int *lda, z *b, int *ldb, z *c, z *d, z *x, z *work, int *lwork, int *info) nogil + +cdef void zggqrf(int *n, int *m, int *p, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil + +cdef void zggrqf(int *m, int *p, int *n, z *a, int *lda, z *taua, z *b, int *ldb, z *taub, z *work, int *lwork, int *info) nogil + +cdef void zgtcon(char *norm, int *n, z 
*dl, z *d, z *du, z *du2, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zgtrfs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgtsv(int *n, int *nrhs, z *dl, z *d, z *du, z *b, int *ldb, int *info) nogil + +cdef void zgtsvx(char *fact, char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *dlf, z *df, z *duf, z *du2, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zgttrf(int *n, z *dl, z *d, z *du, z *du2, int *ipiv, int *info) nogil + +cdef void zgttrs(char *trans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zgtts2(int *itrans, int *n, int *nrhs, z *dl, z *d, z *du, z *du2, int *ipiv, z *b, int *ldb) nogil + +cdef void zhbev(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhbevd(char *jobz, char *uplo, int *n, int *kd, z *ab, int *ldab, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhbevx(char *jobz, char *range, char *uplo, int *n, int *kd, z *ab, int *ldab, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhbgst(char *vect, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *x, int *ldx, z *work, d *rwork, int *info) nogil + +cdef void zhbgv(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhbgvd(char *jobz, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhbgvx(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, z *ab, int *ldab, z *bb, int *ldbb, z *q, int *ldq, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhbtrd(char *vect, char *uplo, int *n, int *kd, z *ab, int *ldab, d *d, d *e, z *q, int *ldq, z *work, int *info) nogil + +cdef void zhecon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zheequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil + +cdef void zheev(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zheevd(char *jobz, char *uplo, int *n, z *a, int *lda, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zheevr(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zheevx(char *jobz, char *range, char *uplo, int *n, z *a, int *lda, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhegs2(int *itype, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void zhegst(int *itype, char *uplo, int *n, z 
*a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void zhegv(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zhegvd(int *itype, char *jobz, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *w, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhegvx(int *itype, char *jobz, char *range, char *uplo, int *n, z *a, int *lda, z *b, int *ldb, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zherfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zhesv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil + +cdef void zhesvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zheswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil + +cdef void zhetd2(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, int *info) nogil + +cdef void zhetf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void zhetrd(char *uplo, int *n, z *a, int *lda, d *d, d *e, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zhetrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zhetri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil + +cdef void zhetri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zhetri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil + +cdef void zhetrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zhetrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil + +cdef void zhfrk(char *transr, char *uplo, char *trans, int *n, int *k, d *alpha, z *a, int *lda, d *beta, z *c) nogil + +cdef void zhgeqz(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *t, int *ldt, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zhpcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zhpev(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhpevd(char *jobz, char *uplo, int *n, z *ap, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhpevx(char *jobz, char *range, char *uplo, int *n, z *ap, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhpgst(int *itype, char *uplo, int *n, z *ap, z *bp, int *info) nogil + +cdef void zhpgv(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, d *rwork, int *info) nogil + +cdef void zhpgvd(int *itype, char *jobz, char *uplo, int *n, z *ap, z *bp, d *w, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zhpgvx(int *itype, char 
*jobz, char *range, char *uplo, int *n, z *ap, z *bp, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, z *work, d *rwork, int *iwork, int *ifail, int *info) nogil + +cdef void zhprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zhpsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zhpsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zhptrd(char *uplo, int *n, z *ap, d *d, d *e, z *tau, int *info) nogil + +cdef void zhptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil + +cdef void zhptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil + +cdef void zhptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zhsein(char *side, char *eigsrc, char *initv, bint *select, int *n, z *h, int *ldh, z *w, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *ifaill, int *ifailr, int *info) nogil + +cdef void zhseqr(char *job, char *compz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, z *z, int *ldz, z *work, int *lwork, int *info) nogil + +cdef void zlabrd(int *m, int *n, int *nb, z *a, int *lda, d *d, d *e, z *tauq, z *taup, z *x, int *ldx, z *y, int *ldy) nogil + +cdef void zlacgv(int *n, z *x, int *incx) nogil + +cdef void zlacn2(int *n, z *v, z *x, d *est, int *kase, int *isave) nogil + +cdef void zlacon(int *n, z *v, z *x, d *est, int *kase) nogil + +cdef void zlacp2(char *uplo, int *m, int *n, d *a, int *lda, z *b, int *ldb) nogil + +cdef void zlacpy(char *uplo, int *m, int *n, z *a, int *lda, z *b, int *ldb) nogil + +cdef void zlacrm(int *m, int *n, z *a, int *lda, d *b, int *ldb, z *c, int *ldc, d *rwork) nogil + +cdef void zlacrt(int *n, z *cx, int *incx, z *cy, int *incy, z *c, z *s) nogil + +cdef z zladiv(z *x, z *y) nogil + +cdef void zlaed0(int *qsiz, int *n, d *d, d *e, z *q, int *ldq, z *qstore, int *ldqs, d *rwork, int *iwork, int *info) nogil + +cdef void zlaed7(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, d *d, z *q, int *ldq, d *rho, int *indxq, d *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, d *givnum, z *work, d *rwork, int *iwork, int *info) nogil + +cdef void zlaed8(int *k, int *n, int *qsiz, z *q, int *ldq, d *d, d *rho, int *cutpnt, d *z, d *dlamda, z *q2, int *ldq2, d *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, d *givnum, int *info) nogil + +cdef void zlaein(bint *rightv, bint *noinit, int *n, z *h, int *ldh, z *w, z *v, z *b, int *ldb, d *rwork, d *eps3, d *smlnum, int *info) nogil + +cdef void zlaesy(z *a, z *b, z *c, z *rt1, z *rt2, z *evscal, z *cs1, z *sn1) nogil + +cdef void zlaev2(z *a, z *b, z *c, d *rt1, d *rt2, d *cs1, z *sn1) nogil + +cdef void zlag2c(int *m, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil + +cdef void zlags2(bint *upper, d *a1, z *a2, d *a3, d *b1, z *b2, d *b3, d *csu, z *snu, d *csv, z *snv, d *csq, z *snq) nogil + +cdef void zlagtm(char *trans, int *n, int *nrhs, d *alpha, z *dl, z *d, z *du, z *x, int *ldx, d *beta, z *b, int *ldb) nogil + +cdef void zlahef(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil + +cdef void zlahqr(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int 
*iloz, int *ihiz, z *z, int *ldz, int *info) nogil + +cdef void zlahr2(int *n, int *k, int *nb, z *a, int *lda, z *tau, z *t, int *ldt, z *y, int *ldy) nogil + +cdef void zlaic1(int *job, int *j, z *x, d *sest, z *w, z *gamma, d *sestpr, z *s, z *c) nogil + +cdef void zlals0(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, d *givnum, int *ldgnum, d *poles, d *difl, d *difr, d *z, int *k, d *c, d *s, d *rwork, int *info) nogil + +cdef void zlalsa(int *icompq, int *smlsiz, int *n, int *nrhs, z *b, int *ldb, z *bx, int *ldbx, d *u, int *ldu, d *vt, int *k, d *difl, d *difr, d *z, d *poles, int *givptr, int *givcol, int *ldgcol, int *perm, d *givnum, d *c, d *s, d *rwork, int *iwork, int *info) nogil + +cdef void zlalsd(char *uplo, int *smlsiz, int *n, int *nrhs, d *d, d *e, z *b, int *ldb, d *rcond, int *rank, z *work, d *rwork, int *iwork, int *info) nogil + +cdef d zlangb(char *norm, int *n, int *kl, int *ku, z *ab, int *ldab, d *work) nogil + +cdef d zlange(char *norm, int *m, int *n, z *a, int *lda, d *work) nogil + +cdef d zlangt(char *norm, int *n, z *dl, z *d, z *du) nogil + +cdef d zlanhb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil + +cdef d zlanhe(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil + +cdef d zlanhf(char *norm, char *transr, char *uplo, int *n, z *a, d *work) nogil + +cdef d zlanhp(char *norm, char *uplo, int *n, z *ap, d *work) nogil + +cdef d zlanhs(char *norm, int *n, z *a, int *lda, d *work) nogil + +cdef d zlanht(char *norm, int *n, d *d, z *e) nogil + +cdef d zlansb(char *norm, char *uplo, int *n, int *k, z *ab, int *ldab, d *work) nogil + +cdef d zlansp(char *norm, char *uplo, int *n, z *ap, d *work) nogil + +cdef d zlansy(char *norm, char *uplo, int *n, z *a, int *lda, d *work) nogil + +cdef d zlantb(char *norm, char *uplo, char *diag, int *n, int *k, z *ab, int *ldab, d *work) nogil + +cdef d zlantp(char *norm, char *uplo, char *diag, int *n, z *ap, d *work) nogil + +cdef d zlantr(char *norm, char *uplo, char *diag, int *m, int *n, z *a, int *lda, d *work) nogil + +cdef void zlapll(int *n, z *x, int *incx, z *y, int *incy, d *ssmin) nogil + +cdef void zlapmr(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil + +cdef void zlapmt(bint *forwrd, int *m, int *n, z *x, int *ldx, int *k) nogil + +cdef void zlaqgb(int *m, int *n, int *kl, int *ku, z *ab, int *ldab, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil + +cdef void zlaqge(int *m, int *n, z *a, int *lda, d *r, d *c, d *rowcnd, d *colcnd, d *amax, char *equed) nogil + +cdef void zlaqhb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqhe(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqhp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqp2(int *m, int *n, int *offset, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *work) nogil + +cdef void zlaqps(int *m, int *n, int *offset, int *nb, int *kb, z *a, int *lda, int *jpvt, z *tau, d *vn1, d *vn2, z *auxv, z *f, int *ldf) nogil + +cdef void zlaqr0(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil + +cdef void zlaqr1(int *n, z *h, int *ldh, z *s1, z *s2, z *v) nogil + +cdef void zlaqr2(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int 
*iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil + +cdef void zlaqr3(bint *wantt, bint *wantz, int *n, int *ktop, int *kbot, int *nw, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, int *ns, int *nd, z *sh, z *v, int *ldv, int *nh, z *t, int *ldt, int *nv, z *wv, int *ldwv, z *work, int *lwork) nogil + +cdef void zlaqr4(bint *wantt, bint *wantz, int *n, int *ilo, int *ihi, z *h, int *ldh, z *w, int *iloz, int *ihiz, z *z, int *ldz, z *work, int *lwork, int *info) nogil + +cdef void zlaqr5(bint *wantt, bint *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, z *s, z *h, int *ldh, int *iloz, int *ihiz, z *z, int *ldz, z *v, int *ldv, z *u, int *ldu, int *nv, z *wv, int *ldwv, int *nh, z *wh, int *ldwh) nogil + +cdef void zlaqsb(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqsp(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlaqsy(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, char *equed) nogil + +cdef void zlar1v(int *n, int *b1, int *bn, d *lambda_, d *d, d *l, d *ld, d *lld, d *pivmin, d *gaptol, z *z, bint *wantnc, int *negcnt, d *ztz, d *mingma, int *r, int *isuppz, d *nrminv, d *resid, d *rqcorr, d *work) nogil + +cdef void zlar2v(int *n, z *x, z *y, z *z, int *incx, d *c, z *s, int *incc) nogil + +cdef void zlarcm(int *m, int *n, d *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *rwork) nogil + +cdef void zlarf(char *side, int *m, int *n, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil + +cdef void zlarfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil + +cdef void zlarfg(int *n, z *alpha, z *x, int *incx, z *tau) nogil + +cdef void zlarfgp(int *n, z *alpha, z *x, int *incx, z *tau) nogil + +cdef void zlarft(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil + +cdef void zlarfx(char *side, int *m, int *n, z *v, z *tau, z *c, int *ldc, z *work) nogil + +cdef void zlargv(int *n, z *x, int *incx, z *y, int *incy, d *c, int *incc) nogil + +cdef void zlarnv(int *idist, int *iseed, int *n, z *x) nogil + +cdef void zlarrv(int *n, d *vl, d *vu, d *d, d *l, d *pivmin, int *isplit, int *m, int *dol, int *dou, d *minrgp, d *rtol1, d *rtol2, d *w, d *werr, d *wgap, int *iblock, int *indexw, d *gers, z *z, int *ldz, int *isuppz, d *work, int *iwork, int *info) nogil + +cdef void zlartg(z *f, z *g, d *cs, z *sn, z *r) nogil + +cdef void zlartv(int *n, z *x, int *incx, z *y, int *incy, d *c, z *s, int *incc) nogil + +cdef void zlarz(char *side, int *m, int *n, int *l, z *v, int *incv, z *tau, z *c, int *ldc, z *work) nogil + +cdef void zlarzb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *c, int *ldc, z *work, int *ldwork) nogil + +cdef void zlarzt(char *direct, char *storev, int *n, int *k, z *v, int *ldv, z *tau, z *t, int *ldt) nogil + +cdef void zlascl(char *type_bn, int *kl, int *ku, d *cfrom, d *cto, int *m, int *n, z *a, int *lda, int *info) nogil + +cdef void zlaset(char *uplo, int *m, int *n, z *alpha, z *beta, z *a, int *lda) nogil + +cdef void zlasr(char *side, char *pivot, char *direct, int *m, int *n, d *c, d *s, z *a, int *lda) nogil + +cdef void zlassq(int *n, z *x, int *incx, d *scale, d *sumsq) nogil + +cdef void zlaswp(int *n, z 
*a, int *lda, int *k1, int *k2, int *ipiv, int *incx) nogil + +cdef void zlasyf(char *uplo, int *n, int *nb, int *kb, z *a, int *lda, int *ipiv, z *w, int *ldw, int *info) nogil + +cdef void zlat2c(char *uplo, int *n, z *a, int *lda, c *sa, int *ldsa, int *info) nogil + +cdef void zlatbs(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, z *ab, int *ldab, z *x, d *scale, d *cnorm, int *info) nogil + +cdef void zlatdf(int *ijob, int *n, z *z, int *ldz, z *rhs, d *rdsum, d *rdscal, int *ipiv, int *jpiv) nogil + +cdef void zlatps(char *uplo, char *trans, char *diag, char *normin, int *n, z *ap, z *x, d *scale, d *cnorm, int *info) nogil + +cdef void zlatrd(char *uplo, int *n, int *nb, z *a, int *lda, d *e, z *tau, z *w, int *ldw) nogil + +cdef void zlatrs(char *uplo, char *trans, char *diag, char *normin, int *n, z *a, int *lda, z *x, d *scale, d *cnorm, int *info) nogil + +cdef void zlatrz(int *m, int *n, int *l, z *a, int *lda, z *tau, z *work) nogil + +cdef void zlauu2(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zlauum(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpbcon(char *uplo, int *n, int *kd, z *ab, int *ldab, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zpbequ(char *uplo, int *n, int *kd, z *ab, int *ldab, d *s, d *scond, d *amax, int *info) nogil + +cdef void zpbrfs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpbstf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil + +cdef void zpbsv(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil + +cdef void zpbsvx(char *fact, char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *afb, int *ldafb, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpbtf2(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil + +cdef void zpbtrf(char *uplo, int *n, int *kd, z *ab, int *ldab, int *info) nogil + +cdef void zpbtrs(char *uplo, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil + +cdef void zpftrf(char *transr, char *uplo, int *n, z *a, int *info) nogil + +cdef void zpftri(char *transr, char *uplo, int *n, z *a, int *info) nogil + +cdef void zpftrs(char *transr, char *uplo, int *n, int *nrhs, z *a, z *b, int *ldb, int *info) nogil + +cdef void zpocon(char *uplo, int *n, z *a, int *lda, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zpoequ(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil + +cdef void zpoequb(int *n, z *a, int *lda, d *s, d *scond, d *amax, int *info) nogil + +cdef void zporfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zposv(char *uplo, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void zposvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpotf2(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpotrf(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpotri(char *uplo, int *n, z *a, int *lda, int *info) nogil + +cdef void zpotrs(char *uplo, int *n, int *nrhs, z *a, int *lda, z 
*b, int *ldb, int *info) nogil + +cdef void zppcon(char *uplo, int *n, z *ap, d *anorm, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void zppequ(char *uplo, int *n, z *ap, d *s, d *scond, d *amax, int *info) nogil + +cdef void zpprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zppsv(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil + +cdef void zppsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, char *equed, d *s, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpptrf(char *uplo, int *n, z *ap, int *info) nogil + +cdef void zpptri(char *uplo, int *n, z *ap, int *info) nogil + +cdef void zpptrs(char *uplo, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil + +cdef void zpstf2(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil + +cdef void zpstrf(char *uplo, int *n, z *a, int *lda, int *piv, int *rank, d *tol, d *work, int *info) nogil + +cdef void zptcon(int *n, d *d, z *e, d *anorm, d *rcond, d *rwork, int *info) nogil + +cdef void zpteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil + +cdef void zptrfs(char *uplo, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zptsv(int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil + +cdef void zptsvx(char *fact, int *n, int *nrhs, d *d, z *e, d *df, z *ef, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zpttrf(int *n, d *d, z *e, int *info) nogil + +cdef void zpttrs(char *uplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb, int *info) nogil + +cdef void zptts2(int *iuplo, int *n, int *nrhs, d *d, z *e, z *b, int *ldb) nogil + +cdef void zrot(int *n, z *cx, int *incx, z *cy, int *incy, d *c, z *s) nogil + +cdef void zspcon(char *uplo, int *n, z *ap, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zspmv(char *uplo, int *n, z *alpha, z *ap, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zspr(char *uplo, int *n, z *alpha, z *x, int *incx, z *ap) nogil + +cdef void zsprfs(char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zspsv(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zspsvx(char *fact, char *uplo, int *n, int *nrhs, z *ap, z *afp, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zsptrf(char *uplo, int *n, z *ap, int *ipiv, int *info) nogil + +cdef void zsptri(char *uplo, int *n, z *ap, int *ipiv, z *work, int *info) nogil + +cdef void zsptrs(char *uplo, int *n, int *nrhs, z *ap, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zstedc(char *compz, int *n, d *d, d *e, z *z, int *ldz, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *liwork, int *info) nogil + +cdef void zstegr(char *jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, d *abstol, int *m, d *w, z *z, int *ldz, int *isuppz, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void zstein(int *n, d *d, d *e, int *m, d *w, int *iblock, int *isplit, z *z, int *ldz, d *work, int *iwork, int *ifail, int *info) nogil + +cdef void zstemr(char 
*jobz, char *range, int *n, d *d, d *e, d *vl, d *vu, int *il, int *iu, int *m, d *w, z *z, int *ldz, int *nzc, int *isuppz, bint *tryrac, d *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void zsteqr(char *compz, int *n, d *d, d *e, z *z, int *ldz, d *work, int *info) nogil + +cdef void zsycon(char *uplo, int *n, z *a, int *lda, int *ipiv, d *anorm, d *rcond, z *work, int *info) nogil + +cdef void zsyconv(char *uplo, char *way, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil + +cdef void zsyequb(char *uplo, int *n, z *a, int *lda, d *s, d *scond, d *amax, z *work, int *info) nogil + +cdef void zsymv(char *uplo, int *n, z *alpha, z *a, int *lda, z *x, int *incx, z *beta, z *y, int *incy) nogil + +cdef void zsyr(char *uplo, int *n, z *alpha, z *x, int *incx, z *a, int *lda) nogil + +cdef void zsyrfs(char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void zsysv(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *lwork, int *info) nogil + +cdef void zsysvx(char *fact, char *uplo, int *n, int *nrhs, z *a, int *lda, z *af, int *ldaf, int *ipiv, z *b, int *ldb, z *x, int *ldx, d *rcond, d *ferr, d *berr, z *work, int *lwork, d *rwork, int *info) nogil + +cdef void zsyswapr(char *uplo, int *n, z *a, int *lda, int *i1, int *i2) nogil + +cdef void zsytf2(char *uplo, int *n, z *a, int *lda, int *ipiv, int *info) nogil + +cdef void zsytrf(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zsytri(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *info) nogil + +cdef void zsytri2(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *lwork, int *info) nogil + +cdef void zsytri2x(char *uplo, int *n, z *a, int *lda, int *ipiv, z *work, int *nb, int *info) nogil + +cdef void zsytrs(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, int *info) nogil + +cdef void zsytrs2(char *uplo, int *n, int *nrhs, z *a, int *lda, int *ipiv, z *b, int *ldb, z *work, int *info) nogil + +cdef void ztbcon(char *norm, char *uplo, char *diag, int *n, int *kd, z *ab, int *ldab, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void ztbrfs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void ztbtrs(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, z *ab, int *ldab, z *b, int *ldb, int *info) nogil + +cdef void ztfsm(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, z *alpha, z *a, z *b, int *ldb) nogil + +cdef void ztftri(char *transr, char *uplo, char *diag, int *n, z *a, int *info) nogil + +cdef void ztfttp(char *transr, char *uplo, int *n, z *arf, z *ap, int *info) nogil + +cdef void ztfttr(char *transr, char *uplo, int *n, z *arf, z *a, int *lda, int *info) nogil + +cdef void ztgevc(char *side, char *howmny, bint *select, int *n, z *s, int *lds, z *p, int *ldp, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil + +cdef void ztgex2(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *j1, int *info) nogil + +cdef void ztgexc(bint *wantq, bint *wantz, int *n, z *a, int *lda, z *b, int *ldb, z *q, int *ldq, z *z, int *ldz, int *ifst, int *ilst, int *info) nogil + +cdef void ztgsen(int *ijob, bint 
*wantq, bint *wantz, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *alpha, z *beta, z *q, int *ldq, z *z, int *ldz, int *m, d *pl, d *pr, d *dif, z *work, int *lwork, int *iwork, int *liwork, int *info) nogil + +cdef void ztgsja(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, z *a, int *lda, z *b, int *ldb, d *tola, d *tolb, d *alpha, d *beta, z *u, int *ldu, z *v, int *ldv, z *q, int *ldq, z *work, int *ncycle, int *info) nogil + +cdef void ztgsna(char *job, char *howmny, bint *select, int *n, z *a, int *lda, z *b, int *ldb, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *dif, int *mm, int *m, z *work, int *lwork, int *iwork, int *info) nogil + +cdef void ztgsy2(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *rdsum, d *rdscal, int *info) nogil + +cdef void ztgsyl(char *trans, int *ijob, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, z *d, int *ldd, z *e, int *lde, z *f, int *ldf, d *scale, d *dif, z *work, int *lwork, int *iwork, int *info) nogil + +cdef void ztpcon(char *norm, char *uplo, char *diag, int *n, z *ap, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void ztpmqrt(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *info) nogil + +cdef void ztpqrt(int *m, int *n, int *l, int *nb, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, z *work, int *info) nogil + +cdef void ztpqrt2(int *m, int *n, int *l, z *a, int *lda, z *b, int *ldb, z *t, int *ldt, int *info) nogil + +cdef void ztprfb(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, z *v, int *ldv, z *t, int *ldt, z *a, int *lda, z *b, int *ldb, z *work, int *ldwork) nogil + +cdef void ztprfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void ztptri(char *uplo, char *diag, int *n, z *ap, int *info) nogil + +cdef void ztptrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *ap, z *b, int *ldb, int *info) nogil + +cdef void ztpttf(char *transr, char *uplo, int *n, z *ap, z *arf, int *info) nogil + +cdef void ztpttr(char *uplo, int *n, z *ap, z *a, int *lda, int *info) nogil + +cdef void ztrcon(char *norm, char *uplo, char *diag, int *n, z *a, int *lda, d *rcond, z *work, d *rwork, int *info) nogil + +cdef void ztrevc(char *side, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, int *mm, int *m, z *work, d *rwork, int *info) nogil + +cdef void ztrexc(char *compq, int *n, z *t, int *ldt, z *q, int *ldq, int *ifst, int *ilst, int *info) nogil + +cdef void ztrrfs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, z *x, int *ldx, d *ferr, d *berr, z *work, d *rwork, int *info) nogil + +cdef void ztrsen(char *job, char *compq, bint *select, int *n, z *t, int *ldt, z *q, int *ldq, z *w, int *m, d *s, d *sep, z *work, int *lwork, int *info) nogil + +cdef void ztrsna(char *job, char *howmny, bint *select, int *n, z *t, int *ldt, z *vl, int *ldvl, z *vr, int *ldvr, d *s, d *sep, int *mm, int *m, z *work, int *ldwork, d *rwork, int *info) nogil + +cdef void ztrsyl(char *trana, char *tranb, int *isgn, int *m, int *n, z *a, int *lda, z *b, int *ldb, z *c, int *ldc, d *scale, int *info) nogil + +cdef void ztrti2(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil 
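+
+# Context (assumed from SciPy's cython_lapack conventions): the one-letter
+# pointer types in these declarations are ctypedef aliases for the LAPACK
+# argument types -- d = double (real*8), z = double complex (complex*16),
+# c = float complex (complex*8) -- and bint marks Fortran LOGICAL arguments.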
+ +cdef void ztrtri(char *uplo, char *diag, int *n, z *a, int *lda, int *info) nogil + +cdef void ztrtrs(char *uplo, char *trans, char *diag, int *n, int *nrhs, z *a, int *lda, z *b, int *ldb, int *info) nogil + +cdef void ztrttf(char *transr, char *uplo, int *n, z *a, int *lda, z *arf, int *info) nogil + +cdef void ztrttp(char *uplo, int *n, z *a, int *lda, z *ap, int *info) nogil + +cdef void ztzrzf(int *m, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zunbdb(char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, d *phi, z *taup1, z *taup2, z *tauq1, z *tauq2, z *work, int *lwork, int *info) nogil + +cdef void zuncsd(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, z *x11, int *ldx11, z *x12, int *ldx12, z *x21, int *ldx21, z *x22, int *ldx22, d *theta, z *u1, int *ldu1, z *u2, int *ldu2, z *v1t, int *ldv1t, z *v2t, int *ldv2t, z *work, int *lwork, d *rwork, int *lrwork, int *iwork, int *info) nogil + +cdef void zung2l(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zung2r(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zungbr(char *vect, int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zunghr(int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungl2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zunglq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungql(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungqr(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungr2(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *info) nogil + +cdef void zungrq(int *m, int *n, int *k, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zungtr(char *uplo, int *n, z *a, int *lda, z *tau, z *work, int *lwork, int *info) nogil + +cdef void zunm2l(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunm2r(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmbr(char *vect, char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmhr(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunml2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmlq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmql(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmqr(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmr2(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmr3(char *side, 
char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *info) nogil + +cdef void zunmrq(char *side, char *trans, int *m, int *n, int *k, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmrz(char *side, char *trans, int *m, int *n, int *k, int *l, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zunmtr(char *side, char *uplo, char *trans, int *m, int *n, z *a, int *lda, z *tau, z *c, int *ldc, z *work, int *lwork, int *info) nogil + +cdef void zupgtr(char *uplo, int *n, z *ap, z *tau, z *q, int *ldq, z *work, int *info) nogil + +cdef void zupmtr(char *side, char *uplo, char *trans, int *m, int *n, z *ap, z *tau, z *c, int *ldc, z *work, int *info) nogil diff --git a/voice_bridge/scipy/linalg/cython_lapack.pyd b/voice_bridge/scipy/linalg/cython_lapack.pyd new file mode 100644 index 0000000000000000000000000000000000000000..ee119d668fcbbdbebfc48c82fd440c695aae4fc3 Binary files /dev/null and b/voice_bridge/scipy/linalg/cython_lapack.pyd differ diff --git a/voice_bridge/scipy/linalg/src/id_dist/doc/doc.tex b/voice_bridge/scipy/linalg/src/id_dist/doc/doc.tex new file mode 100644 index 0000000000000000000000000000000000000000..8bcece8c4b69a0e6f5bf7a65122b1109ab2b0460 --- /dev/null +++ b/voice_bridge/scipy/linalg/src/id_dist/doc/doc.tex @@ -0,0 +1,977 @@ +\documentclass[letterpaper,12pt]{article} +\usepackage[margin=1in]{geometry} +\usepackage{verbatim} +\usepackage{amsmath} +\usepackage{supertabular} +\usepackage{array} + +\def\T{{\hbox{\scriptsize{\rm T}}}} +\def\epsilon{\varepsilon} +\def\bigoh{\mathcal{O}} +\def\phi{\varphi} +\def\st{{\hbox{\scriptsize{\rm st}}}} +\def\th{{\hbox{\scriptsize{\rm th}}}} +\def\x{\mathbf{x}} + + +\title{ID: A software package for low-rank approximation + of matrices via interpolative decompositions, Version 0.4} +\author{Per-Gunnar Martinsson, Vladimir Rokhlin,\\ + Yoel Shkolnisky, and Mark Tygert} + + +\begin{document} + +\maketitle + +\newpage + +{\parindent=0pt + +The present document and all of the software +in the accompanying distribution (which is contained in the directory +{\tt id\_dist} and its subdirectories, or in the file +{\tt id\_dist.tar.gz})\, is + +\bigskip + +Copyright \copyright\ 2014 by P.-G. Martinsson, V. Rokhlin, +Y. Shkolnisky, and M. Tygert. + +\bigskip + +All rights reserved. + +\bigskip + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +\begin{enumerate} +\item Redistributions of source code must retain the above copyright +notice, this list of conditions, and the following disclaimer. +\item Redistributions in binary form must reproduce the above copyright +notice, this list of conditions, and the following disclaimer in the +documentation and/or other materials provided with the distribution. +\item None of the names of the copyright holders may be used to endorse +or promote products derived from this software without specific prior +written permission. +\end{enumerate} + +\bigskip + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNERS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +} + +\newpage + +\tableofcontents + +\newpage + + + +\hrule + +\medskip + +\centerline{\Large \bf IMPORTANT} + +\medskip + +\hrule + +\medskip + +\noindent At the minimum, please read Subsection~\ref{warning} +and Section~\ref{naming} below, and beware that the {\it N.B.}'s +in the source code comments highlight key information about the routines; +{\it N.B.} stands for {\it nota bene} (Latin for ``note well''). + +\medskip + +\hrule + +\bigskip + + + +\section{Introduction} + +This software distribution provides Fortran routines +for computing low-rank approximations to matrices, +in the forms of interpolative decompositions (IDs) +and singular value decompositions (SVDs). +The routines use algorithms based on the ID. +The ID is also commonly known as +the approximation obtained via skeletonization, +the approximation obtained via subsampling, +and the approximation obtained via subset selection. +The ID provides many advantages in many applications, +and we suspect that it will become increasingly popular +once tools for its computation become more widely available. +This software distribution includes some such tools, +as well as tools for computing low-rank approximations +in the form of SVDs. +Section~\ref{defs} below defines IDs and SVDs, +and provides references to detailed discussions of the algorithms +used in this software package. + +Please beware that normalized power iterations are better suited than +the software in this distribution +for computing principal component analyses +in the typical case when the square of the signal-to-noise ratio +is not orders of magnitude greater than both dimensions +of the data matrix; see~\cite{halko-martinsson-tropp}. + +The algorithms used in this distribution have been optimized +for accuracy, efficiency, and reliability; +as a somewhat counterintuitive consequence, many must be randomized. +All randomized codes in this software package succeed +with overwhelmingly high probability (see, for example, +\cite{halko-martinsson-tropp}). +The truly paranoid are welcome to use the routines {\tt idd\_diffsnorm} +and {\tt idz\_diffsnorm} to evaluate rapidly the quality +of the approximations produced by the randomized algorithms +(as done, for example, in the files +{\tt idd\_a\_test.f}, {\tt idd\_r\_test.f}, {\tt idz\_a\_test.f}, +and {\tt idz\_r\_test.f} in the {\tt test} subdirectory +of the main directory {\tt id\_dist}). +In most circumstances, evaluating the quality of an approximation +via routines {\tt idd\_diffsnorm} or {\tt idz\_diffsnorm} is much faster +than forming the approximation to be evaluated. Still, we are unaware +of any instance in which a properly-compiled routine failed to produce +an accurate approximation. +To facilitate successful compilation, we encourage the user +to read the instructions in the next section, +and to read Section~\ref{naming}, too. 
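+
+For readers working from the copy of this package that ships inside
+SciPy (where it backs the {\tt scipy.linalg.interpolative} module),
+the following Python sketch illustrates the workflow just described:
+compute a randomized ID, then rapidly estimate the quality of the
+resulting approximation in the spirit of {\tt idd\_diffsnorm}.  The
+sketch illustrates the SciPy wrapper and is not part of this
+distribution.
+
+\begin{verbatim}
+import numpy as np
+import scipy.linalg.interpolative as sli
+
+# A numerically low-rank test matrix (a Hilbert matrix).
+n = 200
+A = 1.0 / (np.arange(n)[:, None] + np.arange(n)[None, :] + 1.0)
+
+# Randomized ID to precision 1e-8; the rank k is chosen adaptively.
+k, idx, proj = sli.interp_decomp(A, 1e-8)
+
+# Rebuild the approximation B * P from the skeleton columns.
+B = sli.reconstruct_skel_matrix(A, k, idx)
+approx = sli.reconstruct_matrix_from_id(B, idx, proj)
+
+# Randomized power-method estimate of || A - B P ||; for large
+# matrices this is much faster than forming the approximation.
+print(k, sli.estimate_spectral_norm_diff(A, approx))
+\end{verbatim}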
+ + + +\section{Compilation instructions} + + +Followed in numerical order, the subsections of this section +provide step-by-step instructions for compiling the software +under a Unix-compatible operating system. + + +\subsection{Beware that default command-line flags may not be + sufficient for compiling the source codes!} +\label{warning} + +The Fortran source codes in this distribution pass {\tt real*8} +variables as integer variables, integers as {\tt real*8}'s, +{\tt real*8}'s as {\tt complex*16}'s, and so on. +This is common practice in numerical codes, and is not an error; +be sure to provide the relevant command-line flags to the compiler +(for example, run {\tt fort77} and {\tt f2c} with the flag {\tt -!P}). +When following the compilation instructions +in Subsection~\ref{makefile_edit} below, +be sure to set {\tt FFLAGS} appropriately. + + +\subsection{Install LAPACK} + +The SVD routines in this distribution depend on LAPACK. +Before compiling the present distribution, +create the LAPACK and BLAS archive (library) {\tt .a} files; +information about installing LAPACK is available +at {\tt http://www.netlib.org/lapack/} (and several other web sites). + + +\subsection{Decompress and untar the file {\tt id\_dist.tar.gz}} + +At the command line, decompress and untar the file +{\tt id\_dist.tar.gz} by issuing a command such as +{\tt tar -xvvzf id\_dist.tar.gz}. +This will create a directory named {\tt id\_dist}. + + +\subsection{Edit the Makefile} +\label{makefile_edit} + +The directory {\tt id\_dist} contains a file named {\tt Makefile}. +In {\tt Makefile}, set the following: +% +\begin{itemize} +\item {\tt FC} is the Fortran compiler. +\item {\tt FFLAGS} is the set of command-line flags + (specifying optimization settings, for example) + for the Fortran compiler specified by {\tt FC}; + please heed the warning in Subsection~\ref{warning} above! +\item {\tt BLAS\_LIB} is the file-system path to the BLAS archive + (library) {\tt .a} file. +\item {\tt LAPACK\_LIB} is the file-system path to the LAPACK archive + (library) {\tt .a} file. +\item {\tt ARCH} is the archiver utility (usually {\tt ar}). +\item {\tt ARCHFLAGS} is the set of command-line flags + for the archiver specified by {\tt ARCH} needed + to create an archive (usually {\tt cr}). +\item {\tt RANLIB} is to be set to {\tt ranlib} + when {\tt ranlib} is available, and is to be set to {\tt echo} + when {\tt ranlib} is not available. +\end{itemize} + + +\subsection{Make and test the libraries} + +At the command line in a shell that adheres +to the Bourne shell conventions for redirection, issue the command +``{\tt make clean; make}'' to both create the archive (library) +{\tt id\_lib.a} and test it. +(In most modern Unix distributions, {\tt sh} is the Bourne shell, +or else is fully compatible with the Bourne shell; +the Korn shell {\tt ksh} and the Bourne-again shell {\tt bash} +also use the Bourne shell conventions for redirection.) +{\tt make} places the file {\tt id\_lib.a} +in the directory {\tt id\_dist}; the archive (library) file +{\tt id\_lib.a} contains machine code for all user-callable routines +in this distribution. + + + +\section{Naming conventions} +\label{naming} + +The names of routines and files in this distribution +start with prefixes, followed by an underscore (``\_''). +The prefixes are two to four characters in length, +and have the following meanings: +% +\begin{itemize} +\item The first two letters are always ``{\tt id}'', + the name of this distribution. 
+\item The third letter (when present) is either ``{\tt d}'' + or ``{\tt z}''; + ``{\tt d}'' stands for double precision ({\tt real*8}), + and ``{\tt z}'' stands for double complex ({\tt complex*16}). +\item The fourth letter (when present) is either ``{\tt r}'' + or ``{\tt p}''; + ``{\tt r}'' stands for specified rank, + and ``{\tt p}'' stands for specified precision. + The specified rank routines require the user to provide + the rank of the approximation to be constructed, + while the specified precision routines adjust the rank adaptively + to attain the desired precision. +\end{itemize} + +For example, {\tt iddr\_aid} is a {\tt real*8} routine which computes +an approximation of specified rank. +{\tt idz\_snorm} is a {\tt complex*16} routine. +{\tt id\_randperm} is yet another routine in this distribution. + + + +\section{Example programs} + +For examples of how to use the user-callable routines +in this distribution, see the source codes in subdirectory {\tt test} +of the main directory {\tt id\_dist}. + + + +\section{Directory structure} + +The main {\tt id\_dist} directory contains a Makefile, +the auxiliary text files {\tt README.txt} and {\tt size.txt}, +and the following subdirectories, described in the subsections below: +% +\begin{enumerate} +\item {\tt bin} +\item {\tt development} +\item {\tt doc} +\item {\tt src} +\item {\tt test} +\item {\tt tmp} +\end{enumerate} +% +If a ``{\tt make all}'' command has completed successfully, +then the main {\tt id\_dist} directory will also contain +an archive (library) file {\tt id\_lib.a} containing machine code +for all of the user-callable routines. + + +\subsection{Subdirectory {\tt bin}} + +Once all of the libraries have been made via the Makefile +in the main {\tt id\_dist} directory, +the subdirectory {\tt bin} will contain object files (machine code), +each compiled from the corresponding file of source code +in the subdirectory {\tt src} of {\tt id\_dist}. + + +\subsection{Subdirectory {\tt development}} + +Each Fortran file in the subdirectory {\tt development} +(except for {\tt dfft.f} and {\tt prini.f}) +specifies its dependencies at the top, then provides a main program +for testing and debugging, and finally provides source code +for a library of user-callable subroutines. +The Fortran file {\tt dfft.f} is a copy of P. N. Swarztrauber's FFTPACK library +for computing fast Fourier transforms. +The Fortran file {\tt prini.f} is a copy of V. Rokhlin's library +of formatted printing routines. +Both {\tt dfft.f} (version 4) and {\tt prini.f} are in the public domain. +The shell script {\tt RUNME.sh} runs shell scripts {\tt make\_src.sh} +and {\tt make\_test.sh}, which fill the subdirectories {\tt src} +and {\tt test} of the main directory {\tt id\_dist} +with source codes for user-callable routines +and with the main program testing codes. + + +\subsection{Subdirectory {\tt doc}} + +Subdirectory {\tt doc} contains this documentation, +supplementing comments in the source codes. + + +\subsection{Subdirectory {\tt src}} + +The files in the subdirectory {\tt src} provide source code +for software libraries. Each file in the subdirectory {\tt src} +(except for {\tt dfft.f} and {\tt prini.f}) is +the bottom part of the corresponding file +in the subdirectory {\tt development} of {\tt id\_dist}. +The file {\tt dfft.f} is just a copy +of P. N. Swarztrauber's FFTPACK library +for computing fast Fourier transforms. +The file {\tt prini.f} is a copy of V. Rokhlin's library +of formatted printing routines. 
+Both {\tt dfft.f} (version 4) and {\tt prini.f} are in the public domain. + + +\subsection{Subdirectory {\tt test}} + +The files in subdirectory {\tt test} provide source code +for testing and debugging. Each file in subdirectory {\tt test} is +the top part of the corresponding file +in subdirectory {\tt development} of {\tt id\_dist}, +and provides a main program and a list of its dependencies. +These codes provide examples of how to call the user-callable routines. + + + +\section{Catalog of the routines} + +The main routines for decomposing {\tt real*8} matrices are: +% +\begin{enumerate} +% +\item IDs of arbitrary (generally dense) matrices: +{\tt iddp\_id}, {\tt iddr\_id}, {\tt iddp\_aid}, {\tt iddr\_aid} +% +\item IDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' transposes): +{\tt iddp\_rid}, {\tt iddr\_rid} +% +\item SVDs of arbitrary (generally dense) matrices: +{\tt iddp\_svd}, {\tt iddr\_svd}, {\tt iddp\_asvd},\\{\tt iddr\_asvd} +% +\item SVDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' transposes): +{\tt iddp\_rsvd}, {\tt iddr\_rsvd} +% +\end{enumerate} + +Similarly, the main routines for decomposing {\tt complex*16} matrices +are: +% +\begin{enumerate} +% +\item IDs of arbitrary (generally dense) matrices: +{\tt idzp\_id}, {\tt idzr\_id}, {\tt idzp\_aid}, {\tt idzr\_aid} +% +\item IDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' adjoints): +{\tt idzp\_rid}, {\tt idzr\_rid} +% +\item SVDs of arbitrary (generally dense) matrices: +{\tt idzp\_svd}, {\tt idzr\_svd}, {\tt idzp\_asvd},\\{\tt idzr\_asvd} +% +\item SVDs of matrices that may be rapidly applied to arbitrary vectors +(as may the matrices' adjoints): +{\tt idzp\_rsvd}, {\tt idzr\_rsvd} +% +\end{enumerate} + +This distribution also includes routines for constructing pivoted $QR$ +decompositions (in {\tt idd\_qrpiv.f} and {\tt idz\_qrpiv.f}), for +estimating the spectral norms of matrices that may be applied rapidly +to arbitrary vectors as may their adjoints (in {\tt idd\_snorm.f} +and {\tt idz\_snorm.f}), for converting IDs to SVDs (in +{\tt idd\_id2svd.f} and {\tt idz\_id2svd.f}), and for computing rapidly +arbitrary subsets of the entries of the discrete Fourier transforms +of vectors (in {\tt idd\_sfft.f} and {\tt idz\_sfft.f}). 
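+
+In SciPy's wrapper, the specified-precision ({\tt iddp\_}/{\tt idzp\_})
+and specified-rank ({\tt iddr\_}/{\tt idzr\_}) drivers listed above are
+reached through single entry points that dispatch on their second
+argument.  Continuing the Python sketch from the introduction:
+
+\begin{verbatim}
+# Specified precision: the rank k is chosen adaptively.
+k, idx, proj = sli.interp_decomp(A, 1e-6)
+
+# Specified rank: the caller fixes the rank at 20.
+idx20, proj20 = sli.interp_decomp(A, 20)
+
+# The same convention selects between the SVD drivers.
+U, S, V = sli.svd(A, 1e-6)
+\end{verbatim}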
+ + +\subsection{List of the routines} + +The following is an alphabetical list of the routines +in this distribution, together with brief descriptions +of their functionality and the names of the files containing +the routines' source code: + +\begin{center} +% +\tablehead{\bf Routine & \bf Description & \bf Source file \\} +\tabletail{\hline} +% +\begin{supertabular}{>{\raggedright}p{1.2in} p{.53\textwidth} l} +% +\hline +{\tt id\_frand} & generates pseudorandom numbers drawn uniformly from +the interval $[0,1]$; this routine is more efficient than routine +{\tt id\_srand}, but cannot generate fewer than 55 pseudorandom numbers +per call & {\tt id\_rand.f} \\\hline +% +{\tt id\_frandi} & initializes the seed values for routine +{\tt id\_frand} to specified values & {\tt id\_rand.f} \\\hline +% +{\tt id\_frando} & initializes the seed values for routine +{\tt id\_frand} to their original, default values & {\tt id\_rand.f} +\\\hline +% +{\tt id\_randperm} & generates a uniformly random permutation & +{\tt id\_rand.f} \\\hline +% +{\tt id\_srand} & generates pseudorandom numbers drawn uniformly from +the interval $[0,1]$; this routine is less efficient than routine +{\tt id\_frand}, but can generate fewer than 55 pseudorandom numbers +per call & {\tt id\_rand.f} \\\hline +% +{\tt id\_srandi} & initializes the seed values for routine +{\tt id\_srand} to specified values & {\tt id\_rand.f} \\\hline +% +{\tt id\_srando} & initializes the seed values for routine +{\tt id\_srand} to their original, default values & {\tt id\_rand.f} +\\\hline +% +{\tt idd\_copycols} & collects together selected columns of a matrix & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_diffsnorm} & estimates the spectral norm of the difference +between two matrices specified by routines for applying the matrices +and their transposes to arbitrary vectors; this routine uses the power +method with a random starting vector & {\tt idd\_snorm.f} \\\hline +% +{\tt idd\_enorm} & calculates the Euclidean norm of a vector & +{\tt idd\_snorm.f} \\\hline +% +{\tt idd\_estrank} & estimates the numerical rank of an arbitrary +(generally dense) matrix to a specified precision; this routine is +randomized, and must be initialized with routine {\tt idd\_frmi} & +{\tt iddp\_aid.f} \\\hline +% +{\tt idd\_frm} & transforms a vector into a vector which is +sufficiently scrambled to be subsampled, via a composition of Rokhlin's +random transform, random subselection, and a fast Fourier transform & +{\tt idd\_frm.f} \\\hline +% +{\tt idd\_frmi} & initializes routine {\tt idd\_frm} & {\tt idd\_frm.f} +\\\hline +% +{\tt idd\_getcols} & collects together selected columns of a matrix +specified by a routine for applying the matrix to arbitrary vectors & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_house} & calculates the vector and scalar needed to apply the +Householder transformation reflecting a given vector into its first +entry & {\tt idd\_house.f} \\\hline +% +{\tt idd\_houseapp} & applies a Householder matrix to a vector & +{\tt idd\_house.f} \\\hline +% +{\tt idd\_id2svd} & converts an approximation to a matrix in the form +of an ID into an approximation in the form of an SVD & +{\tt idd\_id2svd.f} \\\hline +% +{\tt idd\_ldiv} & finds the greatest integer less than or equal to a +specified integer, that is divisible by another (larger) specified +integer & {\tt idd\_sfft.f} \\\hline +% +{\tt idd\_pairsamps} & calculates the indices of the pairs of integers +that the individual integers in a specified set belong to & +{\tt idd\_frm.f} \\\hline +% 
+{\tt idd\_permmult} & multiplies together a bunch of permutations & +{\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_qinqr} & reconstructs the $Q$ matrix in a $QR$ decomposition +from the output of routines {\tt iddp\_qrpiv} or {\tt iddr\_qrpiv} & +{\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_qrmatmat} & applies to multiple vectors collected together as +a matrix the $Q$ matrix (or its transpose) in the $QR$ decomposition of +a matrix, as described by the output of routines {\tt iddp\_qrpiv} or +{\tt iddr\_qrpiv}; to apply $Q$ (or its transpose) to a single vector +without having to provide a work array, use routine {\tt idd\_qrmatvec} +instead & {\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_qrmatvec} & applies to a single vector the $Q$ matrix (or its +transpose) in the $QR$ decomposition of a matrix, as described by the +output of routines {\tt iddp\_qrpiv} or {\tt iddr\_qrpiv}; to apply $Q$ +(or its transpose) to several vectors efficiently, use routine +{\tt idd\_qrmatmat} instead & {\tt idd\_qrpiv.f} \\\hline +% +{\tt idd\_random\_} {\tt transf} & applies rapidly a +random orthogonal matrix to a user-supplied vector & {\tt id\_rtrans.f} +\\\hline +% +{\tt idd\_random\_ transf\_init} & \raggedright initializes routines +{\tt idd\_random\_transf} and {\tt idd\_random\_transf\_inverse} & +{\tt id\_rtrans.f} \\\hline +% +{\tt idd\_random\_} {\tt transf\_inverse} & applies +rapidly the inverse of the operator applied by routine +{\tt idd\_random\_transf} & {\tt id\_rtrans.f} \\\hline +% +{\tt idd\_reconid} & reconstructs a matrix from its ID & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_reconint} & constructs $P$ in the ID $A = B \, P$, where the +columns of $B$ are a subset of the columns of $A$, and $P$ is the +projection coefficient matrix, given {\tt list}, {\tt krank}, and +{\tt proj} output by routines {\tt iddr\_id}, {\tt iddp\_id}, +{\tt iddr\_aid}, {\tt iddp\_aid}, {\tt iddr\_rid}, or {\tt iddp\_rid} & +{\tt idd\_id.f} \\\hline +% +{\tt idd\_sfft} & rapidly computes a subset of the entries of the +discrete Fourier transform of a vector, composed with permutation +matrices both on input and on output & {\tt idd\_sfft.f} \\\hline +% +{\tt idd\_sffti} & initializes routine {\tt idd\_sfft} & +{\tt idd\_sfft.f} \\\hline +% +{\tt idd\_sfrm} & transforms a vector into a scrambled vector of +specified length, via a composition of Rokhlin's random transform, +random subselection, and a fast Fourier transform & {\tt idd\_frm.f} +\\\hline +% +{\tt idd\_sfrmi} & initializes routine {\tt idd\_sfrm} & +{\tt idd\_frm.f} \\\hline +% +{\tt idd\_snorm} & estimates the spectral norm of a matrix specified by +routines for applying the matrix and its transpose to arbitrary +vectors; this routine uses the power method with a random starting +vector & {\tt idd\_snorm.f} \\\hline +% +{\tt iddp\_aid} & computes the ID of an arbitrary (generally dense) +matrix, to a specified precision; this routine is randomized, and must +be initialized with routine {\tt idd\_frmi} & {\tt iddp\_aid.f} +\\\hline +% +{\tt iddp\_asvd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified precision; this routine is randomized, and must +be initialized with routine {\tt idd\_frmi} & {\tt iddp\_asvd.f} +\\\hline +% +{\tt iddp\_id} & computes the ID of an arbitrary (generally dense) +matrix, to a specified precision; this routine is often less efficient +than routine {\tt iddp\_aid} & {\tt idd\_id.f} \\\hline +% +{\tt iddp\_qrpiv} & computes the pivoted $QR$ decomposition of an +arbitrary (generally dense) matrix via 
Householder transformations, +stopping at a specified precision of the decomposition & +{\tt idd\_qrpiv.f} \\\hline +% +{\tt iddp\_rid} & computes the ID, to a specified precision, of a +matrix specified by a routine for applying its transpose to arbitrary +vectors; this routine is randomized & {\tt iddp\_rid.f} \\\hline +% +{\tt iddp\_rsvd} & computes the SVD, to a specified precision, of a +matrix specified by routines for applying the matrix and its transpose +to arbitrary vectors; this routine is randomized & {\tt iddp\_rsvd.f} +\\\hline +% +{\tt iddp\_svd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified precision; this routine is often less efficient +than routine {\tt iddp\_asvd} & {\tt idd\_svd.f} \\\hline +% +{\tt iddr\_aid} & computes the ID of an arbitrary (generally dense) +matrix, to a specified rank; this routine is randomized, and must be +initialized by routine {\tt iddr\_aidi} & {\tt iddr\_aid.f} \\\hline +% +{\tt iddr\_aidi} & initializes routine {\tt iddr\_aid} & +{\tt iddr\_aid.f} \\\hline +% +{\tt iddr\_asvd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified rank; this routine is randomized, and must be +initialized with routine {\tt iddr\_aidi} & {\tt iddr\_asvd.f} +\\\hline +% +{\tt iddr\_id} & computes the ID of an arbitrary (generally dense) +matrix, to a specified rank; this routine is often less efficient than +routine {\tt iddr\_aid} & {\tt idd\_id.f} \\\hline +% +{\tt iddr\_qrpiv} & computes the pivoted $QR$ decomposition of an +arbitrary (generally dense) matrix via Householder transformations, +stopping at a specified rank of the decomposition & {\tt idd\_qrpiv.f} +\\\hline +% +{\tt iddr\_rid} & computes the ID, to a specified rank, of a matrix +specified by a routine for applying its transpose to arbitrary vectors; +this routine is randomized & {\tt iddr\_rid.f} \\\hline +% +{\tt iddr\_rsvd} & computes the SVD, to a specified rank, of a matrix +specified by routines for applying the matrix and its transpose to +arbitrary vectors; this routine is randomized & {\tt iddr\_rsvd.f} +\\\hline +% +{\tt iddr\_svd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified rank; this routine is often less efficient than +routine {\tt iddr\_asvd} & {\tt idd\_svd.f} \\\hline +% +{\tt idz\_copycols} & collects together selected columns of a matrix & +{\tt idz\_id.f} \\\hline +% +{\tt idz\_diffsnorm} & estimates the spectral norm of the difference +between two matrices specified by routines for applying the matrices +and their adjoints to arbitrary vectors; this routine uses the power +method with a random starting vector & {\tt idz\_snorm.f} \\\hline +% +{\tt idz\_enorm} & calculates the Euclidean norm of a vector & +{\tt idz\_snorm.f} \\\hline +% +{\tt idz\_estrank} & estimates the numerical rank of an arbitrary +(generally dense) matrix to a specified precision; this routine is +randomized, and must be initialized with routine {\tt idz\_frmi} & +{\tt idzp\_aid.f} \\\hline +% +{\tt idz\_frm} & transforms a vector into a vector which is +sufficiently scrambled to be subsampled, via a composition of Rokhlin's +random transform, random subselection, and a fast Fourier transform & +{\tt idz\_frm.f} \\\hline +% +{\tt idz\_frmi} & initializes routine {\tt idz\_frm} & {\tt idz\_frm.f} +\\\hline +% +{\tt idz\_getcols} & collects together selected columns of a matrix +specified by a routine for applying the matrix to arbitrary vectors & +{\tt idz\_id.f} \\\hline +% +{\tt idz\_house} & calculates the vector
and scalar needed to apply the +Householder transformation reflecting a given vector into its first +entry & {\tt idz\_house.f} \\\hline +% +{\tt idz\_houseapp} & applies a Householder matrix to a vector & +{\tt idz\_house.f} \\\hline +% +{\tt idz\_id2svd} & converts an approximation to a matrix in the form +of an ID into an approximation in the form of an SVD & +{\tt idz\_id2svd.f} \\\hline +% +{\tt idz\_ldiv} & finds the greatest integer less than or equal to a +specified integer, that is divisible by another (larger) specified +integer & {\tt idz\_sfft.f} \\\hline +% +{\tt idz\_permmult} & multiplies together a bunch of permutations & +{\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_qinqr} & reconstructs the $Q$ matrix in a $QR$ decomposition +from the output of routines {\tt idzp\_qrpiv} or {\tt idzr\_qrpiv} & +{\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_qrmatmat} & applies to multiple vectors collected together as +a matrix the $Q$ matrix (or its adjoint) in the $QR$ decomposition of +a matrix, as described by the output of routines {\tt idzp\_qrpiv} or +{\tt idzr\_qrpiv}; to apply $Q$ (or its adjoint) to a single vector +without having to provide a work array, use routine {\tt idz\_qrmatvec} +instead & {\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_qrmatvec} & applies to a single vector the $Q$ matrix (or its +adjoint) in the $QR$ decomposition of a matrix, as described by the +output of routines {\tt idzp\_qrpiv} or {\tt idzr\_qrpiv}; to apply $Q$ +(or its adjoint) to several vectors efficiently, use routine +{\tt idz\_qrmatmat} instead & {\tt idz\_qrpiv.f} \\\hline +% +{\tt idz\_random\_ transf} & applies rapidly a random unitary matrix to +a user-supplied vector & {\tt id\_rtrans.f} \\\hline +% +{\tt idz\_random\_ transf\_init} & \raggedright initializes routines +{\tt idz\_random\_transf} and {\tt idz\_random\_transf\_inverse} & +{\tt id\_rtrans.f} \\\hline +% +{\tt idz\_random\_ transf\_inverse} & applies rapidly the inverse of +the operator applied by routine {\tt idz\_random\_transf} & +{\tt id\_rtrans.f} \\\hline +% +{\tt idz\_reconid} & reconstructs a matrix from its ID & +{\tt idz\_id.f} \\\hline +% +{\tt idz\_reconint} & constructs $P$ in the ID $A = B \, P$, where the +columns of $B$ are a subset of the columns of $A$, and $P$ is the +projection coefficient matrix, given {\tt list}, {\tt krank}, and +{\tt proj} output by routines {\tt idzr\_id}, {\tt idzp\_id}, +{\tt idzr\_aid}, {\tt idzp\_aid}, {\tt idzr\_rid}, or {\tt idzp\_rid} & +{\tt idz\_id.f} \\\hline +% +{\tt idz\_sfft} & rapidly computes a subset of the entries of the +discrete Fourier transform of a vector, composed with permutation +matrices both on input and on output & {\tt idz\_sfft.f} \\\hline +% +{\tt idz\_sffti} & initializes routine {\tt idz\_sfft} & +{\tt idz\_sfft.f} \\\hline +% +{\tt idz\_sfrm} & transforms a vector into a scrambled vector of +specified length, via a composition of Rokhlin's random transform, +random subselection, and a fast Fourier transform & {\tt idz\_frm.f} +\\\hline +% +{\tt idz\_sfrmi} & initializes routine {\tt idz\_sfrm} & +{\tt idz\_frm.f} \\\hline +% +{\tt idz\_snorm} & estimates the spectral norm of a matrix specified by +routines for applying the matrix and its adjoint to arbitrary +vectors; this routine uses the power method with a random starting +vector & {\tt idz\_snorm.f} \\\hline +% +{\tt idzp\_aid} & computes the ID of an arbitrary (generally dense) +matrix, to a specified precision; this routine is randomized, and must +be initialized with routine {\tt idz\_frmi} & {\tt 
idzp\_aid.f} +\\\hline +% +{\tt idzp\_asvd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified precision; this routine is randomized, and must +be initialized with routine {\tt idz\_frmi} & {\tt idzp\_asvd.f} +\\\hline +% +{\tt idzp\_id} & computes the ID of an arbitrary (generally dense) +matrix, to a specified precision; this routine is often less efficient +than routine {\tt idzp\_aid} & {\tt idz\_id.f} \\\hline +% +{\tt idzp\_qrpiv} & computes the pivoted $QR$ decomposition of an +arbitrary (generally dense) matrix via Householder transformations, +stopping at a specified precision of the decomposition & +{\tt idz\_qrpiv.f} \\\hline +% +{\tt idzp\_rid} & computes the ID, to a specified precision, of a +matrix specified by a routine for applying its adjoint to arbitrary +vectors; this routine is randomized & {\tt idzp\_rid.f} \\\hline +% +{\tt idzp\_rsvd} & computes the SVD, to a specified precision, of a +matrix specified by routines for applying the matrix and its adjoint +to arbitrary vectors; this routine is randomized & {\tt idzp\_rsvd.f} +\\\hline +% +{\tt idzp\_svd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified precision; this routine is often less efficient +than routine {\tt idzp\_asvd} & {\tt idz\_svd.f} \\\hline +% +{\tt idzr\_aid} & computes the ID of an arbitrary (generally dense) +matrix, to a specified rank; this routine is randomized, and must be +initialized by routine {\tt idzr\_aidi} & {\tt idzr\_aid.f} \\\hline +% +{\tt idzr\_aidi} & initializes routine {\tt idzr\_aid} & +{\tt idzr\_aid.f} \\\hline +% +{\tt idzr\_asvd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified rank; this routine is randomized, and must be +initialized with routine {\tt idzr\_aidi} & {\tt idzr\_asvd.f} +\\\hline +% +{\tt idzr\_id} & computes the ID of an arbitrary (generally dense) +matrix, to a specified rank; this routine is often less efficient than +routine {\tt idzr\_aid} & {\tt idz\_id.f} \\\hline +% +{\tt idzr\_qrpiv} & computes the pivoted $QR$ decomposition of an +arbitrary (generally dense) matrix via Householder transformations, +stopping at a specified rank of the decomposition & {\tt idz\_qrpiv.f} +\\\hline +% +{\tt idzr\_rid} & computes the ID, to a specified rank, of a matrix +specified by a routine for applying its adjoint to arbitrary vectors; +this routine is randomized & {\tt idzr\_rid.f} \\\hline +% +{\tt idzr\_rsvd} & computes the SVD, to a specified rank, of a matrix +specified by routines for applying the matrix and its adjoint to +arbitrary vectors; this routine is randomized & {\tt idzr\_rsvd.f} +\\\hline +% +{\tt idzr\_svd} & computes the SVD of an arbitrary (generally dense) +matrix, to a specified rank; this routine is often less efficient than +routine {\tt idzr\_asvd} & {\tt idz\_svd.f} \\ +% +\end{supertabular} +\end{center} + + + +\section{Documentation in the source codes} + +Each routine in the source codes includes documentation +in the comments immediately following the declaration +of the subroutine's calling sequence. +This documentation describes the purpose of the routine, +the input and output variables, and the required work arrays (if any). +This documentation also cites relevant references. +Please pay attention to the {\it N.B.}'s; +{\it N.B.} stands for {\it nota bene} (Latin for ``note well'') +and highlights important information about the routines.
+ + + +\section{Notation and decompositions} +\label{defs} + +This section sets notational conventions employed +in this documentation and the associated software, +and defines both the singular value decomposition (SVD) +and the interpolative decomposition (ID). +For information concerning other mathematical objects +used in the code (such as Householder transformations, +pivoted $QR$ decompositions, and discrete and fast Fourier transforms +--- DFTs and FFTs), see, for example,~\cite{golub-van_loan}. +For detailed descriptions and proofs of the mathematical facts +discussed in the present section, see, for example, +\cite{golub-van_loan} and the references +in~\cite{halko-martinsson-tropp}. + +Throughout this document and the accompanying software distribution, +$\| \x \|$ always denotes the Euclidean norm of the vector $\x$, +and $\| A \|$ always denotes the spectral norm of the matrix $A$. +Subsection~\ref{Euclidean} below defines the Euclidean norm; +Subsection~\ref{spectral} below defines the spectral norm. +We use $A^*$ to denote the adjoint of the matrix $A$. + + +\subsection{Euclidean norm} +\label{Euclidean} + +For any positive integer $n$, and vector $\x$ of length $n$, +the Euclidean ($l^2$) norm $\| \x \|$ is +% +\begin{equation} +\| \x \| = \sqrt{ \sum_{k=1}^n |x_k|^2 }, +\end{equation} +% +where $x_1$,~$x_2$, \dots, $x_{n-1}$,~$x_n$ are the entries of $\x$. + + +\subsection{Spectral norm} +\label{spectral} + +For any positive integers $m$ and $n$, and $m \times n$ matrix $A$, +the spectral ($l^2$ operator) norm $\| A \|$ is +% +\begin{equation} +\| A_{m \times n} \| += \max \frac{\| A_{m \times n} \, \x_{n \times 1} \|} + {\| \x_{n \times 1} \|}, +\end{equation} +% +where the $\max$ is taken over all $n \times 1$ column vectors $\x$ +such that $\| \x \| \ne 0$. + + +\subsection{Singular value decomposition (SVD)} + +For any positive real number $\epsilon$, +positive integers $k$, $m$, and $n$ with $k \le m$ and $k \le n$, +and any $m \times n$ matrix $A$, +a rank-$k$ approximation to $A$ in the form of an SVD +(to precision $\epsilon$) consists of an $m \times k$ matrix $U$ +whose columns are orthonormal, an $n \times k$ matrix $V$ +whose columns are orthonormal, and a diagonal $k \times k$ matrix +$\Sigma$ with diagonal entries +$\Sigma_{1,1} \ge \Sigma_{2,2} \ge \dots \ge \Sigma_{k-1,k-1} + \ge \Sigma_{k,k} \ge 0$, +such that +% +\begin{equation} +\| A_{m \times n} - U_{m \times k} \, \Sigma_{k \times k} + \, (V^*)_{k \times n} \| \le \epsilon. +\end{equation} +% +The product $U \, \Sigma \, V^*$ is known as an SVD. +The columns of $U$ are known as left singular vectors; +the columns of $V$ are known as right singular vectors. +The diagonal entries of $\Sigma$ are known as singular values. + +When $k = m$ or $k = n$, and $A = U \, \Sigma \, V^*$, +then $U \, \Sigma \, V^*$ is known as the SVD +of $A$; the columns of $U$ are the left singular vectors of $A$, +the columns of $V$ are the right singular vectors of $A$, +and the diagonal entries of $\Sigma$ are the singular values of $A$. +For any positive integer $k$ with $k < m$ and $k < n$, +there exists a rank-$k$ approximation to $A$ in the form of an SVD, +to precision $\sigma_{k+1}$, where $\sigma_{k+1}$ is the $(k+1)^\st$ +greatest singular value of $A$.
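+
+The final claim above is easy to check numerically.  The following
+NumPy snippet (an illustration, not part of this distribution)
+truncates an SVD to rank $k$ and confirms that the spectral-norm error
+of the truncation equals the $(k+1)^\st$ greatest singular value.
+
+\begin{verbatim}
+import numpy as np
+
+rng = np.random.default_rng(0)
+A = rng.standard_normal((60, 40))
+U, s, Vh = np.linalg.svd(A, full_matrices=False)
+
+k = 10
+Ak = (U[:, :k] * s[:k]) @ Vh[:k, :]   # rank-k truncated SVD
+
+# Spectral-norm error vs. the (k+1)st singular value (s[k] with
+# zero-based indexing): the two agree to rounding error.
+print(np.linalg.norm(A - Ak, 2), s[k])
+\end{verbatim}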
+ + +\subsection{Interpolative decomposition (ID)} + +For any positive real number $\epsilon$, +positive integers $k$, $m$, and $n$ with $k \le m$ and $k \le n$, +and any $m \times n$ matrix $A$, +a rank-$k$ approximation to $A$ in the form of an ID +(to precision $\epsilon$) consists of a $k \times n$ matrix $P$, +and an $m \times k$ matrix $B$ whose columns constitute a subset +of the columns of $A$, such that +% +\begin{enumerate} +\item $\| A_{m \times n} - B_{m \times k} \, P_{k \times n} \| + \le \epsilon$, +\item some subset of the columns of $P$ makes up the $k \times k$ + identity matrix, and +\item every entry of $P$ has an absolute value less than or equal + to a reasonably small positive real number, say 2. +\end{enumerate} +% +The product $B \, P$ is known as an ID. +The matrix $P$ is known as the projection or interpolation matrix +of the ID. Property~1 above approximates each column of $A$ +via a linear combination of the columns of $B$ +(which are themselves columns of $A$), with the coefficients +in the linear combination given by the entries of $P$. + +The interpolative decomposition is ``interpolative'' +due to Property~2 above. The ID is numerically stable +due to Property~3 above. +It follows from Property~2 that the least ($k^\th$ greatest) singular value +of $P$ is at least 1. Combining Properties~2 and~3 yields that +% +\begin{equation} +\| P_{k \times n} \| \le \sqrt{4k(n-k)+1}. +\end{equation} + +When $k = m$ or $k = n$, and $A = B \, P$, +then $B \, P$ is known as the ID of $A$. +For any positive integer $k$ with $k < m$ and $k < n$, +there exists a rank-$k$ approximation to $A$ in the form of an ID, +to precision $\sqrt{k(n-k)+1} \; \sigma_{k+1}$, +where $\sigma_{k+1}$ is the $(k+1)^\st$ greatest singular value of $A$ +(in fact, there exists an ID in which every entry +of the projection matrix $P$ has an absolute value less than or equal +to 1). + + + +\section{Bug reports, feedback, and support} + +Please let us know about errors in the software or in the documentation +via e-mail to {\tt tygert@aya.yale.edu}. +We would also appreciate hearing about particular applications of the codes, +especially in the form of journal articles +e-mailed to {\tt tygert@aya.yale.edu}. +Mathematical and technical support may also be available via e-mail. Enjoy! + + + +\bibliographystyle{siam} +\bibliography{doc} + + +\end{document} diff --git a/voice_bridge/scipy/linalg/src/lapack_deprecations/LICENSE b/voice_bridge/scipy/linalg/src/lapack_deprecations/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..8d713b6ae7c4a5d6457f2d045c8e5ae7988025f9 --- /dev/null +++ b/voice_bridge/scipy/linalg/src/lapack_deprecations/LICENSE @@ -0,0 +1,48 @@ +Copyright (c) 1992-2015 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. +Copyright (c) 2000-2015 The University of California Berkeley. All + rights reserved. +Copyright (c) 2006-2015 The University of Colorado Denver. All rights + reserved. + +$COPYRIGHT$ + +Additional copyrights may follow + +$HEADER$ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+
+
+
+\section{Bug reports, feedback, and support}
+
+Please let us know about errors in the software or in the documentation
+via e-mail to {\tt tygert@aya.yale.edu}.
+We would also appreciate hearing about particular applications of the codes,
+especially in the form of journal articles
+e-mailed to {\tt tygert@aya.yale.edu}.
+Mathematical and technical support may also be available via e-mail. Enjoy!
+
+
+
+\bibliographystyle{siam}
+\bibliography{doc}
+
+
+\end{document}
diff --git a/voice_bridge/scipy/linalg/src/lapack_deprecations/LICENSE b/voice_bridge/scipy/linalg/src/lapack_deprecations/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8d713b6ae7c4a5d6457f2d045c8e5ae7988025f9
--- /dev/null
+++ b/voice_bridge/scipy/linalg/src/lapack_deprecations/LICENSE
@@ -0,0 +1,48 @@
+Copyright (c) 1992-2015 The University of Tennessee and The University
+                        of Tennessee Research Foundation. All rights
+                        reserved.
+Copyright (c) 2000-2015 The University of California Berkeley. All
+                        rights reserved.
+Copyright (c) 2006-2015 The University of Colorado Denver. All rights
+                        reserved.
+
+$COPYRIGHT$
+
+Additional copyrights may follow
+
+$HEADER$
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer listed
+  in this license in the documentation and/or other materials
+  provided with the distribution.
+
+- Neither the name of the copyright holders nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+The copyright holders provide no reassurances that the source code
+provided does not infringe any patent, copyright, or any other
+intellectual property rights of third parties. The copyright holders
+disclaim any liability to any recipient for claims brought against
+recipient by any third party for infringement of that parties
+intellectual property rights.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/voice_bridge/scipy/linalg/tests/data/carex_15_data.npz b/voice_bridge/scipy/linalg/tests/data/carex_15_data.npz
new file mode 100644
index 0000000000000000000000000000000000000000..660bbb41b7fad43ed945dc701693451ceb60166c
--- /dev/null
+++ b/voice_bridge/scipy/linalg/tests/data/carex_15_data.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13f3e1491a876bbf59d7ea10ad29c1f9b5996a2ab99216f31d5bfcd659012c1e
+size 34462
diff --git a/voice_bridge/scipy/linalg/tests/data/carex_18_data.npz b/voice_bridge/scipy/linalg/tests/data/carex_18_data.npz
new file mode 100644
index 0000000000000000000000000000000000000000..0b3d569a1a65e9b5ff153ae4121a6a5a69409f7c
--- /dev/null
+++ b/voice_bridge/scipy/linalg/tests/data/carex_18_data.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59f839467f2752b7df6fb6d4094396edd32a5929b764f7ffa1e6666431e6cac6
+size 161487
diff --git a/voice_bridge/scipy/linalg/tests/data/carex_19_data.npz b/voice_bridge/scipy/linalg/tests/data/carex_19_data.npz
new file mode 100644
index 0000000000000000000000000000000000000000..90168ad4e888fba29a772ee13798ec126016140e
--- /dev/null
+++ b/voice_bridge/scipy/linalg/tests/data/carex_19_data.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38e8fc7b041df0b23d7e5ca15ead1a065e6467611ef9a848cc7db93f80adfd87
+size 34050
diff --git a/voice_bridge/scipy/linalg/tests/data/carex_20_data.npz b/voice_bridge/scipy/linalg/tests/data/carex_20_data.npz
new file mode 100644
index 0000000000000000000000000000000000000000..87266deb46238307347362b63a4878f2565baf56
--- /dev/null
+++ b/voice_bridge/scipy/linalg/tests/data/carex_20_data.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14e222d34a7118c7284a1675c6feceee77b84df951a5c6ba2a5ee9ff3054fa1d
+size 31231
diff --git a/voice_bridge/scipy/linalg/tests/data/carex_6_data.npz b/voice_bridge/scipy/linalg/tests/data/carex_6_data.npz
new file mode 100644
index 0000000000000000000000000000000000000000..35d1681786c95602c4f0d5260fc5ad0ff4236189
--- /dev/null
+++ b/voice_bridge/scipy/linalg/tests/data/carex_6_data.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b2a0736b541ebf5c4b9b4c00d6dab281e73c9fb9913c6e2581a781b37b602f9
+size 15878
diff --git a/voice_bridge/scipy/linalg/tests/data/gendare_20170120_data.npz b/voice_bridge/scipy/linalg/tests/data/gendare_20170120_data.npz
new file mode 100644
index 0000000000000000000000000000000000000000..ff967f2ca0d0868aacf7d7e67402599e64bab817
--- /dev/null
+++ b/voice_bridge/scipy/linalg/tests/data/gendare_20170120_data.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3dfab451d9d5c20243e0ed85cd8b6c9657669fb9a0f83b5be165585783d55b5
+size 2164
diff --git a/voice_bridge/scipy/misc/ascent.dat b/voice_bridge/scipy/misc/ascent.dat
new file mode 100644
index 0000000000000000000000000000000000000000..f3602460a5170a78ef09b038171853ce8e76fca7
Binary files /dev/null and b/voice_bridge/scipy/misc/ascent.dat differ
K!K%K'K"KBKwKsKsKsKsKsKtKvKuKuKuKuKuKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKxKxKxKxKuKvKyKxKxKxKxKxKxKxKxKzKrKTKMKMKNKMKKKLKKKKKKKKKKKKKKKKKKKMK=KKK(KEKFKFKFKHKIKJKHKBK>K=K;KKK?K=KK=KK?K=KK=KK?K=KK>KKK>K>K>K?K=KK>K=K=K=K>K>K>K>KKZKdK]KWK]KaKaKaK_K]KaKbKbK`KaKYKNKTKFKcKcKdK_K^K^K_K_K`KbKcKeKlKnKqKsKvKyKxKxKvKvKrKqKnKlKhKfKeKdKdKeKeKdKeKfKfKfKfKfKgKfKfKgKgKgKgKhKhKhKhKhKfKeKeKYKEKFKMKNKNKPKQKNKRKSKJKIKQKEKHKfKVKNKVKZKPK>KK$K'K KK K KK K K KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK%KKKKKKK$K&K$K*KiKvKsKsKsKtKtKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKuKuKuKuKuKuKuKvKvKvKvKuKuKvKvKvKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKtK[KMKNKLKKKLKLKKKMKMKKKLKKKKKLKJKKKHK&KKK>KJKGKGKEKGKJKKKGK?KK?K?K?K?K=K=K>K>KK?K?K>KK>K>K>K>K?K>K=K=KK>K?K>KKKK`KfKjKjKmKoKmKjKjKhKfKeKdKcKcKcKdKfKdKfKgKgKgKgKfKfKfKfKgKgKgKhKgKfKfKdKdKaK_K]KZKYKXKUKTKUKTKUKWKWKWKPKJKMKOKOKQKMKLKNKNKOKKKKKRKUKTKEKSK;K`K`KQKQKYKFKK#K K"KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK!K%K'K&KZKwKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKxKvKuKuKuKuKuKuKwKxKxKwKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKnKQKKKLKMKMKNKLKKKMKMKMKMKLKKKLKKKNK@KKK!KCKJKGKFKFKGKKKMKGK>K=K?K>K>K>K>K?K=KK>K?K>K>K=K=KK>K?K=K?K=KK?K>K?K?K=KK?K=KK?K?K>K?K>K=KK>K?K>K?K?K=K>K?K>K>K=KK?K>K?K>K?K>KK>K>K?K>K?K?K>K?K>KKSK˜KΜKΔK–KŒKΈKζKσKλKιKοKςKΰK­KzKaK_KdKeKbKaKaKaKaKaKaKaKaK_K_K_K_K_K_K_K_K_K_K`KaK_K`KaKaKaK`K_K_K_K_K_K_KaKaKaKaKaKaKbK_K`KaKcKdKeKgKiKgKbKZK\KcK\KCK1K?K]KaK_KaKaKbKcK`K\K_KbKbKcKaK[K[K^KaKcKcK^K\K_KaK`K`K`KbKPKWKFKYKeKaKcK`K_K]K]K_KaKaKcKcKdKhKkKoKqKsKvKsKrKsKsKqKoKnKlKjKjKjKhKfKfKfKfKfKfKgKfKfKfKiKhKbK[KQKPKQKQKQKPKQKOKMKDKK>K>K>K>K>K?K>K?K>K>K?K>K?K>K=K>K>K>K=KKNKVKLKBKLKRKSKEKPK;KSK]KLKEKZKNK'K$K"KKK KKK K K KKKKKKKKKKKK"KKKKKK!K$K$K$K$K$K#K#K$K$K$K$K%K&K&K&K%K&K&K%K%K%K&K%K%K%K%K&K&K&K$K%K'K(K%K%K%K&K&K'K'K&K&K&K%K%K&K%K$K(K(K&K&K&K&K)K)K'K(K(K(K(K(K)K*K*K*K(K'K(K(K)K(K%K(K(K'K'K$K$K$K"K!K#K#K!K"K KKKKK!K(K*K$KXKvKsKsKsKsKsKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKuKuKuKwKxKwKyKxKxKvKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KnKRKMKNKMKMKMKNKMKKKLKNKMKJKKKLKLKKKHKBKAK=KDKIKJKFKDKDKGKJKDK>K>K?K>K>K>K?K>K?K>K?K>K>K>K>K>K=KK>K?K>K=K=KK=KGKLKJKDKEKFKHKHKDK?K>K?K?K?K?K?K>K>K>K>K?K?K>KK?K=K=K>K>K?K=KK>K>K>K>K?K>K?K?K>K>K=KK?KK?K>KK;KFKKKYK[KNKHKVKFK(K(K)K"K +KKKKK +KKKKKKKKKKKKKKKKKK%K&K%K&KKKK$K&K'K&K'K KKKK'K+K*K*K(K!KK K(K)K)K,K/K*K+K.K0K0K*K+K+K(K"KK#K)K)K(K+K2K0K0K0K0K.K,K.K.K.K)K'K-K1K0K1K4K2K-K)K-K0K3K=K=K9K,K"K'K0K1K.K-K-K*K)K)K)K)K(K(K&K(K'K(K#K#K&K%K)KfKwKrKtKuKuKvKtKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKvKvKyKxKtKwKyKvKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKwK]KKKKKKKKKLKKKKKLKKKLKNKNKNKMKKKMKFKCKDK@KBKJKMKIKGKFKHKKKIKAKK?K@K=KK?KK>K?K;KK;K8K;K3K/K3K1K3K/K&K"KKK K"K 
K&K'K"KK>K?K>K?K?K>K>K>K=K>K?K=K=K>K>K?K>KK=K;K=K8K.K/K3K3K3K3K4K3K2e]qq(KlKgKaK^K_KZKtKΫKηKΕKθKιKηKηKιKθKηKμKγKηK½KjKaKgKdKeKbK~KΧKKuK\KcKbKeKcK€KΨKΛKpK\KcKcKeKhK’KηK¦K_K`KbKcKcK‹KαKΒKhK_KeKdKhK’KΩK³KŠKΏKΟK¦KŽK­KήKφKοKθKνKσKγKΈK‚KeK_KeKfKeKbKbKaKaKaKaKaKaKaKaKaKaKbKbKbK`K`KeKhKlKlKlKmKdK\K\KeKaKGKMKdKgKcKaKaKdKdKdKcKaKaKaKbKaKaKcKaKbKbKbKaKaKbKaKbKbKcK`K_KaKcKaKcK]KZK_KaK_K`K_KbKYKSKQKJKgKdKdKcKbKaKaK^K\K\K\K\K]K\K[K]K^K^K^KbK`KbKeKdKdKfKiKjKhKfKcKZKUKSKSKQKPKPKRKSKNKFKK>K?K?K?K?K?K>K?K>KK6K/K1K0K1K6K'KKKKKK$K)K)K&K0KkKtKsKsKsKtKtKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKvKvKxKxKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKyKyKxKxKzKiKNKMKMKKKLKLKKKLKMKMKKKLKLKLKKKMKKKDKBKIKDKGKMKLKGKFKGKHKIKFK@K>K>K=K>K?K>K>K>K>K>K>K>K>K>K>K>K?K=KK>KK3K,K4KAKCKCK,KKKVKKKMKXKIK,K*K*K(KK K KKK K K K K K K K K K K K K K KKK K$K&K%K KKK K"K!K"K$KKKKK%K+K+K)K)K!KKK(K)K&K*K)K'K+K*K.K,K)K*K-K'K!KK#K'K$K&K*K/K-K/K0K/K*K*K+K+K(K!K#K,K*K,K0K5K4K*K)K0K1K7K>KHKEK-KK2KLKIKFK@K2K/K1K/K2K0KKKKKKK%K)K)K#KIKtKrKtKsKsKsKsKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKuKuKvKvKuKuKuKuKuKxKxKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKxKxKxKxKxKzKdKNKKKKKLKLKKKLKNKMKKKKKKKKKLKKKLKIKBKGKEKAKFKMKKKGKFKHKHKIKFK@K;KK?K>K>K>K>K>K?K>K>K>K?K?K?K=KK>KKLKJKHK?K0K2K3K/K2K)KKK!K!K!KK%K)K)K+KdKwKsKsKsKsKsKsKsKuKuKuKuKsKtKvKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKvKuKxKxKuKuKuKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKwK^KKKLKKKKKKKLKMKMKKKKKKKKKKKLKKKJKIKDKCKCKDKHKMKLKFKDKGKIKHKFK@K=K>K?K>K>K?K>K?K?K>K?K>K>K>K?K=KK5K+K-K3K3K3e]qu(KdKcKaKdK]K“KοKΤKΚKκKεKηKηKηKηKθKηKξK¨K_KfK_KaK«KμK€K^K`K`KaKeK`K’KζKΉKdK]KeKdKdKaK”KζK΅KdK^KdKcKeKpKΊKβKŽK\KcKbKcKdKKεK¨K_KcKeKdKfK£KηK§K^KNK7K?K~KΓKΚKžKŒKΕKπKλKξKζKαKςKζKΌK‡KhKfKmKgKeKhKvKuK™K…KlKqK[KPKbKaKGK.K8KUKhKjKhKfKcKaKbKeKfKcKcKdKdKcKeKeKcKbKbKcKeKdKdKeKeKbKbKdKbKaKbKbKdKeKbK_K\K]KbKaK]K\KaK`K^K\K_KQKKKSK7KcKfKdKdKfKeKbKdKfKeKeKfKfKeKfKgKfKfKgKgKdKdK_KVKTKRKTKVKTKSKSKQKJK>K4K1K1K6K4K6K@KMKXKUKOKMKLKMKMKLKKKKKKKIKHKIKHKHKGKGKHKDK;K0K%K'K'K$K3KCKAKFK8K4KWKLKIKNKSK:K,K,K,K#KKKKKKK K K KKKKK K K K +K K KKK!K"KKKKK!K K K"KKKKKK)K+K*K'K$KKK K'K)K'K)K'K)K(K*K.K,K)K'K'K!KKK&K&K&K&K*K,K-K-K.K-K+K*K*K+K#KK$K*K+K+K1K4K0K(K0K/K1K6K?KGK;K!K'KEKEKHKHK9K.K1K2K0K1K#KKK!K!K!KK%K*K&K;KtKsKsKtKsKsKtKtKsKuKuKuKuKtKtKvKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKwKvKwKwKvKvKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKyKzKzK{KtKTKJKLKLKLKLKMKMKLKLKLKLKKKKKLKMKLKIKEKCKDKEKHKMKLKFKFKHKHKHKHKAKK?K>K?K>K?K?K>K>K>K>K>K?K>K=K=K=K=K>K?K=KK=K6K,K.K3K2e]qv(KbKbKaKeKfKΊKοKΖKΨKκKζKηKηKηKιKκKεKγK‚K`KfKbKbKaK˜KηK·KcK_KbKbKdKcK‚KάKΝKoK]KeKaKeK\K…KέKΖKkK]KgKfKeKfK§KθKŸK[KbKdKgKcKŒKαKΌKhK`KfKgKeKKγKΊKiKcKUK4K2KmKάKΜK’KπKκKοKεKαKνKηKρKοKΌK’KΙKΔKKuK{KzKK|KmKuKPK]KBKK'KOKdKgKjKjKhKcKaKbKgKgKgKdKaKdKeKeKdKeKeKeKeKeKdKeKeKeKeKbKaKaKaKaKcKeKdK`K`K]K^KbKaKdK[KYKaK`K^K\K]KRKGKVK?KbKeKdKeKeKeKeKeKeKeKeKeKeKeKeKeKdKdKcK`K\KWKTKTKVKSKSKSKTKQKGK;K3K2K4K4K5K8KBKMKWKUKOKLKKKKKKKLKLKLKLKIKHKIKIKIKGKGKHKCK:K/K)K)K$K&K'K%K-KCKBKBK@K/KQKNKIKGKUKCK-K.K.K)KKKK"KKKKKKKKKK K K K K +K KKKKKKKKKK!K!KKKKKKK)K(K)K'K KKK#K'K)K(K(K"K%K(K+K+K)K(K)K(KKK"K&K&K%K&K)K*K+K+K/K+K*K'K)K&K K K'K+K*K+K1K1K+K(K-K0K1K2K7KAK1KK/KAKAKFKCK2K.K0K0K3K0KKKK!K!K 
K"K'K)K%KUKwKrKsKsKsKtKvKuKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKuKuKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxK{KzKxKxKxKzKzKzKpKPKKKNKLKLKMKNKNKNKNKLKKKLKKKKKKKLKIKDKGKDKBKIKPKLKFKFKFKHKKKFK>K:K>K?K>K?K>K>K>K>K>K>K>K>K?K?K?K?K?K?K?K=KKK?K>K>K>K>K>K>K>K>K>K>K>K>K>K>K=K=K>K=KK?K>K>K>K?K?K?K?K?K?K>K>K>K?K>KK`KeKjKkKiKaKaKgKiKiKiKeKbKbKeKgKeKdKbKcKdKeKfKeKdKbKbKdKdKdKdKdKdKeKdKaK`KcKcKdKdKdKbKUKKKWK]K^K]K]K\K]KVKDKKKLKTKWKXK[K_K`K]KcKdKeKgKjKgK_KXKUKVKVKUKSKSKUKSKNKBK8K3K3K5K6K7K;KJKVKWKQKKKMKMKNKMKKKLKLKLKKKIKHKIKHKHKIKHK=K0K)K%K(K)K&K(K%KKKKKKKK5KDKAKFK:K3KUKOKFKEKUK=K.K/K.K)KKK!K K!K K!K$K!KKKKKKKKKKKKKKKKKKKKKKKKKK&K&K%K&K"KKK"K$K&K(K%KK K"K"K'K)K)K)K&KKK K&K$K$K(K$K#K&K)K+K+K)K(K(K#KKK$K)K)K)K/K0K)K$K0K/K*K*K*K/K)KK!K%K'K*K.K.K.K-K,K2K+KKK K K K"K#K)K+K)K]KwKsKsKsKsKsKtKuKuKuKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKwKxKxKxKvKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKzKzKzKzKzKzKzKzKzKzKyK{KyK^KKKKKLKKKKKLKMKLKKKJKMKJKLKKKLKLKIKDKGKAK7KFKOKPKIKFKFKHKIKEK@K>K>K?K?K=K=K=K=K=K>K?K>K>K=K=K>K>K>K=KK>K>K=KK>K>K=K=K?K?K?K=KKK?K>K>K?K>KKK=KK?K>K>K?K>K=K=KK?K?K?K?K?K?K=KKJKVKZKSKOKPKQKPKOKOKOKNKMKMKLKMKLKKKKKLKHK=K1K)K'K'K(K(K)K'KKKKKKKK!K!K KKKKKKKKKK(K@KBKDKCK/KPKTKLKEKTKKK4K3K0K1K&K K KKKK%K+K,K%K"K$K$K$K#K#K#K KKKKKKKKKKKKKKKKKK KKKKK!K!K"KKKKKK%K%K&K%K KKK$K%K#K%K$KKKK!K'K)K(K*K&KKK K&K&K'K*K*K(K K'K,K+K#K$K&KKKK"K%K$K&K+K,K,K-K/K+KKK!K!K"K"K%K+K.K,K\KwKrKsKtKtKtKtKuKuKuKuKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KzKyK^KLKNKNKMKNKNKMKKKLKKKLKMKLKKKLKJKKKIK-KKK3KKKOKKKFKFKFKFKHKDK=KK=K=K=K>K?K=KKK>KK>KK>e]qƒ(KέKΫKΰKθKβKάKιKζKθKηKηKλKιKμKK_KfKfKcK[K–KιKΐKlK_KcKbKeKeKcK§KξKKcKaKcKaKeKbKwKΟKέK€K]KfKdKcKcKiKΈKιK”K]KeKdKgKcKhKΊKζK‘K]KeKeKfK`K€KάKΟKrK`KgKjK`K±KσKΌKλKξKοKθKθKπKρKρKυKΪKœKΔKζK§KuK‡K‚KˆKšKtKuKKcKdKYK_KeKcKfKkKjKiKdKcKdKeKfKjKjKdKdKeKeKhKhKfKeKeKeKbKbKeKdKdKeKdKdKcK_K`KdKeKdKdKcK^KXK\KcKdKcK`KYKZKVKUK[KTKRKXKRKLKVKTKIK:K4K5K5K:K:K?KKKWKZKTKOKPKSKRKPKQKPKPKPKPKOKNKMKMKNKOKJKK>KK=KK?e]q„(KάKΩKίKζKΫKδKιKθKδKζKηKαKεKΧKqKfKiKdKfKfKZKƒKβKΣKxK`KfKdKeKdK_K”KθKΑKjK_KfKeKfKcKlK½KιK“K_KcKbKcKeKeK§KμK¬KaKcKeKcKhKeK¦KμK₯KaKcKfKfK`KqKΜKέKK_KkKfKΔKλKΑKπKνKοKζKκKρKπKοKφKΡKžKΚKαK•KwKŠK‡K“KKtKuK€KgKaK?KbKcKgKjKjKhKdKeKeKfKhKjKiKdKdKdKdKiKjKhKcKaKcKeKdKdKeKeKdKfKcKaKaKaKbKeKfKdKbK^K[K]KaKdKdKeK]KVKVKLKMKWKTKRKZKKKCKDK9K3K3K6K8K9KBKNKYKXKTKOKNKQKPKRKSKPKQKPKOKQKPKLKMKOKOKIK:K0K)K'K)K)K*K)K$KKKKKKKK$K$K!KKKKKKKKKKKKKKKKKKK!K#K$K"KKK;KGKCKEK?KDKVKPKGKLKWKAK3K5K4K0K(K$KKKKK!K$K$K#K"K'K.K3K4K.K)K&K%K$K$K$K#K$K$K!KKKKKKKKKK K KKKKKKKKK"K#K$K!KKKK K K!K"KKKKK#K'K%K'K%KKKK$K&K$K'K%K!KK$K)K'K!K!K"K KKK K!KK$K*K,K.K.K.K"KKKKK!K$K+K3K2KK?K?K>KK0KSKRKKKFKUKJK4K5K4K5K+K$KKK K!K!K!K"K&K0K5K3K.K+K+K,K+K(K%K#K"K#K$K#K#K&K"KKKKKKKKKK K KKKKKKK!K K"KKKKK!K K!KKKKKK%K&K%K'K#KKKK$K%K$K'K"KK!K&K)K$KK!K!KKKK K!KK%K.K-K/K.K.KKK KKK!K%K4K5K1KOKxKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKxKwKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKyKyKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|KpKVKLKKKKKLKKKLKKKLKLKMKKKKKKKLKKKHKJKGK*KKK5KLKOKLKFKDKFKGKGKCK>KK=K=K>K>KK>KKPKXKWKRKSKSKSKRKRKQKQKQKQKQKPKMKNKOKQKMKCK5K'K%K*K*K(K*K%KKKKKKK K&K(K"KKKKKKKKKKKKKKKKKKKK%K(K"KKKKKKKKK K+KCKEKBKGK0KCKXKOKEKMKTK@K5K5K5K1K+K$KK#K)K/K2K-K*K)K)K,K-K.K,K.K2K4K4K,K%K$K$K$K$K"K$K$K%K$K KKKKKKKKKKK 
KKKKKKKKKKKKKKKKKK$K$K$K$KKKK#K$K#K(K#KKK#K&K'K KK KKKKKKK"K+K-K.K.K1K"KKKKKKK3KK=K=KK>KK/K)K*K*K*K*K)K$KKKKKKK"K(K'K KKKKKKKKKKKKKKKKKKKK'K'K!KKK KKKK!KKK%K%K$K=KEKCKFK8K5KYKQKHKNKVKGK5K5K5K4K.K)K&K,K+K'K(K(K(K*K+K-K-K-K0K4K2K(K"K K$K$K$K$K$K"K$K$K#K$K%K#KKKKKKKKKKKKKKKKKKKKKKKKKKK"K$K#K$K!KKKK$K#K#K%K KKK#K'K KKK KKKKKKK$K(K,K/K2K0KKKKK KK$K5K=K;KJKuKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKxKyKyKxKxKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K_KNKNKLKKKLKKKKKKKLKJKIKKKKKLKJKHKHKIK>KK K KDKOKNKIKDKFKGKGKEKAK=KK>KKJKPKWKSKQKFKEKTKUKUKRKRKSKRKRKSKQKPKPKQKSKOKBK4K(K'K(K*K)K'K%K!KKKKKK#K)K,K'KKKKKKKKKKKKKKKKKKK#K)K)K#KKK K!K!K KK K!K(K'K"KKKKKKKK!KCKEKBKHK/K@KXKOKIKOKUKDK7K8K6K1K-K)K!K!K#K&K,K/K*K&K#K"K$K$K$K$K$K#K"K!K!K$K$K$K#K!K$K+K0K&K#K$K$K#K#K$KKKKKKKKKKKK KKKKKKKKKK!K$K#KKKKK#K$K#K$K KKKK$K!KKKKKKKKKKK$K#K'K.K/KKKKKKKK$K@K—KΤKΟKKpKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKzKzKxKxKxKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KiKPKJKLKLKKKKKLKKKKKLKLKKKKKKKKKLKKKMKIK,KKK3KJKPKKKGKFKGKGKEKBK=K=K=K=KK?KAKCKIKSKSKRKGKDKNKVKWKWKUKRKRKSKRKRKRKQKPKRKOKBK0K)K'K)K+K*K*K$K!KKKKK!K'K*K)K$KKKKK KKKKKKKKKKKKKK$K+K(K#K KK!K"K"K K!K#K%K)K$K KKKKKKKKKKK;KGKDKFK6K4KWKRKKKLKWKKK8K9K8K6K1K-K#K"K*K-K+K%K!K#K#K$K"K"K#K K K"K"K"K"K"K$K$K&K,K4K7K6K1K,K$K$K$K$K#K$K#KKKKKKKKKK K K KKKKKKKK K"K!KKKKK!K"K#K!KKKKK%KKKKKKKKKKK!K"K"K%K.K$KKKKKK K!K,KœKΣKΛKKΉKwKvKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKwKwKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKzKzKyKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K|K{KzKzKzK{KzKzKdKKKKKLKMKLKKKLKKKKKKKKKKKKKKKKKLKKKIKEK(KKK8KNKPKNKFKEKGKDKDKDK=KK.K(K&K+K,K,K(K"KKKKKK K'K)K&K"KK K!K!KKKKKKKKKKKKKKK#K*K'K KK K!K!K"K$K!K$K(K(K!KKKKKKKKKKKKKK1KFKFKGK>K-KRKSKNKJKTKQK;K9K8K8K4K-K*K&K%K!K"K#K$K$K"K K K K K K!K"K K#K"K"K*K1K7KKAKFKPKXK[KYKXKSKPKVKVKUKUKVKUKSKTKTKRKRKTKRKIKK=K?K@KBKCKMKZK]K[KWKWKWKXKWKVKUKVKVKVKTKRKRKQKPKPKHK9K,K'K*K,K+K)K(K#KKKKK K%K+K*K"KKKKKK K!KKKKKKKKKKKK K&K*K'K!K K!K$K$K$K#K$K'K+K)K!KKKKKKKKKKKKKKKKKK!KCKDKCKFK7KAKYKQKIKIKWKFK9K:K9K8K4K-K!KK KKKKKKK K"K"K!K!K$K.K6K9K8K8K8K7K/K1K4K9K=K;K2K+K&K%K$K$K$K$K$K$K%K!KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK'K!KKKKKK!K!K#K)K'KxKΠKΛKΞKΥK›KtKvKvKuKuKuKuKuKuKuKuKuKuKuKuKuKvKyKwKuKwKyKxKxKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KzK{K~K|KzK{K}KqKQKKKMKNKMKLKLKKKKKKKKKKKKKLKKKHKHKHKKKKFKDKFK9K2KZKRKHKFKUKKK:K;K9K8K4K3K&KKKKKKK K!KKK#K(K/K8K;K8K9K8K4K3K0K2K:K=K;K1K)K$K#K$K!K"K#K$K%K%K$K$K$K$KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK"KK KKKKK!K"K$K&K)K:K²KΟKΛKKΒK}KwKxKuKuKuKuKuKuKvKxKwKuKuKuKuKvKyKwKuKwKyKxKxKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKyKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K~K|KzK|K}K~KjKPKKKLKLKLKLKKKKKKKKKKKKKLKKKHKHKHKJKJK4KKK(KHKOKMKHKEKGKFKGKFK@K:K=K=KKKK9K>K8K;KK:e]qš(KdKdKeKfKcK^KUKPKNKMKKKKKMKbKŒKΒKεKοKιKδKεKηKθKθKθKιKιKιKλKμKλKμKνKλKιKλKοKοKάK·K‹KoKhKnKvKyKzKuKsK€K 
KΙKζKρKνKιKκKλKλKιKμKπKρKΪK³K~KdK¬KρK«KzKιKΩKΖKυKοKςKζKμKςKπKπKσKκKͺK·KζK·KwKKˆKƒK›KK|K|KˆKlKdK=KhK^KYKhKmKjKiKiKiKiKgKeKaK]K^K`KcKgKiKiKgKbK`K_K^K\K\K\KZK[K\K\K\K[KZKZKYKXKWKJK9K,K(K*K+K,K*K#KK"K"K#K&K*K*K,K)K%K&K+K*K(K'KKKKKKKKKKKKK#K&K/K4K2K1K3K1K3K2K0K0K2K1K+K#K KKKKKKKKKKKKKKKKKK-K.K+K%K#K$K%K#K#K!K%K,K-K'KKKKKKKKKKKKKK'KCKCKDKEK,KEKXKPKFKJKWKHK:K:K:K9K8K7K#KKKKKKKKKKKK K"K KKK$K-K6KKe]q›(KdKeKeKdKeKeKfKaK]KUKNKMKMKIKFKRKtK¦KΦKμKμKηKεKζKθKιKιKιKμKμKμKμKλKξKξKμKιKηKλKοKκKΞK’KzKgKkKrKwKxKuKtKuKK·KΫKξKρKμKκKλKλKκKλKπKσKηKΔKŽK§KζKΖKνKΠKΠKχKοKςKεKνKςKοKοKχKέK˜KΎKθK²KtKŒK†K„KœKˆK~KK‡KiKaKVKfK_KkKlKkKiKiKjKjKgKdK_K`KbKfKjKmKnKiKeKbK_K`K_K\K]K]K\KZKZKZKZKZKZK[K\KXKHK2K)K,K-K,K,K)K%K K K!K!K%K)K.K+K'K&K%K%K%K*K+K)K)K$KKKKK K KKKKK(K0K4K2K2K3K3K3K2K2K3K4K,K&K#K K K!K!K!K!KKKKKKKKKKKK"K+K.K)K%K$K$K$K&K%K$K(K+K,K"KKKKKKKKKKKKKKKKKAKDKCKFK8K:K\KSKJKGKWKNKK>K;K7K2K5K:K9K;K?K9K2K'K#K"K&K'K&K$K!K"K#K#K&K'K&K&K$K%K,K7KAKKDKCKFK>K2KXKRKMKDKQKTK>K;KK1K&K%K%K#K$K#KKKKKKKKKKKK K KKKKKK KKKKKKKKKK K"K$K&K'K(K)K*K+K.K1K;K¨KΧKKΥKKŒKtKyKxKxKxKwKxKxKvKuKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKyKxKyKyKxKxKxKyKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KwKXKIKJKJKIKKKJKIKIKIKLKKKJKIKHKIKHKHKHKIK3KKK"KDKKKOKKKFKEKFKEKDe]q(KdKdKdKdKdKdKdKdKdKdKeKfKcKZKRKJKLKKKIKFKRKoK‘KΠKκKξKιKζKζKθKκKλKμKλKλKμKλKθKκKμKλKκKηKμKσKνKΡK£K}KlKkKsKyKyKvKrKxKKΎKήKπKοKλKμKνKνKμKνKςKοKεKζKΌKζKσKπKπKζKοKρKρKοKχKΗKžKΟKιK•K{KŒK†K„K‘K„K|K…K€KaKnKoKoKmKkKlKlKkKeKbKbKfKjKmKmKnKmKlKhKcKaK_K`K`K_K`K^K\K^K[K\K]K[K[KRKAK0K&K)K+K,K+K)K#KKKKK!K%K*K,K'K"K&K%K(K)K&K&K KK'K+K'K)K)KKKKK K KK KKK,K3K5K5K6K8K2K+K!KKKKK"K!KKKKKKKKKKKK!K+K-K+K(K$K%K'K&K#K"K&K+K*K*K%KKKKKKKKKKKKKKKKKKK%K&K7KDKCKCKDK.KOKTKOKFKMKYKDK:K=K=K=K:K7KKKKKKKKKK"K(K-K2K4K4K6K:KK@K;K2K(K!K!K!K$K$K#K"K"K!K!K$K%K)K*K'K%K+K5KBKMKKKHKIKIKBK@K?K9K0K$K$K$K#K"K$KKKKKKKKKKKK KKKKKK KKKKKKKKK K"K$K%K&K(K)K+K*K-K0K-KgKKΤKΥKΧK²KvKyKxKxKxKyKxKxKvKuKuKvKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKxKxKxKxKxKzK{KzKzKzKzKzKzKzKyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KrKSKIKIKIKLKJKHKHKHKJKJKHKHKIKIKIKGKFKHKHK/KKK(KEKLKPKIKFKGKDKCe]qž(KbKcKdKdKdKdKdKeKcKaKdKeKeKeKaK_KWKMKIKIKKKGKHK[K…K·KΰKοKξKηKηKθKιKιKιKιKιKθKκKμKμKμKλKλKκKλKπKςKβK½KKpKhKnKvKyKwKtKuK‚K₯KΞKκKρKοKνKξKνKνKνKπKοKΞKζKοKπKνKηKρKπKρKπKφKΌK€KΠKεKŒK}K‹K…K‰K’KKzKyKxKsKpKnKlKmKkKiKeKbKdKgKkKmKoKoKkKlKkKkKiKbK_K`K_K`K_K]K]K\K]K]K\K[KRK@K.K(K*K-K-K+K(K#KKKKK!K%K'K'K&K%K%K$K%K)K(K%K KK"K%K(K)K)K)K*K'KKKKK KKKKK(K0K7KK7K.K%KK"K$K!K!K!K"K"K!K$K)K)K(K(K%K(K/K=KHKIKKKKKHKFKAKAKCKFKDK5K*K$K"K"K%K$K$K%K"KKKKKKKKKK K KKK +KK KKKKKKKKK!K!K#K&K)K)K)K+K*K,K/K3K;K©KΨKΥKΥKΣKKuKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKzKzKzKzKzKzKzKzKzKzKzKzKyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K|KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKmKOKLKIKIKIKHKJKKKIKHKIKIKIKHKHKHKHKIKIKHK'KKK.KGKNKOKIKFKDKBe]qŸ(KfKeKdKdKdKdKdKeKdKbKcKeKdKdKdKdKdKdK\KSKLKIKJKJKHKLKgKšKΚKκKρKκKζKθKιKιKιKθKκKμKμKμKμKμKμKλKλKθKνKςKμKΠK§K~KjKiKrKuKvKtKrK{K“KΊKΰKρKυKμKκKνKνKνKνKηKιKλKιKβKνKςKπKπKυK²K§KKδK‡KKK„KK KuKuKwKtKpKoKnKlKiKfKfKgKhKmKpKrKoKmKmKkKmKmKiKdK`K_K`K_K`K^K\K[K\K]K[KPK=K-K+K+K,K,K*K%KKKKKK#K&K$K'K#K$K%K%K%K$K%K"KK K"K&K*K*K)K)K(K)K)K*K KKKK K KKKK'K3K1K)KKKKKKKKKKKKKK!KKKK K+K/K0K+K(K&K%K%K%K%K$K)K/K*K"KKKKKKKKKKKKKKKKKKK#K*K)K(K%K&K$K.KDKDKCKGK7K:K[KQKJKDKUKRK=KKBKEKAK6K,K"KK 
KK!K$K#K$K$K$K%K"KKKKKKKKK K +K KKK KKKKKKKKK!K!K#K&K)K(K)K+K+K/K/K5K1KdKΠKΣKΣKΫK·KvKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKyKzKzKzKzKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K~K|K{K|K}K}K}K}K}K}K}K}K}K}K}K}K}K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KfKLKJKHKIKIKKKLKIKHKIKIKHKIKIKIKIKIKHKJKDK#KKK3KHKNKNKJKFKCe]q (KgKfKeKeKdKdKdKdKdKeKbKeKeKdKdKdKdKeKfKeKaKZKRKKKJKKKGKGKTK}K±KΫKπKοKιKηKθKθKκKμKμKμKμKμKμKμKμKμKλKκKλKρKρKβKΏK“KqKgKkKuKzKyKuKtK‚KͺKΣKιKσKοKμKνKνKξKξKμKγKήKκKλKθKρKςK©K­KΤKέK€K…K‹K„KŽKšKwKxKsKqKqKoKjKeKdKgKjKnKqKrKrKpKpKpKqKpKmKmKjKdK`K_K_K_K`K^K]K^KZKMK;K.K*K*K-K,K+K%KKKKK!K"K%K&K"K#K%K$K$K$K%K&K"KKK K&K)K)K)K'K&K*K.K)K'K)K'KKKKK KKKK$K"KKKK K KKKKKKKKK KKK K"K-K4K4K1K.K+K(K%K$K$K)K,K.K+K KKKKKKKKKKKKKKKKKK K%K*K)K'K)K&K%K%K%K)K?KEKCKEK>K.KXKSKKKFKRKWKBK:K=KK2K&K"K#K#K!K!K KK"K%K$K#K#K#K$K#KKKKKKKK K K K KK KKKKKKKK K K"K#K&K&K&K*K+K*K.K.K1K5K8K£KΨKΣKΩKΧKKuKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K~K}K}K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K{K_KJKHKJKLKLKLKIKHKIKIKHKJKLKIKIKIKHKHKJK?KKKK8KLKNKMKHKDe]q‘(KdKhKfKdKdKdKdKdKdKdKcKeKeKdKdKdKdKdKdKdKdKeKfK_KUKNKJKIKHKCKIKbKKΕKηKρKλKθKθKκKκKκKμKμKμKμKμKμKμKμKλKλKκKμKσKοKΦK«K€KlKlKrKvKuKsKrKyK’K»KαKτKςKνKμKνKνKεKβKμKιKθKςKξK‘K±KΩKΤKxK†K{K}KKšKzKvKpKpKnKiKgKgKkKoKrKtKrKnKmKqKrKpKpKnKlKjKdKbK`K_KaK_K_KaKYKJK6K+K,K-K.K+K*K$KKKKKK!K$K$K!KK K$K#K#K$K!KKKK K$K%K&K*K)K(K(K/K9K8K-K'K)K(K$KKKKKKKKKKKKKKKKKKKKKKKKK#K/K4K5K2K0K0K-K*K&K&K,K.K,K$KKKKKKKKKKKKKKKKKKK$K)K)K(K'K(K(K)K%K&K'K&K%K:KEKCKCKDK*KOKWKMKFKKKXKGK;K=KK=K=K%KK"K#K$K'K)K)K*K,K0K1K/K(K!KKKK!K!KK K K#K%K%K%K%K"K#K&K2K@KFKIKGKAKAKDKGKGKGKAK@K8K-K#K"K#K"K KKK K$K%K'K)K(K%K$K%K$K#K%K"KKKKKKK K +KKK KKKKKKKKK K!K#K&K(K(K+K,K,K-K0K0K4K0KdKΠKΤKΦKάKΉKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K}K}K}K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K}KzKUKGKJKJKJKJKIKHKIKIKHKJKLKIKHKHKJKHKHKIK9KKKK=KJKOKJKEe]q’(KKhKbKdKhKfKeKdKdKdKdKdKdKdKdKdKdKeKeKeKeKdKdKfKfKbK[KQKIKGKFKGKFKOKsK¨KΦKξKπKκKζKθKμKμKμKμKμKμKμKμKμKμKμKλKμKνKρKτKθKΔK”KqKiKlKrKuKtKqKnKK₯KΡKνKφKοKμKγKγKνKλKλKσKθK›K΅KήKΝKsKˆK†K~K’K˜KvKsKkKgKgKgKkKnKrKrKqKpKnKnKoKnKmKnKoKmKjKeKaK_K_K_KbK`KYKIK1K*K*K-K/K.K(K"KKKKKK!K"K$KKKK K"K$K$K!KKKK!K!K#K$K$K&K)K,K3K:K=K;K8K6K-K(K'K&K!KKKKKKKKKKKKKKKKKKKKKK(K1K8K:K6K1K/K/K,K,K-K0K1K-K"KKKKKKKKKKKKKKKKKKK)K+K(K(K(K)K)K)K(K(K*K)K)K)K"K1KCKCKCKGK.KCKYKNKJKHKUKLK=KK:K:KK5KK K!K"K&K*K(K&K"K KKK KKKKK"K#K%K&K$K"K K%K0K:KEKFKDKAK=K>K=KBKFKGKDK:K.K%K#KK$K KK!K K&K)K+K-K,K*K'K*K,K-K0K.K)K&K$K#K#K!KKKKKKK K +K K K KKKKKKKK K!K#K&K)K*K,K+K,K/K0K/K0K8K5KdKΠKΤKΥKΫK»K|KzKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K|K}K}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KlKMKHKIKIKHKHKIKIKHKIKJKIKHKHKIKHKHKHKIKGK/KKK'KEKNKLe]q€(KλKπKοKίKΈKKmKdKeKhKiKgKeKeKdKdKdKdKdKdKdKdKdKdKeKdKdKdKfKgKcK]KRKJKFKGKHKGKLKmKŸKΡKκKοKιKθKλKλKμKμKμKλKνKξKξKξKξKξKνKμKνKπKςKηKΓK“KtKgKiKnKpKlKfKdKzK£KKκKξKηKμKχKΣKœKΔKδK€KrKŒK‹K~K˜KlKMK]KdKiKoKuKsKsKtKtKrKqKpKmKoKmKlKiKhKhKaK_KbK^KUK@K1K*K,K.K/K-K(K"KKKKKK$K$K KK KK K!K!K!K!KKKK K"K K!K K K K)K3K8K=KK>KK>K@K0K$K&K&K#KKKKKK K 
KKKKKKKKK#K-K7K;K9K5K5K5K3K3K6K7K7K4K)KKKKKKKKKKKKKKKKKKK'K-K.K+K(K(K(K)K*K*K)K*K,K)K&K$K%K$K!K#K#K>KEKBKDK?K,KXKTKOKJKUKWKCK9K>K=K=K=K;K$KK"K%K$K!KKKKKKKKKK"K"K$K$K"K KK(K6KCKGKFKEKDKCKK’KΩKΤKΧKΪK˜KvKyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KeKKKHKIKIKHKHKHKIKHKHKHKIKHKHKHKHKIKIKGKCK(KKK-KEKJe]q₯(KνKθKιKξKσKμKΣK«K‚KiKbKeKgKgKgKfKfKfKfKeKdKdKdKdKdKdKdKdKdKdKdKeKgKbKYKPKHKGKGKFKDKXK‚K·KαKρKοKθKηKλKμKμKμKξKξKξKξKξKξKξKξKνKμKοKσKπKΧK­K‚KlKfKfKfKaK]KUKΐKχKλKεKνKτKΙK­KΤKθK”KvKŒK…K‚K—KVKHKaKsKŽKŒKtKyKwKuKsKtKrKpKmKlKkKlKgKbKcKdKaKTK?K/K+K-K/K-K+K&KKKKKKK"K%K KKKK!K!K!KKKKKKK!K!K K!K K!K*K4K8K:K7K:K=K=KKHKKKIKHKEKCK?K;K=KBKCK:K-K%K$K%K%K#KK!K%K&K,K,K+K*K)K)K,K1K2K3K5K4K3K0K.K+K/K1K-K+KKK$K$KKKKKKK K +K KKKKKKK K!K!K#K&K)K+K+K+K,K0K2K2K3K2K8K5KaKΟKΥKΦKΫKΎK{KyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}KzKzKzKzKzK}K}K~K}KzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K{K{K^KIKJKIKHKFKGKIKHKIKIKHKHKHKHKHKIKJKGKIKEK%KKK/KHe]q¦(KΥKξKτKνKιKθKοKρKζKΘKœKwKbK`KfKiKhKfKfKdKdKeKeKdKeKeKdKdKeKeKeKdKdKeKhKgK_KTKKKGKFKHKFKJKhK™KΞKλKρKξKκKκKλKξKξKνKξKξKξKξKξKξKξKνKμKμKςKτKζKΓK—KtKbK[KZK_KΠKσKνKξKξKςKΡKΈKΧKΰK‡K}KŠK‡K‚K–KFK9KfKŸK›K€KxKwKuKsKsKtKrKoKkKkKnKjKdKaK^KRK>K.K-K-K.K/K,K$KKKKKKK!K!KKKKKKK!K KKKKK K KK K KK"K+K1K5K8K7K:KKKKK7e]q§(K’KžK»KέKσKσKμKιKμKςKπKίKΉK‹KpKdKdKgKgKgKfKgKfKdKfKgKeKdKdKdKdKdKdKdKdKgKgKeKeKZKPKHKFKFKDKFKUKzK±KήKπKοKκKκKλKμKξKξKξKξKξKξKξKξKξKνKμKμKοKτKρKΨK¬K„K`KXKίKρKνKνKξKρKέKΏKΪKάK‡KƒK‰K†KƒK”K7KBKvKtKrKxKtKsKtKsKpKqKqKnKiKiKlKgK^KMK9K/K,K.K/K/K*K"KKKKKK"K#K!KKKKKK K"K!KKKKKKKKKKK"K(K2K6K5K5K4K8K:K9K;KAK@K8K-K,K-K+K,K-K*K'K&K%K&K%KKKKK +K KKKK(K0K4K8K8K7K6K9K=K;K3K(K#K"K$K KKKKKKKKKKKKKKK&K*K*K)K)K*K+K+K)K'K)K'K&K&K&K%K&K(K%K$KKKKKKKKK&KDKCKCKFK9K7K\KTKLKJKUKRK@K>K?K>KK:KKKKKKKKKK$K KK K K)K8KCKNKPKPKLKHK>K@KEKCK>K.K'K!KK K!K!K"KK#K(K,K/K.K*K)K*K-K2K4K6K9K8K9K9K9K8K8K:K7K3K*K KKK K KK#K$K%KKKKKKK +K +K KKKKKKK!K K#K%K&K'K)K(K.K0K0K3K3K2K4K8K8K`KΞKΩKΧKΪKΐK|KxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKxKxKxKxKyKzKzK{KyKxKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K~KzK{K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~KqKOKIKGKIKHKHKHKHKHKHKHKHKIKIKHKIKHKFKGKHK9KKKe]q¨(KΔKΣK·KžK€KΖKζKτKπKιKθKνKοKνKΩK±KˆKkKcKfKiKhKgKdKdKeKeKeKdKeKeKeKdKdKeKeKeKeKdKfKgKaKWKLKEKGKGKDKFKbK’KΔKθKςKοKλKλKμKνKνKξKξKξKξKξKξKξKξKνKμKνKςKτKθKΎK•KηKπKνKξKνKπKΧKΏKάKΦK|K„K‡K‚K‹KžKrK|KvKwKxKuKtKsKsKrKqKpKpKlKkKkK_KOK8K*K+K0K.K,K(K%KKKKKK"K%K$KKKKKK K KKKKKKKKKKKK$K*K1K2K2K4K6K5K4K9K;KK=K=KAK$KKKKKKKK KKK"K.K9K@KDKDKFKIKHKHKFKHKCK1K(K"KK K"K!K"K K"K&K,K/K/K-K-K+K-K2K6K8K:K6K3K7K;KK8K-K)K&K)K-K.K)K&K$KKK#K(K'K&K'K$K&KKKKK K KKK K-K1K8K7K3K-K#KKKKKKKKKKKKKKKKKK'K/K/K+K'K(K)K)K)K(K(K(K)K(K&K#K#K$K&K'K KKKKKKKKKKK K$K(KK>K?K?K=KBK0KKKKKKKKK#K1K:K?K@K@K@K>K>K=KFKRKLKK?K;K6K5K6K8K;K:K7K7K2K(K KKKKKKKKKKKK$K%K&KKKKKKK K +K 
KKKKKK!K"K$K$K'K*K-K.K.K0K2K3K2K3K5K6K;K8K`KΜKΧKΧKΫKΕKKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKxKxKxKxKyKzKzKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K~K{KzKzK{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€KK}K~K~KaKIKIKIKIKIKHKHKHKHKHKIKHKIKHKHKIKHKFKGKGK0Ke]qͺ(KέKtKEKRKzK²KΡKΗKͺKžK΄KΧKοKσKλKθKλKςKςKεKΒK—KwKgKdKgKhKgKeKdKeKeKfKfKfKfKfKfKfKfKeKeKgKgKhKhKcKWKKKEKEKEK@KDK[KŠKΑKθKτKρKμKλKμKξKξKξKξKξKξKξKξKξKξKνKμKαKκKξKζKιKοKκKΌKΖKαKΥK|K†K‚KK…KŸKvKyKwKwKvKsKpKnKkKiKlKmKcKLK4K-K/K1K/K,K(K KKKKK"K&K%K!KKKKKK KKKKKKKKKKKK!K'K-K0K.K3K4K4K3K2K4K8K;K3K,K&K$K&K)K,K,K&K KK!K'K,K.K(KK%K%K$K&K$KKKKK KKKK1K2K3K'KKKKKKKKKKKK KKKKKK!K-K2K0K-K+K*K,K*K*K*K'K&K&K'K&K#K$K%K%K#K KKKKKKKKKKK%K*K'K'K&K5KEKEKFKGKBKMK[KUKJKEKTKRK?K?K=KK@K:KKKKKKK%K,KK?K7K(K#KKKKK!K#K$K'K)K#K$K*K)K+K2K;K=K>K9K;K9K;K=K>K;K8K3K-K%KKK KKKKKKKKKKKK#K'K%K#KKKKKK K K KKKKKKK!K"K#K'K(K+K-K/K0K0K3K3K3K4K6K6K;KK>K?KKKKK#K0KBK@K;K9K8K8K9K9K6K8K3K+K"KK"K#K!KKK"K$K)K+K)K)K&K)K2K9K;K=K9KKK;K6K3K4K2K.K+K+K)K'K$K$K"KK K#K#K!K KKKKKKKKKK#K'K)K)K(K'K(K(K'K%K+KCKGKFKGKDKGKXKWKRKHKPKYKKKK?K>K>K@K-KKK"K(K2K?K;K3K1K2K6K8K8K.K%KKK!K!K!K!K#K%K(K)K(K)K)K)K,K3K;K?K>K:K3K+K:K?K;K=K9K4K(KKKKKKKKKKKKKKKKKKKKKK$K$K%K%KKKKKK K K KKKKKK K!K$K'K(K)K-K0K0K/K1K3K5K3K5K8K8K;KK;K9K;K>KAKAKCKAK8K/K!KKKKKKKKKKKKKKKKKKKKKKKKK%K&K*KKKKKKK K KKKKKKK!K$K&K)K)K,K/K0K/K1K4K6K6K7K8K8K;K;K]KΝKΫKΨKάKΞKƒKxK{KzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KK~K}K}K~K€KK~K~K~K~K~K€KjKKKGKIKGKGKIKHKHKHKHKHKHKIKHKGKGKFKGKFe]q(KgKiK_K|KΪKKxKaKkKiKiKcK}KΟKΡK™K°KΡKΕK©KŸKΆKΪKπKσKνKιKλKρKσKδKΑK“KqKeKfKiKjKiKiKgKfKfKfKfKfKfKfKfKfKfKfKfKfKgKhKeK]KQKEKDKDKCKBKSKzK²KΰKτKςKνKμKμKπKκKίKζKοKνKνKσKΎK°KΣKβK„KlKˆKˆKtK“K‹KoKtKuKlKXK>K-K.K0K1K.K+K#KKKKK K#K$K!KKKKKKK KKKKK K"KKKKKK#K+K.K.K/K/K3K2K3K4K6K0K+K&K!K K#K$K$K%K"KKKK!K$K$K$K#K!K"K-K:K@KFKDKDKBKBKEK@K1K&K&K%K!KKKKKKKKKKKKKKKKKK-K;K@K@K=KKKKAKCKCK=K2K$KKKKKKKKKKKKKKKKKKKKKKKKKKKK*K+K,K%KKKKKK K K KKKKKK!K$K$K(K)K*K-K0K3K3K4K4K4K7K8K8K:K=KK=KK8K-K&K"K#K$K%K$K#K%K!K!KKKKKKKKKKK'K,K.K+K)K)K)K(K(K)K,K-K'K"KKKKK%KEKFKGKGKAK?KYKVKOKGKPKTKEK@KBKAK@KBKDK/KKK!K%K%K"KKKKKKKK!K%K(K(K&K(K)K,K1K8K?K=K>K=K@KBKBKCKCK>K:K*KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK$K,K*K*KKKKKKK K KKKKKK!K#K$K'K(K*K,K/K1K3K5K4K5K8K8K8K9K=K;K\KΝKΪKΧKάKΡK…KxKyKyKyKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K|K|K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K~K~K}K}K}K~KKK~K}K}K}K~K~KKKKKKKK~K€KzKZKJKHKIKHKGKFKGKHKHKIKIKHKHKGKGKGKGe]q°(KbKiKfKiKcKeK³KκKKcKgKgKhKdKdK±KλK˜KMKHKdK˜KΖKΞK΄KžK©KΝKηKτKςKλKμKπKτKλKΡK§K~KhKfKgKjKiKfKfKfKfKfKfKfKfKfKfKfKfKfKfKfKhKiKeK^KRKIKCKCKDKBKMKtK«KάKσKνKςKξKοKοKξKξKKΉKΧKΧKoKxKK~K|K•KuKQK7K1K0K/K/K.K%KKKKKKKKKKKKKKKKKKKKKKKKKKKK%K,K.K*K-K0K.K1K3K4K5K.K&K KK K"K$K$K$K"KKKK!K$K$K$K#K"K&K/K=KEK@K:K:K>K@KDKHKKKHKKK>K?KK?KCK7KKKKKKKKKKK!K#K$K&K&K(K'K(K/K7K;KBK@K>K@KCKEKHKIKEKAK/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K,K+K%KKKKKK K K KKKKK!K!K$K%K&K+K*K-K0K3K5K4K7K9K7K8K7K:K@K?K—KήKΩKΨKήK°KwK{K{KyKxKxKxKxKxKxKxKxKxKxKxKxKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K~K~K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KK€K}K~K€KK}K}K}K}K}K}K}K}K}K}KK€KKKKKKKK€KK~KuKQKHKIKFKFKFKFKFKHKHKFKFKFKHKIKIKHe]q±(KKbKfKhKhKeK_KšKιK°KgKfKhKhKgK^K˜KλKΈKiKXKKKGKkK¨KΟKΙKͺK 
K΅KΤKξKτKνKιKμKσKυKθKΕKšKwKgKfKhKjKiKiKhKgKgKgKgKfKfKfKfKfKfKgKgKgKhKhKdKYKMKGKDKDKCKBK[KK§K΅KνKλKλKοKκK§KΐKΫKKiKyK~K|K_KMKKDKEK?K:K9K=KCKGKCK8K1K.K.K0K/K*K%K&K(K"KKKK K K KKKK0K8KK?K>K>K=KAK;K)K"K"K#K#K#K$K"K!KKKKKKKKKKK$K+K,K-K,K.K-K,K+K+K-K.K)K%KKKKKKKKKKK;KHKFKFKIK,KJK\KSKIKJKYKQK@KBKAKAK?KCK?K%KKKKKKKKK K#K%K$K$K&K,K3K9KK;K@KCK?K4K+K-K0K0K1K.K&K!K%K&K&K$KKKKK K KKKK-K5K6K:KK—KήKΩKΫKήK±KxK{K{K{K{KzKxKxKxKxKxKxKxKxKxKxKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzKzK{K}K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€K€K€K€K€KKKKKKKKKKKK€KfKIKFKHKIKGKFKFKFKGKGKIKHKHKHKIe]q³(KKαKΙKpKbKhKgKiK^KuKΣKΩK~K`KjKjKlK`KtKKΪK}KbKlKjKbKYK„KΧKίKΗKΝK½K₯K§KΔKζKσKσKξKμKπKυKοKΦK­K…KlKdKgKkKhKhKgKiKiKgKfKfKfKfKfKfKfKgKhKhKhKiKeK[KNKGKDKJK?KcKιKεKςKάK KΘKζKΌK_KzKtKWKKKK"KKKKK K KKKKKKKKKKKKKKKKKKKKKK K'K,K1K3K3K1K0K-K0K3K2K)K!KKKK K"K KKKKKK"K!K!K KK"K*K0K5K9K9K9K:K=K=K=K@K@K;K3K.K.K0K1K1K.K'K$K#K#K'K&K&K&K#KKKKK K KKK$K7K8K9KKAK?KDKFK?K/KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K.K/K)KKKKKKKK KKKKKK"K&K&K)K*K.K/K1K3K3K4K7K8K9K:K8KK[KΝKΪKάKέKΧK‹KvK{K{KyKxKxKyKzKyKxKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K}K}K}K}K|KzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K}KKK}K~KKKKKKKKKK€K€K€KKKKKKKKKKKKK}K^KFKGKGKGKFKGKGKFKGKGKGKGKGKGe]q΄(K[KoKΠKάK€K`KhKgKiKaKiKΐKδKKbKjKjKjKdKiKΎKδK‘KaKhKkKkKaKKΤKΗKK›KΛKΠK·K’KͺKΝKμKφKρKκKμKσKφKιKΛK K|KgKeKjKlKkKiKfKfKgKgKfKgKgKfKgKiKhKfKfKgKiKiKbKUKHK7KŽKωKοKψKΦK–KΑKαKK]K{KyK]K%KKKKKKKKKKKKKKKKKKKKKKKKKKKKKK"K.K/K+K0K1K2K3K4K3K.K'K#KK K!KKK!K KKKKK KKK K K#K(K4K6K6K5K7K8K9K;K>K@K@K6K/K,K/K0K0K0K-K$KK!K'K.K.K-K(K%K%K&K!KKKKK KKKK7K?K8K-K!KKKKK KKKKKKKKKKKK K+K1K1K.K)K,K-K,K)K.K2K-K%KKKKKKKKKKKKKKKKKKK-KFKGKFKGKDK.KTKXKPKHKKK[KOKEKDKBKAKAKHK?K)KKKKKKK$K+K.K5K6K8K;K?K?KAKCK=K5K&KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK%K/K.K0K KKKKKKK KKKKKK"K%K&K)K)K-K.K0K3K3K4K7K8K9K:K9K;K=K@K?K–KίKΪKΫKαK·KxK{K{KyKyKyKyKzKyKyKzKzKyKyK{KzKzKzKzKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzKzK{K|K~K}K}K}K}K~K}KzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K~K~KKK~K~K€KKKKKKKKK€K€K€KKKKKKKKKKK€KK€KzKVKHKFKFKGKFKFKFKFKFKFKFKFKGe]q΅(KiK`KeKΎKεK“KbKfKgKhKeKbK§KκK§KeKeKhKhKgK`K₯KλK₯KdKeKjKlKcKqKΛKΪKrKOKyKͺKΟKΜK±K’K³KΧKοKτKοKλKνKτKσKεKΐK–KtKeKhKiKiKiKjKiKhKjKjKgKfKfKfKfKiKiKiKiKkKlKgK\KͺKψKπKϊKΞK’K»KάK“K\KzKwKYK'KKKKKKKKKKKKKKKK K KKKKKKKKKKKK#K)K-K-K,K0K2K3K2K,K%K!K K"K$K"K!KKKKKKK K K KKK%K0K4K6K5K4K6K8K5K8KKCK@K4K&KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK+K/K1K)KKKKKKK K KKKK K!K$K(K)K(K+K.K1K3K3K5K4K7K:K:K9K9K;K?KK;K8K5K5K2K0K,K-K/K)KKKKKKKKKKKKKKKKKKK)K1K2K0K/K.KK_KTKNKFKRKVKEKBKDKDKEKFKLK1K#KKKK!K%K(K.K5K5K;K?K6K)KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK KKKKKKKKKKKKKK(K1K/K/K KKKKKKK KKKKK!K#K'K)K)K+K,K0K3K3K4K5K7K9K:KKBK@K“KήKΪKάKΰKΆKyK{KzKzKzKzKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{K|K~K{KzK|K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KKKKKKnKMKGKFKGKGKFKGKFKGKGKGKEe]q·(KhKgKgKgK\K’KθKΎKlKcKhKgKiK]K{KάKΠKtKcKhKgKiK^K|KάKΠKvKbKkKkKhKbKKμK³KkKaKSKDK[K”KΕKΘK­K£KͺKΗKθKφKςKμKλKοKυKνKΡK§K~KkKgKiKjKkKjKhKjKjKjKiKiKiKiKiKiKlKnKΘKωKοKψK΅K—KΕKαKͺKqKwKyKSK+K&KKKKKKKKKKK K KK KKKKKKKKKKK K)K,K*K,K0K-K(K$K"K#K#K$K$K#K"K KKKKKKKKKKK#K-K5K7K5K5K5K4K4K8K:K6K/K)K*K*K)K+K,K)K'K%K%K+K1K1K2K/K-K/K3K=KIKOKOKNK;K$K$K&K#KKKKKKKKKKKKKKKK KK 
KKK)K6K@KAK@K?K;K;K9K6K4K5K-K"KKKKKKKKKKKKKKKKKK"K,K3K4K1K/K/K0K/K8KEKEKFKIK=K4K]KXKQKHKNKWKKKCKDKDKBKJKMKKCK@K>K?K=K=K>K;K:K5K)KKKKKKKKKKKKKKKKKKK(K0K2K4K5K3K2K1K/K0K3K8KEKHKFKGKFK/KXKZKQKIKJKVKNKEKFKDKZKŠK\KBK+K&KKKK&K+K)K KKKKKKKKKKKKKKKKKKKKKK"K&K,K)KKKKKKK K K K K K K KK K"K K K!K K K K KKKKKKKKKKK,K1K1K1KKKKKKKKKKKKK"K%K(K*K+K+K.K3K3K4K6K9K:K:K:KK=K?K?K>K=KK>K?K@KK@KCKAK‘KήKΫKέKγKΎKzK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K~K€KK}KKKKKKKKKKKKKKKKKKKKKK€K€K€K€KKKKKKKKK€KK€KrKOKFKGKGKFKGKGKFKFe]q»(KtKKΫKƒK`KhKgKjKgK_KŸKμK²KhKgKjKgKhK`KˆKβKΖKpKfKiKgKjK_KˆKεKΕKpKdKkKlKeKbK¨KιK£KdKiKkKlKcKzKΧKKiKLKdKKΒKKΐK©K¬KΝKλKυKρKμKμKσKψKμKΝK‘K}KkKhKlKmKnKnK˜KσKσKςKςKΑKͺKΞKαK™KpKuKpKKDKDKDKFKIKJKMKOKOKIK?K4K,K)K)K'K&K&K%K&K KKKK K KKKK"KK9K4K-KKKKKKKKKKKKKKKKKK#K-K4K5K5K2K4K4K4K5K7K7K3K(KKKKKKK*KIKFKFKIK?K5K^KYKQKHKPKXKxKΐKΝKΝKKΉKYKFK,K*K'KKKK KKKKKKKKKKKKKKK!K#K)K,K)K*K5KIKaKpKwKxKvKAKK#K KKK!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K!K KKKKK*K4K2K5K%KKKKKKKKKKK K"K%K(K+K+K.K/K1K4K7K9K8K8K9K:K=K=K>K@KBKAK[KΙKήKήKΰKΫK’KxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzK{K{K{K{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€K~K}K~K~K€KK}KK€KKKKKKKKKKKKKKKKKKKK€K€K€K€KKKKKKKKKKKKƒKmKJKGKGKFKGKFKFKFe]qΌ(K`KfKΎKθK–KbKhKgKgKgK]KŠKεKΗKpKdKlKjKkK]KuKΥKΨK|KcKiKiKlK_KwKΥKΨK}KcKlKkKjK_K”KθKΊKlKfKkKlKcKlKΔKηKKZKRKKKfKžKΜKΠK±K€K·KΤKξKχKσKξKπKφKυKδKΑK—KwKjKkKpK₯KχKρKτKκK¨K°KΣKΰK…KoKtKjK9K1K KKKKK!KKKKKKKKKK K KKKKKKKKKKKK!KKKKKK#K$K$K$K"KKK$K+K0K,K'K*K*K)K)K)K*K+K&K!K$K$K'K'K$K$K"K"K"K&K(K*K)K(K'K*K1KK?K>K;K/K$KKKKKKKKKKKKKKKKKK'K0K4K4K4K5K2K1K5K7K9K7K/K KKKKKKKKK)KEKGKFKGKEK/KUKZKSKKKLKWKyKΟKΞKΟKΟKΜKoKIK6K*K,KKKKKKKKKKKKKKKKK$K(K,K)K&K.K?KUKkKsKuKtKrKrKtK[K#K!K KKK K!K K K K K K K!K!K!K K#K$K K!K$K"K K!K!K K K!K KKK#K2K3K3K.KKKKKKKKKKKK!K%K&K(K+K*K.K1K1K5K8K:K9K8K;K=K=K>K=K?KFKCK‘KαKΫKέKδK·KxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|KzK}K}K~K|KzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€K~K}KKKKKKKKKKKKKKKKKKKKKKKK€K€K€K€K€K€K€KKKKKKKKKKKKK~KaKFKFKGKFKGKFKFe]q½(KkKeK`K§KλK«KeKeKiKhKkK_KvKΧKΫKKbKjKjKkKcKjKΒKδKKcKkKiKlKdKiKΑKδKŽKaKkKjKlK`KKήKΝKvKeKlKkKhKbK­KκK‘KiKmK_KHKUK…KΐKΪKΔK±K¨K»KίKυKχKρKξKςKχKτKΫKΊKKwK°KϋKρKϋKΥKŽKΊKΦKΨK{KoKtK_K4K2KKKKKK K KK KKKKKKKK KKKKKKKKKKKKKKKK!K"K$K"K"K KK'K,K0K0K-K)K'K*K*K(K&K%K#K$K#K#K!K!K#K"K K"K$K'K'K)K)K&K'K+K-K7KAKBKBKBKBKDKFKFKCKK=K?KKHKGKGKIK1KIK_KWKPKJKVKeKΑKKΟKΟKΠK«KͺK‚K(K.K'KKKKKKKKKKKKK"K'K+K)K%K)K5KNKeKsKvKtKsKqKsKsKsKrKnK0K!K$K#KK K"K"K"K"K"K"K"K!K K"K"K#K$K"K"K$K#K"K!K K"K"K!K KK!KK,K6K3K5K$KKKKKKKKKKKK%K'K(K+K,K/K0K1K4K6K9K:K9K;KK@K>K?KDKBKYKΗKέKέKίKΧKKxK{K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|K{K}K}K}K}K{K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~KK€K~K~KKKKKKKKKKKKKKKKKKKKKKKK€K€K€K€K€K€K€KKKKKKKKKKK€KK€K{KYKEKFKGKFKFKFe]qΎ(KjKiKgK^KŽKηKΒKnKdKlKiKlKaKiKΓKζKKbKiKiKkKdKbKͺKμK¦KcKhKjKlKgKaK«KκK£KeKjKkKkKaKpKΚKάKƒKbKjKkKiK_K™KκK΅KjKlKqKhK]KsKΛKθKΜKΞKΙK­K©KΖKγKςKσKοKρKσKμKτKœKΊKύKσKύKΖK‘KΏKΧKΕKpKpKrKRK3K3KKKKKKKKKK KKKKKKKKKKKKKKKKKKKKKKKKKKKKK(K/K.K-K-K,K)K(K)K&K#K#K!K!K$K$K!KKKK!K"K&K&K&K&K#K$K(K/K4K6KGKOKCK>K@KCKCK@K;K1K.K/K.K1K/K)KK K K%K.K-K%K%K%K&K%K!KKKK K KKK,K=KKHKIKHKJK7K>K`KXKRKIKVKZK¬KΦKΞKΠKΝKάKϊKΥK:K)K,KKKKKKKKKK K$K(K*K&K&K.KEK`KpKvKsKpKsKsKtKtKsKsKsKsKwKGKK%K#KKK#K$K$K$K$K$K$K"K K#K$K$K$K$K$K$K$K$K#K#K$K#K 
K!K!KKK'K4K4K8K-KKKKKKKKKKKK"K'K)K(K,K-K-K1K4K4K9K:K:K:K=K=K?K?K?KAKCKCKKΰKάKήKδKΌKzK|KzKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K~K~K~K~K}K}K~K|KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KK€KKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€K€K€K€K€KKKKKKKKKKKK€KKKwKSKFKGKGKGKFe]qΏ(KjKiKjKjK\K{KΪKΥK{KcKlKiKlKfK`K¬KλK¦KcKgKiKjKiK_K•KκKΊKlKfKkKkKgK^K”KηKΉKkKfKjKkKfKeKΆKηK™KdKjKlKmKaK„KαKΝKtKfKmKpKgKmKΐKKK’KΗKΣK΅K΅KΗKαKοKοKιKβKπK•KΙKϊKςKύKΉK”KΑKΧK·KjKqKqKJK2K0KKK KKKKK K!K KKKKKKKKKKKKKKKK KKKKKKKKK!K!KK$K(K*K-K-K-K-K&K#K$K#K K"K KK K KKK K!K#K&K%K%K#K#K'K1K5K4K3K:KHKFK@K>K?K=K3K.K.K0K0K.K+K%KKK"K+K0K-K.K)K$K-K(K#K$K#KKKKKKKK$K*K#KKKKKKKKKKKKKKKKK(K1KKDKGKHKIKHK=K5K^KYKRKIKSKWKKΦKΞKΠKΞKΧKφKυKdK%K.K%KKKKKKK%K(K)K*K'K+K=KUKhKtKwKtKrKrKrKsKsKsKsKsKsKsKsKwKaK$K#K#K"KK#K$K$K$K$K$K$K#K"K#K$K$K$K"K"K$K$K$K$K%K$K#K#K"K!KKK#K1K5K6K4K"KKKKKKKKKKK K$K'K(K*K+K-K1K4K4K8K:K:K:KK?K?K?K>KAKEKXKΘKήKάKίKέK—KyK}K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K}K~K|K{K{K|K}K}K}K}K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KK~K}K~KK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€K€KKKKK€K€KKKKK€K€KKK‚KrKLKGKFKFKGe]qΐ(KgKhKjKjKmK_KnKΙKδKKbKjKjKkKgK[K–KκKΊKkKfKkKjKlK`K€KίKΟKvKdKkKkKlK^K€KΰKΝKsKdKkKlKgK`KŸKκK―KkKjKnKqKeKtKΟKΩKƒKfKhKmKoK{KΠKαK©KΘKςKώKςKΦKKπKνKιKζKιK‘KΨKψKσKϋK«K˜KΔKΩK¬KiKpKnKFK6K0KKK +K +K KKKK K KKKKKKKKKKKKK +K +K K KKKKKKKK#K*K&KK$K#K'K*K,K%K!K K$K#KKK!KKKKKK K"K!K K!K!K%K*K/K3K4K5K5K7K=KAK@K=K7K-K)K-K1K4K5K0K'K#K!K%K-K0K3K.K'K+K5K@KNKEK$K#K$K KKKKKKKKKKKKKKKKKKK KKKKK-K>KKKDK;K8K;KKAKHKHKIKGKEK/KUK\KTKMKMKXKvKΟKΠKΠKΟKKοKK˜K(K.K+KKKK#K&K*K+K(K(K8KLKaKrKxKrKqKsKsKsKsKtKsKsKsKsKsKsKsKsKsKtK4KK'K'K!K!K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K$K#K$K$K$K"K K!K!K K+K6K5K8K,KKKKKKKKKKK K#K&K)K,K,K/K1K5K4K7K;K7K6K;K?K?K?K?K@KDKFKBKŒKίKάKέKγKΕK}K~K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K}K~K|KzKzKzK}K}K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K~K€K~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€K€KKKK€K€K€K€KKKKK€K€K€K€K€KhKKKIKFKGe]qΑ(KΏKnKgKkKjKkKeKcK΄KμK‘KcKjKiKiKlK_K‚KήKΟKvKbKkKjKlKcKpKΛKΰK†KdKlKkKlKcKnKΜKίK„KcKkKlKmKaK‹KδKΔKrKeKeKkKhKrKΙKίK¨KΈKΝKαKνKπKφKυKλKΝK°KΣKΪKΦKχKοKθKηKΰK’KδKφKτKχK KKΖKΜKKdKqKjKBK7K/KKK K KKKKKK#K K"K KKKKKKKK K K K K K KKKKKKKKK#K$KK K K"K&K&K!K"K$K$K#K K KKKKK"K"K"KKK K#K(K0K3K5K5K4K4K6K;K>K:K3K,K)K+K1K4K2K.K&K"K$K)K/K/K.K*K-K,K8KMKUKKKGKNK6K K$K%KKKKKKKKKKKKKKKK K"KKKK&K6KBKNKNKIKFKK>KFKIKHKHKJK5KLK^KWKOKJKZKbKΌKΣKΞKΠKΝKθKKΙK5K0K/K'K%K*K+K)K'K.KCKZKnKvKsKrKrKpKqKtKsKsKsKsKsKsKsKsKsKsKsKtKrKvKMK!K'K&K"KK#K&K%K%K%K%K%K%K%K%K%K%K%K$K#K$K%K%K%K%K%K#K#K#K!K K K#K2K6K9K3K KKKKKKKKKKK#K&K)K+K,K/K.K4K5K7K8K8K9KK?K?K?KAKCKEKEKYKΗKΰKέKήKΰKœKxK|K}K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K}K}K}K{K{K{KzKzKzKzK{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€K€KKKK€K€K€K€KKKK€K€K€K€KK€KK`KKKGKIe]qΒ(KίKΣKzKcKkKjKkKhK^KœKκK΅KiKeKkKhKlKaKmKΝKίKƒKaKkKiKkKdKeKΈKλK™KcKjKkKmKeKcKΉKκK˜KgKhKgKiK^KxKΨKΟK“K‘KΉKΝKέKζKφKυKςKθKΫKΚK·K›KŒKKΕKmKΝKΫKΩKψKρKηKξKΣK•KξKτKφKςK™K K°K†KVKeKmKfK>K7K*KKK K KKKKK"K K K!K"KKKKKKKKK K +K +K K KKKKK K"K#K K!K%K KKK#K&K&K"K#K$K$K"K KKK K K!K"K KKKK&K+K/K4K6K4K4K5K6K7K4K*K#K!K#K)K0K0K(K"K K%K,K0K0K/K-K,K3K?KEKNKYKVKHKK=K/K KKKKKKKK!K$K"K!K"K&K.K7K=K>K>K;KK>K@KBKEKHKHKIKIKJKOK_K[KSKMKXKZK’KΧKΟKΠKΞKΰKϊKξKTK(K3K.K.K)K-K;KTKjKsKuKrKpKpKsKsKqKrKsKsKsKsKsKsKsKsKsKsKsKsKtKsKtKgK,K%K%K#KK#K&K&K&K&K&K&K&K&K&K&K&K'K&K$K%K&K&K&K&K%K$K$K$K"K!K!K K-K8K7K8K*KKKKKKKKKKK 
K&K(K+K,K/K.K3K4K6K8K9K:K=K=K>K?K?KAKAKAKGKDKŒKΰKέKέKγKΙK}K|K~K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K{K{K{K{K{K{K{K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~KK€KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€K€KKKKK€K€K€KKKK€K€K€K€K€KKK}KZKHKGe]qΓ(KrKΟKΰKŠKcKjKjKkKjKZK‡KγKΚKvKfKlKhKkK`KbKΉKλKšKdKhKiKkKiK`K’KξK±KlKgKhKiKdK[K›KδK¬KK•K₯K·KΔKέKφKπKρKπKνKήKΚK±KKΦKΙKvKfKhKhKaKxKΥKΧKδKKαKυKςKιKςKΕKšKτKρKχKλKK“K}KXKNKhKlKaK:K5K)KKK K KKKKKKKK K!K"KKKKKKKKK +K +K KKKKKK#K%K%K&K%K$K#K!K"K(K'K$K$K#K"K KKK!K"K KKKKKK"K$K'K,K/K2K4K3K5K3K.K'K KK KKK K$K KK$K*K0K0K0K.K.K6KEKPKUKSKPKSKTKVKOKMKIK/K"K$K$K#KKKKK K K KKKKKKK-K:KIKPKOKKKJKFKAKJKMKEK5K%KKKKKKKKK K K"K!K"K*K/K7KAK>K?KBK@K?K?K@K@K@K8K'K/KJKHKIKHKMKNK_K\KTKNKVKXKˆKΣKΞKΠKΞKΦKσKύKƒK&K1K0K8KNKdKrKwKuKsKrKpKrKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsK:K"K'K)K$K#K%K%K%K%K%K'K%K&K(K&K%K&K&K%K&K&K%K%K%K%K&K%K#K$K#K$K#K)K8K8K8K2KKKKKKKKKKKK$K&K)K,K/K0K/K2K5K9K9K:K=K=KK?KAKAK@KEKFKWKΗKαKίKβKβKœKzKK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€KKKKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KKKKKKKKKKKKKKKKKK€KƒKK~K‚KxKPKFe]qΔ(KeKgK·KμK KdKiKkKjKlK^KsKΦKήK‚KeKlKjKkKiK\K‘KνK°KkKhKjKjKkK[KˆKήKΏKwKwK†K•K¨K²KέKσKηKοKχKχKνKάKΔK΄KΰKΑK{KpKlKfK^K~KΪKΤK{KhKoKpKgKnK·KθKΛKεKςKρKιKτK·K’KωKςKωKβKKpK[KOKPKeKiK\K8K5K(KKKKKKKKKKKKK K!K!KKKKKKKKK +K K KKKKK%K)K(K%K"K#K%K(K)K(K#K!K!KKK!K%K#K!K KKKK!K'K(K(K'K'K+K0K3K2K/K*K%K!KK K K"KKKK K&K,K+K-K-K-K3KK3K/K+K(K$K"K%KKKKK K +K KKKKK-K@KMKQKQKLKIKIKLKIKHK>K1K KKKKKKKKK!K!K"K#K'K-K6K;K>K?K@KAKAKAK=K?K@K;K.KKKK%KGKIKIKHKIKLKYK]KUKOKSKWKmKΘKΡKΟKΟKΡKοKKΈK4KIK_KnKwKuKsKsKsKsKsKqKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKrKwKTK"K'K+K(K!K'K'K'K'K%K'K'K%K"K&K'K&K&K'K&K&K&K'K'K&K&K%K$K$K%K#K"K!K1K9K7K8K(KKKKKKKKKKK$K&K(K+K-K/K/K2K5K8K9K:K=K=KK?KAKBKAKEKJKFKŒKαKίKΰKεKΙK€K}K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K}K}K}K~KKKKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KKKKKKKKKKKKKKK€KKKKKKK€K‚KpKMe]qΕ(KkKgK`K’KκKΆKkKfKkKiKmKeKiKΏKθK–KcKiKkKkKkKZK…KήKΓKtKfKoKxKˆKKΈKοKΪKάKνKυKχKσKδKΠKΝKδK―K†KKuKlK^K‘KδK·KlKjKnKoKdKnKΛKγK‡KfKoKqKgKƒKμKΙKλKοKπKιKτK§K¬KψKλKιK»KjK^KWKOKOKcKhKXK6K6K&KKKKKKKKKKKKKK"K!K KKKKKKKKK K KKKKK$K'K#K#K#K'K&K&K%KKKK!K!K!K$K$K!KKKK#K)K+K)K)K(K)K)K'K*K+K(K#K K K!K!KKKKK#K&K*K,K-K+K-K1K=KGKMKNKOKKKNKSKUKSKIK:K0K/K,K-K*K%K#K$K#K"KKKKK K +K KK"KCKGKMKLKKKJKLKMKIKBK4K(KKKKKKKKKKK K"K$K*K1K8KK?KAKBKAKBKGKIKWKΔKαKίKδKεK€K{K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K€KK}K}K}K}K}K}K}K}K~K€KKKKKKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KKKKKKKKKKKKKKK‚KƒKƒK‚KKKKKKKge]qΖ(KkKkKiK_KKεKΙKtKeKlKjKnKiK`KͺKλKͺKiKdKeKiKrKkK•KιKΟKΉKKγKξKχKσKιKέKζKΤK¬KœK‰K|KnKlKΎKέK‰KaKlKmKmK`K€KγKΞKtKfKlKnKhKeK³KκKKgKqKkK›KοKΗKοKξKοKκKσK™K“K―K‘K~KaK^KZKSKLKQKfKgKTK6K5K!KKKKKKKKKKKKKKK!KK"KKKKKKKK K +KKKKKKK!K%K&K'K%KKKK!K%K$K"K!K!KKKK#K&K+K)K(K*K*K*K(K&K$K#K"K#K!KKKKKK!K#K%K)K(K&K)K/K4K;KCKEKGKLKLKMKLKQKQKCK5K.K-K-K1K.K'K!KKK$K"K#K"KKKKK KKK KCKGKLKJKIKLKHK=K/K!KKKKKKKK K!K"K K K$K+K5K9K;K>KK=KDKHKIKIKFK>K3K.K.K1K2K.K'K!KKK K'K&K"K#K$KKKKKKKKK6KIKKKHKDK7K(KKKKKKKKK!K!K 
K#K$K)K/K5K;K=KKAK>KK?KAKBKBKDKEKGKGKYKΘKδKβKγKδK’KyKK~K|KzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K}K}K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€KKKKKKKKKKK€KKKKKKKKKK€K€K€KKKKKKKK€K€K€K€K€K€K€KKKKKKKKKƒK€KKKKƒKƒKƒK€KKKKƒKKK‚KƒKƒKƒKƒKƒKƒK€K€e]qΘ(KfKlKlKjKjK_KhKΎKΰK™KKK‘K΅KΖKΠKιKςKκKνKκKδKKΐK¬KK£KγK¨KdKfKhKiKhKaKšKμK»KoKjKmKmKkK\K›KπKΈKkKiKmKmKeKfKΊKκK—KeKlKmKmKbK‰KζKΙKnKΎKζKΝKσKπKλKνKθK‰KZKUKVKPKBKWKTKQKGKTKdKdKIK6K7KKKKKKKKKKKKKKKK+K'KK"K KKKKKKKKKKKKKKKKKKK$K&K'K%K!KKKK"KKK K!K'K(K(K$K$K!K K!K KKKKKKK$K%K'K'KK K'K(K-K8K;K;K9KK?K=K>K>KAKBK?K8K(KKKKKKKKKKKKKKK)KHKIKHKFKJKBKTK\KWKPKTK[KhKΔKΣKΟKΡKΠKκKύKΡKuKvKuKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKtKsKtKtKsKsKsKsKuKlK2K(K*K+K&K'K+K+K)K)K)K*K+K+K*K*K*K*K*K*K)K)K)K)K)K)K)K)K'K%K&K&K#K*K9K9K9K8K#KKKKKKKKKKK$K(K+K-K1K3K3K3K7K8K;K:K;K?K?K@KCKBKBKDKDKHKGKŽKδKίKΰKηKΡK€K{K~K|KzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}KKKKKK~K~KKKKKKKKKKKKKKKKK€K€K€KKKKKKKK€K€K€K€K€K€K€KKKKKKKKKƒKK€K‚K‚KƒK‚K‚K€KKKKƒK‚K‚KƒKƒKƒKƒKƒKƒKƒK€Ke]qΙ(K¦KlKoKxK†K˜K‘KΆKθKιKΰKλKυKυKνKαKΞK΄KΌKγK§KsKoKkKgKaK]K™KμKΌKlKiKlKlKiK_K†KίKΞKvKgKmKjKkK`K‡KζKΝKrKfKlKkKiKaK£KπKKgKiKmKnKdKwKΧKΥKΨKΪKΧKτKςKιKρKΰKKYKQKQKHK@KXKSKPKFKUKcKbKGK9K2KKKKK KKKKKKKKK#K/K;K=K&KK K KKKKKKKKKKKKK KKKKKK%K#K!K!K$K'K'K&K"KKK"K#K$K!KKKKK K#KKKKK K$K%K&K&K&K"K#K)K1K6K8K9K:K:KK6K1K-K/K0K0K-K)K%K KK$K)K.K.K.K*K)K/K:K(K!K#K!KKKKKKKK#KKKKKKKKKKKK K"K&K+K7K=KAK@K@K?KK@KCKAKBKDKDKHKLKYKΖKγKίKδKιK’KyKK|KzKzKzK{K{K{K{KzKzKzKzKzK{K{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K~KKKKKK~K~KKKKKKKKKKKKKKKKK€K€K€KKKKKKKK€K€K€K€K€K€K€KKKKKKKKK‚K€K€K‚K‚KƒK‚K‚K€KK€KKƒKƒKƒKƒKƒKƒK‚K‚K‚KƒK€K€e]qΚ(KκKΗKΙKήKμKσKφKςKδKΠKΦKίK¬KŽK†KwKlKdKfK΅KθKžKeKjKnKmKlK\K€KβKΟKyKfKkKjKlK_KsKΣKΰK…KdKmKlKnKbKwKΨKήKKcKmKlKjKbKKκKΔKpKhKmKmKgKlKΈKζKΠKίKφKσKηKρKΤKxKVKPKPKEKBKYKRKNKGKXKaK^KCK7K0KKKKK KKKKKKKKK)K5K?KIKAK&K!K!K KKKKKKKKK K +K KKKKKK K#K,K0K.K*K)K)K&K&K&K$K!KKKKKK K KKKKK K#K$K$K#K"K"K$K%K&K'K.K5K9K:KK!K$K$K KKKKKKKKKKKKKKKKK K$K+K1K6K:KDKEKDKBK>K>K?K9K2K#KKKKKKKKKKKKKKKKKKKK#K=KJKHKHKKK:K:K`KYKRKHKQKWK“KΦKΞKΠKΞKΦKφKϋK€KnKuKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKuKvKtKrKwK[K,K-K*K+K&K*K+K+K+K+K+K*K+K&K(K)K+K+K+K*K*K+K+K+K*K(K)K)K)K(K%K%K#K*K8K:K:K6K KKKKKKKKKK K$K'K*K.K0K2K3K3K6K:K;K=K>K?KAKAK@KDKDKDKGKLKFKŽKζKαKβKηKΞKK}K}KzKzKzK}K}K~K|KzKzKzKzK{K}K|KzKzKzK}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KKKKKKKK€K€K€K€K€K€K€KKK‚K‚KKKƒKƒKƒK€K€KƒKƒKƒK‚K‚K‚K‚KƒKKKKƒKƒK‚e]qΛ(KεKθKθKΩKΖK·K‘K‘K‚KlK|KΧKΝKxKbKjKjKlKhK^K’KοK·KiKiKmKlKkK]KpKΣKαK„KcKlKkKkKeKiKΏKμK–KdKkKlKnKfKhKΐKκK“KcKkKkKmKaKzKάKΧKzKfKmKoKhKvKθKΝKεKτKσKεKξKΘKqKSKQKPKKKKKVKRKMKKK]K`K\K=K7K0KKKKKKKKKKKKK'K1K;KGKKKJK.K K!K!KKKKKKKKKKK KKKKKKK2KAK:K0K)K(K*K%K$K%K"KKKKKKKKKKK!K"K K"K#K"K#K%K%K$K%K&K'K+K0K6K8K3K,K)K,K-K.K/K1K)K KK$K*K.K0K-K-K-K3K=KGKPKPKMKMKJK,K"K#K#KKKKK K KKKKKKKKK"K'K,K4K:K?KCKEKDKDKDKCK?K4K'KKKKKKKKKKKKKKKKKKK K!K$K'K'K3KJKHKHKJKDK0KZKZKSKMKKKZKxKΡKΡKΡKΡKΠKξKKΙKsKvKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKtKsKsKuKuKuKtKsKtKoK9K)K,K+K(K(K+K,K+K*K+K,K+K(K)K*K+K*K)K+K,K+K+K+K*K)K)K)K)K(K'K$K!K$K5K:K:KK4K*K&K!K#K#KKKKKKKKKK!K,K4K7K=KEKCKDKFKGKGKBK7K'KKKKKKKKKKKKKKKKKKK K"K#K&K'K&K*K:KRKhKqKQKGKIKHKKK3KDKbKZKSKJKVK\K₯KΧKΟKΡKΟKέKωKφK–KqKwKuKuKuKtKsKsKsKsKsKsKsKsKtKuKtKsKsKsKtKuKuKuKuKuKtKuKuKuKuKuKtKwKbK,K-K.K*K%K*K.K.K.K0K,K+K-K,K,K,K-K.K+K*K+K+K+K+K+K*K*K*K*K)K(K)K'K'K8K:K;K=K'KKKKKKKKKK 
K#K%K)K.K/K1K2K5K9K;K;KK2K*K'K%KKK%K!K!KKKKKKKKK(K1K=KCKEKCKAKBKCKK:KaK[KSKJKRKWK‡KΥKΞKΠKΠKΥKσKK³KpKwKvKuKvKuKsKsKsKsKsKsKsKsKtKvKtKsKsKtKuKvKuKuKuKuKvKuKuKvKuKuKuKuKrK:K+K-K+K'K(K.K-K-K/K-K-K.K.K.K.K.K.K+K*K+K+K+K+K+K+K+K+K*K)K)K)K'K$K2KK>KBKBKDKCKDKDKBKKKHKKδKαKζKκKΥK†K|K}K~K}K}KzKzKzKzKzK}K}KzK{K~K~K~K}K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€K~K}K}K}K}K~K€KKKKKKK€KKKKKKKKKKKK€K€K€K€K€K€K€KKKKKKKKKKKKKKKKKKKKK€KƒK‚KK‚KƒKƒKƒKƒKƒKƒKƒKƒKƒK‚KKKƒKƒKƒKƒKƒKƒKƒKƒKKK‚Kƒe]qΟ(KηKζKμKΟKpK‘KιKΘKwKeKmKkKmKmK_KŒKζKΜKuKeKkKkKnKgKaK―KπK¨KhKjKmKkKkK_K{KάKΦKzKeKmKjKlKcKlKΚKηK‹KbKkKjKlKbKkKΛKζK‹KbKkKjKmK]KKεKΏKœK²KnKkKmKkKoKuKxK^KKKNKJKIKPKRKNKKKQK[K^KQK9K8K1KKKKKK +K KKKKKKKKKKKKKKKK!K!K"K"KKKKKKKKK KKKKKKKK K$K!K#K#K!KK!K%K%K$K#K!KKKKK!K!K K#K&K%K'K'K$K$K$K$K%K$K%K%K$K KK K%K,K.K.K.K,K.K4KAKKKOKIKIKHKEKMKHKCK:K3K,K'K,K'K KKK K#K$K#KKKKKKKK'K1K=KDKEKBK;K1K#KKKKKKKKKKKKKKKKKKKK!K%K)K'K'K-K=KWKhKsKtKqKpKpKrK`KFKIKHKJKCK1KYK]KWKMKMKYKnKΝKKΟKΟKΟKλKύKΣKvKvKvKuKtKtKsKsKsKsKsKsKsKsKsKtKsKsKsKsKtKtKtKtKtKtKtKuKuKsKtKuKuKuKwKQK*K/K0K.K'K,K/K/K/K-K-K.K.K.K.K.K.K-K-K-K-K-K,K*K+K+K+K*K(K)K)K)K'K+K;K=K=K:K#KKKKKKKKKKK#K&K(K-K1K1K4K4K7K8K9K>K>K?KBKBKBKCKDKDKGKJKKKYKΘKζKγKζKιK«K{KK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK}K}K}K~KKKKKKKKKKKKKKKKKKKK€K€K€K€K€K€K€KKKKKKKKKKKKKKKKKKKKKK€KK‚KK€K‚KƒKƒKƒKƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒKƒKƒKƒKƒK‚K‚KƒKƒe]qΠ(KηKζKεKνKKsK|KέKΫKƒKfKmKlKlKnK`KxKΥKέK„KdKlKkKjKjK_K–KμKΏKnKhKlKiKnKaKkKΙKηKKeKkKkKmKdK_K²KξK‘KeKiKjKmKeK^KΆKρK’KiKiKjKmK`KhK€KeK\KgKrKqKkKoKrKwKYKKKNKIK=KPKQKNKDKJK]K]KKK8K8K0KKKKKK KKKKKKKKKKKKKKKKKKK K!K KKKKKKKK KKKKKKKK#K$KKKK#K$K KK#K"KKK"K$K#K"K!K!K!K#K%K%K%K$K#K#K#K%K'K$K!KKK!K)K,K+K,K-K-K-K.K7KCKFKEKCKEKGKJKDK;K7K/K,K,K.K)K$KKK!KKK#K#K KKKKKKKK K6K@KAK7K&KKKKKKKKKKKKKKKKKKKKK%K&K(K&K(K3KMKdKrKsKqKnKpKpKpKpKqKkKKKHKIKIKJK/KOK`KXKPKKKZK_KΆKΦKΟKΟKΞKΰKϊKξKˆKrKvKuKtKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKsKsKtKtKtKsKtKuKuKsKtKuKuKuKuKgK2K.K0K/K)K*K/K0K/K.K.K.K.K-K-K.K.K.K.K.K.K.K,K+K+K+K+K+K)K)K)K)K*K(K5K>K=K=K.KKKKKKKKKKK"K&K)K-K0K1K3K4K7K7K9K=K?K?KAKAKAKCKCKDKGKGKLKIKŽKηKγKζKκKΦKˆK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK~K~K}K~KKKKKKKKKKKKKKKKKKKK€K€K€K€K€K€K€KKKKKKKKKKKKKKKKKKKKK€K€KKƒKK€K‚K‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]qΡ(KηKηKηKεKλKΡKtKlKΙKθK–KfKlKlKiKlKcKjKΓKκK˜KdKjKlKkKkK\K€KΰKΤKyKeKmKiKmKdKcK΅KπK€KfKjKlKmKhK\K›KοKΊKjKgKjKlKgKZK˜KΘKvKgKjKiKnKcKIKaKbKgKpKoKjKnKsKvKSKJKNKGK;KSKQKOKDKKK\K]KJK8K:K/KKKKKKKKKKKKKKKKKKKKKKKK*K*K K!K!KKKKKKKKKKKKKKKKKKK$K&K%KKK#K&K$K(K(K%K&K%K"K!K!K"K!K K#K'K&K'K&K#KKK!K"K(K*K*K)K)K)K(K-K2K7K9KKAKDKDKDKDKHKKKLK\KΜKιKηKθKνK­KyK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKKKKKKKKKK€K€K€K€K€K€K€K€K€K€K€KKKKKKKKKKKKKKKKKKKKKKKKKKKK€KƒKƒKƒK‚K‚KK€KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]q(KζKηKηKεKεKλKKxKaK±KξK­KhKjKkKjKnKfK`KͺKπK«KhKkKlKjKlK`KqKΟKδK‰KeKlKiKmKjK[KšKπKΊKlKgKlKkKlK[KKζKΜKmKhKkKlKlKYKOKbKjKiKkKkKmK\KcK`KiKqKpKlKoKtKrKPKJKNKFK@KUKPKOKDKMK\K^KGK8K9K.K"KKKKKKKKKKKKKKKKKKKK K2KCKNKDK(KK#K!KKKKKKKKKKKKK K KKKK"K"K!K$K-K8K6K-K+K+K'K&K%K!KK KK K!K"K&K&K"KKK$K*K,K*K)K(K&K%K&K'K-K0K3K5K6K9KK=K=K-KKKKKKKKKKK K%K)K-K0K3K5K5K6K9K:K>K>K?K?KAKCKCKCKDKGKHKLKKKŽKιKηKηKμKΥK†K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~KKKKKKKKKKKKKKK€K€K€K€K€K€K€K€K€K€K€KKKKKKKKKKKKKKKKKKKK€K€KKKKKK€KKƒKƒKƒK‚K‚KKKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]qΣ(KηKζKηKηKηKεKκKΟKyK]KKμKΐKqKgKlKkKmKhK^K˜KξKΔKqKgKmKlKnKeKeKΊKοK 
KeKlKlKkKjKZK„KηKΡKyKhKiKkKlK[KfK‹KoKiKlKiKkKkK_KLK[KiKjKjKkK[KeK_KdKhKiKgKkKtKoKNKKKNKDKCKVKPKNKBKRK]K]KEK8K6K,K&KKKKKKKKKKKKKKKKKK"K9KMKSKRKNKLKCK&KKKKKKKKKKKK K +K K KKKKKK#K-K:KAK@K8K0K-K,K'K"K K K K K!K!K!KKKKK&K*K,K,K,K+K&K!K"K#K%K*K-K/K2K5K5K5K3K1K.K/K2K3K*K$K$K!K K"K#K"K%K,K3K;KAKCKEK:K&K"K$K"KKKK KKK;KKKKKKKKKKKKKKKK#K(K'K'K'K0KEK[KmKtKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKaKFKLKLKJKEK0KXK\KUKMKMK[KjKΘKΥKKΣKΠKηKόKδKKtKvKuKuKvKuKsKsKsKuKuKsKsKsKtKvKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKwKkK6K1K4K1K*K,K0K/K0K0K0K0K0K0K0K0K0K0K0K/K-K.K-K.K.K-K-K.K,K+K)K(K)K'K/K>K?K?K8K!KKKKKKKKKKK#K(K,K.K1K6K3K4K9K:KK@KBKAKBKCKDKFKGKLKMK]KΛKλKηKηKοKKzKK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€KKKKKKKKKKKKKKKKK€K€K€K€K€K€K€K€K€K€K€KKKKKKKKKKKKKKKKKKKK‚K‚KKKKKK‚KƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]qΤ(KμKθKεKεKβKήKΧKΦKKoK^K…KαKΥKKhKmKkKmKmK_K‚KγKΧK~KfKlKjKkKhK]K KρKΆKhKgKkKiKnK]KnKΡKΎKhKiKjKiKlKaKEKSKdKhKiKiKiKkKdKPKYKhKjKiKZKbKjK‘K­K­K KK…KjKKKKKLK?KGKVKPKLKCKVK\K\KCK9K6K)K)KKKKKKKKKKKKKKKK&K:KOKVKTKUKSKSKPKGK/K!K K"KKKKKKKKKK +K KKKKKK$K/K5K;K=KAK=K5K.K&K"K K K"K"K$K!KKKK K(K*K)K,K,K(K#K$K"K"K%K%K&K(K*K+K/K1K.K0K2K4K5K/K)K%K KK K!KK"K*K/K5KKAKAKBKBKEKEKCKFKJKNKJKKδKίKβKζKKƒK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K~KKKK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KKK€KKKKK€K€K€KKK‚K‚KKKKKK‚KƒKƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒe]qΥ(K…KΫKΩKΝKΑK΄K£K‘KKiKjKbKuKΠKεKŽKcKmKlKjKlKbKoKΠKζKKeKlKiKkKmK[K‹KθKΜKvKgKjKjKlKaKSK_KeKgKjKjKjKjKiKPKNKbKhKiKjKiKlKgKQKSKgKdKfKK¬KωKωKφKμKυK¦K_KLKLKMK>KJKVKQKLKDKXK\KWK>K9K6K'K)KKKKKKKKKKKKKKK-KEKMKSKVKYKYKXKHK3K"K K KK!K KKKKKKKK K +K +K KKKKK'K+K0K8K?K=K5K*K!KK K!K!K!K!KKKK K&K%K(K(K(K$K#K KK#K$K$K%K&K%K%K)K-K,K.K1K3K4K.K'K"K K K K K"K%K*K1K7K;KK@K5K"KKKKKKKKKK!K%K)K+K/K1K4K5K7K6K9K9K>K@KK:K5K)K'KKKKKKKKKKKKKKK7KLKQKVK^K]KHK3KKKKKKK K K!KKKKKKKK K +K KKKKK$K+K2K6K3K'KK"K!K K"K KKKKK#K'K'K&K$K%K%K KKKK"K$K$K#K$K'K(K(K,K/K.K/K0K,K&KKKK!K K$K(K.K0K3K7K8K=K@K@K?K>KK8KaK\KUKKKQKYKxKΟKΠKΟKΠKΟKκKόKΥKxKwKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKmK9K1K2K3K0K,K2K1K2K2K0K/K0K/K2K2K1K0K0K0K0K0K/K/K/K.K-K/K.K.K,K*K+K)K/K>K?K?K=K'K KKKKKKKKKK!K&K+K,K/K2K3K4K4K5K9K=KK;K4K(K&KKKKKKKKKKKKKK$KCKSK\KTKEK.KKKKKKKKKK!K KKKKKKKKK K KKKKK$K)K)K#KK K K$K$K!KKKK K!K$K(K'K%K"K"K"KKKKKK"K#K$K$K&K*K-K-K/K.K-K'K KKKK K"K&K,K1K5K5K3K6K8K;K?K@K;K3K$KKKKKKKK#K$K#KKKKKKKKKKK$K'K(K(K*K6KMKdKqKsKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKpKpKqKqKpKsKaKGKIKHKJKGK1KXK_KXKPKLKYKdKΐKΣKΟKΠKΞKβKωKμKˆKuKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKyKLK.K3K3K0K+K3K3K3K3K0K0K0K0K-K.K/K0K0K0K0K0K0K0K0K/K-K/K.K/K,K+K+K+K*K:K@K>KAK0KKKKKKKKKKK K%K*K+K/K1K2K1K7K:K=K=K=K@KBKBKAKAKFKHKFKGKLKWKcKΙKβKΪKΨKίK¦KzKK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K}K~K}K}K}K~K€K~K}KK€KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€KK€K€K€K€K€K€K€KKK€K€K€KKƒK€KK‚K‚K€K€KK‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒK‚K‚K‚K‚KƒKƒKƒKƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒe]qΨ(KeKMK3K;KPKRKQKQKPKPKPKSK‘KιKΚKšK₯KβKΙK|KjKlKkKmKlKZKvKŒKgKhKjKiKiKjKeKOKOK_KhKhKiKiKjKlKYKEKVKeKiKjKiKiKlKaKLKœKΙK}KgKlKlKoKbKΪKίKΰKKωKοKυKηKKWKLKNKKK:KQKSKOKEKFK[K\KSK;K;K4K&K'K KKKKK KKKKKKKK3KRKQK>K(KKKKKKKKKKKKKKKKKKKKKKK +KKKKKK!K KK K K"K!KKKK%K)K)K&K%K&K$K%K%K$K#KKKKKK"K$K%K)K*K,K*K'K'K#KKK KK!K$K)K+K1K4K3K5K5K5K6K7K9K7K*KKKKKKKKKKK 
K$K#K!KKKKKKK"K'K(K'K)K0KDK[KlKtKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKqKpKsKsKsKtKjKJKHKIKHKIK1KNK`KYKSKHKWKZK¦KΨKΡKΠKΞKΨKυKωK’KrKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKtKzK`K/K2K3K2K,K/K3K3K3K3K3K3K1K-K2K2K0K/K/K0K0K0K/K/K/K/K/K0K.K-K+K)K&K(K1K?K?K@K9K#KKKKKKKKKKK$K(K+K,K+K.K3K:KAK=K8K=K?K?KBKBKEKGKFKFKGKLKUKRK“KαKάKΰKγKΠK„KKK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKK}K}K}K~K€K~K}K}K~KKKKKKKKK~K}KKKKKKKKKKKKKKKKKKKKKKKKK€KƒKKKKKKKKKKKK‚K‚KƒKKKKKK€K‚K‚K‚KƒKƒKƒKƒKƒKƒKƒK‚K‚K‚K‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]qΩ(K/KMKFK0K9KOKQKPKQKQKQKRKQKKπKμKΨKΥKδKΓKsKkKlKlKjKlKZKDKUKeKhKjKiKhKkKhKPKKK\KeKiKjKiKiKkK^KEKPKcKiKjKjKiKlKhKXKhKqKcKeKmKmKmKΝKΊKK·KΞKΚKΡKΜKzKSKMKNKIK;KSKSKOKDKGK\K]KRKKBKAKBKEKGKGKFKGKNKRKTKfKΛKεKβKβKεK«K|K€K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK}K}K}K~KK~K~K~K~KKKKKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKK€K‚K€KKKK€K€KK€K€K€KK‚K‚K‚KK€KKK€KK‚K‚K‚KƒKƒKƒKƒKƒKƒKƒK‚K‚K‚K‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]qΪ(KK4KEK;K1K5KMKQKQKTKRKTKUKVKwK{K€K‹KxKlK^KcKiKjKjKiKlKbKLKQKbKiKiKkKkKiKjKVKIKWKbKhKjKiKiKlKcKHKJKbKiKdKkKlKlKkKTKTKƒK„KkKmKcKhKuK‹K€KwKtKsKxKvKPKMKOKGK;KTKRKPKCKKK]K_KOKKVKhKrKtKpKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKrKqKpKpKqKtKrKqKrKrKsKsKtKaKFKIKHKIKEK/KYK^KYKQKKKWK`KΌKΦKKΣKΠKέKχKρK•KtKyKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKdK5K5K4K0K-K1K3K2K3K3K3K3K4K3K3K3K3K3K3K4K2K1K3K2K1K0K0K/K/K.K-K*K,K*K.K=K@K?K@K+KKKKKKKKKKKK(K8K@K@K-KKK,K9KK?KAKBKEKGKFKFKFKNKQKRKWKcKΛKεKαKβKηK­KzKKKK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKKKKKK~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€KKK‚KƒK€KKKKKK‚K‚K‚K‚K‚KƒKƒK‚KƒKƒKƒKƒKƒKƒKƒKƒK‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]qά(KjKVKEK:KHKFK/K:KXKbKoK€K‘K’K³K½K«KtKdKfKeKdK`K\K]K`KbKcKhKiKPKKK_KfKgKkKkKiKlKdKOKNK]KfKjKkKkKmKkKTKEKΑKΡKqKgKmKlKoK^KwKΰKΨKΊKφKΟKυKτKτKνKKΊKdKLKMKPKBKDKVKPKMKBKQK\K^KGK:K@KBK'K&K&KKKKKK KKKKKKKKKKKKKKKKKK#K1K@KMKSKMKGKGK>K'KKKKKKKKKK K K K +K KKKKK!K$K(K/K0K.K.K+K*K(K)K)K&K%K&K&K*K'K%K KKK K!K#K%K%K)K+K+K+K)K+K-K.K/K/K+K#KKKKKKKKKKKKKKKKKKKKK"K'K(K*K*K4KIKbKqKsKoKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKrKtKqKpKpKqKtKqKpKpKqKsKsKuKiKJKIKIKHKIK:KRK`KYKQKIKTKWK KΩKKΣKKΦKςKϊK²KrKyKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKsK@K3K5K7K3K.K3K2K3K3K2K4K5K3K3K3K3K3K2K3K0K/K3K2K/K/K0K0K0K.K-K+K+K+K+K8K?K>KBK8K KKKKKKKKKK&K;KAK;K+KKK&K4K9KK2K"KKKK$K-K6K6K9KKNK0K'K*K KKKKKKKKKKKKKKKKKK*K6KK%KKKKKK1K?K=K-KKKKKK&K2K7K8K8K:K=KAK@KCKCKFKGKGKGKGKKKPKTKTKbKΛKεKαKαKζKͺKzK€KKKKK€K~K}KKK}K}K}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KK€K‚K‚K‚KKKKK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚KƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒK‚KƒKƒKƒKƒe]qΰ(K{KͺKKSK_K@KKuKπKΑKLKŠKνKκKηKιKιKιKζKλKΩKzK|KΠKιKοKοKνKνKΚK‡K_K€KΘKξKπKηKζKάK΄KsKiKkKlK`KEKOKbKiKlKlKlKnKkKTKPKdKeKbKkKqKgK¦K£KsKŒK£K¨K¦K°K|KYKKKMKJKBKQKUKPKJKOKZK[KXKAKK:K'KKKKKK"K*K3K6K4K5K:K;K=K?KCKCKEKGKGKGKGKIKLKRKTKQK•KζKίKέKίKΜK†K€K€KKKKK~K}KKK~K~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€K€K€KK€KK‚K‚KK€K€K€K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚KƒKƒKƒKƒK‚K‚K‚KƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒKƒe]qα(KΐKδK€KK7KOK@KKzKοKΖKSKŠKνKκKθKιKιKιKεKλKΪK~KcK‚K¦KΛKΟK™KfKsK΅KθKπKλKιKοKξKΙKšKdKaKeKhKlKdKGKIK_KhKfKjKlKlKnK`KSKŽK€KzKnKjKkKwK­KœK…K|KKvKxKUKJKLKGKEKSKSKLKLKTK\K\KUK?K=KDKPK;K#K&K'KKKKKK 
KKKKKKK"K'K)K(K*K,K,K,K-K/K.K.K/K0K0KAKNKaKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKrKrKrKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKsKtKtKtKtKtKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKwKwKwKwKxKxKxKxKxKxKwKxKxKxKxKxKxKxKxKxKzKhKFKIKJKIKKK7K6K^K[KXKPKIKZKZK§KΨKΡKΣKΣKKθKφKθKŠKzK~K{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}KbK>KBKBK@K9K9K=K?K@KBK@K>K?K>K>K?K?K?K?K=K=K?K>K=K=K=K=K=K:K:K:K8K8K4K/K5K4KDKNKKKLKHK.K&K&K%K#K"K KK K!K(K0K3K5K7K:KK?K?K>KAKBKEKEKHKGKIKLKKKKKKKLKKKNKTKXK\KPKAK7K7K4K*K-K1K"KK"KKKKKKKKKKKK*KcKKŒK|K™K―KͺK·KΒKΖKΡKΓK„K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K†K†K…K…KˆKˆKˆK‡K†K‡KˆKˆKˆKˆKˆKˆKˆKˆe]r(K,K+K4KKK€KΞKΠKŽKtK~K‚K‚K€K‘KΔKΟKΜK§KΦKψKŽK K/K6KbKqKpKqKpKqKwKΨKςKνKνKξKνKοKλK’KqKvKqK₯KυKΌKΩKωK‡KjKΫKξKζKλKνKοKΫK‘KVKgKrKoKrKeKVKhKZKEKGKHK>K7KVKQKJKBK@K[K^K]KRKMKMK6K,K-K*K*K)K'K&K"K&K4K2K/KKK K +K KKKKKK#K%K)K)K)K+K.K.K.K.K0K0K0K.K1K3KCKPKbKrKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKpKqKtKsKtKtKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKvKtKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKyKyKyKyKxKxKxKxKxKxKyKxKxKxKxKxKxKxKxKxKyKqKKKIKLKIKJKAK/KWK\KXKQKHKVKUK‰KΦKΡKΣKΣKΡKΰKτKτK’KxKK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{KpKEKBKCK>K7K7K?KBKBKBK@K>K>K?K?K>K>K?K?K=KKKAKBK?K@KAK?K>K>K>K>K?K?K>K>K?K?K>K>K>K=K=KKAKBKDKEKGKGKEKGKHKIKJKLKKKJKOKQKYKPKDK>K:K/K-K9K7KMK4K%K&K(K#KKKKKKKKK KMKLKtKK›KŸK«K¬KK₯K€KΊK K~KK„K†K†K‡K†K…K…K„K‚KƒK„K„K„K„K…K…K…K…K…K…K…K…K„K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K‡K‡K„K…K‡K†K…K‡K‡KˆKˆKˆK†K…K‡KˆKˆKˆKˆKˆKˆKˆKˆe]r(KKK@KBK@K>K?K?K>K?K?K?K?K?K?K?K?K?K=K=K=K=K=K;K9K9K6K4K5K3K>KLKKKLKLK6K'K(K'K%K$K#KKK!K&K-K0K7K8K9K;K?K@K>KBKBKCKEKGKFKGKHKIKHKIKLKLKLKLKJKUKTKIKKAKAKAKBKAKAKAKAK?K?K?K?K>K?K?K?K>K=K=K=K=K=K=K;K8K9K6K4K5K4K7KIKKKKKMK?K(K(K&K$K#K$K#KKK$K,K/K2K6K9K;KKAKBKCKEKGKGKGKHKIKKKLKKKKKLKKKOK\K^KGK7K8K-KK/KAKDKMK$KK$K(K(K%KKKKKK8K‘KΓKκKKKχKσKοKθKξKτKγKKΡKΨKΞK”K}KiK\KWKVK^KcKfKjKtKwKtK|K†K…K„K„K„K„K„K„K‡K†K‡K…KƒKƒK„K…K…K„K„K…K…K…K…K…K‡KˆKˆK†K„K†KˆKˆK‡K„K†KˆK‡K‡K…K…KˆKˆKˆK‡K‡KˆKˆKˆKˆKˆKˆKˆKˆKˆe]r +(K2K;KPKˆKΰKαKΡKΘKΟKΛKΌK±K˜KK]KpKjK•K΅KwK+K1K0K2K1K^KtKpKtKrKpKxK»KσKνKνKνKνKξKμKυKšK[KΡKγK’KpKfK'KFKΘKνKzKIKžK²KŠK[KLKNKHKDKHKLKRKYKIKCKCKFK5KEKSKMKIKK>K?K?K?K?K?K?K>K=K=K=K=K=K=KKK?K?K?K?K?K=K=K=K>KK)K)K(K%K"K"K!KK#K(K+K.K1K6K:K;KCK@KAKEKDKDKCKEKGKHKIKJKKKKKMKNKLKJKQK^KLK8K=K.KK/KOKIKQK>KK,K4K/K"KK"K1K†KΝKυKδKδKίKΛKΑKΔKΜKΣKΨKΦKΦKάKαKξKυKσK‹KtKΛKΛKΓKΰK½KŠK„KK_KK +KKYKŠK‡K„KxKhK^KMKCKUK^KnKxK~K‡K‰K‡K‡K‡K„K…K…K„K„K„K…KˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆK‡K‡KˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆe]r (KΉKΦKΠKΝKμKεKΟKKKΠKΧKΤKΠKΏKK3KlKκKνKmK$K/K5K7K3KMKsKqKsKtKΔKΩKΒKΎKΘKΡKΙK¨KΝKΰKΰKgK`K{KoKrKrKpKsKcK%K KVK`K:K.KGKPKMKLKHKFKKKEKCKIKIKEKCKEK4KLKRKMKJK:KMK^K[K]K^K[KOK2K(K$K$K'K%K#K$K!K-K5K5K,KK K KKKKKK K%K)K.K1K1K3K5K7K7K4K6K9K8K7K9K9K;KLKTKeKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKsKsKsKtKuKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKwKxKxKxKxKxKwKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK|K[KGKKKJKGKJK/KCK^K[KUKMKLKYK]KΊKΨKΡKΣKΣKΡKιKσKγK†K|K~K}K}K|K|K|K{KzKzKzKzKzKzKzKzK|K{KzK{K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KvKFKAKDKCKCK;K@KBKBKBKBKBKAKBK@K@K@KAKBK@K@K?K>K?K?K?K>K=K;KKKMKLKKKLK:K)K'K%K&K%K$K#K K 
K'K+K.K0K5K:KK?K?K?K?K?K>KKCK>K@K;K@KCKAKBKBKBKBKAKCKAK@KBKBK@KBKAK?K?K?K?K?K?K>K=K=K=K:K9K8K8K8K5K@KMKLKNKMK5K(K)K&K%K$K#K K!K$K'K.K1K5K:KK?K?K>K?K?K=K=K;K8K8K6K5K5K9KLKMKKKMK?K)K)K)K&K%K&K#KK!K'K+K/K3K8K:K=K>KDKGKCKBKBKDKGKGKGKDKFKMKKKMKMKMKQKKKIKSKiK˜KΔKϋKΞK₯K΄KΎKΞKεKσKφKςKόKιKάKιKψKϋKϊKζK―K K΅KΎKΎKΒKΗKΨKφKσKβKίKεKψKKKAK:K)K K K#K)K7KGKfKK”KkKIK%KKKKKKKKK6KKKVKeK€K‘K“KK”KŽKŠK†K„K„KˆK…KˆK‹K…KˆKŠK‰K‰K‡K‡K‡K‡KˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆe]r(KwK‡KΏKιKΪKΌKΕKΛKΗKΒK°KdKK KKmKκKσKpK!K/K.K/K2K3K6KgKuKvKΡKπKνKοKλKνKνKμKμKιKσK†K[KΠKΐKWK9KBK/KUKsKoKpKqKlKIKBK'K-KCK0K$K3KIKDKGKAKDKCKCK?KDKNKLKGKBKOKXKZKZK[K\KbKXKRKaK^K]K^K`K_K_K_K_K^K^K[KKK/KKKKKK!K%K*K,K0K3K5K5K9K8K7K:K:K:K:K:K=K;KIKrKSKfKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKtKtKsKsKsKtKtKsKsKtKtKtKuKuKtKtKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKuKuKuKvKxKvKuKvKvKvKvKvKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKxKxKxKxK{KbKGKIKGKGKJK7K5KZK[KYKRKGKUKTK•KΪKΡKKΠKΞKίKσKφK«KyKK~K}K}K}K}K}K{K{KzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K|K€KjKBKDKCKEK@K8KBKCKBKBKBKBKAKBKBKAKAKAKBKBKBKBK@K?K?K?K?K?K?K=K=KKDKHKDKBKBKDKFKFKGKGKJKLKKKLKMKOKIKGKaK‘KΤKΝKΡKόKλKšK€K³K²K°K·KΝKεKχKυKηKδKωKύKμKυKδK§K˜K­K½K½KΌKΖKΥKυKθKΦKαKοKK’KK0K;K3K,K&K&K"KKK'KgK›K«K₯KƒK'KK KKK K K +KK"K,KKKcKzKK€KœKK‡K‚KxKzK}KmKmK‰KKKK„KŠKŒK‹KˆK‡KˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆe]r(K‘KšKΉKK·KŸK―KΆKΑK€KUKKK KK‡KοKβKJKK*K-K0K2K6K9KbKsKKέKσKΎKΦKρKκKλKλKλKλKπKtK[K…K^K>KK?K=KK0K#K1KGK>KBKDKCK>KEKNKJKFKDKPKXKZKWKEKPK`KQKVK_K]K^K^K^K_K`K`K`K`K_K^K^K\K]KaK`K\KOK@K1K+K,K0K4K6K9K:K;K;K=K=K=K=K=K>K=KJKqKTKhKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKtKtKtKtKuKuKtKtKtKuKuKtKtKuKuKuKuKuKuKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKwKuKuKuKvKwKwKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKyKyKyKyK{KvKOKHKHKIKJKJK+KMK\K[KWKKKMKWKgKΗKΥKKΠKΠKΡKμKφKΰK†KKK}K}K}K}K}K}K}K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K€K[K@KDKDKDK=K=KCKCKBKDKCKBKCK@K9K>KBKAKBKBKBKBKBK@KAKBK?K?K>K=KKBKGKHKEKCKCKCKDKGKFKHKKKLKJKJKKK`KK–K”K‘K“KKηKώKφKKŒKžKK±K΄K²K¨KKΛKΪKεKοKύKοKΝK΄KΰKΙKœK§K±K―K»KΓKίKςKΩKεKόK·K(K K KKK%K'K(K'K&K$KKKKBK†K«K KpKLK4KKKKKKKK K +K KK4K‹KΉK£K“KK‰KtKaK}K·KΈKKfKQKcKƒKyK€K‰K‰KˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆKˆe]r(K KUK}KrK_KKKQKRKK KKKKK"K}K‘KxKKK"K#K*K-K0K2KYKpK“KβKΫK„KnKŠKΥKνKδKήKΧKͺKIKRKHK\KpKHK2K2K0K7K*KJKtK^KAKCKkKcK!KK2KJK8K!K#K>KGKDKBK3KCKNKIKGK>KJKZKXKSKK*K8K=KXKdKaK]K\K\K_K`K`K`K`K`K`K^K\K\K[K\K]K_KaK[KQKDK;K6K5K8K:K=K=K=K=K=K=KKIKlKSKhKrKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKvKvKvKvKuKuKuKvKvKuKuKuKuKuKuKuKuKuKuKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzK{K{KzK{KzKxKXKHKHKJKLKNK0KBK]K[KYKOKNKXKYKKΨKΡKΠKΠKΞKγKσKρKšK{K€K}K~K}K}K}K}K}K~K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKkKCKCKDKCKAKK?K?K=K;K9K:K:K9K9K4K?KNKOKNKMK6K)K+K)K&K&K&K"K!K%K)K,K2K6K8K=K@KEKHKFKCKDKDKDKGKFKHKKKLKKKQKŠKK€K’KœKšKšKšKΌKμKύKφKΟK‘K”KŸK£K­K²K₯K’KΓKίKδKτKKθK±KΌKιKΝKKžK¦K«KΐKΐKκKλKγKόKΌK1KK KKKKKK"K$K"KKKKK2KbK¦K•KVKHKRKEK$K KKKKKK0K6K&KGK¨K·K­K°K©K–KoKcKK·K©KœK–K‡K₯K­K„K„K‡K‡KˆKˆKˆKˆK‡KˆKˆKˆKˆKˆKˆe]r(KKŒKΙK KOKEKLK'KKKKKKK3KmKlKBKK!KK 
K$K'K0K1KWKrK…K±K£KvKtKqK‚K΅KΎK£KƒK\KBKOKFK_KuKlKFK2K'K(K5K%KMKZKAKHKkKvKaK$KK[KgKKKCKEKBKAK-KDKPKIKFK9KHKVKYKQKKFKHK@KCKIKSK[K^K_K^K_K_K`K`K_K]K]K]K_K_K_K^K\K\K]K^K_K\KVKIK>K;KK@KCKDKBKBKDKCKCKCK@KAKBKBK@KBKBKBKBKBKBK@K>K?K?K>K>KK>KHKhKSKiKvKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKtKuKuKuKsKsKuKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKxKxKyKxKuKwKyKxKxKuKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KyKxKzKzKzKyK{KlKFKHKIKHKJKAK0KXK\KYKRKKKUKUKuKKΣKΠKΠKΞKΧKοKυKK~KK~K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K|K€K^KBKDKCKK?K?K?K>K=K;K:K6K5K9K7KFKNKNKMKKK5K)K)K(K%K$K$K!K K$K*K-K3K9K;K=KAKHKIKIKGKDKDKGKFKIKJKIKgK‚KxK|K‚K‡K•K—KK—KœKΦKυKσKπKτKξKŸKKƒK†K”K§K§K€KΠKίKάKϋKΩK£KΈKΆKδKΓK€KœK΅KΏK΅KήKζKKwK@K K K +K KKK KKKKKKKKK +KK^K›KEKK K KKK&K9KŠKΐKͺKŠK'KKCKžK˜K‰KžKͺK§K’K­K±KK¬K”KsK‘K–K{K‰K₯K—K…KˆKˆKˆKŠK‰KˆKˆKˆKˆKˆe]r(KrKέKυK†KKKKKKKKKKK}KηKΨKLKK%K$K!K#K%K'K,KHKqKOKBKaKvKsKsKsKsKeKHKGKAK@KJKFKgKvKpKrKoKHK0K)K/K.KJKvK‹KuKpKoKtKdK1K4K&K;KEKBKBK>K.KMKNKIKEK8KLKWK[KMKK+K@KRK\K\KXKRKHKAKCKKKVK\KaK]K\K\K]K]K]K`K`K_K_K_K_K_K_K^K\K]K]K^K\KUKOKFK>K:K;K=KHKfKSKjKuKtKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKsKuKuKsKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKxKuKvKxKwKuKxKxKxKxKuKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzK{KuKMKHKHKIKIKHK.KPK\KZKWKNKOKWKcKΓKΥKKKΟKΠKλKφKηKK~K~K|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K€KqKDKCKEKAK@KK>K?K?K>K=K;K9K;K:K8K6K=KNKNKMKOK?K)K+K*K(K%K%K$K"K$K'K,K0K8K;K=K@KDKHKIKIKEKEKGKFKIKHK[KwK}K{K{KƒK„KŒK“K—K˜K›KΏKσKυKξKνKεK«K†K…K~K|KŒK€KͺK§KΣKΪKΩKυKΈKKΎK΅KήK·K~K›K³KΈK·KξKΐKKDK+KKK +KKKKK KKKKKKK K +KKlK}KK KK KKK K+KwK¬K³KfK +KK`K’KKK¦K¦KKšK«K¨K§K©K‡KqK–KKrKlKœK˜K‡KˆKˆKŠKŠK‰K‰K‰K‰K‰e]r(KΆKβKπKfK KKKKKKKKK!K₯KτKΤK6KK$K#K%K&K&K)K*KDKkKIKGKhKtKrKsKsKrKuKfKJK:K@KHKIKnKuKrKpKtKmKEK-K+K0K„KίKΈKoKsKpKpKuKcKGK5KGKCKBKBKKEKcKSKjKuKsKsKsKsKsKsKsKsKsKsKsKsKtKtKsKsKtKtKsKsKsKuKuKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKxKxKvKvKyKwKuKwKxKxKxKvKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKyKyKzKzKzKzKzKzKzKzK|KVKGKJKMKOKTK4KBK\K[KXKOKNKYKVKͺKΩKΡKKΠKΝKΰKτKτK¦KzKK}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K|KQKAKEKEKEK=K@KDKCKDKDKDKCKCKDKDKCKBKBKBKBKBKBKAKAKBK?K?K?K?K>K=KK@KBKDKHKIKIKHKFKGKEKSKzK‚KK{K{K„K‚KˆK‘K”KœKŸK£KΗKγKξKρKγK K•KK„K|K|KˆK£KͺK¦KΚKΡKέKξKΉK·KΒKΊKΫK»K€KK―K©KΞKλKŒKGK0KKK +KKKKKK K KKKKK KKKK^K7KK K K KKKKK^K’K€KMKKK}K KK˜K©K™K“K€K€K₯K¦K‘K‚K‰K›KK‰K₯K§K’K‡KˆKŠKŠKŠKŠKŠKŠKŠe]r(KΚKιKβKCKKKKKKKKKK/KΐKτK΄K$K$K$K$K%K(K)K'K)K?KaKDKIKkKrKoKsKsKpKpKsKiK>K@KFKNKqKtKsKtKpKrKmKBK-K7KΆKK§K@KuKqKpKrK_KEKEKIKAKAK?K8K4KPKKKGKCK9KPKVKXKDK*K+K(K&K"K K&K5KJKXKXKZKXKQKFKCKDKNKYK^K_K]K]K_K`K`K`K_K`K`K`K^K\K_K`K_K_K`K^K^KZKUKUK`KUKkKwKtKsKsKsKsKsKsKsKsKsKsKsKuKvKtKsKvKuKsKsKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKyKxKxKyKyKyKwKuKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzK|KbKJKRKSKQKXK=K5K[K[KVKRKNKXKTKŒKΨKKKΠKΞKΨKοKψKΒKzKK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKaKAKCKBKFKAKKAKBKJKLKKKGKEKFKPKvK}KK„K~K{KƒKƒK‡KK–K›KžKžKKΕKΠKΰKιKΉKžK K™KŒKƒKKˆK¦K¦K˜K²KΖKΰKσKΕKΌKΓKΉKΟKΘK‡KˆK‘K³KοK™KHK7K"KK KKKKKKKK KKK KKKKKKK +K K 
KK-KWK7K!K5KvK‘K™KLKK2KŽK„KvKžK‘K“K˜K“K‘K€K₯KK‰KKUKdK`KYK•K“KˆKŠKŠKŠKŠKŠKŠKŠe]r(KΝKνKΘK)KKKKKKKKKK@KΚKΰK‚KK%K"K"K$K%K'K)K)K;KUKEKLKoKrKrKsKsKrKrKrKuKhKNKCKSKtKsKtKsKrKpKsKnKAK>KΝKώK–KKHKsKtKqK[K@KNKIK@KAK@K4K9KRKJKGKAK:KSKVKYKFK0K1K,K(K'K&K!KK!K.KIKRKYK^K]KZKRKHKAKEKQK\K`KbK`K^K`KaK`K`K`K_K^K_K`K`K`K`K`K^K]K]K]K^K[KeKoKrKtKtKsKtKtKsKsKsKsKsKsKuKuKtKsKtKtKtKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKwKwKwKwKwKwKxKxKwKwKxKxKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKzKzKzKzKzKzKzKzKzKzK}KmKOKTKTKSKUKHK-KVK[KXKSKMKTKWKtKΡKΤKΡKΠKΟKΣKνKτKήK†KKK~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K~K~K~K~K~K~K~KKrKEKCKDKCKCK;KDKDKCKDKCKCKDKCKCKDKDKCKDKCKCKCKBKBKBKBKBKBK?K>K?K=K=K;K9K7K8K7K:KJKNKMKOKHK/K)K)K'K'K&K%K"K!K%K,K1K6K:K>KAKCKHKKKJKGKFKFKoKƒKwK}K†K€K{KK…K‡KŒK‘K˜KšK›KŸK³KΐKΎKΜKιKΫKΆK‘K™K•K‹K…KK©KœK‰KœK¬KΦKψKΟKΏKΕK΄KΚKΡK’K‘KΊKΫKvKEK>K&KK KKKKKKKKK +KKK K KK KKK K +KKKKJK•K†KYKvK–KKK‰KyK‰K’K’K—K™K—KKŽKKK—KKwKvKSKKK0K;K„KŸK‹K‡K‰KŠKŠKŠKŠKŠe]r(KΤKρK¨K!KKKKKKKK KKNK±K›KEKK%K!K K"K%K%K)K*K6KNKEKSKqKrKtKsKsKtKsKsKsKqKQKJKfKvKsKsKsKsKtKsKuKnK^KΝKδKpK3K,KEKrKqKWK@KHKFKAK@KAK4K=KTKIKGK?K9KVKVKYKDK4K2K)K&K&K&K%K#K#K'KSKK@KDKDKBKAKDKCKBKBKBKBKBK@K=K?K>K=K;K9K:K:K8K8KCKNKMKOKMK8K+K+K*K)K&K&K$K"K$K*K-K5K9K;K@KDKEKJKIKIKIKSK‚K„K}KK†K|KwK}K€K†KŠKŽK’K•K˜KœK’K―KΆKΆKΖKιKτKΪK²K™KŒKŠK…K’K©K“KŠKŠK•KΛKλKΠKΑKΒK·KΉKΑK΄K₯KšKXKKTKTKUK@K5K4K!KKK$K%K%K#K)K[KAKK$K%K2KDKUK]K_KNKJKMKCK@KHKQK]KbKaK_K\K]K]K^K`K_K]K^K]K\K]K_K^K\K\K\K_K^K_KbKgKmKqKuKuKtKtKtKuKuKuKuKuKuKuKtKtKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKwKxKwKwKwKwKwKxKxKxKxKvKwKxKxKxKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKxKyKzKzKzKzKzKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K\KQKVKUKSKPKHKPKZK[KUKOKKKWKUK₯KΪKΠKΠKΠKΜKήKπKτK²KyKKK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KK~K}K}K}K}K~KKKKKK~KK€K~KKcKAKDKDKDKAKK@KCKDKBKBKCKCKBKBKBK@K?KAK@K?K>K>K=KKLKNKNKOKBK-K*K)K(K&K%K$K"K#K'K+K1K5K8K=KBKDKHKFKIKLKsKK‡K{KyKƒK{KsKyK|K~KƒK‡K‰KŽK”K—K˜K£K©K§KͺKΊKΪKσKκKΝK™K…KK‰K KKKK‰K”K©K©KΔK½K½K²K KK~KiKYK;K-KK KK K +KKKKKKKK +K K K KK +KK KK KK KK KKAK5KEKbKsK{K„K‘KK“KžK’K§K₯K’KK‘KͺKͺK·KΗKΕK±KœK‘K£KK”KšK“K‡K‰KŠKŠKŠKŠe]r(KίKζKwKKKKKKKKKK$KGKCKK3K3KKKKK K"K!K,KXKAK+K;K8K)KKK1KGKUKZK]KZKSKIKDKBKHKSK\K`K^K\K]K`K_K\K\K]K]K]K`K_K\K]K]K_K`K`K_K_K^KaKgKmKsKuKwKuKuKuKuKuKuKuKtKsKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKxKyKxKyKyKyKyKxKxKxKxKuKwKyKxKxKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzK{KzKzKxKyK{KzKzK{KzKxKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|KbKQKVKTKSKPKNKLKWK[KXKQKHKRKSK‰KΨKΡKΠKΠKΟKΧKξKυKΜK€K€KK~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K€K~K}K}K}K}K~KKKKKK}K~K€K}KKtKGKCKDKCKEK=KBKDKDKDKCKDKCKCKGKEKDKDKBKBKCKCKBKBKBK@K>KAKAK?K?K?K>K=K:K9K7K7K9KHKOKNKMKKK3K)K)K)K&K%K$K"K#K#K(K0K5K8K=KAKDKGKFKEKaK‹KŠKKuKsKK€KtKsKyKyK~K‚K€K†KŒK‘K–K›K£K’KKK₯KΚKΞKΞKΜK…K{K…K—K K’KKK•K°KŒK­KΒK΄K΅K¦K–KŽKsK\KBK(KK K K KKKKKKKKKK K K K K KKKK KK +KK KKKK"K8KAKGKPKZKqK€KK†K‰KK˜KšK›K 
K’K§KΉKΐKšKrKwKwKoKgKhK€K‹KK‰K‰KŠKŠe]r(KΦKΙKSKKKKKKKKKK%KIKFK7KKKKKK!K#K%K'K&KK>K-KGKPKHKFK9K?KWKVKUK=K5K2KKKKKK$K+K8KWKBK+K2K8K-KKKKKMKIKFKVK^K_K[KPKFKBKAKIKSK_KbKaK`K\K]K]K\K]K]K]K\K^K`K`K`K`K`K`K`K_K\K\K^KdKkKsKvKvKuKuKuKvKvKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKxKwKuKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKxKxKxKzKzKzKzKxKxKxKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}KjKPKTKSKSKRKNKNKVK[KZKTKIKOKVKlKΞKΤKΟKΠKΠKΡKθKρKγK‰K|K€K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKKKKK~K~KKKKKKKTKDKDKCKCK=K?KFKDKDKCKCKDKDKDKDKDKDKDKCKBKBKBKBKBKAKAKBKAK>K?K?K=KK>K=K,KIKOKHKFK8KCKVKUKPK9K4K1K0K(KKKK!K/KEKXKAK,K(K*K&KK KKKZK@KK!K7KLKXK`K]KYKNK1K7KBKLKXK`K`K^K]K]K]K]K]K]K^K_K_K_K_K_K`K`K`K]K\K[K[KZK^KfKmKqKuKwKuKuKuKuKuKuKuKuKuKuKuKuKuKuKvKvKwKxKxKxKxKwKvKuKuKuKuKuKuKuKuKuKuKuKvKyKwKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzKzKxKxKxKzK{KzKzKyKyKyKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|KtKTKRKSKSKSKOKOKTKZK[KXKNKKKXKYK»KΧKΟKΠKΠKΞKβKοKπK K{K€K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKKKKKKK~K~KKKKKKKfKDKEKEK?K;K=KCKCKCKCKCKCKCKCKCKCKCKCKDKBKBKBKBKBKBKBKBKAK?K?K?K>K=KK?KK?K?K>K:K:K:K7K5KCKNKMKMKNK:K*K+K)K(K&K$K"K K$K)K-K4K7K=KBKEKEKFKWKKK{KsKqKtK}KKwKsKvK~KKK€K~K|KK‹K”K˜KK’KK”K€KηKΉKK‰KKK~K–K”KŽKƒK˜K·K„KŽKΗK§K‘KK‹KKkKTK6K"KK KKKKKKKKKKKKKK +K K KKKK K K K +K +KKKKKKKKK K KKRKrKpKtKtKrKjKaKpK…KXK#K(KPKxK“K€KΈKΑKΕKK¦K—K‰KŠe]r!(K?K#KKKKKKKKKKKK>K@KKFKEKCKDKDKCKDKEKEKEKDKCKDKDKDKCKBKCKDKBKBKBKAK@K@K>K>KK"KKKKK KKK"K"K$KAKDK=K'K'K!K!KKKK K$K!K3K€KΪK²KpKuKsKsKsKsKsKsKtKnKZKNK\KuKsKsKsKsKsKsKsKuKhKFK@KaKtKsKtKuKNK$K)KDKCK=K?K?K8K4KSKKKDK@K6KKKSKTKMK2K0K-K'K&K&K&KK K!K=KWK2K#K?KTKXKWKOK?KCKZKEK/K'K)K+K1K8K8K0KAKSKKSK\K^K^KWKLKEK@KAKOK[K`K_K`K`K`K_K\K\K\K]K]K]K]K]K]K]K]K]K]K\KZKYKZK^KdKmKvKyKxKvKuKuKtKiKWKPKIKIKBK=K;K5K9K7KBKWKjKvKwKuKuKuKvKyKxKtKwKyKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K~K}K~KjKPKRKQKRKOKOKLKTK[KXKVKKKOKUKiKΛKΤKKΣKΠKΟKεKοKθK‘K{K€K€K€KK}K}K}K}K}KKK}K}K}K}K}KKK}K~K€KKKKKKKKKKKKKKKKKK‚KhKCKGKGKGKDK9KDKEKCKCKCKCKDKGKEKCKDKCKCKDKCKDKDKDKDKBKAKAKBKBKAK=K>K>K:K9K8K8K9KHKNKQKQKLK4K,K+K*K)K(K%K$K#K&K*K.K2K:K?KBKGKGKEKIKkKƒKuKtKsKwKzKKyKsK~K†KƒKƒK}KxKzK}K…KŽK•KšK KŸK–K›KζKΓKvKwK‚KƒKxKŽKK‰KxK€K©KkK|K~KnKxKwKvKrK^KAK(KK +K K KKKKK +KKKKKKKK K K +KK +KKKK K K KK +KKAKdKyKˆK{KgKKKKK7KtKŠKŠK‡K‹KŠKˆKƒKxK•KuK.KKKK KK K#K2K|K‰e]r#(K8KKKKKKKKKKK&KDKDK3KKK%K'K&K&K$K"K"K 
KJKΤKοK¬KmKuKsKsKsKsKsKsKuKlKGKCKaKvKsKsKsKsKsKsKsKuKdKEKAKfKvKsKsKsKsKCK.KIKDK=K?K?K4K4KSKKKEK@K5KMKTKTKGK/K/K-K'K&K%K%K$K"KKAKYK0K%KBKUKWKXKYKZKXKZKDK5K:K.K,K.K1K7K9KIK_K+KK&K2KGKVK^K_K]KQK7K>K?KDKPK[K`K_K]K\K\K]K]K]K]K]K]K]K]K]K]K]K\K\K\K]K[KXKYK]KdKlKrKvKvKpK]KMKFK?K?K8K6K=K9K4K0K,K:KIKZKoKxKvKvKuKxKxKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K|K{K{K{K~KrKSKRKOKRKOKNKPKWKYKXKVKLKKKXK[KΆKΧKKΣKΡKΞKίKνKπK«KyK€K€KKK}K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€KxKJKEKEKEKFK:KCKDKCKEKFKDKDKGKEKCKCKCKDKCKDKCKDKCKBKCKCKCKBKBKAK@K@K=KK6K6K9K2K6K9K4K-K0K/K7K9KGKPKhKwKtKuKxKvKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K~K|KzKzKzK|KyKXKQKOKRKOKNKPKQKYK\KXKOKHKWKSKšKΪKKΣKΡKΞKΩKκKπKΖK}K€K€KKK}K~K€KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK€KYKBKDKCKGK@K=KGKCKFKGKDKDKGKFK@K=KGKCKCKDKCKDKBKAKCKDKDKCKAKBKBK@K=K=KKBKEKGKHKJKwKKoKqKtKwKyK|KwK{K‚K„K€KK{K{KzKyK}K…KŽK”KœK K—K”KήKβK€KwK€KqKiK‚KˆK„KpK KŠK^KmK[K_KhKjKkK`KKK-KK K K K K K K KK KKKKKK K K K KK K K K K K K K K K K K KKK K'KRKˆK’K„KDK8KbKzK€K†KŠK“KqK`KiK`KKŸK›K£K―KKwKtKKŠe]r%(K#KKKKKKKKKKK0KEKIK-KK KKKKKK#K(K"KyKεKΝK‚KrKtKsKsKsKsKtKsKvK`KBKEKkKuKsKsKsKsKsKsKsKuK[K?KGKoKuKsKsKtKnKPKCKEK@K>K?K>K0K;KQKHKCK>K9KPKRKTKCK3K4K.K&K&K&K$K"K$K!KGKUK*K(KEKQKUKXKYK[KXKXKBK-K1KFKVKRK5K+K)KGK\K=K5K?KAK?K:K2K/K1KFKSKYK^K\KVKOKFK=K?KEKSK^K]K\K[K[K]K]K]K]K\K[K[K[K[K[K[K[K[K[K[K[KZKYKZKZKSKHK@K;KK=K;K9K9K8K7KEKNKOKPKOK;K,K-K+K)K&K&K%K#K#K'K,K2K8K:K?KDKGKGKJKvKƒKpKpKrKuKvK{K{KwK}K…K„K|K{K{KyKyKzK}K‡KŽK–KœKžKŒKKχKΏKsK|KrKdKoK…KƒKuKuKqKOKgKeK^KaKfKaK\KNK1KK KK K +K +K K K KK +KKKKKK K K KK KKK K K K K K K +K K K K K +KKKKOK‰K‘K†KmKoKtKiKlKƒK–K¦K³KΌKΏKΒK½K²K«K°K£K—K„Kˆe]r&(K%KKKKKKKKKKK=KnK‡K=KKKK KKK +KKK"K€KΉKKuKuKsKsKsKsKsKsKsKtKZKCKJKpKtKsKsKsKsKsKsKsKtKTK?KLKrKtKtKsKuKkKNK@KFK?K>K>K>K.K@KOKGKDK=K>KTKQKTKBK4K8K0K&K&K&K%K"K$K"KIKTK*K+KIKTKWKWKYKZKXKWK>K(KKK'K4K0K+K7KPKYK>K0K6K=K>K=K?K@K9KDKTKK@KHKUK[K^K^K\K\K\K\KZK[K[K[K[K[K[K[K[K[K[K[K[K[K\K\KYKVKPKDK?K2K.K1K7K6K5K5K:K3K0K0K0K7K)K.KHKdKwK|KyKwKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKyKzKzKzKzKzKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K{KzKzKzKzKzKzK{K~K}K{K|K}K{K{K}K}K}K}K}K{K}KhKQKSKSKRKQKNK@KVK\KZKSKJKPKXKfKΖKΤKKΠKΠKΟKδKοKκK—K|K€KKKKKKKKKK}KKKKKKKKKKKKKKKKKKKKKKKKKKKK‚K|KMKEKGKFKFK=K@KEKEKGKGKGKGKFKDKDKDKDKDKDKDKDKDKDKDKCKBKBKBKBKBK?K>K=KKMKPKQKQKEK.K,K+K*K*K'K%K$K$K&K+K2K7K:K?KDKFKGKHK`KKrKpKrKsKsKwK}K{KzK„K…K|K{KzK{K|K{KzK€K‡KK”KK˜K”KέKφK–KoKoKhKcK{KƒK|KlKWKJKXKmKiKaK^KWKUKPK3KK +K K K K +K K K K KK KKKKK K K KK KKK +K K +K +K +K +K +K +K +K +K +K K K KKKKOK†K“KƒKyKkK]KiK‚K₯KΈKΊKK²KΆKΈKΈKΉK½K­K‚K†e]r'(K$KKKKKKKKK KKkK½KΎKKKSKRKSK@K7K9K0K(K(K%K"K K$K$KOKRK)K+KJKUKWKXKWKWKXKXK=K+K0K%KKKKK0KSKXK=K9K7K,K2K:K=K=K8KLK^K/KK2K1KAKRKYK\K\KXKOK4K3KAKJKTK]K`K\KZKZK[K]K\KZK[K[K[K[K[K[K[K[K[K[KZK[KYKYK[KVKPKK>K5K1K;K9KGKQKPKPKNK6K+K*K*K(K&K&K$K#K%K)K.K2K:KKZKvKsKtKtKtKtKsKsKtKpKMK?KVKuKsKsKsKtKtKtKsKuKpKMK?KTKvKsKuKuKuKcKEK@KEK>K>KK;KKAKKKTK]K]K]K\KZKZK[K[K[K[K[K[K[K[KZKZKZK[KYKXKYKZKXKYKWKIK1KK KKKK +K +KK K K 
KKKKKKKxKzKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKxKyKxKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK{K|K}K}KzKzKzKzKzK{K|K}K}K|K|K|K}K}K}K}K}K}K}K}K~KwKUKRKRKQKPKOK.KDK\KZKUKOKIKVKSK”KΧKΞKΠKΠKΟKΤKκKσKΛK|K€K€KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK‚KmKFKHKFKBK@K;KDKGKFKFKFKFKFKGKFKEKEKEKGKEKCKDKDKDKDKDKDKDKBKAKCKBK?K>K=K9K8K:K7K?KQKQKPKRKBK.K,K,K)K&K%K$K$K#K'K*K/K6K:K@KBKEKGKMK|KwKpKpKqKsKuKxK|K{K}K†KKxK{KwK|K‚K|K{KKK‹K“K›K—K‘KΩKϋK§KaKjKgKeKzK}KvKcKSKHKZKjKkKeK\KSKUK>KK K K K K K K +K K K K +K K KK K K K K K K K K +K K +K K +K +K K +K K K K K K K K K K K K K4KqK‚KwKqKkKmKsK{K†KK™K K K§K§K‡KOKwe]r)(KK K"K"K%K$KKKKK:KΜKςK­KK#K%KKKKKKKK$KGK@KaKvKsKvKuKvKtKsKsKuKmKJK?K\KwKsKsKsKtKvKtKsKvKmKIK@K[KvKsKvKuKtK_KEKAKEK>K>KK-K$K&K&K#KKK%KSKLK'K/KKKYKDK)KHKZKWKWK:K'K!K!K(K.K2K5K1KHKYK7K;KWKhKsKXK.K%K'KCK\KAKBKwK{KzKqK_KGK:K8KHKWKYKYKYKSKJKAKKAKHKFKGKFKFKFKGKDKCKFKGKGKEKCKDKCKDKDKDKDKDKBKAKBKAK?K>K=K;K:K8K6K8KLKQKPKQKLK3K-K-K+K)K$K%K%K!K&K*K0K6K:K=K@KEKFKHKuKyKpKpKpKsKuKuK{KK~K‚K‚KvKyKwKxK‚KƒKK~KK…KŒK”K›K˜KΘKςKμKsKOKjKYKoK{KoKfKXKJKRKbKgKiKbKZKUKBKK +K K K K K K KK +K K +K +K +K K K K +K +K +K +K +K +K +K +K +K +K K K KKKKKKKKKKKK K +KKK:KPK^KeKmKqKrKuKzK†K’K•K—KKjK?Kje]r*(K)K)K(K(K#KKKKKKQKΫKιK‚KK&K$K"KKKKKKKGKBKCKjKuKsKvKuKtKuKuKsKuKiKEKAKbKxKrKtKuKuKuKuKtKuKhKEK@K`KvKsKtKsKsK^KBKBKDKK:KKKMKFKAKCKMKPKSKOK?K>KK>KK-K,K+K*K&K&K$K#K%K)K.K4K8K=K@KDKGKGKqK}KqKqKoKsKtKvKxK~KK‚KKwKsKuKwKK†K„KK~KK…K‹K“K“K©KθKόK·KCK`KXKiKtKnKcKYKLKJKZKaKfKgK`KYKHKK K K K K +K K KKK K K +K +K +KKK K +K +K +K K K K K K K K K KKKKKKKKKKKK KK KK KK)K8KHKZKfKkKtK{K„KŽK‘K€K\K:KSe]r+(K)K*K*K#KKK KK KKaKΡKΏKPKK'K$K#K KK"KK"KjKVK>KHKpKtKtKvKtKsKuKuKsKuKbKAKBKhKuKsKtKvKuKuKuKuKvKeKDK?KeKvKsKsKtKtKZK@KCKCKK=K:K:K7K=KOKQKQKNKJK1K-K/K,K'K'K$K#K$K(K+K/K5KKCKCKKOKIKEKAKDKOKQKQKLK>K?K9K(K%K%K"K!K!KK-KZKCK%K4KOKUKXKYKXKXKYKUK7K)K%K*K.K0K4K3K5KRKXK6K8K?K9K4K4K0K,K,KIK[K;KOKqKUKKKFKAKK?K8K'K%K%K"K!K"KK1KZKAK&K6KPKUKWKWKXKWKXKTK7K)K%K*K.K0K4K6K7KRKXK5K8K>KKDKCKDKGKGKDKCKDKDKDKDKDKDKCKBKAK?K=K=K;K:K:K8K@KPKQKPKRKGK0K-K.K+K'K&K&K%K%K'K)K0K4K8K?KDKEK^KKtKoKmKoKtKvKyK|K{KK‡KzKtKsKpKpKsKxKKˆK‹K‹K‰KŠK‹K}KKΧKνKυKtK>KBK K(K8KAKHKIKGKSK[KcKfKgKSKAKKK K K K K K K KKKKKK +K +K K +K K K +K K K K K K K K +KKKKKKKK K +KKKKKKKKKK KK K K KKKKK%K1K?KMKEe]r.(K0K+K(K$K K K!K"K#K,K@KCK5KK!K$K$K"K K"K"KVKxKeKCKAK]KyKuKuKuKuKuKuKuKtKrKOK?KPKvKvKuKuKuKuKuKuKuKuKRKK>K6K'K%K$K$K!K!KK4K\KAK'K:KSKUKVKXKXKWKVKRK8K(K%K)K/K1K4K6K9KQKWK2K7K@KK;K;KLKXKAK2K:KJKbKiKNK5KDKCK@K[KNKBKpK{KyKyK{KtKaKJKHKKKMKVKZKYKXKSKKKBK=K>KGKQKWKZKYKWKWKWKWKXKXKXKXKXKXKXKXKXKWKXKXKXKWKTKUKXKZKTKGK7K"K KPK‚KwKxKxKzKzKzKzKzKzKzKxKxKxKxKxKxKxKxKxKxKzK{KzKzK{KyKxKzK{KzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K€KbKPKSKPKPKTKBK/KWKXKYKRKHKKKTK^KΑKΧKKΣKΤKΟKήKκKξK«K}K„KƒKƒKƒK€KKKKKKKKKKKKKKKKKKKKKKƒKƒKƒK‚K‚KƒK€KK‚K‚K‚K‚K‚K„KoKGKGKGKGKEK?KCKGKGKGKGKFKFKGKEKBKGKGKGKGKFKDKCKCKDKDKDKDKDKCKBKBKBK@K>K=KKUKxKuKuKuKuKuKuKuKvKsKNKKAKQKPKQKHK=K=K4K&K%K%K#KK 
KK5K[K>K'K;KTKVKWKXKWKVKWKQK8K)K&K*K/K1K3K6K9KSKVK4K7K>KK7K@K1K6K@KDK?KGKbKcKYKPK;K8KJKdKuK|K{KzKzK^KQK)K#K6KIKWKYKYKUKPKIK?K8K:KFKRKWKZKYKXKVKWKWKWKWKWKWKWKWKXKWKWKXKWKUKUKUKTKUKVKXKVKFKTKpKwKzKzK{KyKyKyKzKzKzKyKyKxKyKyKxKyKyKyKyKzKzKzKzKzKzKyKzKzKzKzKzKzKzKzKzK{K{KzKzKzKzKzK{K|K|K|K|K|K|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKiKPKSKQKPKRKLK,KSKYKYKTKKKGKWKRK¨KΧKΠKKΣKΡKΩKιKπKΔK~K‚KƒK‚KK€KKKKKKKKKKKKKKKKK€KK€KK‚K‚KKK‚K‚K‚KKK‚K‚K‚K‚K‚K‚KKSKEKHKIKIK@KBKGKFKGKGKFKFKFKGKGKFKGKFKFKFKEKEKDKDKDKDKDKDKCKBKAKBK@K>K=K;K8K9K8KEKQKPKQKSKCK.K-K,K*K(K'K&K&K%K(K*K0K6K9K=KAKTKKyKrKoKnKqKwKzK}K}K~K…KƒKxKsKnKoKqKnKsKyK~K†K‹KŒK‹K„KyKKΩKοK¦KKK KKK(K4K6KCKOKYK[KaKdKQKLKKK +KKK K +K K +K +K K +K K K K K K K K +K K K +K K K K K K KKKK K K K KKKKKKKKKKKKKKKKK +K K KKK#K-K/KAe]r0(K,K,K(K KKKKK!K3K?K@K)KK"KK K#K KK(KkKzKXK>KBKlKvKuKuKuKuKuKuKuKwKjKCKK?K>KKRKHKCK;K6KOKPKRKHKK?K>KLKZK9KEKuKfKTK=K2K1K1KKGKTKYKXKVKUKWKWKUKUKUKWKXKXKXKXKWKUKVKVKVKUKUKTKQKWKVKSK[KbKnKvK{KzKyKxKzKzK{KzKxKzK{KxKyK{KzKzKzKzKzKzKzKzK{KzKzKzKzKzKzKzKzKzK}K}KzKzKzKzKzK}K~K~K~K~K~K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}KKtKRKSKRKOKPKRK0KHK[KYKWKNKEKUKRKˆKΥKΞKΣKΣKΣKΥKθKνKέKŠK€KƒKKKKKKKKKKKKKKKKKKKƒK‚KKKƒK‚K‚KK€K‚K‚K‚K‚KƒK‚K‚K‚K‚K‚K‚K…KcKDKIKHKDK;K?KGKFKFKFKGKFKGKIKHKFKGKFKFKFKGKFKCKCKCKCKDKDKCKAKCKAK>K?K>K;K2K4K:KK&K!K$K%KKKKK*KnKvKOK>KGKrKuKuKuKuKuKuKuKuKxKdK@KAKbKxKuKuKuKuKuKuKuKvKmKGK=KYKxKuKuKuKwKcKDK=KCKK=K;K:K8K8KIKQKQKQKQK>K.K-K+K*K)K&K&K%K&K&K-K2K6K=KAKLK}K{KtKpKpKnKmKxK}K~KK‚KƒKKxKtKpKpKnKnKtKwKxK|KK‡K†K|K…KΘK»KhKSKKKKKKKKKKKKK/K5K/K +KKK K +K +K +K +K +K +K +K +K +K +K +K +K K K KK K K K K K K KK K +K K +K +KKKKKKKKKK K K KKKKKKKKKKKK KKKKe]r2(KK#K%KKKKKKKKMKuKvKuKuKuKuKuKuKuKyK]K@KCKjKxKuKuKuKuKuKuKuKwKgKCK>K]KxKuKuKuKwK`K@K>KDK;KKRKPKRKCKKHKSKVKVKVKUKUKUKUKUKUKVKVKUKUKUKUKUKVKTKRKRKSKSKRKQKQKRKXKbKlKvKzKzKyKyKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzKzK|K~K{KzK}K}KzKzKzK|K~K}K}KzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K`KPKQKQKPKSKAK0KVKWKXKTKKKKKTK\KΎKΤKΟKΟKΠKΟKέKμKρKΆK}K„KƒKƒKƒK‚K€KKKKKKKKKKKK‚KƒKK€KƒK‚K‚K‚K‚KƒK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚KKUKGKHKGKHK?K=KGKFKFKGKGKFKFKFKGKGKFKFKFKGKGKGKGKGKEKCKDKDKDKCKBK@K>K?K?K=K=K;K9KAKQKQKPKQKIK0K-K+K+K)K&K%K&K%K%K+K0K4KKAK2KKKKK K#K!KKBKxKkKBK@KUKxKuKuKuKuKuKuKuKuKwKVK=KEKpKwKuKuKuKuKuKuKuKxKbK>K?KeKxKuKuKuKwK\K>K?KCKKHKUKWKWKWKUKUKVKUKUKVKUKVKVKVKVKTKRKSKSKSKRKSKSKRKQKPKQKVKbKmKuK|K}KyKxKzKzKzKzKzKzKzKzKzKzKzKzKzK{K}K|KzK|K}K}K}K}K|KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKiKTKQKOKQKQKKK,KPKWKXKVKLKHKVKQK KΥKΞKΠKΠKΟKΧKκKρKKƒKƒKƒK‚KƒK‚K‚KKKKKK‚KK€K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K„KdKFKJKHKHKDK;KEKGKGKGKGKFKHKHKFKGKGKFKGKGKGKFKFKGKEKCKDKDKDKDKCKBKAK?K?K=K:K:K9K:KLKRKRKRKOK8K.K-K-K+K(K(K'K%K(K+K+K3K9K=KAKpKKuKrKpKoKmKrKxK{K~KK‚K‚KK{KvKoKnKnKnKpKqKtKvKyK€KK€K‘KkKaK[K9KKKKKKKKKKKKKKKKKK +K +KKKK K KKK K K KKKKKKK K K K K K K +KKKKKKKKK K +K K K K K K K K +K +K +K +KK KKKKKKK K K K e]r4(K%KKKKKKKK)K@K@K,KK 
K"K!KK!K(K"KVK|KcKBK@K^KxKuKuKuKuKuKuKuKuKuKPKKAK?KJKSKTKDK@K9K/K:KRKjKtK|KzK4KHKWK$KK!K7KJKSKTKTKSKQKIK?K4K5K>KHKTKWKWKUKUKUKUKUKUKUKUKUKTKRKSKRKRKSKSKRKSKSKSKSKQKPKQKWK_KlKvK{K}K{KzKzK{KzKzKzKzKzKzKzKzK{K}K|KzK|K~K}K}K~K|K{K}K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~K~K}K}K}K~K~K}K~KKrKSKQKPKQKPKQK.KEKXKWKUKNKHKUKPKƒKΤKΞKΠKΠKΠKΡKηKξKδK‘K€KƒKƒKƒKƒKƒK‚K€K€KKK‚KK€KƒK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K„KuKJKIKIKHKFK=KAKGKGKGKGKFKHKHKFKGKFKGKFKFKGKGKFKGKEKCKDKDKDKDKDKBKBK?K?K>K:K:K9K9KEKQKRKRKQKDK/K.K.K+K)K)K'K%K%K'K+K2K5K:K>KoK€KvKsKqKpKmKrKyK{K~KK€K}KK|KzKrKnKnKnKpKpKpKqKuKyKzKsKfKaK^KUKGK KKKKK KKKKKKKKKKKK +K +KKKKKKKKKKKKKKKKKKKKKK +K KK K KKKKKK K +KKK +K KKKK K +K K +K K K K K KKKK K +K K +e]r5(K!KKKKKK!KK+KBK=K)KKKK#KKKK&K_K{K[K=K@KdKxKuKuKuKuKuKuKuKvKqKKK>KPKwKuKuKuKuKuKuKuKuKxKYKKJKwKyKxKxKxKxKxKzKnKYKOKDKyKzKpK]KGK2K.K3KIKVKBKRKsK]KEK-K/KAK]KIK KJKWK7KK +KKK-K=KMKSKUKSKPK:K>K@K8K7K?KIKTKWKWKVKUKUKSKRKRKRKRKRKRKRKRKSKRKRKSKRKSKRKQKQKPKOKQKTK_KmKwK|K~K|KzKzKzKzKzKzKzKzKzKzKzKzK|K~K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKK}K}K}KKK}K}K~KzKUKQKRKQKOKQK4K;KVKVKWKPKGKNKSKjKΚKΡKΟKΠKΠKΞKΰKκKμK₯K~KƒKƒKƒKƒK‚K‚KƒKKKKKK€K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚KƒK‚KUKGKIKGKGK@K=KGKGKIKGKGKFKFKFKHKHKGKFKFKGKGKFKGKEKCKDKDKDKDKDKBKAK?K?K@KKTKyKvKwKuKvKwKvKuKuKvKSKKQKYK@KLKwKyKxKxKxKxKxKzKpK[KOKCKtKwKzK}K{KlKRK=KHKVKBKEKdKnKqKVK3K+K+K%K$KMKWKBKKK +K +K KK#K4KAKMKQKMKQKQKPKIK=K6K:KCKMKTKWKVKSKRKRKRKRKRKRKRKSKRKSKRKRKRKRKQKQKQKRKSKQKOKNKPKTK^KkKuK{K}K{K{K{KzKzK{K|K{K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKK~K~K~KKK~K~K~K€K]KPKSKQKPKTKBK0KUKXKYKRKHKJKVKYKΆKΤKΟKΠKΠKΞKΩKκKοK½KK…KƒKƒKƒK‚K‚KƒKKK€KK€KK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K…KfKGKIKHKHKEK;KGKGKIKGKFKGKGKGKFK@KAKHKGKFKGKFKGKEKCKDKDKDKDKDKBKBK@K@K?K=K;K:K:K8KGKSKPKQKRK?K-K/K,K+K(K&K%K%K&K'K+K1K6K8KfKKxKvKsKrKnKrKwKxK}KK€K}K~K|K{KtKrKoKoKkKkKjKpKuKzKxKoKeK]KSKMKFK0K@K1K2KIKLKIKKKDK9K0K(KKKKKK K +K +K +K +K +K K K K K K K K KKKKK K K K KKKKK K KKKKKKKKKKKKKKKKKKKKKKKKKKK K +K Ke]r7(KKKKK%KKKK8KAK:K KKKK#K$K%K K&KkKvKNK>KIKrKvKuKuKuKuKwKyKvKwKhKCK?K\KyKxKyKuKvKyKwKuKuKuKNK>KMKuKuKuKuKyKnKIK;KDK?KKXKQK=K:K:K=K=K=K;K=K>KOKZKCKMKwKyKxKxKxKxKxKzKrK^KPKCKuKyKxKwKxKzK{KxK_KQKCK1K8KFKVK]KMK=KGKMKAKQKVKDK#K KKKKKK KK%K>KIKLKRKTKQKNKMKEK?K8KKBKHKIKGKFKHKHKFKFKCKCKGKGKFKFKGKGKEKCKCKCKDKDKDKBKAKBKAK?K>K=K;K9K8K@KQKQKPKQKKK2K.K/K*K)K'K%K&K%K'K*K/K6K7KaKKxKvKsKqKpKsKwKwK{KKK|K}K~KzKtKpKoKqKoKnKkKkKoKvKvKlKbK[KQKIKBK1K?K&K:KJKPKRKKKDK@K@KBK9K,K6KKKKK K K +K +K +K +K +K +K K K KKKKK K K K K K K +K KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKe]r8(K#KK%K$KKKK#K>K=K4KKKKK#K%K%K!K.KrKtKFK=KOKwKxKwKwKwKwKxKxKuKxKaKAKAKcKzKvKwKwKxKxKxKwKwKsKIKKK.K6KQKFKBK>K4KJKPKRKJK:K;K6K)K%K#K"K KKK&KRKOK8K6K7K3KKKKK2KYKGK4K#K&K(K+K0K4K3K>KXKOK>K:K:K:K;K=K=K;K=KOKZKCKMKwKyKxKxKxKxKxKzKrK]KPKDKtKyKxKxKxKxKwKyKfKTKCK?KFK6K5K?KGKCKGK]KfK[KTKFK5K)KKK 
KKKKKK0KQK?K,K?KPKUKTKQKQKNKEK;K5K6KBKMKSKUKTKRKRKRKRKSKRKRKRKQKQKRKQKQKQKPKPKPKQKPKOKPKOKLKMKPKZKhKsKzKK~K|K|K}K|K{K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKKK~K}K~KKKKKKKKKKKKKKKKKrKQKSKQKPKPKRK-KFKWKWKUKNKFKTKPK~KΣKΟKΟKΠKΠKΞKγKνKηK–KKƒKƒKƒK‚K‚K‚K‚KKK€K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K„KƒK‚KXKFKIKIKKKDK>KHKGKGKFKHKHKFKGKIKHKHKGKFKGKGKGKFKEKFKEKDKDKDKCKCKBKAK?K>K>K=KK/KKKKK#K&K'K$K5KtKnKAK=KXKyKxKyKyKyKyKxKxKuKxK[K@KBKhKyKuKvKxKyKxKxKxKyKmKEK=KVKxKuKuKtKzKeKBK:KDK=KK,K9KOKFKCKK:K:K9K;K=KKOKZKCKLKwKyKxKxKxKxKxKzKrK\KQKDKtKyKxKxKxKxKxKzKhKUKBKOK}KnKXKBK4K1K8KAKOKWKUKDKFKJK6K-K%KKK KKK'KXKEKKK#K6KGKQKSKRKPKOKIKAK7K.K:KEKLKSKUKSKRKRKRKRKRKQKQKSKRKPKPKQKPKPKQKOKMKMKMKNKNKMKLKNKQKYKfKrKzK€KK{KzK|K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K}K~KKKKKK€KK}K~K€KKKKKKKKKKKKKKK€K{KVKRKQKPKPKTK6K8KWKWKVKNKFKOKSKdKΖKΠKΟKΠKΠKΜKέKμKπKK~K„KƒKƒK‚K‚K‚K‚KƒK€KK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K„K„K…KhKGKIKIKIK@K>KGKJKGKFKHKHKFKGKIKIKIKGKFKGKGKGKGKGKGKFKDKDKDKDKDKBKAK?K?K?K>K6K9K9KCKRKSKTKTKDK.K.K.K+K)K)K&K%K%K%K,K1K1KXK…KyK{KwKrKpKsKwKwK{K€KK|KzKzKzKtKpKoKqKrKpKpKqKrKrKqKhK`KVKJK@K:K?K=K4KKK[KUKNKJK;K3K4K;KBKAK9K:K'K KKK K K +K +K +K +K +K +K +K K K K +K +K +K KKKKKKKK +K K K K K K K +KK K K KKKKKKKKKKKKKKKK KK KKe]r:(KCK=KKKKKK-KCK>K*KK!KKK"K$K%K#KK^KyKuKvKuKvKyKwKuKvKvKRK=KCKmKwKwKxKxKxKxKxKxKyKjKAK=K[K{KwKxKwKyKcK?K:KCK=KK-K>KNKEKCKKWKNKKQKVKCKMKlK`KMK5K*K'K KK K"KSKJK*KK +KKK+KK/KUKVKUKRKFKHKUKUK±KΣKΟKΠKΠKΝKΣKιKξKΗKK…KƒK‚K‚K‚K‚K‚K‚KK~K~KK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚KƒK„KƒK‚K‚K‚K‚K‚K‚K‚K‚K„K„K„KƒK„KyKKKIKIKGKEKCKEKKKGKFKGKGKHKHKIKGKGKGKFKFKGKGKFKFKGKFKCKCKDKDKDKCKCKAK@K?K=K1K7K;K=KPKQKRKTKNK6K-K-K*K*K'K(K(K&K%K(K/K/KUK„K{KzKyKtKsKsKtKvK{K€KƒK‚K~KxKvKrKqKpKoKrKrKmKlKpKtKqKfK`KUKIK9K9KDK;KGKYKTKNKTKAK:K=K>K@K?KKcKzKxKxKxKxK]K>K=KCK=K=KK*KAKNKEKCK;K5KKKMKOKEK;K:K3K'K%K!K!K KKK/KUKHK:K8K7K0KKKKK8KYKDK/K#K&K(K/K1K4K2K?KXKMK;K9K:K=KKOKxKxKxKxKxKxKxKxKxKyK\K>K@KjK{KxKxKxKwKYK>K>KCK>K?K=KK7K4K-K"KK KKKKKK K K KKKKKKKKKK KKKKKKKKKKKKKKKKK +K K K KKKKKKKKKKKKKKe]r=(KHKEKBKEK?K&K!K:KBK8K!KKKKKK#K%K!KSK{KJK>KHKtKyKxKxKxKxKxKxKxKzKkKAK>KVKyKxKxKxKxKxKxKxKxKyKWK=KCKnKyKxKxKyKvKSK=K?KAK=K>K=K:K)KHKKKDKBK6K:KLKMKNKBK(KIKIKCK@KCK9K,K:KAK4K KKKKKK K"K"KZKvKDK=KRKyKxKxKxKxKxKxKxKxKzKeK@K>K\K{KxKxKxKxKxKxKxKyKzKQK>KGKqKxKxKxKyKrKOK;KAK>K;KKSKDK8K8K8K1K(K)K.K/KJKSK?K7K6K7K6K6K5K5K.K;K[KKK:K9K:K9K9K:K9K:K=KPKWKCKSKyKyKxKyKzKzKzK|KqK[KQKKKvK{KzKzKzKzKzK}KkKUKMKOK{KzKzKzKzKyK~KTKK/KYKJK*KK KK,K9K=K>KBKHKWKUKNK;KEK?K2K-K$KKK +KK.KGKMKQK2KK)K+K;KJKNKPKPKPKMKEK=K3K/K:KFKNKPKOKQKPKQKNKMKMKMKMKMKNKMKMKMKNKSKSKSKPKPKPKQKQKRKQKPKRKVK`KoK{K~KK~KK€KK€KK€K€KKKKKKKKKKKKKKKKKKKKKKKKKKKK€KKZKOKQKPKPKTK?K0KTKUKVKQKIKJKUKUK°KΥKΠKΟKΡKΝKΦKμKνKΞK…K…K…KƒK‚KƒK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K„K…K„K„K„K„K„K„K„K‚KƒK„K„K…K…K…K„K„K„K„KƒKcKOKQKPKSKHK=KIKHKIKHKFKGKIKIKIKGKGKFKFKFKFKFKGKGKGKGKGKEKCKDKDKDKCKBK@K>K=KK=K>K,KKKKKKK K$K#K`KnK?KKKMKLKLK>K:K8K1K*K)K)K'K&K&K"KK4K4K?KLKQKOKNKLK9K3K9K5K5K;KGKNKQKQKNKMKMKNKPKOKMKOKOKNKQKRKSKSKNKMKPKQKPKQKSKSKNKOKNKOKWKaKqKzKK€KK~KK~K~KKKKKKKKKKKKKKKKKKKKKKKKKKKKKƒKeKPKQKQKPKOKGK,KQKVKUKRKLKGKUKOK”KΧKΞKΠKΣKΟKΡKηKνKγK‘K‚K…K„KƒKƒK‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K‚K„KƒK‚KƒK„K„K„K„K„K„K„K„K„K„K„K„K„K„K…K…K…K„K„K„K„K†KqKPKQKPKQKOKK=KK?KAK=K%KKKKKKKK!K&KgKcK;K=K_K|KxKxKxKxKxKxKxKxK{KZK>KAKjKzKxKxKxKxKxKxKxKzKsKGK;KOKzKxKxKxKzKoKLK8K@K=K=KKSKYKEKSKyKyKxKyKzKzKzK|KsKYKTKKKuK{KzKzKzKzKzK|KlKTKMKMKwK{KzKzKzKyKKLKK*KYKLK/KKKKKKKKK 
K1KLKRK=K?KWKcKaKFK1K4K3K/K)K(KDKVKEKCKeKgKjKnK\KCK7K6KAKJKOKIKIKLKIKAK;K5K6KKHKIKIKIKIKGKGKGKGKGKHKEKCKDKDKCKBKAK>KK[K{KyKxKxKzKhKCK8KCK=K=KK/K2KMKGKEK?K2KDKMKKKIK;K9K6K#KKKKK K K K?KUKK>KMK[K\KQKQKLK:K8KJK[KgKtKrKuK{KzKpKXKIKEK=K@KMKPKNKKKKKHKAK9K4K5K;KIKNKSKWKUKTKVKUKSKTKTKSKSKRKRKSKSKSKRKSKRKRKSKSKSKRKQKQKPKOKTKaKlKzKK‚KKK~KKKKKKKKKKKKKKKKKKKKKK‚KKYKOKNKMKMKOK>K;KWKUKUKPKGKIKUKTK±KΪKΞKΞKΟKΟKKθKξKΧK‡K„K…K„K…K„KƒKƒKƒK„K„KƒKƒKƒK„K…K„K„K„K„K…K…K…K…K…K…K„K„K„K„K…K…K…K…K…K…K…K…K…K…K…K„KˆKrKOKQKQKPKMKAKDKJKIKHKIKIKIKIKIKIKHKIKHKHKHKIKHKGKGKGKGKGKGKFKDKDKDKCKBK@K=KKAK3K K K!K!K!K!KK7KtKFKKK=KEKVKYKQKMK=K5K-K1K?KVKkKvKzKvK|K|KRKNK,KK*K0KCKLKOKLKJKHKEK?K8K-K2KAKQKXKWKVKVKUKRKSKRKRKOKQKSKRKRKRKRKRKRKRKRKRKQKPKQKQKQKPKNKMKUK`KmKxKK‚KKKKKKKKKKKKKKKKKKKKKKK„K_KMKNKMKNKMKNKSKTKVKVKRKHKEKTKPK’KέKΧKΥKKΠKKδKκKηK”K}KƒK…K…K…K„K„K„K„K„K„K„K„K„K„K„K„K„K„K…K…K…K…K…K…K„K„K„K„K…K…K…K…K…K…K…K…K…K„K…K„K†K€KXKPKQKPKDKAKCKLKKKHKIKIKIKIKIKHKIKHKHKHKHKIKHKGKGKGKGKGKGKGKGKFKCKDK@K?K>K;K5K9K;KEKTKRKRKTKIK2K/K+K+K)K&K&K$K#K.KK‹K„K„K„K„KKzKzK{K~KK€K~KuKrKwKvKuKqKqKnKjKjKkKlKfK\KSKCKDKXKLKNK\K`KdKeKeKcKaK`K_K`K_K\K\K[KZKVKTKTKVKYKYKSKOKKKTKOK@K?K@KDKBK>K;K:K8K5K6K8K3K-K&K"KKKKKKK1K2K)K"KKKKKKKKKKKK!K%K'K*K,K1K>KGKAK`KΩe]rD(KHKIKHKGKGKHKHKHKDKAK@K7K!KK K K KK7KkKBKKhK|KxKxKxKyK_K?KK4K5K:K;KOKSKRKTKSK9K-K,K*K)K&K&K#K K,KK‹K„K…K‡K‡KƒK~K|K{K|K}KK|KrKmKqKuKvKuKuKsKoKnKlKiKeKZKOKAKIKSKIKXKaKdKdKeKeKcKaKaK`K`K_K^K\KZKYKWKWKXK[K[KXKQKOKRKVKLKDKFKEKHKGKDKAK>K>K>K?KAK7K3K.K.K-K,K+K,K'K#K,K?KK@KDKKKSK{K΄Kιe]rE(KIKHKFKFKFKFKGKIKIKDK@K@K6K KKKK!K)KK;K]K|KyK{KzKzK{KzKxKwK|KXKK;K@K;K;K=KKIKNKMK?KMKUKRKCK5K*K&K,K9KKKiKYKOK?KTK‚K‚KzKgKNK:K3K;KFKMKIKEKJKHKCK=K6K3K;KDKNKWKXKVKRKRKRKRKSKSKRKRKSKRKRKRKSKRKPKPKPKQKPKNKRKQKQKPKMKMKSK]KjKwKKƒKK€KKKK€KKKK€K‚KƒKƒKƒKƒK‚K„KvKRKQKOKMKMKNKRKUKUKTKRKOKGKKKRKPKQK[KbKuK–K»KΩKηKδKδKΰKΞK»K¨K—K‡K‚KK‚K„K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K…K„K…KˆK†K„K…K†KsKRKQKOKRKOKAKFKKKHKIKHKIKHKHKIKHKIKIKIKIKIKHKFKFKGKGKGKGKGKGKFKCKBKAK@K=K=K:K:K:K8KGKSKRKSKUKFK.K,K)K)K&K&K%K"K(K|K‹KƒK‡K‰KˆK‡KƒKK~K}K|K}K{KrKkKiKnKqKtKvKvKuKpKlKiKcKZKNK@KPKLKQK_KdKeKdKeKeKcKaKbKaK_K`K`K\KYKWKXKZK[K]K[KWKRKTKZKUKMKLKKKKKKKIKHKCKBKCKFKFKCK?KKAK:K#KK!K)K:KEK=KhK}KwKxKyKzKxKxKxKyKzKWK=K?KiK{KyKzKzKzKzKzKxKxKxKJK7KHKxK{KyKxKzKvKUK;K>K@KK:K:KKEKJKHKJKAK;K=K5K#K!KKKKKK!KOKJK9K8K8K1KKKKK"KTKLK9K%K#K&K*K.K/K0K0KHKVKGK8K7K8K7K7K7K8K:K>KSKWKDKSKzK{KzKzKzKzKzK}KuKWKQKHKsK{KzKzK|K~K|KuKbKUKRKIK>K8K8K8K?KNKjKSKKKRKQKEKKKKK KK-K5K$KK:KTKMK@K KK)K-K-K1K5K4K3KKIKMKPKJKBKDKIKKKJKGKGKGKHKNKOKbK―KΥKδKοKςKκKνKρKμe]rH(KGKHKIKIKIKHKEKFKEKDKDKGKDK@KAK7K"K K-K8K>K;KJKrK|K~K~K|KzKyKyKyKzKPK=KEKqK|KzKzKzKzKzKzKzK{KuKFK9KJKxK{KzKzK}KtKOK8K?K?KKFKHKHKJK?K9K:K3K%K#K 
KKKKK$KQKJK8K7K8K1KKKKK$KVKLK8K&K"K%K*K.K/K1K2KJKWKDK:K6K8K6K9K7K8K:K;KRKWKDKVKzK{KzK{K}K|KzK}KtKWKRKIKsK~K}K|KrKbKTKDKAKSKSKIK@K9K;K=K=K:KKBKNKPK=KBKOKJKIKGKBK@K;K@KKKQKQKNKJK8K/K7KIKcKxK€K~KK}K„KfKIKEKK#K.K@KGKOKLKJKJKJKAK7K1K3K=KKKPKSKPKQKQKRKSKRKRKRKRKRKQKPKPKPKPKQKQKRKRKQKPKQKPKQKQKPKMKOKPKYKiKsKK„KK€K€K‚K‚KƒKƒK‚KƒKiKPKQKQKOKLKOKMKSKSKSKPKLKEKNKQKRKNKXKIKVKOKOKPKOKOKMKMKOK^KxK™KΌKΤKέKαKΰKέKKΏK«KšK‹KƒK‚KƒK…K†KˆKˆK‡K†K„K…K…K‡K‡K‡K†K„K†K‡K…K„K…K„K‡K†K„K‡KuKQKQKPKPKRKCKEKIKIKIKHKIKIKIKHKIKIKHKIKIKHKGKGKIKGKFKGKGKFKEKCKCKDKCK@K?K>K=K;K:K8KDKSKSKUKTKNK4K,K)K&K&K"K!K$K{KŽK‡K‡KˆK†KˆK‰K…K‚KƒK‚K‚K}KuKkKiKjKhKfKiKqKwKwKqKjKdKZK?KBKOK[KdKhKiKfKgKeKdKdKdKfKgKeKaKaK`K\K^KcKaK]KZKYKXKXK\K]K\KXKTKRKOKNKSKPKPKSKTKNKFKGKKKLKKKIKIKIKHKFKEKBKEKNKNKRKRKJKHKKKOKOKKKKKJKKKSKPKšKφKρKοKιKλKξKμKξKξe]rI(KGKGKIKHKIKHKEKGKFKCKAKCKGKDK?K?K9K%K-K8K;KKKEKHKHKJK@K9K:K2K&K$K!KKKKK%KRKEK5K6K8K2KKKKK&KUKKK8K&K"K%K*K.K.K2K3KKKWKCK:K5K8K6K9K7K7K:K;KRKWKDKWK{K{KzK{K}K|KzK|KuKXKRKIKpKoK^KMK?K;K:K=KCKSKSKJK>K7K=KKK-K)K'K&K"KK(KKK‡KˆK‰K†KˆKŠK†KƒK‚K‚K‚K€KzKqKhKjKiKfKfKlKrKwKrKjKdKWK;KDKSKaKhKlKmKjKgKdKdKeKeKfKgKfKbKbKcK`K_KbKaK\KZK[K[K\K`K]K[KXKSKRKRKSKTKRKSKVKSKIKHKKKNKLKLKMKMKMKKKIKHKFKIKQKRKSKSKRKLKKKOKTKPKOKQKPKTK]KΓKμKκKοKμKλKλKξKξKξe]rJ(KGKGKGKGKGKGKGKGKEKCKDKCKFKGKDK>KK:KXK}KzK{KzK{KmKHK7K@K=KKRKnKmKKKKK@KsK„K~KmKQK;K2K7KBKMKLKHKGK8K;KAK8K3K5K=KJKRKRKQKSKRKSKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKRKNKMKPKRKQKOKNKOKWKdKrK}K„K„K…K„K{KOKNKMKNKMKLKPKSK[KWKQKNKIKIKMKQKUKQKJKQKTKMKNKNKNKNKPKPKNKOKOKOKMKKKQK^KwK—K·KΠKίKαKέKΨKΣKΖK΅K₯K“K‰K…K†K…K†KˆKˆKˆKˆKˆKˆKˆKˆKˆKˆK†KƒK‚KKƒKˆKcKLKQKPKQKNKAKIKIKHKHKHKHKHKIKHK?KEKIKHKIKIKIKIKIKIKGKGKGKGKGKEKCKDKCK?K=K;K9K:K7K6KFKUKRKSKTKIK1K-K(KK K0KIK‚KŒKˆKŠKŠK†K‰KŠK„K„KKKKK|KvKkKiKjKfKbKgKmKsKuKlKaKRK9KIK\KeKjKoKqKqKeKaKcKfKfKfKfKdKdKdKeKcKaKbKaK\K[K[K[K^K`K\KXKVKPKQKUKUKTKWKWKWKPKJKNKPKOKMKNKOKQKOKJKJKJKJKNKUKTKUKVKSKOKOKQKWKXKUKVKWKRK‚KγKηKγKλKσKμKμKξKξKξe]rK(KGKGKGKGKGKGKGKGKFKCKDKEKGKFKHKCK?K?KK=K;KJKsKyK}K~K|KzKzKyKyK}KeK;KKKKEKCK?KBKJKJKHKIK>K:K:K1K%K$K!KKKKK-KPKCK7K6K6K/KKKKK0KYKGK7K$K#K%K*K.K/K1K2KLKWKBK7K4K5K7K7K8K6K0K7KUKWKCKVK{K{K{K}K}K}KvKhKWKWKRKDK=K=K?K@K=KK>K:K:K:K6K;KSKSKRKSKSK8K0K/K)K3KKKSK‚KKˆKŠKŠK†KˆKŠK…K…KƒK‚KKK|KyKnKiKgKgKcKaKhKqKvKkKaKNK:KRKcKjKlKmKpKqKfKbKeKgKhKhKgKfKeKeKfKfKbKaK`K^K_K_K_K`K`K\KXKUKTKVKWKVKTKXKZKUKNKMKPKRKSKOKQKSKRKOKNKNKMKNKTKYKUKVKWKVKRKSKSKVKXKTKVKeKyKΖKνKιKδKγKμKςKνKνKνKνe]rL(KGKGKGKGKGKGKGKGKFKBKFKGKGKGKFKGKDK?K?K=KKdK~KzK{KzK|KbK>K8KAKKTKWKCKWK€K~KzKqKcKRKEK;K@KXKSKBK=K=KK?K@KRKTKJK-K+K8K9K8K:K9K0KK KHKQKMK$KKKKKK#K*K 
+KKKPKNKEK-K#K%K)K(K)K/K2K6K8KKK?KAK8K5K6K5K6K7K7K7K7K8K8K7K:KKKKK>KKVKTKUKVKQK5K0K7KDKHKCK}K‘KŒK‹KŠKˆKˆKŠKˆK†K‡K…K€KKzKvKtKlKkKfK`KcKcKfKqKpKaK=KBKaKeKjKkKkKpKpKhKdKfKjKkKjKhKgKfKfKfKfKdK`K_K`KaK`K_KbK`K[KYKYKYKWKXKYK[K^KWKRKQKTKUKTKRKRKSKSKQKPKPKQKTKWK[KWKXKYKVKUK[KYKeK“KΝKρKρKκKμKζKήKεKκKίKΪKθKοKμKμKνe]rN(KGKGKGKGKGKGKGKGKDKCKCKGKGKGKIKIKIKIKDK6K8K?K3K0K9K;K:K9K8K7K7K7K6K8K:K6K7K9K8K7K7K6K4K2K4K9K;K:KK=K4K*K#KK"K/KPKOKJK:K5K3K%K(K)K-K6KKKEKVKOK*KKKKKKKKKKKLKQKKK9K/K2K3K9K=K=K?K>KKHKOKNKHKIKPKPKRKVKZK\K_KbKeKiKUKNKCKYK€KtKhK]KUKMKAK;K?KIKLKLKHK;K3K(K*K;KQKiK|K‚K€K~KK}KMKNK2KK&K)K8KEKJKNKIKEKFKBK;K2K1K7K@KKKQKTKQKPKPKPKQKQKPKQKNKNKKKJKPKRKQKQKPKMKNKLKPKQKQKDKK+K:KKKDKAK:K2KFKHKIKEK9K;K9K-K%K"KKKKKK9KRK>K6K4K6K)KKKKK9KWKDK2K#K$K&K)K,K.K.K2KQKTK?K7K4K5K5K5K5K4K5K:KRKSKCK;K5K5K;K:KKEK;K8K/K*K.K>KVKnKK„KKSKKK;KQKyKWK=K-K/K;KFKJKJKGKGKFK?K8K/K+K2KAKLKRKRKOKMKMKOKMKMKNKPKOKPKQKQKOKNKMKNKKKKKOKPKQKNKRKFK>KPKOKMKMKPKEK*KPKRKRKMKHKFKNKQKRKNKPKFKRKOKQKPKPKPKPKNKNKMKNKMKNKNKNKNKNKNKNKNKNKNKNKMKPKQKOKPKNKJKKKTKmK‰K°KΠKδKκKιKθKζKδKΩKΙK΄KŸKK‰K…K†K€K[KTKTKSKRKKKCKLKKKLKKKKKLKJKHKJKKKKKKKKKJKHKIKIKHKHKHKHKGKFKFKDKCKBKAK?KK:K6K8K4K.K1K0K1K.K:KK(K>KGKCKAK:K0KFKGKIKEK:KKRK>K6K4K5K(KKKKK;KTKCK2K#K$K&K)K+K-K.K2KQKSK?K7K4K4K4K4K5K3K4K:KRKSKCKKFKJKHKDKGK>K/K4K/K1K9KCKLKNKPKQKMKKKPKRKOKKKNKQKNKMKNKNKKKJKNKPKQKPKQKLKKK?KCKEKFK=K8K6K:K7K/K+K5KGKKKFKFKKK~KK…KƒKqKXK=K-K2K=KFKIKBK=KDKFKAK:K3K2K6KBKJKPKPKPKNKIKFKNKQKNKNKPKPKNKKKNKNKMKAKIKQK>KLKOKNKMKMKSK-K@KWKQKOKLKBKFKOKSKUKWKKKQKSKQKQKQKQKQKQKQKQKNKMKNKNKNKNKMKMKMKMKMKNKNKMKNKNKMKNKNKNKNKNKNKOKNKKKJKHKRKkKK²KΝKΩKβKζKζKεKδKΩKΝK¬K€KhKZKOKPKHKIKPKMKNKMKKKLKKKKKLKLKKKHKJKLKJKHKIKIKIKGKGKGKGKDKDKBK?K=K7K7K9KBKMKOKOKSKUKUKUKTKCK.K.K+K"KnKšKKŽKŽKŒKŠKŠKˆK…K‡KŠK‡KK~KzKvKpKhKeKeKcK^K`KgKmK[KCKTK^KaKbKfKiKoKmKkKlKkKkKkKkKkKlKkKjKhKcKcKgKgKfKgKeKaK]K\KbKaK`K`KbKbK]K[KXKWKYK[KYKVKWKWKVKUKTKTKXKZK`KKΐKΕKΕKΡKΥKΞKΨKγKΫKΰKθKΦKΥKήKδKνKέKδKζKβKίKαKθKεKλKλKλe]rR(KDKDKAKDKEKFKFKGKFKCKCKDKCKEKGKGKGKGKGKGKGKGKCK?K=K:K8KAKEKFKFK=K7K;K/KKKKK#K*K/K4K8K8K8K:KKGKGKIKAK;K>K6K'K$K!K KKKKK?KOK=K6K4K4K$KKKKK?KSKAK0K"K$K%K*K+K-K-K1KPKRKAK5K2K3K4K4K4K3K3K;KTKRKCK;K4K0K1K&KKK%K.KAKUKPKEKLKEK9K,K KKKK,KQKQKIKQKHK/K*K;KKKJKEK,KKKK=KBKNKSKRKUKNKK΄K;KύK K +KK KKK5KVKNK;KKKKKKK)K$K 
K,KIKMKIKsK£K‘K‘K¦K¬K΅KΊKΐKΔKΦKΈKOKNKMKKKKKK&K3K›KΙK\K,KFKNKNKIK‘KΧK‰K6KkKKεKΨKnK#KEK†KkKIKIKJK~K‰K‹KIK&K_KŠK‡KˆK‡KŠKyKKKJKFK=KrK‹K†K†K‡K‹K‰KxKXK3K'K$KKBKBKBKDKDKCKEKGKGKFKEKGKHKDKDK@KxKŽK‰KŒKKKŠK‰KKŒK‰K‹K‰KUKCKFKFKDKDKCKDKGKNKNKKKIKEKCKOKJK‹KΠKΙKΛKΛKΛKΘKKζKπKŸKCK4K/K*K8KFKFKHKNK?K5K7K5KlK“KŽKKcKFKK@KBKBKBK?K;KBKAK8K8K:K7K9KKLK]KlK}KK˜K¦K³KΒK±KUKNKHK¦KΆKPK"K&K*K%K&KnKΫKlKEKMKOKHKˆKΖKΎKZK@KKΖK’KwK.K:K}KwKHKJKEKrKŠKŒK_K'KQK†KˆK‡K‡KˆK…KRKIKJK?KaK‹K‡KŠKŠKŠKŠKˆKŠKTK'KRKiKGKIKEK;KIKPKGK>K:K7K7K5K4K3K5K2K)K6KEKBK>K;K=K?K?KAKBKCKCKCKCKCKGKGKGKEKDKAK?KOKgK~K‹KŒKŽKŒK‹KŒKKŒK‘KqKBKDKCKCKDKCKHKDKLKOKNKKKFKCKIKMKVK»KΞKΚKΛKΛKΛKΚKήKθKΧKeK8K1K-K,KAKGKIKMKLK:K7K=K™KžK‹KK€KNKBK8K6KAK‚KKŽKKKaKVK΅KΖKXKFKŒK~KƒKŽK‹K‚KWKQK²KΙKΔKΗK¨KˆKWKLKNKNKNKMKNK§KΝKΓKΝK’KFKOK\KjKiKbKYKVKKKIK6K&K8KFKCKDKIKIKIKGKGKFKCKAK@K?K?K@KDKIKIKIKIKHKHKIKHKFKFKFKHKIKFKFKGKFKFKFKFKFKFKFKFKFKFKFKFKGKFKGKGKDKCKDKCKCKCKDKCKDKEKBKCKDKCKCKCKCKDKDKDKDKCKDKDKBKCKEKGKGKHKFKDKCKBKNKdK‰KKΟKαKηKζKβKΰKαKαKαKαKβKεKζKγKΨKΔKŸKaK.KKKKK K +KKKKKKKKKKKe]ry(K K)K=K;K9KK>K(K)K6K8K:K;KKBKBK@K?K>K@KBKBKCK?K:KBK@KK1K:KEKBKBKCKDK;KFK@KDKPKQKPKPKQKRKRKSKRKQKIKFKGKHKKKNKMK?K:KLKEK9KKKKK!K"K$K!K-KMKKK@K,K$K&K%K%K%K%K&K#K6KPKMKDKbKŠKKK)K`KˆK„K„K†KzKQKOKGKmK‹K…K„K„K…KˆKDK.KfKbKLKMKFK1K/K1K1K0K0K0K1K2K3KIKMKMKDK4K4K3K2K6KIKhK~K™KΐK§KVKNKMKmKͺKΖKK@K,K2K)K,KšK¬KNKLKPKHK|K½K―KYKK@KBKAKBKCKDKDKDKDKDKDK>KCKDKIKZKnKƒKŽKŽKKŒK‹KKKEKCKDKCKDKCKFKDKKKOKLKKKHKBKGKNKKK£KKΙKΛKΛKΙKΘKΣKΩKΨKzK;K2K/K*K?KGKGKJKMK@K5KaKΓK£K‹KŠK‹KYKCK7K9K6KlK”KŽKŽK“KuKHK‘KͺK:K:KxKŽK{K„K–KŒKhKGK‘KΚKΓKΗK±K”KgKLKNKLKLKMKFKKΜKΖKΒK¬KYKDKDKJKWKfKlKeKOKHKAK KK"KgKnKIKEKFKIKHKGKFKFKEKEK>K8K=K=K@KDKHKIKHKIKHKHKGKFKGKGKHKHKFKGKFKFKFKFKGKFKFKFKFKFKFKFKGKEKDKDKCKCKCKCKCKCKCKDKDKCKCKCKCKDKDKDKCKBKBKBKBKBKDKDKDKCKDKDKDKCKEKGKGKGKCKAKHKZKwKKΑKΪKηKεKΰKήKήKΰKΰKΰKΰKβKζKηKβKΞK§KsK?KKKKK K +KKKKKKKKe]rz(K"K9K:K9K;K2K K;KAK*K$K3K5K9K9K:K9K1K%KKKKK!K8K8K5K/K)K)K$K K K%K1K3K3K5K>KBK?K?K>K?KBK@K@K>K;KDK=KK°K—KPKNKGKyKΣK³KUK@K“KžK‡K…K7K-KrK„KNKLKDKdKŠK‹KsK(K@K€K‰K‰KŠK‡K‹K_KEKKKBKRKˆK‹KŠKŠKŠKŠKˆKŽKqK*K?K}KTKEKDKXKkKDK.K7KLKSKMKFKBK;K8K7K8K>KGKFKCK4K'K'K.K9KKAKBK@K@KCKCKCKCKDKDK?KEKHKHKGKGKPKcKxK‰K‘KŽKK‡KNKAKDKDKCKCKEK@KFKPKKKKKHKDKCKNKJK‡KΟKΙKΛKΚKΘKΘKΛKΡKΑK}KFK4K/K)K6KDKGKIKMKFK9K—KΝK­KŒKŽKKnKHK;K:K5KUK‘KŽKŽKK†KMKvKpK3K6KeKKK‘K—K’K|KIKoKΕKΔKΘKΈK‹KyKMKMKLKLKLKJK_KΏKΗKΑKΘK‚KAKHKHKGKEKNKbK]KEKFK1K K,KKΒKnK]KRKFKFKGKGKGKEKDKK;K=KBKGKIKIKHKGKFKFKGKHKHKFKGKFKFKFKGKFKGKFKFKFKFKFKFKGKEKCKDKDKDKDKDKDKDKDKDKCKDKCKCKDKCKCKDKCKAKAKAKAKBKCKDKDKBKCKCKCKDKCKBKCKEKFKFKDKEKBKBKNKjKK³KΤKδKζKαKέKάKΰKαKαKίKίKδKιKκKΫKΌK’K_K0KKKKK +K +K KKKe]r{(K6KK>K>K?KBKAK>KKBKAKAKDKGKFKJK=KGKPKPKQKPKQKSKRKRKSKRKPKRKSKMKIKEKHKJKKKKKIK@K$KKKK K"K$K#K0KMKIK@K-K#K&K%K%K%K&K$KK7KPKKKBKbK‹KJK)K`KˆK„K„K†KzKRKOKHKjKˆK…K„K‡K‡K‰KIK)KeKgKOKLKWK‡KUK-K2K2K3K3K2K0K0KCKMKKKDK*K7KaKK¦K­K²K―K­KΏKΒKaKLKHK‘KηKΗK‘K―K·KRK8K3K(KYKΛKtKIKGKyKαKΈKXKEKK“KˆKŒKCK)KiK‰KSKKKEKaKŠKŠK{K0KK:K7KKK‡K“KKK’K‹KVKUK΅KΘKΕKΘKhKsK`KIKMKLKLKLKJK KΚKΒKΖKŸKJKHKGKGKGKEKCKHKIKIK=K!KZKΕKΖK‹KdKyKkKSKHKEKFKFKGK=K,KK=KKBKDKGKGKHKHKGKGKFKFKFKFKGKGKEKGKFKFKFKFKFKFKGKFKFKFKFKDKCKCKCKCKCKCKCKCKCKCKDKBKAKCKCKAKAKAKCKDKCKDKDKCKDKDKDKDKDK@KBKDKCKCKEKFKIKHKFKBK@KIK]KƒKͺKΛKΰKζKζKΰKέKήKαKΰKίKίKαKδKκKζKΥK¬KzKIKKKKKK +K 
e]r|(K9K=K:K;K%K)K?K6K$K.K4K6K:K9K9K7K*K!KKKKK2K>K7K4K-K(K'K"K"K#K4K9K)K&K.K2K3K8K=K?K?K@KAK@K>KKCKAKAKEKFKDKJK=KHKQKQKQKPKQKSKRKRKRKRKQKRKRKSKQKNKJKEKBK;KJKMKGK3K!KK!K#K$K#K1KMKHK?K,K#K&K%K&K%K%K$K!K7KPKJKBKbKŒKKK)K`K‡K„K…K‡KzKQKPKIKjK‰K…K…K‡K‡K‰KIK)KeKhKPKLKVK…K‚KKK,K2K2K2K4K5K3KCKMKKKEK1KK2KsKfKCKFKLK…K‹KŒKKdK2K1K3K@KOKQKIKCKAKDKEKDK@K8K8K8K5K.K*K)K1K:K?K@K?KCKDKCKGKCKAKGKFKGKHKIKIKHKGKEKLK]KyKeK@KDKCKEKCKDK2K+KMKLKNKKKEK@KIKLKVKΈKΝKΘKΙKΙKΗKΜK¬KqKeKUKK8K7K;KK•K‹K‚KŠK”KhKGK”KΛKΒKΠK}K>KdKHKIKIKKKLKFKxKΙKΓKΓK»KbKCKHKFKGKGKGKGKIKIKGK,KˆKΛKΔK­K^K_KoKxKpK^KMKFKGKEK:KCKFKEKDKCKBKAK>KK;K'K'K1K3K7K9K:K7K/K"KKKKK-K@K:K6K.K)K'K#K!K K,K9K+K&K(K$K)K2K6K:K>KBKAK>K@K?K9K=KDK6K.K=K9K7K;K=K=K8K#KK8K.KAKDKBKBKFKGKHKIK=KIKPKSKRKPKQKSKSKSKPKQKSKRKSKSKSKSKSKOKDKK1KnK‹KˆKŠK‰KŒKxKHKIKFKCKxKK‰KŠKŠKŠK‰KŠKŠKGK/KkKpKGKIKEK|KŒK‡KŽKrK1K>K[KFK1K=KLKRKNKGKEKDKBK8K6K7K8K:K9K2K+K(K*K5K=K>KBKEKEKAK?KFKGKGKFKFKGKHKIKJKHKEKFKIKCKDKDKCKDKCK>K KFKLKLKKKFKBKFKNKMK£KΠKΘKΘKΙKΘKΝK°KfKaKVKBK0K0K,K=KIKIKLKLKœKΛKΔKΖK©KK‘KKgKFK:K;K:K^K‘K‘K‘K”K€KNKDK5K7K4KiK–KK†KŒK–K}KJKsKΕKΓKΚKwKK@KMKHKIKHKJKLKZKΉKΖKΕKΜK‡KBKHKFKGKFKGKFKFKIKGKJK°KΖKΓKΔK~KZKYKbKoKwKmK\KMKEKGKGKFKGKEKCKDKDKDKCK=KK>K>K8K?KBK;K:KK8K0KBKDKCKBKEKIKKKHK=KFKMKRKQKPKQKRKQKQKQKRKRKRKRKRKRKRKRKUKPKIKFKFKJKLKNKKKAK0K"K K1KOKIK>K+K$K%K&K%K&K$K%K$K6KQKNKDKaKŽKMK)K`K‰K†K‡K‰KyKPKQKEKfK‹K‡K‡KˆK†KŒKPK(KaKkKNKNKVK…KŠK|K5K/K/K0K3K5K4KBKLKLKJK9K7K7K7K7K5K5K5K0K+K-KEKQKIKcKΆKΖKΠKΥKΩKΰKΰK¨KgK?K,KFKPKJK\KΊKŸKkKnKKˆKˆKŽK[K#KPK‹KcKHKKKOK„KŠK‹KJK,KgKK‰K‰K‰KŒKKLKHKHKAKpKK‰KŠKŠKŠKŠK‰KKTK-K]KyKFKHKBKsKŽKŠKK|K5K8K|KKuKVK?K8KDKNKGKEKCK@K;K8K6K8K9K9K:K5K,K(K)K0K9KBKDKBK=KDKEKEKFKFKEKIKHKGKIKKKHKFKDKCKDKCKDKBKBK!K=KNKLKKKGKCKEKLKHK†KΟKΙKΚKΙKΙKΚKΐKnK`KWKJK5K1K.K6KEKJKMKJK~KΚKΔKΖK·K“KK’KxKMK?KK;K;K?KCKFKHKGKFKFKFKFKGKFKFKFKFKFKFKFKFKGKFKEKEKGKFKEKCKBKCKDKCKCKCKCKCKDKCKCKBKAKCKDKCKCKBKCKDKBKAKBKCKBKBKBKCKDKCKCKDKDKDKCKCKDKCKCKDKDKEKFKDKAK>KDKPK[KiKqKzK„K‰KK—K K¨K―KΆKΉKΐKΙKΥKάKΨe]r(K8K5KK0KBK0K!K.K0K5K;K=K9K6K)KKKKK$K9K:K7K2K*K*K%K!K K'K:K2K*K)K$KKKKK#K3K:K:K=K?K=K=K9K?KAK;K=KK4K%K>KAKAK>K8K2KCKDKDKCKFK>K?KHK=KFKMKQKPKQKPKPKPKPKRKSKSKSKRKRKRKRKRKRKSKQKOKJKGKHKJKMKNKLK>K)K2KMKIKAK+K$K&K&K%K'K&K%K#K7KPKMKCKaKŽKMK*K`KŠK‡K‡K‰KyKPKQKIKfK‹KˆK‡KˆK†KŒKOK'K_KkKOKNKTK…KŠK~K2K3K7K.K4K6K4K@KLKLKKK8K3K4K4K4K0K,K(K6KeKyK`KMKLK\K›K³KΞKΠKΠKΠKΔKΒKΚK•K:K>KPKKKQKžK K|K}KK‰K‰KŽKeK%KJK‹KkKHKKKMK€K‹KKTK,K_KKŠK‰K‰K‹K…KRKGKGKCKgKŽK‰KŠK‰K‰K‰KˆKKaK/KTK}KIKFKBKhKKŒKŒK‡K@K2KmKKKŽKKcKAK=KHKGKAKFKGK@K=K9K8K9K9K8K9K9K-K'K&K8KEKDK=K?KAK@KCKGKGKFKFKFKGKHKGKGKDKBKDKDKDKAKEK)K2KOKLKLKIKDKCKKKKKkKΘKΚKΛKΙKΙKΘKΙK„K[K\KPK:K2K.KPKaKHKJKMKbKΐKΗKΔKΐK™KKK‡KUKDK>K:K’K¨KŒK’KKKkKIK?K5K6K=KuK‹K‹K“K–K•KgKDK’KΟK‘K4KEKGKAKJKIKHKJKMKEKrKΗKΔKΔKΔKfKBKHKGKGK>K;KDKFKIKOK¬KΚKΓKΖKΈKZKVKgKcKaK_K]KcKeK^KGKAKCKFKJKFKCKCKDKDKDKDKBKBK@K=K=KK1K&K>KBK@K>K8K3KBKBKCKDKCKBKFKFK=KGKMKOKQKQKPKQKQKQKRKRKQKRKSKRKRKRKRKRKSKRKSKRKMKHKGKIKKKKKOKHK?KDKEK>K,K$K%K&K&K%K$K%K#K7KPKKKDKdKKMK'K_KŠK‡K‡KŠK{KPKQKIKfK‰K†KˆK‡K‡KKRK&K]KmKOKMKQK€KŠK€K2K4KfKTK0K3K5KCKNKJKKK8K4K1K-K+K*K*K1KCKcKžKwKIKMKUKK–K 
K΅KΑKΘKΙKΟKγKσK”K:KRKOKTK‡KbK[K~KK‰K‰KKoK(KBK‡KsKEKIKJKyKŠKK\K)KUKŠKŠK‰K‰KŠK‰KVKDKEKBK_KK‰KŠKŒKŒKŒK‹K‘KlK2KLK€KOKFKBK`KKŒK‹KŒKMK3KbKK‹KŠKŽK‘KŒKnKIKFKDK@KGKKKLKGKBK;K;K7K6K8K7K7K3K7KDKDK=K=KAK@KAKDKDKCKDKFKDKDKFKFKEKBKBKCKDKAKEK2K&KLKKKLKIKDKDKIKKKUKΊKΞKΚKΙKΙKΗKΞK™KYK]KTK>K1K/K„K™KwKZKLKQK«KΚKΔKΖK¦KŒK‘K’KbKGK;KSK½K·K˜K“KK”KyKMKDK6K8K8KFKKKAKLKlKjKWKGKoKΘKbK1KKEKHKIKIKJKJKVK΄KΘKΑKΛKŽKBKHKEKDKCKCKCKEKJKFK‚KΛKΒKΒKΙKwK6KBKVKjKlKeK_K[K]KOKCKCK5K-K=KGKGKCKEKDKCKDKDKCKCKBK@KKKK2K7KCK?KK,K'K3K2K9K>KK-K,KCKCK@K?K5K2KDKCKCKDKFKFKGKDK:KIKHKIKNKOKPKQKPKQKQKQKSKRKRKRKRKRKRKRKSKSKSKRKRKSKTKSKMKGKEKIKJKHKKKHK5K%K$K&K&K&K&K&K$K7KPKKKAKdK‘KLK&K^KˆKˆK‡KŠK|KPKQKGKcKŒK‡K‡K‡K‡KŒKVK&KZKlKKKMKPK€KŠK„K9K4KpK‹KŠKvKCK;KMKKKJK9K6K:K9K9K9K8K6K3K3K3K=KKKIKHK8KEKvKK‘K±KΒKΐKΐKΡKέK°KRKNKGKŠKgKRK‰KŠK‰K‰KŒK}K2K8KzKKHKIKDKmKŒKKpK.KFK…KŒK‰K‰K‰KKeKEKJKBKNKˆKKŒKKKKŒKŽKK>K=K{KdKCKEKMK‡KK‹K‘KhK4KNK‰KKŒKKŒKŒKK`KBKFKBK@KEK:K?KLKOKLKLKDK>K;K:K7K6KBKDKBK8K/K*K,K3KK9K9K;K:K@KGKIKGKGKFKFKGKGKFKCKEKGKGKGKGKFKCKDKCKCKCKCKCKCKDKDKEKCKCKCKCKBKAKBKBKBKAKBKBKAKBKBKAKAKBKBKBKBKBKBKBKAKAKBKAKBKBKBKBKBKBKBKBKBKAKAKAKAKAKAKBKAK>K>K?K@KDKBKCe]rƒ(K1KCK2K&K0K/K6K>K?KK)K/KBKBK?K>K5K7KEKCKCKCKFKFKGKBKK1KmK‹K†KŠK‚K]KJKLKLK5K/K3K5K6K6K7K8K7K5K4K>KKKIKHK9K;KEKZK|K‘KjKBK‚KKνKΰK]KKKKKoKoKfKKŠKŠK‰K‹KƒK8K4KtK„KMKHKDKgKŒKŠK{K2K>K€KŒK‰KŠKŠKKlKHKKKCKGKKKŒKKKKŒKK‡KEK6KtKnKBKGKEK€KK‹K‘KuK8KFKKKŒKKKŒKKkKAKFK@KKK†KsKSK?K=KIKSKQKMKJK@K7K9K@KDKAK:K;K;K4K-K)K-K9K>KBKBKBKAKCKDKDKCKAKAKAKFK(K1KOKKKKKHKDKEKLKJKhKΘKΙKΘKΙKΘKΖKΏKxKUKZKNKSK©KbK\K”KK•KyKXK·KΙKΕKΓK£K’KKuKQKhKΔKΔKΙKŸKKŒKqKKKFKKK?K6K9K9KAKJKJKIKGKHKIKLKBK3K2K4K8KGK?KEKGKGKHKHKIKSK²KΙKΓKΚK—KDKEKDKCKBKBKBKDKIKDKtKΙKΔKΕKΙK˜KeKeKWKDK6K0K.K6KIKJKEKHK=K$K!K{KΜK΅K|KsK[KKKEKDKCKDKDKCKDKBK?K-K7K@K;K:K=K@KDKFKGKGKEKFKEKCKEKFKFKFKFKEKCKDKCKCKCKDKDKCKCKCKDKCKCKDKDKCKBKBKAKBKAKAKAKAKAKBKBKBKAKAKAKAKAKAKBKBKBKAKBKAKAKAKAKAKAKAKAKAKBKBKBKBKAKAK@KAK?K?K?K>K@K?K?e]r„(K?K5K$K0K2K3KKK;K7K3K8K;K9KK?K>K>K>K?K>K?K>e]r…(K:K&K*K4K8K=KBK>K?K6K#KKKK!K=K9K4K8K/K*K&K!K"K*K;K2K+K+K&K KKKKKK(K'K#KKK K"K%K'K,K0K3K3K,K1K9K4K4K9K:K:K:K$K5KBKAK?K=K/K:KDKCKBKDKEK>KEK@K:KGKKKPKQKPKPKPKPKPKPKQKPKQKSKRKRKRKRKSKRKQKSKRKRKSKRKSKRKRKSKRKSKNKEKFKIKHKMKOKJK>K+K#K"K5KNKKK@KeK‘KLK&K^K‰KˆKˆK‹K|KPKNKFKbK‹KˆKŠKŠKˆKŒK^K%KWKqKPKOKNK}K‹K‹KFK-KhKŒKˆK‰KŒKyKLKJKMK=K3K6K0K-K+K+K-K/K1K1K9KLKIKIK:K2KCK5K(K,K]K~K›K΄KΕK¬KUKHKJKPKdKaKlK‡K‰K‹KŠKKJK,KeK‹KTKHKFKZKŒK‹K‡K?K5KsKŽKŠKŒKŒKK}KIKHKGKBKqKKŒKKKKŒKŒKKZK0K\K~KFKIK?KlK‘K‹KKˆKJK:KnKKŒKŽKKKK„KIKEKCK?KuK’KŒKK‘K…KlKLK>KEKVK^KVKKKCKCKBK9K8K8K7K;K8K1K2K,K*K-K7K>KCKDKBKAKAKAKCKK?K>K>K?K>K>K?K>K>e]r†(K)K#K1K5KK=K7K)KKKKK6KAK8K9K3K+K%K#K"K'K=K7K.K,K(K!KKKKKK'K&K#KKKK#K%K(K)K*K-K2K4K8K+K.K3K5K:K9K;K7K#K8KCK@K>KK:K9K9KK>K?K>K?K>K>K>K>K>K>K>e]r‡(K%K/K4K8K=K;K:K7K*K KKKK1KAK:K;K3K,K&K"K"K'K;K7K.K-K)K"KKKKKK(K)K&KKKK!K#K%K(K*K+K-K/K0K3K8KK K5K:K9K:K8K"K;KCK@K>KK_K’KKŽKKKKŽK‘K•KaK6K8K=KBKDKCK>KNKJKDK@K:K8K5K6K=KK;K=K>KK?K>K>K?K>K>K>K>K>K>K>e]rˆ(K.K5K9K:K=K=K7K.K"KKKK,KAKK/K+K)K%KKKKKK(K'K$KKKK K"K&K)K+K*K-K2K0K(K*K&KK&K6K9K9K:K6K#K=KBK@K>K=K0K@KDKCKFK&K KK+K:K@KJKLKMKNKQKPKPKPKPKPKPKQKPKPKRKSKRKRKRKRKSKRKSKSKSKSKSKRKRKRKSKSKSKRKOKNKPKNKFKBKFKLKLKMKMKJKHK>KfK‘KLK%K[KˆK‰K‰K‹K|KMKNKFK^KK‰KŠKŠK‰KKeK%KPKtKPKQKJKwKŒKŒKPK)K^KŒK‰K‰K‹KKMKKKKKNK„K‹KiKDK7K1K,K,K)K'K.KHKHKKK?K(K 
KKK#K6KIK^KZKuKK^KGKMKGK9KHKMKaKfKvK—K‘KfK+KNK‹KiKGKIKJKKKK\K.KZKKKŒKŒKKŒKYKEKFKAKWKŒKKŒKKKKKK}K=KBKK_KDKFKMKˆKKŽK“KlK:KPK‡KŽKŽKKKŽK’KgKAKEKAKTKŒKŽKKKŽKKKK“KqK3KCKgKOKBKDK@KTKVKOKJKFKAK9K3K4K6K8K;K9K?KCK@K@KBKAKEK1K(KNKLKKKJKFKDKKKMKUK΅KΜKΘKΘKΙKΔK€KjKSKVKSKGK;K;KFKoKhK[KTKKKKΚKΖK―K[KKKOKKKMKQK§KΛKΕKΎK`KDKIKIKKKKKJK9KKΛKΏK^KCKIKHKFKCKGKKK=K3K6K9K'KK5KGKCKDKCKHKGK^KΗK‡K0K3K?KNKRKYKaK_KOKCK;KHKFKuKΙKΓKΓKΘK|K8K>K>K;KK>K>K?K=K>K?K>K>K>K?K?K>K>e]r‰(K6K9KK:K0KAKCKCKDK$KK KKK8KNKMKMKNKQKPKPKPKPKPKPKPKQKPKRKSKQKQKQKQKSKRKRKSKSKSKSKRKRKRKSKSKSKQKNKOKOKSKSKLKEKCKGKLKOKMKMKEKdK’KNK%K\K‹KŠK‰KŒK|KJKNKFK^KK‰K‰K‰K‰KKhK%KNKvKQKOKGKvKŒKKTK(KZK‹KŠK‰KK„KOKIKJKNK~KŒKKˆKyKeKOK>K5K1K0KFKHKIKAK$KKKKKLKiKfKuK…K…K_KGKMKIK;K;KFKQK\K]KƒK“KnK.KHK‰KpKGKIKHK{KŽKKfK,KSK‹KKŒKŒKŒKKaKCKHKCKQKˆKŽKŒKŽKKŽKŽKŽK„KAK=KyKjKCKGKGKK‘KŽK“KyK>KGKK‘KŽKKKŽK’KqKCKHKDKKK…KKŽKKKKŽKŽKK}K;KKDK?K>KLKYKZKSKMKJKDK;K6K4K5K9K?KCK@KAKBKAKCK9K"KHKLKKKJKFKBKIKNKKK›KΟKΗKΙKΙKΔK­K|KVKTKSKIK:K=KK>K:K;KIKLK K]K-K2K@K\KYKVKMK>K2K-K3KCKGKXKΆKΗKΒKΘK§KDK;K=K=K:K:K:K9K7K:KGKIKNK¨KΖKΓKΒKΓKnK@KNKPKUKVK]K]K\KZKWKXKJK>K?K@KBKEKFKCKCKDKCKDKCKBKBKBK?KK>K>K?K>K=K>K?K>K>K?K>K>K>K>e]rŠ(K/K9KK5K0K,K%KKKK KK%K*K&KK KK%K"K#K'K*K+K+K/K2K.K"K K"K'K7K8K8K:K9KK9K2KAKCKDKCK$KKK K +KK1KOKPKNKPKPKPKPKPKPKPKPKQKPKRKRKQKPKPKQKSKRKRKSKSKSKSKRKRKRKSKSKSKRKRKRKRKRKRKRKRKJKDKGKGK;KIKQKOKhKHK&KZKŠKŠK‰KŒK|KHKKKEK]KKŠKŠKŠK‰KKjK%KLKuKQKOKGKuKŒKŽKWK'KXKŠK‹K‰KŠK…KRKIKJKLK}KŽKˆK‹KKKKfK/K=KAKGKIKKK@KKKKK.KK5K=KIK/KlK–KuK/KDK…KwKGKJKEKuKKKrK0KIK†KŽKKKŒK‘KkKDKHKFKKKƒKŽKŒKŽKKKŽKKŒKKK7KpKuKCKHKDKwK’KKKƒKDKCKzK’KŽKKKŽK‘K|KGKGKDKEK|K’KŽKKKKKŽKK‰KHK7KoKKBKCK=KJKKK@KIKYK\KWKQKKKDK?K8K6K=KDK@KAKBKAKAK?KK@KMKKKJKHKCKCKKKHK€KΟKΘKΘKΙKΔK³KŒK\KSKQKMK=K:KKDKIKHKXK>K1K2K7KIKBK8K1K*K-K3K>KBKIKGK•KΛKΑKΔKΔKdK5K=K=K:K9K9K9K7K8KBKJKCK‚KΗKΒKΐKΖKKHKHKHKJKIKGKSK]K\KXKWKQKDKDKCK0K'K8KBKGKDKDKEKCKBKBKBKAKAK?K?K=K9K9KK?K@K?K>K?K?K>K?K?K?K>e]r‹(K5KK5K3K.K'K!KKKKK#K(K#KKKK&K&K$K%K)K,K/K/K3K.K$K!K K"K,K>K;K9K:K9KKZKKŠKˆKŒK{KLKJKDKZKKŒKKŠK‰KŽKmK%KHKsKQKQKJKrKŒKKZK'KWKŠK‹K‰KŠK‡KTKHKKKKKzKKŒKŒKŒK‹K‘KqK(K@KxKXKGKHKMKSKK +KKKKKDKoK‚KrKQKHKIKIKKnK’KŽKKKŽK‘K…KLKDKDKAKsK“KKKKKKŽKK‘KUK7KbK‹KJKCK=K\K’KuKSKCKGKXKXKXKTKOKJKCK?KBKAKBKAKAKAKDK$K4KOKKKMKHKDK?KIKHKeKΖKΚKΘKΘKΗKΉK•K^KQKQKOKEK7K:K@KJKLKLKOKMKKΙK·K{KJKLKJKHKMKVK°KΙKΗKΉKWKHKEK?K=KGK[KΈKΗKΖK»KQK:KAK@KBKAKFKKK=K5K8K7KK@K=KKAKBKBKBKBK@K>K>K>K>K>K>K?K>K?K?K?K>e]rŒ(K9K=K=K9K+K KKKK4KAK7K7K0K*K&K#K!K*KAK9K1K/K*K"KKK KK!K+K'KKKK"K%K%K'K*K*K-K1K1K+K"K"K#K"K#K0K@K;K9K:K9K;K9K:K@KBK@K=K9K@KCKCKEK@K#KKKKKKKK)KK‚KaKEKIKJKbK=K4KK%KOKfKxK‡KjKJKJKIKHKIK;KGK9K/K5K2KhKŽK…K;K7KxK„KIKIKDKfKKŽKK7KKAK?K?K=KBKLKCK6K8K9K?KVKWKRKOKNKLKJKIKHK8K1K3K3K0K/K7KK@K@K@K@K@K@K@K@K@K?K>K?K>K?K>K=K>K>e]r(K:K8K8K-K!KKKK.K@K6K7K3K,K'KK!K(KK:K?KDKCKEK>K#KKK KKKK-K:K;K>KHKOKOKMKPKQKPKPKPKPKPKPKPKQKPKPKPKPKRKSKSKSKRKRKRKRKSKSKSKRKRKRKSKSKSKSKSKQKSKTKTKKKDKBKGKLKLKNKKKYKtK‹K€KKKKKAKYKK‹KKK‹KKrK$KEKuKOKMKDKpKKKcK$KNKŠKKŒKK‰KYKIKKKKKuKKŒKŒKKŒKKzK,K:K}KdKEKHKCK#KQK_K!K3K:K]KƒKoKQKZKSKIKHKKK7K0K$K0K,KK=KK>K>K>K>K>K@KBKBKBK?K>K?K>K>K?K?K?K?e]rŽ(K8K7K0K$KKKK'K?K6K8K3K,K'K!K K$K9K:K3K/K)K$KKK K KK'K%KKKK$K#K$K(K)K*K+K,K1K-K%K#K$K"K&KK 
K1KDK9K9K9K:K;K6K9KAKBK?K;K:K@KDKCKEKKFK>KvK•KKKŠKFK6K7K3K3K=KKK^KUKAKBKAKBKAKBK?K!KCKMKKKKKGKAKCKOKGK~KΞKΗKΘKΘKΔK²K’K`KQKQKMK?K7K7K?KIKHKIKJKQK―KΏKKDK=K8K2K.KDK]K·KΕKΗK±KLKBKBKAKBKGKaK½KΔKΕKΉKYKUKfKjKjKfKPKIK>K:K5KzK‘K6K0K0K-K/K,K;KIKDK6K1K2K3K7K:K:K9K9K:K:K9K;KHKGKbKΐKΔKΓK½KYK9K?K?K?KKBKCKBKAKAKBK=K+K5K>K:K7K6K;KAKCKDKCKCKCKCKCKCKCKDKCKCKBKBKAKBKCKBKAKAKAKAKAKAKAKAKBKAKAKAKAKAK@K>K?K?K?K?K>K?K?K>K>K>K>K=K=e]r(K8K0K#KKKK%K>K;K6K6K/K*K"KK K5K=K3K1K+K#KKKK KK)K$KKKKK#K$K(K+K,K-K/K2K/K#K#K$K#K#KK KK;KBK9K9K9K:K;K6K=KCKAK?K:K=KDKDKCKEK;K$KKKKK K-K=KIKIKBK6K$KK4KKKRKOKPKPKPKPKPKPKPKPKPKPKQKRKRKRKRKRKRKRKSKRKQKRKSKRKRKRKRKRKRKRKTKRKRKRKSKRKSKQKJKBKDKIKKKMKJKKKHKGK@KXKŒKŒKŒKKŒKKvK(KAKuKQKMKHKlKKKkK%KHK…KKŒK‹KK^KJKLKGKmKKŒKKKŒKŽKƒK5K0KxKoKEKHKGK"KKIKJKeK~KyK^KfKgKcKKKGKIKJK>K1KK;K|KKKŽKKOK,KaKKYKJKFKSKŒKŒKKSK2KeKKŽKŽKŽKKŽKSKDKEK?KYKKKŽKKKKŽK‘KƒKCK@KKgKBKFKGK…K‘KK’K{KAKHKƒK‘K‘K‘KKŽK“KtKBKEKBKHKƒK”K‘K’K’K’K’K‘K“KŒKPK?KrKƒKDKEKKK>K=K=K=K;K9K:KDKJKDK€KΗKΐKΏKΖK KLK0KKKKKKKKKKK?KDKEK;K!K}KΗK½KΐK}KbKzKxKkK[KLKAKK>K>K=K@K?K>K?K>K>K=K=e]r(K1K%KKKKK8K>K8K7K/K(K"K K K2KK=K8K:K9K9K9K6K>KBKAKAK=KKHKFKYKΌKΓKΏKΐKΏKYKKKKKKKKKKKK1KEKDKAK4KœKΖKΎKΓKžKWKXKeKnKqKhKUKHKEKFKBKDKBKAK9K0K%K?KBKAK@K=K=K9K8K7K:KKAKAKAK@K>K?K?K>K>K?K>K?K=KK6K6K-K)K$KK K2K>K4K0K-K%KKKKKK,K*K KKK"K"K!K(K,K,K.K.K2K/K$K%K$K%K"K"K)K-K(K'K>K>K8K:K9K9K9K5K>KBKAK@KKWKKYK@KAKHKŽK’KK—KyKK;K9K7K:K=K?KAKDKBKAKAKAKBKBKAKAKAKAKAKAKAKAKAKAKAKAKBK@K?KAKAKBKAK?K?K>K?K?K>K>K?K>K=K=K=e]r’(K"KKKK/K=K1K4K.K(K$K KK,K=K3K0K.K)KKKKKK)K)K%KKKK$K%K(K)K,K/K0K4K.K#K$K'K#K$K*K-K-K,K'K)KEK@K9K:K9K:K9K5K?KAKBK?K:K=KAKCKCKEK7K#K"KKK K K K K KKKKKKKKKKK9KOKQKLKNKQKPKQKPKPKPKPKPKQKSKQKPKRKSKSKSKRKSKSKQKQKSKSKSKOKQKSKRKRKRKRKRKRKRKRKTKSKNKDKAK5K?KKKMKKKXKsKŠKKKK+K6KtKSKJKGKcKK‘KxK(K>K€KŽKŒKŒK‘KeKGKIKFKeKK‹KŒKKKŽKKBK+KiK{KIKIKFKbK5KPK\KEK;KK^KK‘K’K’K’K‘K‘K‘K”KuKDKPKKhK>KCK?KK•KK”K‡KIK?K=K:K9K8K9KdKTK?KBKAKAKAKBKK>K>KKAKAK>K>K>K?K?K?K>e]r“(KKKK)K=K1K,K0K+K$KKK'K;K2K*K*K'K!KK K KK(K)K"KKKK"K$K&K,K+K+K/K5K2K%K#K%K%K%K!KK"K!K KK.KHK=K9K:K9K:K8K6K?KAKAK?K:KK0KDKJKBK-K,KJKLKHKiKΣKκKξKΏKKKK’KvK4KDK‡KvKDKIKEKwK‘KKxK4KFKƒK‘KŽKKŽK“KuKEKHKFKDKvK’KKKKK’KK•KiK8KVK‡KNKFKCKZK’K‘K‘K”KeK;KYKK’K‘K’K‘K‘K“K`K@KEK>KRKK’K‘K’K’K“K’K‘K“KKKKHKKvK=KDK=KrK—KK’K’KUK@KCK@K=K:K@KgK`KAKAKAKBKAK@KAK$K8KKKHKIKHKDKAKIKIKaKΐKΘKΗKΖKΖKΔKΜK–KIKMKNKCK9K9KKBKEKzK‹KCKKGKAKrKK.K2K0K6K=K=K=KK-K@KQK^KcK]KVKWK[KYKVKPKKKGKAKCKAKBKBKAKAKAK@K@KAKAK?K?K=K8K7K5K9K=K@KCKDKBKAKAKAKBKAKAKAKAKAKAKAKBKAK@K@K@K@K@K@K?K>KAKAK>K>K>K>K>K?K>e]r”(KKK'KK;K2KAKDKDKEK5K%K"KKKKKK KKKKK +KKKKKKKKKK1KIKOKNKLKPKQKPKQKSKSKSKSKSKQKPKPKQKSKQKPKQKPKPKPKPKPKPKRKSKRKRKRKRKRKSKRKOKRKSKQKQKOKEK@KBKHKLKKKIKOKeKrK,K3KrKWKHKFK]KŽKK}K)K3KzK‘KŒKŒK‘KmKIKIKGK]KKKKKKŽK’KNK&K`K€KKKHKLK6K%K*K(K&K0K9K*K/K)K!KaK_KIKJK[KΎKΥKλKΚKŒKKŽK’K{K3K@K‚K|KFKJKDKqK‘K‘KK:K@KyK‘KŽKŽKŽK’KKGKFKGKCKnK”K‘K’K’K’K’K‘K—KvK:KLK†KWKDKEKQKK’K‘K–KoK?KQKˆK“K‘K’K’K‘K•KnK@KEK@KKK…K“K‘K’K’K”K“K‘K’KŒKQKBKsK„KDKDKKEKCK@K=K@KNKXKDKAKBKAKAK?KAK+K.KLKHKHKIKFK?KGKLKPK¬KΚKΔKΖKΖKΕKΘK±KPKIKOKJK:K9K;K@KCKKKRKLK_KŠKLK:KNK^K\KQKSKKKmKΖKΔKΘKKJK\KoK„KˆKOKiKΓKΔKΕK½K˜K•KK•KKKbKEK—KΙKΑKΚKK1KJK9K:K9K:K@KGKGK8K3K5K4K7KK@K?KK>K>K>K>K>K>K>KAKAK>K>K>K>K>K>K?e]r•(KK!K;K3K(K,K*K%KKK!K3K+KKKKKKKK K"K)K"KK KKK#K&K+K*K+K.K4K5K(K$K%K&K&KKK KKKKKK0KIK=K9K:K9K;K.K)KAKAKBK=K8K+K@KDKCKCK2K'K$K KKKKK K K +K K K K 
KKKKKKKKKKK;KNKPKMKNKNKNKPKRKQKQKQKQKQKRKRKSKQKPKPKQKRKRKRKRKRKRKSKRKRKRKRKRKRKSKRKQKQKQKQKQKSKNKEKAKEKGKKKLKIKNK>K:KtKYKGKFKZKŽKK€K.K1KvK’KKŒK‘KpKHKJKGK\KŽKKŽKKŽKŽK“KRK$K\K…KMKGKJK6KK&K(K&K!K%K/K,K"K!KPK[KJKLKTK΄KΙKΫKΚKKKK’KK6K:K|K…KHKJKDKiK‘KK‰KBK9KsK‘KŽK‘KK‘K„KJKFKFKAKcK’K‘K‘K’K’K’K‘K•K€K?KEK‚KbKBKFKJK‰K“KK•K|K@KHKK”K‘K’K’K‘K•KyKCKGKCKEK|K”K‘K’K’K’K’K‘K’K“K^KAKhKŽKMKBK?KRK“K“K“K–KrK@KFKEKCK?K>K@KGKCKAKBKAKAKAKCK5K$KKKHKHKIKFK?KDKKKGK‘KΝKΔKΖKΕKΔKΕKKVKEKLKLKKK>K>K=K=K>K;K?KHKGK9K.K0K0K/K9K>KKOK^K\KUKOKQKTKQKAK?K:K8K;KAKAKAKAKAKAKAK@K?K>K?K?K?KK@KAKAKAK>K>K>K>K>K>K>e]r–(KK6K6K%K&K'K&K KKK/K.KKKKKK K K +KK)K"KKKKK!K&K*K*K+K/K1K3K)K&K&K%K&K#KKKKKK KKK8KEK:K8K:K9KKXKKK’K’K’K’K‘K”KˆKFKCK|KoKBKHKEK}K•KK”K†KGKDKwK•K‘K’K’K‘K“K„KHKGKDK@KqK•K‘K’K’K’K’K’KK–KlKBK[K“KZK?KCKEKŠK•K“K”KKFKFKGKBK@K?K?KCKAKBKBKBKBKAKCKK>K=KKEKGKAK0K/K0K/K3K=K=K=K=K=K=K:K9K8K7KCKHKCK—KΗKΏKΏKΗK€KKKKKKKKKKKKKBKHKDKjKΐKΏKΎKΎKΐKaKK#KKKK!K)K9KLK[K_KYKOKGKBKCKAKK>K?K=K;K;K;K:K5K2K7K>K?KBKCKBK?KAKAKAKBKBKBKAK>K?K>K@KBKAKAK>K>K>K>K>K>K>e]r—(K0K9K K!K#K"KKKK.K0KKKKKK KK KK,K%KKKK!K#K$K'K*K+K.K0K4K)K$K'K%K&K"KKKKKKKKKK;KEK:K:K:K9KK6K/KBKDKCK?K/K)K&K%KKKKKK K K +K K K KK K KKKKKKKKKK%KK>KK>K>K?K;K;K=K9K@KIKCKmKΒKΏK½KΒK±K0KKKKKKKKKKKK7KIKFKJK¦KΔK½K½KΓK›K$K"K KKKKKK!K3KFKZKaKZKNK@KAKCK9KKKK(K;KAK>KBKBKBKAKAKAK=K?K>KK>K>K@KBK?K?K>K>K>K>K>K>K>e]r˜(K:K KKKKKKK(K2KKKKKK +K KKK,K(KKKK!K$K%K%K'K+K.K2K0K(K%K&K%K&K#KKKKKKKKKKKAKEK:K:K:K9KK>K6K0KCKDKCK?K/K)K'K$K"KKKKK K K +K +K +K K K K K K K K KKKK K%KKK K@KOKMKOKQKPKQKRKRKPKPKPKPKPKPKPKPKQKQKRKRKQKQKRKRKSKRKRKQKQKRKRKPKQKQKPKPKQKPKOKQKRKLKEK@KBKGKGKIKKKJKEKSK‰K’KŠK4K*KlK“KŽKŽK’KzKJKLKHKRK‰KKŽKKKŽK”KcK"KJKPK@KIKJK:KK!KK K#K(K)K"K%KžKεKzKHKMKJK¬KΧKΩKΞKK‘K’KKKKK2KcK’KWKFKCKTKKŽK’KZK4K_KKK’K‘KŽK’K_KCKGKCKOK‡K“K‘K’K’K’K‘K‘K“KYK9KfK„KGKHK@KgK–KK‘K“KXK@KfK”K‘K’K’K‘K“K“KZKAKCK=KXKK’K“K”K”K”K”K’K•K„KHKIK‚K|K@KCK@KoK™K“K“K•KaKCKGKBKAKVK]KLKIKAKAKBKAKAKAKDK(K/KKKHKIKFKCK?KFKJKNK«KΚKΕK·KvKAK;K=KFKEKIKIKK>K=KK@K?K=K=K>K>K=K:K9K5K4K7K:K?KAKBKAK@KAK@K>K?K?KAKBK?K>K?K>K>K>K>K>K>e]r™(K'KKKKKKK!K-KKKKK K KKKK(K&KKKK"K#K&K)K)K)K-K/K/K+K%K&K&K&K#KKKKKKKKKKK"KBKCK8K:K9K9K;K%K2K@K?K>KK9K:K7KDKŠKœKyKIK}KcK8KQK’K‘K—KwKIKMKŸKΚKΒKΓK£K—K™K—KœKvKHK™KΙKΑKΖKK˜K™K™K˜K˜KˆKMKYK΄KΕKΑKΚKeK,KBK@KAKBK@KAKHKBK7KKBKDKCKBKBKBKBKEKGKEK2K-K-K0K5K>K@KBK@K>K>K?K=K:K:KBKEKDK‚KΔKΌKΎK½KPKKKKKKKKKKKKK;KHKCKTK¬KΑKΎKΌKΒK†K"K!KKKKKKKKKKK%K4K@KAKAKCKK?K>K?K=K;KK>K@KBKBKAK?K>K?K>K>K>K>K>K>e]rš(KKKKKKKK/KKKKK K KKKK&K'K!KKK"K!K%K'K)K)K+K1K2K(K%K'K%K'K"KKKKKKKKKKKKKEKCK8K:K;K:K;K#K4KAK@K>K;K1K4KEKCKDK@K-K(K%K#K$K KKKK K K K KK K K +K K K K KKKK K"KKKKKKKK;KPKRKQKQKPKRKQKPKPKPKPKPKPKQKRKQKPKQKRKRKRKSKQKPKQKQKPKQKQKQKQKSKTKQKQKRKQKPKQKRKRKQKLKDKAK7K7KGKJKHKNK`K;K'KgK’KKŽK’KKHKJKFKMK…K‘KŽKKKK”KqK&K=K)K;KMKFKCKKKK-K,K!K(K'K!K9KΊKΠKRKHKIKHK›KβKΠK‘K’K’KŽK”KaK-KVKKdKEKGKJK„KK•KnK4KPK‰K“K‘K’K‘K–KuKDKFKCKDKxK–K‘K’K’K’K’K‘K—KoK:KSK‹KWKCKCKRK’K’K’K–KrK?KUKŒK”K’K’K’K’K–KqKAKEK>KDKƒK–K“K”K”K”K”K“K“K•K]K@KeK“KPKAK@KOK”K•K’K˜KKHKDKDKTK^KZKHKLKMK@KBKBKAK@K@K;K KDKIKHKGKDK@KAKIKEKjKKpKGKKK>K>K?K>K=K=K;K8K5K4K5K9KK?K@K@KAKAK?K>K>K>K>K>K>K>K>e]r›(KKKKKKK)KKKKK KKKKK#K%K KKKK!K#K'K)K*K*K0K1K'K#K'K&K&K K KK K K K KKKKKKK KHKCK8K:K9K:K:K"K6KBKBK>K=K1K6KEKCKCK=K*K&K#K$K$K#KKKK K K K K 
KKKKKKKKKKKK#KKKKKKKKKKKK‡KbKAKEKJKŠK”K“K—K}KCKNK…K•K“K”K”K“K—K}KCKFK@K@KwK•K’K”K”K”K”K“K“K—KmKCKXK’K^K>KBKBK‹K–K’K•KKSKHKGK\K^K]K\K^KPKAKBKBK@K>K>K@K"KKFKHKKKPKAK:K=KKzKΗKΑKΒKeKAKHKFKDKDKDKDKDKGKGK?K/K0K0K/K9KBKBKBKBKBKBK@K?K:K&K=KHKEK”KΑKΑK‰K.K"KKKKKKKKKKKKK>KFKBK_KΆKΌKΎK½KΕK|KKKKKKKKKKKKKKK.KAKAKCK:K"K)K—KΏK·KKvKqKlKbKWKLKDKBKAKAK?K>K?K?K>K?K?K=KK?K?K>K>K>K>K>e]rœ(K K K KKK$KK K KK K KKK +KK!KKKKK K$K(K)K*K,K-K0K'K"K%K&K%K&KKKK!K"K!K"K%K%K&K(K(K%K.KIKBK9K9K9K8K7K!K9KBKAK>KKEKBK€K–K’K•K†KHKFK{K–K“K”K”K“K–K‰KIKDKAK?KlK–K“K“K”K”K”K”K’K–KzKEKNKKnK;KBK>K|K˜K’K•K”K^KGKCKLKYK^K_K_KNKAK@KBKAKAK>KAK&K0KLKHKHKFKBK>KBKHKGK=K;KK>K=K=K?K?K>K>K>K6K1K"K1K=K8K6K4K5K6K9K?KAK>K=K=K>K>K?K?K>e]r(K K K KK KKKK +KKKKKKKKKKK!K!K#K'K)K)K+K.K.K'K#K(K&K&K#K'K+K-K+K+K-K)K(K*K*K)K*K'K"K5KKKAK9K:K:K9K5KKK;K1K:KCKCKBK9K*K'K%K!K K KKKK K K K K +K +K +K K K K KKKKKK"K9KBKLKMKUKWKTKUKTKRKIKMKOKMKOKPKPKPKPKPKPKPKPKPKPKRKSKRKSKPKOKQKRKRKSKRKRKRKRKRKRKSKRKPKQKPKPKPKPKQKQKPKPKRKRKOKHKAKAKEKGKHKKKGKSKqKŒK†KMKHKHKHKzK’K‘K’K’K‘K•K‚K+K1KyKiKCKGKHK6K;K/K*K*K,K‘KΟKΪKβKͺK€K^KJKKKRKSKNK±KœKK’K‘K–KzK4KBK…K{KEKJKAKqK–K’K†KAK>KuK•K‘K’K‘K“KŠKOKFKEK?K\K‘K’K“K”K”K“K‘K“KŠKKK>KvKxK@KEK@KuK–K’K“KKQKBKnK—K“K”K”K“K”K’KRKAKDKKIKVK\K8K=KCKAKBKBK?KAK0K&KLKIKIKFKBK=KAKGKHK@K;K=K=KK>K?K>K@K1K+K)K&KK?K>K>K>K>e]rž(K K KKKKKKKKKKKKKKKKKKK"K)K'K&K*K.K/K%K$K'K'K'K'K#K"K K K$K#K$K#K#K$K$K%K#K!KK5KLK>K9K:K:K;K3K!K>K?K@K?K;K.K=KEKCKDK9K+K*K&KKKK KKKKK K K +KKK K +K +K K K K K KKAKSKUKXKYKZK[K[KXK[KXKVKWKTKLKIKNKPKPKQKQKPKPKPKPKPKRKRKQKQKPKPKPKQKQKQKQKQKQKQKQKQKQKQKPKPKPKQKPKOKRKSKQKPKQKQKQKQKNKFKAKAKDKHKGKGKHKZKnKLKGKHKDKuK”KK’K’KKK†K0K.KxKtKFKIKCKtK„K6K)K/K/K‚KΓKΥKέKK‘KXKJKGKdKͺKZKKͺKK’K‘K”KƒK8K?KK‚KFKKKAKjK–K‘KKGK:KnK“K“K“K“K”K‘KUKFKEK@KVKK•K”K“K”K“K“K’K‘KUK?KmKKDKEK?KlK˜K’K”K”K]K@KdK–K“K”K”K“K“K•K\KCKGKK=K:K/K>K@K>K@KBKAKCK;K!KEKIKJKHKDKAKEKHKHKCK:K:K:K:K:K8K7K=KBKAKIKAK8K:K9K;KqK˜K”KUKeKƒKHKK„K’K1K-K-K&K(K2K-K KKKKKKKKK3KFKDKMKͺKΔKΏK½KΓK£K&KKKKKKKKKKKKKKK@KCKEKK@K=K5K*K*K=K=KK?K?K>e]rŸ(KK KKK KKKKKKKKKKKK KKK K#K)K(K)K,K.K%K K&K%K%K!KKKKKKKKKKKKKKKKK5KIK:K9K9K9K;K2K#K>K>K?K>K;K-K=KEKCKDK8K+K*K&K KKKKKKKK K K +KKK +K K K K K KK KKIKXK[K\K[K]K_K_K_KcK_KVKMKKKKFKHKEK@KK=K=K=K;K9K;K:K9K8K3K2K1K5K:K>K?e]r (K KKKKKKKKKKKKKKK KKK!K%K&K(K)K,K0K'K!K'K&K'K KK K KKKKKKKKKKKKKKKK>KKDKCKEK8K,K*K&K!K!K KKKKK K K K K K +K +K K K K KKKKKKKYKZKZK\K`KbK`KVKGK6K&KKKKKK(KDKOKOKPKPKPKPKPKPKPKPKQKQKQKQKPKPKPKQKPKPKPKPKPKPKPKPKPKPKPKPKQKPKPKPKPKPKPKQKPKMKKKNKPKKKDK@KDKJKJKIKIKIKGKCKsK—KK‘K‘K‘K’KŽK7K)KnK~KEKJKDKpK—KQK+K1K2KDKžKΣKήKΣKΛK{KHKIKXKΣKΠK€KΉK€KK’K’K‹KAK5KpKKLKEKFKZK“K‘K“KYK2K_K“K”K“K”K“K—KdKCKEK@KIKK–K“K”K”K”K”K’K˜KjK;KXKŽKSKBKAKRK’K”K“KšKrK?KRKŒK•K“K“K“K’KšKvKAKHK?KAK{K˜K“K—K—K—K—K—K”K–KlKCKWK’K_K>KBKBK‡K™K–K—K”KVKDKIKHKHKGKFKEKBK?KBKAKBK?K>K@K"K2KKKHKHKEKAK9KCKIKFK=K7K:K:K:K9K7K:KAKCKEKIKKBK>K>K>K?K?K=K;K;K;K;K;K9K8K6K4K3K5K7e]r‘(KKK KKKKKKKK KKKKKKKK$K'K'K'K*K0K*K$K'K'K%K"KKKKKKKKKKKKKKKKKKKAKHK:K9K:K;KK>KKCKBKDK6K+K*K&KK KKK KKK K K K K K +K +K K K KKKKKKQK[K]K]KUKEK3K&KKKKKKKKKKKK2KLKSKQKQKQKPKPKPKPKPKOKNKPKPKQKQKPKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKQKMKOKQKRKRKJKAK?KEKHKEK?KGKGK\K†K“K’K‘K’K‘K’KKNKŠK\KAKCKIKŒK•K’K˜KKDKKK‚K–K“K•K•K”K™K‚KEKGKCK@KpK˜K”K–K–K–K–K—K•K˜KzKGKLKKqK=KCK:KyKœK•K–K˜KcKK@K7K8KIKHKHKEKBK>KBKHKFK?K8K9K9K:K9K7K:KAKEKCKHK=K6K7K;KEKK›KKNKsK‡KGK>K†KšKšKKPKGKxKΗKΒKΕK΄K˜KšKšKšK“KTKaKΑKΔKΕK·KwK—KšK›K–KKrKGKyKΖKΑKΑKΎK>KKGKFKGKFKGKJKBKsKΖKΏKΎKΖKxKAKHKEKCKBKDKEKEKFKHKEK4K2K5K2KGKMKAKCK@KAK8K(K+K3K8K?KGKFK=K.K-K.K-K1KEKGKDKDKFK*KKKKKKK*KDKEKBK‰KΔK½K½K½KΉK@KKKKKKKKKKK KKKK3KDKBK?KkK»KΉKΉKΈK½K™KK 
+KK(K9KKKZK]KVKRKSKWK^K`K^K[KTKMKBK@KAK@K>K>K>K?K?K=K>K>KK>KKRKQKPKPKQKQKQKPKMKKKNKPKQKQKNKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKQKPKPKPKQKQKNKHK>K@K:K4KEKIKGKPKjK‰K•K“KK”KCK"KdK†KHKJKBKbK˜KwK3K5K2K6KƒK³KΩKέKΩKƒKGKPKRKΐKςKΩKΙKΌK‘K•K“K—KSK0K_K‘KZKCKCKNK‹K’K–KlK2KOKK•K“K”K“K˜KxKBKHKFKBKrK˜K“K”K”K”K”K“K—K€KBKEKƒKgK?KDKCK„K–K’K–KŠKLKEKwK—K”K—K—K—K˜KKMKEKEK>KcK—K—K–K–K–K–K–K–K™K‡KKKIK€KK@KDK6KdKšK”K–KšKvKBKJKuKeKNKHKFKHKDKAKAKBK?K>K>KAK@KGKIKHKEKBK>K;KEKGKCK7K7K8K:K9K7K8K>KDKCKGKBK8K8K;KAKoKšK˜K\K_K‘KSK8KpK™K”K›KdKDK[KΈKΕKΔK½KK›K›K˜KœKfKLK©KΘKΒKΔK‡K‘K›KœK˜K˜K„KMKYKΈKΒKΎKΙKdKK=KHKFKFKGKIKFKSK³KΔKΏKΖKžKEKGKEKCKEKDKCKFKGKEKHK:K0K5K.K}K|K>KEKDKAKBKCK@K>K=KK=KK>K=K;K9K:K:K9K:e]r£(K KKKKKKKK KKKK KKKK!K#K%K'K)K.K'K#K'K%K&K"KKKKKKKKKKKKKKKKKKKKKGKEK8K9K:K:K;K$K.KAK>K>KK}KuKAKEK@KyK˜K’K”K’KSK@KkK—K•K–K–K—K•K“KUKEKEKK>K@K?K>K>K@K=KCKIKGKFKDK?K;KEKGKDK7K7K8K8K8K8K7K=KCKDKHKGK:K9K:K>K^K—KšKnKOKKfK9KZK–K‘KžKyKHKMKŸKΙKΒKΓK₯K™K›KœKKMKGK†KΘKΒKΖK«K•KœK™K˜K—KŒKXKJKœKΗK½KΘK˜KK/KJKFKFKGKIKJKGK‘KΘKΏKΑKΊKZKCKGKEKFK=K8KDKGKEKHKAK3K4K7K‘K€KBKEKGKCKAK@K=K;KK&KKKKKKK1KBKBKEK˜KΑK»K½KΐK°K2KKKKKK KKKKKKKKK8KDKCK?KrKΌKΈKΊK·KΎK“KKK KKK +KK.K@KRKZK]KVKRKTKWK\KZKBK:K8K8K;K@K?K=K=K>K>K=KK>KKrKKBKEK?KlK™K’K“K–K]KAKaK•K•K–K—K—K“K—KcK@KFK=KNKˆK˜K–K—K—K—K–K–K–K–K`KAKcK–KSK>K?KHK’K—K–K™K“KSKBKnK™K—K™KKsKRK@K@K>K?K?K>K?K?KBKJKGKGKEKBK=KCKGKFKKOKKœK…KIKK„KIKFKK˜K›KŒKQKEKKΗKΒKΔK±KšKœKžKsK'KGKhKΏKΏKΒKΎKžK™KK›K—K“KiKFKyKΘKΏKΒK½K4KKGKGKFKGKIKJKFKlKΓKΐKΏKΖKKAKIKFKFKDK>KEKGKFKGKEK9K0KPKΉK½KbKBKGKBK@K>K@KKDKDK@K+K,K.K.K.K?KHKFKFKDK2KKKKKKKK?KEKAKnKΎK»KΎKΎKΕKkKKK KKKKKKKKKKKK!KCKBKAKKK₯K½KΉKΊKΉK»KGKKKKKKKKK(K4KDKVK^KYKOKNKWKJK;K=K8K8K5K-K7K?KAK>KK>KK=KyK–K”K–K”K”KKNKBKDKAKVK‘K—K–K–K–K–K–K–K–KXK=KhK†KDKBK?K`K—K”K’K˜KhKAKVKK—K–K—K—K•K›KnK?KEK?KGKK™K–K—K—K—K–K—K•K™KnKAKXK•KaK=K@K>K†KšK•K–K™K^K@K_K—K•K–K˜KK…KCK?KBKAKAK?K?K>KBKIKGKGKGKCK>KAKIKFK?K7K8K8K7K8K8K7K?KCKBKGK?K7K8KKyKΏKΉKΉKΈKΎKK KKKKKKKK +KgKzKMKBKPK]KZKPKIK=K>K?K=K:K"KK$K0KK>K=K3K5KCKAKDK@K-K+K+K"KKKKKKKKKKK K K K K +K K K K K K K KKK KKKKKKKKKKKKKKKKKKKKKKKKK.KIKRKNKOKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKPKPKPKQKPKPKPKPKPKPKQKPKOKOKQKPKPKPKPKPKQKNKGK>K=KBKIKIKHKGKFK@KNKŒKYKFKEKRKK•K†K;K3K-K}KΛKΚKίKθKΦKZKHKHK~KΩKίKμKΟKšK“K“K—KvK2KCK‹KyKAKHKBKqK˜K“KKEK8KpK˜K”K—K•K”K”KVKAKCKBKOKŠK™K–K—K—K—K–K–KšKcK:K]KKNKDK@KTK–K•K“K™KtKBKRK‹K—K–K—K—K–K›KyKBKEKAKAKtKšK–K—K—K—K–K—K•K›K}KFKMKKrK;KBK9KuK›K•K–K›KoKAKTK’K˜K–K–K˜K•KPK>KBKAKAK?K?K=K@KFKFKGKGKDK?K>KEKEK@K7K7K8K7K8K7K7K:K?KAKGKEK8K7K;KEKkK˜K›KiKYKͺKˆKJKbKK˜KŸKpKFKPK©KΕKΐKΒKšKK KYKK6KKKŒKΖKΏKΓK΅K™KœKœK›K›K˜K\KJKœKΖKΎKΗKšKwKsKBKGKGKGKDKGKGKKΘKΏKΓKΐKcKBKHKDKCKDKDKCKFKFKEKBK7K–KΓKΐKKQKAKDKDKBK?K?K;K:K8K?KCKCK>K*K*K-K-K/KEKGKEKHK6KKKKKKK KKAKEK>K{KΑKΌKΌKΏKšKKKKKKKKKKKKKKKK&KCKCKBKRKͺKΌKΉKΉKΉKΉK>KK KKKKK KgKΉK½KK‰K]KDKIKZK\KJK=K?K=K=K6KKKKK.KAK=KK>K=e]r§(KKKKKKKKKK KKKKK$K(K,K0K+K%K&K&K&K KKK KKKKKKKKKKKK K K K KK"K K"KK5KMK>K>K;K9K:K8K"K8KAK?K=K>K1K6KCKAKDK?K/K,K*K"KKKKKKKKKK K K K K K K K K +K K K K K K K K K KKKKKKKKKKKKKKKKKKKKK KKKK:KPKSKPKPKQKQKQKPKPKPKPKPKPKPKPKPKPKPKPKPKQKOKQKOKQKPKPKPKPKPKPKPKPKPKPKPKPKPKPKQKQKOKMKOKNKGK@KCKFKHKHKFKIKIKbKPKEKDKMKŽK—K‹K?K4K0KMK^KPKKΘKήKeKFKHKrKΤKΝKKΚK—K“K“K—K€K2K>K…K€KCKHKBKgK˜K“K“KLK3KiK—K•K–K—K–K˜KbK@KAK@KKK„K™K–K—K—K—K—K•KšKpK:KRKKXKBKBKJKK˜K–K™KKDKLK„K™K–K–K–K–K™K„KEKDKDK?KjKšK–K–K–K–K–K–K–K™KŠKLKHK€KƒK?KCK;KdKœK•K—KœK}KGKKK…KšK—K™K˜K›K`KK>K?K>K>KFKGKFKGKFKCKK=KKGKQKEK?K>K>K?K(KKKKKwKŠKSK>KAK?e]r¨(KKKKKKKKKKKKKK#K'K*K1K.K#K%K&K&K&K$K#K$K"KKKKKKKKKKKKKKKKKKKKK6KKKOKXKGK;K:K4K#KK4K5KK 
+KKKKKKŒKcK@KFKCK†K™K•K˜K‹KKKEK{K™K–K–K–K–K˜KKKKBKDK?K^K”K—K—K—K—K—K—K—K˜K’KXKFKpK’KIK@K?KVK—K—K—K›KŒKMKEKwKK˜K˜K˜KœKrK=KBK@K@K?K?K>K?KCKGKGKGKFKBKK@KGKKK=K@K@K?K7KKKK$K•K»K™KYKKKBe]r©(KKKKKKKKKKKKK K&K*K-K+K$K&K&K&K#KKK"K$K!K#K'K'K'K$K#KKK KKKKKKKK KK K +K9KNKmKsKSK;K:K3K$K>K@K>K=K:K/K8KAKBKBK=K-K*K,KK KKK KKKKKKKK K K KKK K K K +K +K +K K K K K KKKKKKK K K K K K K +K K K +K +KK KKKKK%KKKK0KKKQKMKNKQKQKQKPKPKQKQKQKQKPKPKPKQKOKMKOKQKPKPKPKQKQKQKPKPKPKPKPKPKPKPKPKPKQKQKQKQKQKNKNKQKMKCK=K@KGKFKGKHKGKEKGK€K˜KK>K2K2KKK +KK0K/KCKJKGKWKŸKΒKβKΌKeKŒK’K“KŽK>K2KqKŽKKKDK@KZK•K’K›K`K4KZK”K—K–K—K–K›KtKBKEKDKEKuK™K–K—K—K—K—K–KšKƒKDKDKƒKpK?KFK?K{K›K•K˜K’KRKAKnK™K–K–K–K–K—K–KVK?KDK>KRKŽK˜K—K™K™K™K™K˜K–K™KbKCKbK•KUK=KAKHKK›K˜K™K”KYKBKkKœK˜K˜K˜KœKƒKBKBKBKAK?K>K?K?K@KHKIKGKFKBKKFKGK=K5K:K9K7K8K8K5K:K?K>KFKAK7K8K=KHKxK›KšK\KjKΏKŸKcKsK‘KšK›KfKDKVK³KΓKΐKΎK‘KŽK+KKK7KLK“KΗKΏKΕK©K‘KKœK˜K™KRKAKLK›KΘKΎKΗKšKTKeKFKEKGKEKFKKKDK†KΗKΏKΐKΎKiKCKHKGKHKDKDKDKCKDKFKVK΄KΔKΏKΐK·KZK@KDKBKAK>K=KKCKDK?K-K-K-K,K1K?KDKCKCKEK5K"KKKK K KK>KCK@K\KΆK½K°KIK)KKKKKKKKKKKKKKKKK>K@KBK>K…KΎKΈKΉK·K½K}K +K K +K-KGKBKCK”KΏKΈKΊKΊK·K½KžKLKXKŒKQK?KAK>K?K+KKK5K§K³K±KKbKZe]rͺ(KKKKKKKKKKKKK%K)K-K,K$K&K%K&K$KKKKKKKKKK K!K"K!K#K'K(K&K&K KKK K +K +KK K=KTKŒKKaK=K;K1K"K>K@K?KKEKIKHKGKwK½KΆK¨KOKZK—K•K’KDK/KjK”KOKCKAKSK”K•KšKkK3KRKK—K–K—K–K™K}KCKDKBKAKkK™K–K—K—K—K—K–K˜KKJK>KzK}K@KFK>KnKšK•K–K˜K\K?KdK—K–K–K—K˜K—KšKdK@KFKAKJK†KšK—K˜K˜K™K™K˜K—KœKpKCKVK•KdKKBKAK?K>K>K@K>KDKHKGKEKDK=KK=KKKK>K?K9KAKHKGKDKEK?K:KEKGKAK6K7K7K8K8K5K7K9KK=KKCK>KhKΒK‚K)K*K,KKKKKKKKKKKKKKKKK>K?K?KAK‹KΎKΉKΉK·KΒKsKKKK!KFKDKDK•KΒK·KΊKΊKΈKΌK™KFK_KKXK?KCK@K>K.KKVKΆK΄K΄K²K€Kie]r¬(KKKKKKKKKKKK&K)K)K&K%K%K'K#KK KKKKKKKKKKKKKKKKKKKKKK K K K KKCKfKΐKKpK:K9K,K)K@K?K?K;K7K-K>KDKCKCK8K+K,K+KKKKK K KKKKKKK K K KKKKKKKKKKKKKKKKKKK +K K K +K K KK +K K +K K K KKKKKK#KKKKKKKKK3KLKNKPKQKPKOKMKNKQKQKPKQKPKNKQKPKQKQKQKQKPKPKPKPKPKPKQKQKQKPKPKPKPKPKQKOKMKPKQKQKPKPKPKPKOKNKGKKDKFKHKDKEK?KDKKKKK&K;KDKHKFKFKxKnKxK‰K–K•KK•K›KWK,KYK•K]KCKFKFK‰K˜K™K€K6KDK‚K™K–K—K–K˜KKLKDKCK?KUK•K—K–K—K—K–K–K–K™K]K:KdKKKKFKBKVK—K˜K–K›KvKBKSKK›K˜K˜K™K˜K›K|K@KEKDKBKmK›K™K˜K™K™K™K™K˜K›KŠKJKFK~K…K?KCK=K`KK—K˜KžK†KJKIK€KœK˜K˜K—KKlKK?K>K=KBKEKFKFKDK@K:K@KFKCK9K7K8K7K7K7K5K5K:KK7K7KK>K>K=KKKKKKK:KHKEKDKFKbKbKQKvKͺK«K”KœK`K+KPK”KdKBKFKDK„K™K˜KŠK;KK?K?K>K?K>K>K>K?KFKFKFKDK@K;K?KEKEK?K7K8K7K7K7K5K5K8K;KK?K?K=K;K9K9K@KCKEK:K-K.K.K/K4KAKEKDKFK@K-K&KKKKK K K@KCK?KgKMK&K,K)K'KKKKKKKKKKKKKKKKK?KBKDKBK“KΎKΈKΊKΆKΐKmKKKKKBKAKBK,KvKΆK°K³K³K°Kye]r(KKKKKKKKKKK"K+K&K%K%K&K"KKKKKKKKKKKKKKKKKKKKKKKKK KKKK K!KHK‰KΫK±KbK7K9K&K.KDKAKK?KK=KDKDKBK6K5K8K7K6K8K7K6K;KK;KK-K*K+K)K*KK KKKKKKKKKKKKKKK5KDKBK=KhKΈKΈKΊKΉK½K¨K!KKK%KEKDK@KdKΊK»KΆKΈKΊKΉK·KjKDKžKŽK=KBKBKAKKEKRKcK‰KžKtK*KBKKwK>KDK=KqK›K—K—KIK3KiK™K–K—K—K–KœKfK?KFKBKDK}KšK–K˜K™K™K˜K–KœK~K@KHKˆKnK>KEK@K}KžK—K™K”KSKDKpK›K˜K˜K™K˜K˜KœK\K@KDK?KMKŒKK˜K™K˜K™K™K˜K—KKrKAKUK—KiK=KDKK=K=KK‰KŸK›K˜KWKEKdKΎKΐKΐKΒKPK KKKK9KMKžKΖKΏKΖKŠKKK;K@KDKDKCKEKDK@K6K?KBKBK2KK3KFKPK…K‘K}K—K}K/K;K…KK?KEK=KiK›K”KšKQK0KaK˜K–K˜K™K˜KKoK>KGKGKBKuK›K˜K™K™K™K˜K˜K›KˆKEKCKKzK?KEKK>K*K;KHKFKGKEKAK;KAKCKDK9K6K8K7K7K8K7K7K9K9K=KDK?K8K7K;KJK‚KKœKZKZKlK@K8KqK‘KšKŸKjKHKSKͺKΔKΎKΘKfK KKKK*KFK}KΒKΎKΓK²K5K KKKKK8KJKxKΓKΐKΏKΌK˜KuKDKEKGKGKGKHKHKZK·KΓKΏKΖK 
KHKDKCKDKCK9K9KBKBKHKDKxKΐK½K½KΓK›K@K@K>KKAK4KKK+KK`KšK”KœK[K.KYK•K˜K˜K™K—KKyKAKEKDK?KlKšK˜K˜K™K™K™K˜KšK“KMK>KvK‡KAKEK=KeKK˜K—KKkK?KYK’K™K˜K˜K˜K—KKwK@KEK@KBKsKKšK™K˜KšKšK™K˜K›K‹KLKGK{K‹KBKBKK?K=K?K&K.KHKGKGKDKAK;K>KEKHK=K3K6K7K8K6K5K5K7K:K:KAKCK9K7K9KBKpKžKžKpKGKJK=K8KYKŸKœK’KKIKFKŽKΖKΎKΚK‚KKKKKKDKaKΉKΑKΐKΑKCKK)KKKKDKMKWK·KΓK½KΒKŸKZKCKGKEKEKGKGKIKHK˜KΔK½KΐK½K^KBKFKDKCK;K;KDKCKGKGKVK΄KΐK½KΏKΊK[K=KAK=K=K=KKAKMK•K™K›KœK˜KYK?KhKœK›K›K›KŸKŠKCKAK?K>K?K=K@K+KKGKHKGKCKAK;KKBK=K=K=KK[K—KœK›K›K›K›K›K›K›KKdKDK`KšK\KK@K>K?K=K>K5KKCKIKFKDKCK>K;KEKCKBK7K4K5K5K7K7K4K5K8K:K=KFK;K7K7KK;K}K’KœKŸKcKEKWK±KΔKΑK½K;K KKKK.KHK‚KΗK½KΗK£KMKLK0K0K=KK9KFKyKΓKΎKΏKΌK§KKGKGKEKEK>KAKKKVK±KΑK½KΓK¦KGKDKDKBKDKCK?KAKBKHKDKkK½KΎKΎKΒK¨KHK>K>K>K=KKEKCKCK3K)K,K&KKKKK4KDK>K?K.K(K)K(K)K KKKKKKKKKKKKKKKK(KDKBK?KLK¦KΌKΉKΈKΆKΎKSKK KK+KFKAKJK KΐKΎKΈKΆK΅K½K•KDKiK½K_K=KAKAKKJKK™KK~K1K@K…KœK˜K˜K˜K™K’KPKCKDK?KNKŽKšK˜K™K™K™K™K—KŸKnK;KTK“K^KAKBKBKŒK›K˜K›KKKKEKwKŸK›K™K™KœKœK—KRKAKDK>KRKKžK›KœKœKœKœKœK›K KqKCKQK•KlK:KCK;KzK‘K›K›K KxKDKOKŽKžK›K›K›KŸK_K:K@K>K?K=KKK:KKKFKEKDK?K:KBKFKEK9K4K5K4K7K7K4K5K7K7K9KFKAK6K7K:KBK|K‘KžKfKCK:KKEKDKDKCK@KAKBKGKGKMK§KΒKΎKΏKΐKjK9K@K>K=KKAK8K'K)K)K(K'KKKKKKKKKKKKKKKKK?KAKDK=KzKΎKΈKΈK΅KΌK˜KKKKK=KEK=KsKΌKΏK»KΆKΆK·K³KeKEK£KšK?K>K=K?KGKšK·K²K°e]r΅(K—K–K–K•K•K•K–K–K—K˜K™KšKšK›KœK›K—KK‹K‚KyKqKcKVKKKAK8K0K+K'K"KKKKKKKKKKKKKKKKGKlKΡKΓK}K?K9K7K7KAKBKAK?K6K5K@KFKCKDK>K,K*K+KKKK K K +K K +KKKK KKKKK KKKKKKKKK K K K KKKKKKKKKKKKKKKKKKKKKKKK&K,KƒKŸK˜K—K–K–K–K•K–KšK›KˆKcK5K#K%K(K$KKKK$K;KLKNKJKIKMKKKKKNKNKMKMKMKMKNKMKMKNKNKLKNKMKNKMKMKMKMKNKMKMKNKMKMKMKNKLKNKNKMKNKMKMKMKNKMKIKKKMKIKBK4K4KBKDKEKBKCKBKLK%K=KVK‚KQK'KYK™KXKBKCKDKK›K›K‡K6K9K|KžK˜K˜K˜K™K™KWK?KEK>KIK‰K›K˜K˜K˜K˜K˜K—KKxK?KKKKiK=KCK?KKŸKšK™K–KTKAKmKŸK›K›K›KœKšK›K`K?KDK?KHK„KŸKšKœKœKœK›K›K›K K‚KDKHKŠK€K=KDK;KiK K›KšKŸK‡KHKHKKŸK›KœKšK‘KqK;K@K>K?K=KKkK K‘K|KEK=K;K:KNK•KžK KKLKEKxKΔKΎKΗK“KKKK KK=KOK©KΒK½KΙKoKK,K`K~KWKQKGKIK›KΓKΌKΔKKyKaKDKEKCKDKDKFKDKmKΑKΎK½KΕKŠK@KDKDKCKBKBKBKBKCKGKBK„KΒK½KΌKΓK“K=K=K=K=KKkK΅K³K°e]rΆ(K—K—K—K—K—K—K—K–K–K–K–K–K–K–K—K—K–K—K™K™KšKKžKšK—K•KK‡K~KsKeK[KPKFK>K6K.K'K#KKKKKKKKHKtKΨKΌKwK=KK)K*K)K&KKKK*K?KLKKKLKKKLKMKNKMKMKNKNKMKNKMKNKNKLKMKNKMKMKMKMKMKMKNKMKMKNKMKMKNKMKNKNKMKMKMKMKMKNKMKKKLKLKLKMKHK>K;K?KAKAKCKEKDKAK>K>KqK_K#KOK˜KbK?KGKBK‚K›KšKKKFK?KEK€KK™KšKšKšKšK™KžK…KBKCKˆKvK=KEK=KsK KšK™KœK`K@KaK›KœK›K›KœKšK KmK=KEK@KDKzK K›K›KœKœKœKœK›KžKKKKBK|KKCKBKK?K>K?K>K=KAK*KKGKFKFKDKDK>K=KBKDK@K3K3K5K4K4K4K4K6K8K9KAKDK8K5K7KK‡K KžKK[KEK[KΈKΒKΐKΆK2K KKKK0KEKˆKΔK½KΗKšK!KAKYKGKbKZK?KDKyKΓK½KΏKΉKuKdKHKDKCKDKCKEKHKRK«KΑK½KΒK¬KLKBKDKCKBKBKBKBKCKHKDK]K»KΏK½KΐK³KPKK=KKCKCKCKEK:K)K+K,KKKK K K K K +K K +KKKKKKK KKKKKKKK K K +K K KKKKKKKKKKKKKKKKKKKKKKK$K5K@K†K›K˜K™K™K™K™K™K™K™K™K˜K—K›KK”KqKIK0K-K,K(KKK K2KIKOKKKKKKKMKNKMKNKNKLKNKMKMKNKMKMKOKMKNKNKNKNKMKMKMKNKMKMKMKMKNKMKMKMKMKNKNKNKNKLKKKNKMKKKKKLKLKEK=K;K=KCKDKDKDKFK=KbK`K#KGK“KiK=KGK>K{KœK˜K˜KDK0KkKœK˜K˜K˜K˜KŸKhK=KDK@K@KxK K›KœKœKœKœKœKŸKŽKGKAK|K‚K?KFK>KeKŸK›K›K KnKAKVK–KK›KœK›K›K K{K@KDK@KBKlKžKœK›K›K›KœK›K›KœK–KWKBKjK˜KNK?K@KIK•KK›K›K›K]K?KcKKœK›K›KžK‘KGK=K?K>K?K>K@K3KKCKGKFKGKFK>K;KCKDKBK4K4K5K4K4K4K5K4K7K7K9KEK>K7K8K:KJK‹K‘K›K[K@K=K>K9KsKŸK™K’KoKBKJK‘KΖK½KΕKcKKKKK$KCKhKΎKΏKΒKΊKCKjKŽKTK:K4KEKIKZKΈKΐK½KΑK©KhKCKEKCKDKCKEKIKDKŠKΖK½KΎKΐKlK@KEKDKBKBKBKAK@KEKGKGKKΔK½KΎKΒKvK:K?K=KK?KKsKžK—K™KNK.KbK™K™K›KšKšKŸKuK>KBK?K=KmKžK™K›K›K›K›K›KœK˜KNK?KpKKDKDK>KWKK›KšK KyKAKLKK 
KœK›K›K›KŸKŠKCKBKAK?K^K™KœK›KKKœKœK›K›KžKeKAKYKK]K;KAK>KˆKŸK›KœK‘KoKAKVK—KKœKœK›KKVK;K?K?K>K?K?K:KK:KFKEKGKDK?K9KAKDKCK6K2K5K5K4K4K4K4K5K5K7KCK@K5K8K8K@KxK£K’KpKAK;K:K8KXKK›K£K…KGKDKƒKΖK½KΓK₯K-KKKKKAKRK¬KΓKΎKΔK{KvKœKšKOK$KPKKKIKœKΔKΌKΐKΆKKiKAKDKDKCKDKHKFKgKΎKΏK½KΕK’KCKDKBKBKBKBKAK@KCKFKCKvKΒK½K½KΓKŸKDK>K=KKFK>KhKK—KœKYK,KVK—K›KœKœK›K KK@KAK>K:KcK›K›KœK›KœKœK›K›KžKYKK>K>K=KK/KGKDKGKDK@K:K>KDKEKKDK6K8K7K9KeKŸK‘K†KGK=K:K9KCK“K›KžK˜KSKBKfKΏKΐKΐKΐKYKKKKK6KHKKΕK½KΔK£K‰K‘K₯K†K5KEKHKEKxKΒKΎKΎK½K–KoKDKCKDKCKCKEKGKNK‘KΒK½KΐK°KQK?KBKBKBKBKBKBKBKEKEKUK±KΐK½KΏK»K_K;K>KK9K8K=KCKDKCKDK7K*K+K)KKKKKK K K K K K +K KKKKK KKKKKKKK +K K KKKKKKKKKKKKKKKKKKKKKKKKK(K:KBK…K›K˜K˜K˜K˜K˜K˜K™K™K™K™K™K™K˜K˜K˜K˜K—K—KœKœKŠKdK@K,K*K'KKK3KJKOKLKMKMKMKNKMKMKMKMKMKLKKKMKMKLKLKNKMKMKMKMKNKNKMKMKMKNKNKLKLKLKMKMKMKMKLKLKLKLKKKKKKKKKKKLKLKEKAK=K=KAKAKBKCKDKGKPK;KAK;K^KK™KŸKbK,KOK“KœK›K›K›K KˆKCKDKCK;KYK—KœK›K›KœK›K›KšK KgK:KXK–KWK?KAKDKKžKšKžK‘KKKCKtKŸKœKK›K›KœK›KVK?KBK?KKKˆK KKKKKKKœK’KƒKDKFK‰KƒKK=KK:K=K=KeK‚KœK’KgKEKQK¨KΑK½KΔKeKKKKK(KFKoKΓKΎKΒKΊKKK‘K KzK\KVKEK\K·KΐKΌKΖK…KHKLKCKDKDKCKEKHKCK€KΓK½KΎKΓKtK>KCKBKBKBKAK?K?KBKHKEKKΔK½KΌKΒK…K:K@K>KK{K»KΌKΉKΆKΆK·K²KcKCK˜KŠKK@K>KEK•e]r»(K™K™K™K™K™K™K™K™K™K™K™K™K™K™K˜K™K™K™K™K™K™K™K™K™K™K™K™K™K™K™K™K™K˜K™K™K™K™K™K™K™K˜KšKKNK*K4KSK―KΧK‘KUK9KK@KBKBKAKBK?K9KSKKšKŸKoK+KFKKK›K›K›KžK’KJKCKDKK:KOK›KžKKŸKšKVK@KhKŸKKžKK‘KŽKCK=K?K=KK2KKCKGKFKEKCK;K:KCKCKDK8K4K5K4K4K4K5K3K5K6K8KCK>K6K6K6KDK…KžKœK_K>KK?KBKGKDKmKΏKΎK½KΏK«KHKK=K;K9K8K8K8K6KK6K'K'K*K*K+KK K K K +KKKK K KKKKKK +K6K@K@K>K]K³KΉK]K%K*K#KKKKKK>K?KPKͺKΏKΉKΆK·K΅K»KKDKbK¬KXK9K@K?K=Kce]rΌ(K˜K˜K˜K˜K™K˜K˜K˜K˜K™K™K™K˜K˜K™K˜K˜K˜K˜K˜K˜K˜K˜K™K™K™K™K™K™K™K˜K˜K™K˜K˜K˜K˜K˜K™K™K˜K›K‹KKK*K:KWKΉKΣK›KOK9KKEKˆK›K˜K›KœKœK›KœKšK˜K˜K˜K˜K™KœK›KœKœK›K˜K˜K˜K˜K—KšKžK”KxKBKKKKK%K@KOKOKMKNKNKNKMKKKMKNKMKMKKKLKNKMKMKMKNKLKJKNKMKNKMKJKKKLKNKMKKKMKNKLKKKLKKKKKKKKKKKKKKKLKLKKKLKIKEK?KK‡KžK›K›K›KœK™KRK>KCK=KJKŒKžK›KœK›KKžKK’KK=KEKŒKtK>K?K-KvK‘KšK›KK]KKhKžKPK=K?KBK“K KKœKŸKeK?KYK˜KžKKKK™KPK;K?K>KK=K;K:K8K7K8K7K8KBKFK@KKΏKΉK»K½K²KWKAKDKDKBK0K(K*K+K)K)K$K K:K@K?K=K-K(K)K*K*K"KK K K +K KKK K +KKKKKKK!K@K@KBKAK”K΅KIK&K*K'KK +K K +KK"KCK>K€KΏKΉKΆK·K³K΄K°KdKBK˜KKK@K@KAe]r½(KšKšKšK™K˜K™KšKšK™K˜K™K™K™KšK˜K™KšKšKšKšKšKšK™K˜K™K™K™K™K™K™KšK™K˜KšKšKšK™K˜K™K™K˜KœK‡KHK(K=K[KΑKΞK“KIK9K;K9K;K@K@K=K6K9K?KBKBKDKCK1K*K+K)KKKKKKK K K K K KKKKKK K +K KKKK +K +K +K K KKKKKKKKKKKKKKKKKKKKKKKKK,K>KEKŠKœK™K›KœKœK›KœKšKšKšKšKšKšKœK›K›KœK›KšKšKšKšKšK™K™K™K€KyKKKKK KK.KJKPKMKMKNKMKLKMKNKMKMKLKMKNKMKMKMKNKMKKKNKMKMKMKJKKKLKMKLKKKMKLKKKKKKKKKKKKKKKKKKKKKKKKKKKKKLKJKIKEK=K;K>KAK@K>KBKBKMKpK—KˆK/K8K}KŸK›K›K›K›KKYK?KCK@KDK„K‘K›KœKœKKžKK K‰KAKAKƒK€KAKCK.KgK‘K›KœKžKjK>KTK•KŸKKKKK‘K€K@KCK@K=KbKKžKKžKžKžKKžKK‘KbK?KZK K`KKKDKDKKBK5K5K7K8K\K™K£KKIKAKK?K3K'K*K+K)K*KK +K K K K K K K K KKKKKKK6KAKAK;KhK£K9K'K*K)K"K KK K +K K5KAKSK¬KΊKΆKΆK΄K³KΊKKCKgK»K]K8K?K?K;e]rΎ(KœKœKœKšK˜K›KœKœK›K˜K˜K˜K›K›K˜K™KœKœKœKœKœKœK›K˜K™K™K™K™K˜KšKœKšK˜K›KœKœK›K˜K˜K˜K—KœKKDK%KAKbKΛKΛKŒKCK8K:K4K:KAK?KKFK‹KžK›KœK›K›KœKœKœKœKœKœKœKœK›KœKœK›KœKœKœKœKœKœKœKœKšK KyKKKKKKKKK9KOKOKMKMKNKNKMKMKMKNKNKMKNKNKNKNKNKNKNKLKKKKKLKLKKKKKKKKKMKIKKKLKKKKKKKKKKKKKKKKKLKLKLKLKLKKKHKKKHKAK;K:K5K.K>KBK@K@KTKkK5K1KtKŸKšK›K›K›KŸKbK>KEKAK@KzK KšKKžKKKKžK’KHK=KtKŒKDKCK+KVKŸKKK KxKAKNKK KKKžKK‘KŽKGKBKCK=KUK—KŸKKžKžKžKKžKK’KsK?KLK˜KtK9KBK7KpK€KœKK’K†KGKEK‚K’KœKKœK£KyKKK;K9K:K:K8K6K7K:KDKDKDK“KΏKΊKΉK½K₯KLKBKCKBK>K,K)K+K*K&K%K$K)K>K>KAK:K)K*K+K+K*K;KK K K +K K KKK K KKKK K K%K@K>K>KHKnK1K'K)K)K*KKKK 
KKK?K?K‚KΊKΆK΅K΄K΄K΅K±KcKEK’KœK>K>K>K?e]rΏ(K›K›KœK›KšK›K›KœK›KšKšKšK›K›KšK›KœK›K›K›K›KœK›KšK™K˜K™K™K˜K™KšK™K˜K›KœK›K›KšKšKšK™KŸKxK?K'KDKjKΡKΖK„K>K:K5K$K:KAK@KKCK?KLKKŸKKKžKžKKžKœK‘KƒKCKCKˆK†K?KCK:K]K£KœKK K–KNKAKsK£KK KK’K‹K@K>K>K>K=KK+KKEKGKCKBKBK=KK4K5K7KAK…K’K“KhKAK>K;K5KUKŸK‰K’KpKFKCKxKΖKΏKΓK¨KUKKKKK:KIK™KΔK½KΏK΄K K’K‘KKŸK¦KlK@K{KΓK½KΎKΌKͺKƒKGKBKDKCK?K@KHKFK›KΓKΌKΑKΌK]K>KBKBKBKAK?K?K@KDKFKGKŸKΑKΊKΊKΐK{K8K>K;K9K:K:K8K6K8K6K>KDK?KjK»K»K»KΊK½KiK?KFKCKBK2K)K+K)K(K(K&K%K7K@K?K@K/K(K+K+K&K„KKKK K +K +K +K K +K K KKKK K KK:K>K?K?K;K*K&K'K&K'K!KKKKKK*K?KTK―KΈKΆKΆKΆK΅KΊKŽKEKjKΎK`K:K@KAe]rΐ(KœKœK›KœKœKœKœK›KœKœKœKœKœKœKœKœK›KœKœKœKœK›KœKœKšK˜K™K™K™K˜K˜K˜K˜K›KœK›KœKœKœKœK›K KsK>K(KEKrKΦKΏK}K>K;K1K"KKEKKKNKMKMKMKMKMKMKNKLKKKKKKKMKMKKKKKKKKKKKKKKKKKKKLKKKLKLKIKLKKKKKKKKKJKJKLKJKHKHKHKHKIKLKKKHKFKGKIKEKKCK@K9K`KžKžKKKžKKKœKžK\K7K]K˜KSK>KBKGKKŸKœK K’KGKAKvK’KKžKžKKžKŸK[K=KCKAKFK‚K KœKžKžKžKžKKK KKKKAKxK˜KHKAK?KMKœKžKKKŸK\K?KbK K K‘KžKŸK—KKKK=KDKDKCK:K4K4K4K4K4K5K5K5K5K3K>K@K4K5K6K9KjK•KœKKCK?K9K7KCKaKrK’K‘KPKDK]K·KΏKΐK³KrK3KKKK.KFKyKΐKΌKΏKΊK€K€K‘K K’K₯KˆKEK[KΆKΐK½KΎK°K’K[K@KDKDK?K7KEKAKsKΏK½K½KΓKƒKK?KCKHKAKwKΑKΊKΊKΑK’KCKK>K@K:K*K+K*K1K K‰K K +K K +K +K K K K KKKK +K KK*K?K>K?K=K+K$K&K%K&K(KKKKK +KK5KAK…KΈK³K·K·KΆK·K°KbKDK KœK>K=K?e]rΑ(KœKœKœK›K›KœKœKœK›K›K›K›KœKœK›K›KœKœKœKœKœKœK›K›K›K›K›K›K›K›K›K›K›K›KœKœK›K›K›K›KœKKlK>K*KJK}KΪK»KtKK:K9K.K9KFKCKCK=K-K*K+K&KKKKKKK K K K K +KKKKKK K KKK +K K +K K KKKKKKKKKKKKKKKKKKKKKKKKKKK1K@KFK„KžK›K›K›KœK›K–K™KœKœKœK›KœKK›KœK›K›KœK›K›K›K›K›K›KšK’KKK K,K5K1K=KHKUKUKWKNK?K?KGKHKLKMKMKLKLKNKMKMKLKKKLKLKKKKKKKKKKKKKKKLKJKIKKKLKLKKKKKKKKKLKKKHKIKLKKKKKIKHKGKHKIKIKHKHKHKIKHKHKEK=K7K:KAKAKCKCKBKCK_K‰KŸKŸK KƒKAKBK?K8KWKšKžKKžKžKKKœK KhK5KRK™KaK=KBK@K„K‘KœKžKšKRK>KgKŸKžKKKKœK’KjK;KCK@KAKtK’KŸK K K K K K KŸK›KUKKBKBKK£KŸKŸK’KlK@KSKšK’K K KŸK‘K]K:K>K=K=KK7K:K9KXK†KžKœKdKCKKK’KΓKΌKΑK€KKKKKK"KCK\K΅KΎKΎKΐK«K’K’K’K’K₯KšKVKHKœKΓKΎKΐKΆK˜KkK?KDKCKAK@KDKEKVK²KΎK½KΐK¦KGK@KBK@KBK@K>KAKBKEKAKVK΄KΏK»KΌK»K`K:K>KKEKCK@K0K*K*K*K)K)K(K'K;KBKAK>K0K-K*KDK­K΅K4KK K K K K +KKKKKKK K KK;K?K>K@K3K%K&K%K%K'K K KKK +K KK;KYK―K΅KΆKΆK΅K΄KΊKŒKEKhK½K^K:K@e]rΒ(KœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœK›K›KœKœKcK9K,KKK‰KέK³KlK;K=K*K&K>K@K=K:K8K,K;KEKCKCKKKAKAKCKEKDK’KΓKΊKΊKΓK‰K;K>KK>K?K;K*K&K%K%K%K&KKKKK KK0KCK‡K»K΅K΅K΄K΄KΆK―K`KEK’KK@K@e]rΓ(KœKœKœKœKœKœKœKœKœKœKœKœK›K›K›K›KœKœKœKœKœKœKœKœK›K›K›K›K›K›K›K›K›KœKœKœKœKœK›K›KK™K]K7K/KLK”KΫK­KfK;K=K*K*K>K?K>K:K6K+KKAK;KDKK KKžKžKKKœK’K„KKCK:KiK£KœKœK£KlK;KRK”K K K KKžK¦KƒK>KBKAK=KZK›K‘K K K K K K KŸK¦KrK>KMK˜KxK=KDK8KoK€KžK K₯KKIKBK|K£K K KŸK£KKK?KDKDK@K5K2K5K4K5K4K1K2K3K2K5KBK>K6K5K5K=K{KŸKRKK?K?KBKHKBKlK½K»K»KΐK­KKK9KK2K#K#K%K%K%KKKKKKK#KAK[K±KΈK΄K΅K΅K΄KΊKŒKEKiKΌKaK9e]rΔ(KœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœK›KœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœKœK›K›KK–KXK4K4KOK KΩK¨K^K9KKBKEKEK@KJKBK;K?K;K?K…K£KKKžKžKŸKK KK@K7KzKŒKAKCK=K\K KKK€KyK>KHKŠK’K K KžKŸK€K’KFKAK@K;KNK”K£K K K‘K‘K‘K K K€K„KCKDK‰KK>KBKKCKCKAK7K3K4K4K4K4K1K2K3K2K2K>KCK6K5K5K8K^K‹K…K]KFK=K:K9K8KdKpKJK{KXKAKPK¬KΒKΎKΓK^KIK3KKK#KDKcKΌKΎKΎKΎK©K™K˜KœKžKŸK“KSKIKKΓK½KΎKΈK—K^KAKCKBKCK@KEKEKRK―KΏK»KΏKKMK@KBKBK@K>K?K?KBKDKEKNK¨KΏKΊK»KΐKoK9K?K;K9K8K8K8K6K4K5K>KCK@KiKΌKΊKΉKΉKΌKlK>KDKAK@K4K+K)K)K)K)K&K#K3KAK?K@K4K+K‹K½K΄KΏKeKK K K K +K K +K KKK KKKK +K0K?KK:K9K7K+K>KDKCKBK7K)K,K,K!KKKKKKKKKKKKKKKKK K KKK +K K K KKKKKKKKKKKKKKKKKKKKKKKKK!K!K K5K@KGKKžKœKœK›K–K–K•K™KtKCKuK›K˜K—KžKžKKžKžKžKžKžKžKKK 
KŠKgKmKiKfKgKfKcKaK_K_K]K[KTKLK=K2K%KKKK*KBKNKKKMKNKLKKKKKKKLKIKHKKKKKKKLKKKKKKKKKKKLKJKHKHKHKKKKKHKIKHKHKHKHKHKHKHKHKHKHKHKHKHKIKIKGKFKFKGKHKGKAK;KKIK›K’K K K€KcK>KZKžK‘K K K‘K‘KTK:K=KK:K;K9KJKyKcK{K`KDKEKKΖK½KΔKŒK\KDKKKK@KOK₯KΐK½KΑK°KK†KƒKŠK“KžKfKAKzKΓK½K½KΎK”KxKIKBKBKBKBKBKFKBKŽKΔK»K»KΎKjKK?K?KBKBKGKCK„KΒKΊKΊKΑK™K?K;K:K:K8K8K8K6K5K3K:KDKDKLK£KΎKΉKΈKΎK”KBKBKAKBK=K+K)K)K)K)K'K$K)K>KBKCK;K8KœK½K·KΌK KK K K K +K K KKKK K +KKKKK;K:K=K=K0K$K#K&K&K'KKKKKKK,K@K\K΅KΉK²KK³K³KΉK‰KAKjK½Kbe]rΖ(K›KœKKKKKKKKKKKKKžKKKKKœK›KœKœKœKœK›KœKKKKKœK›KœKœKKœK›KœKœKŸK‰KOK-KK;K9K5K-KAKCKCKCK7K)K*K*KKKKKKKKKKKKKKKKKK K K K K +K K K KKKKKKKKKKKKKKKKKKKK K KKKK K!K K5KBKGKKŸKœKKŸKžKKœK’K|K:KJK†K‘K™KKKžKœKKžKœKœKžKKK K‹KdKbKdKeKcK_KaKbKgKsK{K[K/K#K!K!KKKKKKK2KHKNKKKKKKKKKKKLKIKHKJKLKKKKKKKKKKKKKKKLKKKJKJKJKLKKKHKHKHKHKHKHKHKHKHKHKHKHKHKHKIKHKGKHKHKHKHKGKGKHKFK>K8K:K>KDKHKGKDKDKCK:KpK€KžKœKŸK K‘K KŸKžKQK5KbKœKQK>K@KGK”K‘KŸK’K”KFKK@KxK₯K K K‘K‘K‘K‘K K’KKUK>KfK‘KQK:K@K?KŒK£K KŸK¦KvK?KOK•K€K‘K’KŸK₯KfK7K?K=KKBKBKBKBKGKEKiK½K»KΊKΒK‘K>KAK@K?K?K?K?K@KAKHKCK`KΉK»K»K½K·KWK7K:K:K8K7K6K5K5K2K4KAKEK?K}KΐKΈKΉKΊK΅K[K@KCKBKAK1K*K)K)K)K'K%K%K:KBKAK>KQK­K»KΉKΈKΎKPKK K K +K K +K KKKK K K +K +K +K/K9K;K?K8K$K%K&K%K$K%KKK KKKKAK@KŽKΌK²K²K³K΄KΆK­K_KDK Ke]rΗ(K›KœKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKœK›KœK›KœKœK›KKžKžKžKžKœK›K›KœKžKK›KKK’KƒKIK-K?KYKΑKΟK–KKK8K7K"K5K?K>K;K8K3K-K@KCKCKCK7K)K+K(KKKKKKKKKKKKKKKKKK K K K K +K K K KKKKKKKKKKKKKK KKKKKK K KKKKK!K K6KDKHKK KKžKKKžKžK‘KŒK[K"KPKƒK™KKœK K—KšK˜K›KžKKžKK K‹K`K]K_KkKtK|K†KK˜KK‘KK‚KXK3K K!K$K"KKKKK"KKiK£K K K‘K K‘K‘K K K’KeK=KUK KbK8KAK8K{K₯KŸKŸK₯KˆKDKGK„K₯K‘K£KŸK¦K}K;K@K=KK3K1K4K2K4K5K3K2K/K/K0K>K>K4K5K5K;KSK€K|KdKCKKCKBKBKBKEKEKNKͺKΐKΊKΏK°KNKK>KCKFKHKœKΎKΊKΊKΓK}K6K;K:K8K8K8K6K4K3K2K:KCKAKYK³K»KΉKΈKΐK„K?KEKBKBK:K*K)K)K)K&K&K&K/K>K@K=K[K΄KΊKΉKΈKΏKKK +K K +K +K K KKKKKKK KKK7K8K?K=K.K$K%K$K#K&KKKKKK K1KAKaK±K΅K΅K΅K΅K³KΈKˆKCKhKΌe]rΘ(KKKKKKKKKKKKKžKžKžKKKKKKKKKKKKKKKKžKKKKKKKKKœK‘K|KGK/KBK^KΚKΜKŽKGK9K7K!K7K@K>K;K9K1K.KCKAKAKAK6K)K*K)KKKKKKKKKKKKKKKKKK K K K K K KKKKKKKKKKKKKKKKKKKK K K K K KKK!K!K"K7KCKJKK KKžKK K–KvK€KgK9K%KXKoKvK€K£K“K–K˜KŽK›KžKKKKŸK”KzK„KŽK—KžK K K KŸKKKžK K‘KKgK;K%K K%K$K KKKK,KBKMKKKJKLKIKHKIKIKIKIKIKKKLKIKIKLKJKHKKKLKJKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKHKGKGKGKGKFKGKFKFKGKHKGKBK8K8K;K2K4KEKIKFKHKbK‰K€K£K K KŸK¨KlK2KIK™KiK=KEK=K|K₯KŸK K‘K[K9KZKœK‘K K K‘KŸK¦K{KKsK€K K£K‘K¦KŽKBK>K>K>K=KK4KK@KGKCKDKAK>K>KAKBKAK6K2K3K2K4K1K2K2K0K/K.K9KAK7K5K4K7K_K‘KkK{KMK>K:K:K9KHKQKeK†KdKCKGKšKΔK»KΒKŒK]KJKKKK=KOK©KΒK½KΐK£K†KšK“K–KŸK‘KeK=K}KΓK»KΌK»K«KKIK?KAKBKBKBKDKAKˆKΓKΊKΊKΐKqK9K@K?K?K=K=K=K=KAKEKAKvK½KΊK»KΐK₯KCK:K9K7K8K:K7K4K4K4K9K?KBKAKKΏKΉKΉK½K©KMK@KAKBKK6K%K$K%K%K&K%KKKK KK KCKDKK»K΄K΄K΅K΄KΆK¬K]KEK’e]rΙ(KžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKžKKK‘KvKEK+KDKhKKΕK†KBK9K4K!K7K?K>K:K9K0K/KAKAKAKBK2K)K,K(KKKKKKKKKKKKKKKKKK K K K K K KKKKKKKKKKKKKKKKKKKK K!K K K!K K"K%K%K#K8KCKJKK KKKžKšKvKKKEKIK.K7KtK|KK˜KKvK‡KŽKŠK›KžKžKžKžKžKžKŸK K’KžKžKžKKKžKžKKKžKžKŸK‘K–KuKKK.K#K&K#KKKKK2KGKMKKKIKHKHKHKIKHKHKJKKKHKIKKKJKHKJKKKIKGKHKIKIKHKHKHKHKHKIKIKIKHKHKHKHKHKIKGKGKFKFKFKFKFKFKFKFKEKGKHK@K5K-K5KBKDKEKHKCKGKeKŽK€K’KžK₯KzK2KBKKyKK?K=KK?K?K=K=KKoKΊK΅KΆK΅KΏK~K K K K K KK +K +KKKKKKKK!K>KKIKkKK€K₯KˆK7K:K„K‰K;KAK8K]K€K K K¦KzK9KHKŽK£K K£K‘K 
KKKKKKKKKKKKK)K(K(K*KKKK K K KKKKKKKKKK!K K#K$K&K%K%K'K)K(K(K(K)K+K*K*K*K*K*K+K(K-K-K1K>K:KoK΅K¨K„KJKEK[KfKmK‹KͺK§K₯KaK“KͺKK+KoK‰KKΈKœK}KKžKqK0KDKŒKNKxK„KUK—K²K’K«K¦K¨KšK–KK©K˜K‰K­K·K•K]K5KkKzKtK~KWKAKK>K>K?K=K;K?K>K>K>KK>K?K>KK;K"K0K8K7K5K4K4K5K3KK2K@K>K=KK>K@K:K-K=K?K>K@KKK?KŒKΎK΄KΈK­KCK(K-K)K*K+K,K=K@KcK΅K΅K΄KΌK“K-K+K+K,K)K)K(K,K?K?K\K³K΅K΄KΆK°KGK&K+K*K+K+K-K.K,K.KK:KCK@K8K2K*KKKKKK*K:K9K;K9e]r(K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K°K·K‡KNK.K:KJK™KΧKKjK6K5K/KK3K:K9K7K4K1K1K9K>K?K?K=K,K'K(K%K KKKKKKKKKKKKK)K(K(K)KKKK K KKKKKKKKKKK K!K#K$K%K&K(K(K(K(K+K(K(K+K*K*K*K*K+K(K'K/K-K3K@K=KcK±K₯K‰KqKŠKK˜K“K•KŸK–K…KUKzKjK KIKLKyK›KΈK KfKŒK‘K{KzK{KbKVK˜K—KŽK‡K«KŽK˜KͺK΄K·KΆKΆKΆKK’K¬K΅K£KeKKKLKCK9KrKcKNK1K!K2KvKœK‰KKšK“K!KZKvKBK1KGKSKKK™K΄KK₯K΄K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅KΆK΅K΅KΆKΆK΅K΅KΆK΅K΅K΅K΅K΅K΅K΅KΆKΆKΆK³K΅K³K³K΅K‘K©KK@K:K@KAKAK?K>K?K>KK>K>K>K>K?K>K=K>K?K?K>KK>K>K=KK>K=K:K4K1K;K=K>K>K>K?K0K9K@K>K?K@K8K,K0K3K?K>K>K(KKKK+K*K3KAKŽKΐKΈKΑK‡K"K'K&K$K,K?KSK―KΌK·KΏKpK$K+K+K*K(K8K>KmKΌK΅K΅KΌKjK%K*K)K*K+K)K5KCKHK KΉK³K΅K―KGK'K+K+K*K(K)K+K:KBKDK›K»K΄K³KΊKqK$K,K*K+K,K-K.K.K,K6KBK=KfK³K±K±K°K·KdK-K4K5K4K9K;KWKK¬K°KͺKUK?K>K\K―K²K±K²K±K²K±K²K°K±KK™KyKUKOKMKQKWKLKDK=KK:KyK²K«KKK°K™KMKFKFK=K0K4K9K,K KKKKKKK.KK1K8K9K9e]r(K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K²K±KΆK}KHK*K=KMK€KK§KdK3K5K*KK5K:K:K8K4K1K2K:K>K@K?K=K,K'K(K%K KKKKKKKKKKKKK)K(K(K(KKKK +K KKKKKKKKKKK K!K#K$K%K'K)K(K)K(K%K(K*K*K*K*K*K+K*K+K+K.K-K4K@K:K]K²KK K’K~K‘K£K™K¨K³KͺKƒKSK6KKK]K~K©K K±K°KxKrK¦K˜K…KKBKvK•K“K–KKšK·K₯K«KΆK΄K΄K΄K΄K³K³K°K³KΈK²KŽKnKMKZKKŒK‘KWK/K'K@KXKWK…K˜K]KKTKqKNKCKPKnKFK~KΆKžKŽKΉK΅K΄K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K΅K·K΅K΅K·K·K΅K΅K·K΅K΅K΅K΅K΅K΅K΅KΆK·KΆK³K΄K΄K―K”K‰KΆKΊK9KK4KBKBK?K>K?K>KK?K>K>K?K?K?K=KK?K?K>KK?K?K>K=KK>K?K,K)K7K8K8K8K5K5K7K KK@K>K?K>K:K7K0K9K=K>K?K>K?K3K6KAK?K?K?K>K3K)K6K?K>K?K;KKKKK-K5K:KoKΐKΊKΌK€K5K$K&K%K&K;KCK—KΎK·K½K“K-K'K+K*K*K2K?KQK­KΉK΄K½KK.K,K,K+K+K)K0KCK?K„K»K³K³K»KlK%K,K+K*K)K(K)K2KBKKK and released as version 0.9 on April 9, 2004. +The wrapper was slightly modified by Joonas Paalasmaa for the 3.0 version +in March 2012. + +License of L-BFGS-B (Fortran code) +================================== + +The version included here (in lbfgsb.f) is 3.0 (released April 25, 2011). It was +written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal . It +carries the following condition for use: + + """ + This software is freely available, but we expect that all publications + describing work using this software, or all commercial products using it, + quote at least one of the references given below. This software is released + under the BSD License. + + References + * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound + Constrained Optimization, (1995), SIAM Journal on Scientific and + Statistical Computing, 16, 5, pp. 1190-1208. + * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (1997), + ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560. + * J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B, + FORTRAN routines for large scale bound constrained optimization (2011), + ACM Transactions on Mathematical Software, 38, 1. + """ + +The Python wrapper +================== + +This code uses F2PY (http://cens.ioc.ee/projects/f2py2e/) to generate +the wrapper around the Fortran code. 
+
+The Python code and wrapper are copyrighted 2004 by David M. Cooke.
+
+Installation
+============
+
+Make sure you have F2PY, scipy_distutils, and a BLAS library that
+scipy_distutils can find. Then,
+
+$ python setup.py build
+$ python setup.py install
+
+and you're done.
+
+Example usage
+=============
+
+An example of the usage is given at the bottom of the lbfgsb.py file.
+Run it with 'python lbfgsb.py'.
+
+License for the Python wrapper
+==============================
+
+Copyright (c) 2004 David M. Cooke
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/voice_bridge/scipy/optimize/minpack2.pyd b/voice_bridge/scipy/optimize/minpack2.pyd new file mode 100644 index 0000000000000000000000000000000000000000..621ed0c5a768e1e0deb960a00cf981284c3b08a5 Binary files /dev/null and b/voice_bridge/scipy/optimize/minpack2.pyd differ
diff --git a/voice_bridge/scipy/optimize/moduleTNC.pyd b/voice_bridge/scipy/optimize/moduleTNC.pyd new file mode 100644 index 0000000000000000000000000000000000000000..a3fcd11af29057779bdeae1f77f4241809c855ce Binary files /dev/null and b/voice_bridge/scipy/optimize/moduleTNC.pyd differ
diff --git a/voice_bridge/scipy/signal/_max_len_seq_inner.pyd b/voice_bridge/scipy/signal/_max_len_seq_inner.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f565362e5fdbd83ecfefea6f4d0cf08633b02eb3 Binary files /dev/null and b/voice_bridge/scipy/signal/_max_len_seq_inner.pyd differ
diff --git a/voice_bridge/scipy/signal/_peak_finding_utils.pyd b/voice_bridge/scipy/signal/_peak_finding_utils.pyd new file mode 100644 index 0000000000000000000000000000000000000000..8d2078f1c05a1e13ef4ea1177ef9fc5e963af719 Binary files /dev/null and b/voice_bridge/scipy/signal/_peak_finding_utils.pyd differ
diff --git a/voice_bridge/scipy/signal/_sosfilt.pyd b/voice_bridge/scipy/signal/_sosfilt.pyd new file mode 100644 index 0000000000000000000000000000000000000000..b2faab0f7d699965d291b298a9f3490a96437490 Binary files /dev/null and b/voice_bridge/scipy/signal/_sosfilt.pyd differ
diff --git a/voice_bridge/scipy/signal/_spectral.pyd b/voice_bridge/scipy/signal/_spectral.pyd new file mode 100644 index 0000000000000000000000000000000000000000..71b6dfc015d998abb98089d0888718912a2c4712 Binary files /dev/null and b/voice_bridge/scipy/signal/_spectral.pyd differ
diff --git a/voice_bridge/scipy/signal/_upfirdn_apply.pyd b/voice_bridge/scipy/signal/_upfirdn_apply.pyd new file mode 100644 index 0000000000000000000000000000000000000000..8bf1ab18a735d129cec2dcca0b113a2447737ca8 Binary files /dev/null and
b/voice_bridge/scipy/signal/_upfirdn_apply.pyd differ diff --git a/voice_bridge/scipy/signal/sigtools.pyd b/voice_bridge/scipy/signal/sigtools.pyd new file mode 100644 index 0000000000000000000000000000000000000000..7ff8bb0a505c785e1bac357568ad61567f67ea00 Binary files /dev/null and b/voice_bridge/scipy/signal/sigtools.pyd differ diff --git a/voice_bridge/scipy/signal/spline.pyd b/voice_bridge/scipy/signal/spline.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d7787f946e2dad07b43554fcdbdf5af242b891b0 Binary files /dev/null and b/voice_bridge/scipy/signal/spline.pyd differ diff --git a/voice_bridge/scipy/sparse/_csparsetools.pyd b/voice_bridge/scipy/sparse/_csparsetools.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d08577af322a17d5a1e03ae6eb7f3c5e07a2f722 Binary files /dev/null and b/voice_bridge/scipy/sparse/_csparsetools.pyd differ diff --git a/voice_bridge/scipy/sparse/_sparsetools.pyd b/voice_bridge/scipy/sparse/_sparsetools.pyd new file mode 100644 index 0000000000000000000000000000000000000000..208e55dcd1813bf1de40bc8a4c6d2bd0918f16c7 --- /dev/null +++ b/voice_bridge/scipy/sparse/_sparsetools.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc92c708669c8be048bfd22a7c11a3774bcc2d7836a3c9e29a2c7905cc2567d5 +size 2460672 diff --git a/voice_bridge/scipy/sparse/csgraph/_flow.pyd b/voice_bridge/scipy/sparse/csgraph/_flow.pyd new file mode 100644 index 0000000000000000000000000000000000000000..737948d723e24e47f435de94d33413db821407a5 Binary files /dev/null and b/voice_bridge/scipy/sparse/csgraph/_flow.pyd differ diff --git a/voice_bridge/scipy/sparse/csgraph/_matching.pyd b/voice_bridge/scipy/sparse/csgraph/_matching.pyd new file mode 100644 index 0000000000000000000000000000000000000000..dab85064caedf22d4177d9fe4a30be98edf400d2 Binary files /dev/null and b/voice_bridge/scipy/sparse/csgraph/_matching.pyd differ diff --git a/voice_bridge/scipy/sparse/csgraph/_min_spanning_tree.pyd b/voice_bridge/scipy/sparse/csgraph/_min_spanning_tree.pyd new file mode 100644 index 0000000000000000000000000000000000000000..bb649308b8b9534bd7cc4356bd9babd8d82ad0ba Binary files /dev/null and b/voice_bridge/scipy/sparse/csgraph/_min_spanning_tree.pyd differ diff --git a/voice_bridge/scipy/sparse/csgraph/_reordering.pyd b/voice_bridge/scipy/sparse/csgraph/_reordering.pyd new file mode 100644 index 0000000000000000000000000000000000000000..8a06c69a48cf31ec4479cd2ac1a7243ad142c8b9 Binary files /dev/null and b/voice_bridge/scipy/sparse/csgraph/_reordering.pyd differ diff --git a/voice_bridge/scipy/sparse/csgraph/_shortest_path.pyd b/voice_bridge/scipy/sparse/csgraph/_shortest_path.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f80036449e8e391208ab19d37ec112af206600a4 Binary files /dev/null and b/voice_bridge/scipy/sparse/csgraph/_shortest_path.pyd differ diff --git a/voice_bridge/scipy/sparse/csgraph/_tools.pyd b/voice_bridge/scipy/sparse/csgraph/_tools.pyd new file mode 100644 index 0000000000000000000000000000000000000000..c8aabc845376835a0be52260d76c5715f9060b4d Binary files /dev/null and b/voice_bridge/scipy/sparse/csgraph/_tools.pyd differ diff --git a/voice_bridge/scipy/sparse/csgraph/_traversal.pyd b/voice_bridge/scipy/sparse/csgraph/_traversal.pyd new file mode 100644 index 0000000000000000000000000000000000000000..5d1b10b324100da2f294ad9f9e099605c4c004ca Binary files /dev/null and b/voice_bridge/scipy/sparse/csgraph/_traversal.pyd differ diff --git 
a/voice_bridge/scipy/sparse/linalg/dsolve/SuperLU/License.txt b/voice_bridge/scipy/sparse/linalg/dsolve/SuperLU/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..e003503202d1b65f35debc580dd28d5abdc7602b --- /dev/null +++ b/voice_bridge/scipy/sparse/linalg/dsolve/SuperLU/License.txt @@ -0,0 +1,29 @@
+Copyright (c) 2003, The Regents of the University of California, through
+Lawrence Berkeley National Laboratory (subject to receipt of any required
+approvals from U.S. Dept. of Energy)
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+(1) Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+(2) Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+(3) Neither the name of Lawrence Berkeley National Laboratory, U.S. Dept. of
+Energy nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/voice_bridge/scipy/sparse/linalg/dsolve/_superlu.pyd b/voice_bridge/scipy/sparse/linalg/dsolve/_superlu.pyd new file mode 100644 index 0000000000000000000000000000000000000000..9ffb5213c3eb8eabe4a194a285fd2199aefc8559 Binary files /dev/null and b/voice_bridge/scipy/sparse/linalg/dsolve/_superlu.pyd differ
diff --git a/voice_bridge/scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING b/voice_bridge/scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..e87667e1b8c178e53c6a7c6268ebc09ab4b0476c --- /dev/null +++ b/voice_bridge/scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING @@ -0,0 +1,45 @@
+
+BSD Software License
+
+Pertains to ARPACK and P_ARPACK
+
+Copyright (c) 1996-2008 Rice University.
+Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff.
+All rights reserved.
+
+Arpack has been renamed to arpack-ng.
+
+Copyright (c) 2001-2011 - Scilab Enterprises
+Updated by Allan Cornet, Sylvestre Ledru.
+
+Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch)
+
+Copyright (c) 2007 - Sébastien Fabbro (gentoo patch)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+ +- Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + +- Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/voice_bridge/scipy/sparse/linalg/eigen/arpack/_arpack.pyd b/voice_bridge/scipy/sparse/linalg/eigen/arpack/_arpack.pyd new file mode 100644 index 0000000000000000000000000000000000000000..357bd5640432e667d63e0cf7d983ba76dc16913e Binary files /dev/null and b/voice_bridge/scipy/sparse/linalg/eigen/arpack/_arpack.pyd differ diff --git a/voice_bridge/scipy/sparse/linalg/isolve/_iterative.pyd b/voice_bridge/scipy/sparse/linalg/isolve/_iterative.pyd new file mode 100644 index 0000000000000000000000000000000000000000..93b1d625a1bbe51b59df657ac8faf12c4792bfc2 Binary files /dev/null and b/voice_bridge/scipy/sparse/linalg/isolve/_iterative.pyd differ diff --git a/voice_bridge/scipy/sparse/tests/data/csc_py2.npz b/voice_bridge/scipy/sparse/tests/data/csc_py2.npz new file mode 100644 index 0000000000000000000000000000000000000000..d4459ff2786fabe4bcf4653d880cbf0afd4bfdcf --- /dev/null +++ b/voice_bridge/scipy/sparse/tests/data/csc_py2.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bac27f1a3eb1fdd102dae39b7dd61ce83e82f096388e344e14285071984d01fa +size 846 diff --git a/voice_bridge/scipy/sparse/tests/data/csc_py3.npz b/voice_bridge/scipy/sparse/tests/data/csc_py3.npz new file mode 100644 index 0000000000000000000000000000000000000000..e40a38584bc4647621601075d946ce46a8e065dc --- /dev/null +++ b/voice_bridge/scipy/sparse/tests/data/csc_py3.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b1b84315c7077417e720512d086a5a6217c2875b818d27704ae9b7237c69dfe +size 851 diff --git a/voice_bridge/scipy/spatial/_distance_pybind.pyd b/voice_bridge/scipy/spatial/_distance_pybind.pyd new file mode 100644 index 0000000000000000000000000000000000000000..5e3b5d1c3ed628bdcf5d246c95ed4c521f0f5eb8 Binary files /dev/null and b/voice_bridge/scipy/spatial/_distance_pybind.pyd differ diff --git a/voice_bridge/scipy/spatial/_distance_wrap.pyd b/voice_bridge/scipy/spatial/_distance_wrap.pyd new file mode 100644 index 0000000000000000000000000000000000000000..f21212cfddb7d7bffbba766a11b5d9e32e729bfb Binary files /dev/null and b/voice_bridge/scipy/spatial/_distance_wrap.pyd differ diff --git a/voice_bridge/scipy/spatial/_hausdorff.pyd b/voice_bridge/scipy/spatial/_hausdorff.pyd new file mode 100644 index 0000000000000000000000000000000000000000..de0c14005bd76d03b9f8493ccd8c85c4149c184f 
Binary files /dev/null and b/voice_bridge/scipy/spatial/_hausdorff.pyd differ diff --git a/voice_bridge/scipy/spatial/_voronoi.pyd b/voice_bridge/scipy/spatial/_voronoi.pyd new file mode 100644 index 0000000000000000000000000000000000000000..cb1e066505dd1f94d58f4571172aff45c510bce0 Binary files /dev/null and b/voice_bridge/scipy/spatial/_voronoi.pyd differ diff --git a/voice_bridge/scipy/spatial/ckdtree.pyd b/voice_bridge/scipy/spatial/ckdtree.pyd new file mode 100644 index 0000000000000000000000000000000000000000..1e005c72e20470a107e9b39f7e507045c8aaa155 Binary files /dev/null and b/voice_bridge/scipy/spatial/ckdtree.pyd differ diff --git a/voice_bridge/scipy/spatial/qhull.pyd b/voice_bridge/scipy/spatial/qhull.pyd new file mode 100644 index 0000000000000000000000000000000000000000..28a662736cc985fd7ef487390ef42d6404967d72 Binary files /dev/null and b/voice_bridge/scipy/spatial/qhull.pyd differ diff --git a/voice_bridge/scipy/spatial/qhull_src/COPYING.txt b/voice_bridge/scipy/spatial/qhull_src/COPYING.txt new file mode 100644 index 0000000000000000000000000000000000000000..4ac02a07f45d562410025f05305c31d1ec39a28c --- /dev/null +++ b/voice_bridge/scipy/spatial/qhull_src/COPYING.txt @@ -0,0 +1,38 @@ + Qhull, Copyright (c) 1993-2019 + + C.B. Barber + Arlington, MA + + and + + The National Science and Technology Research Center for + Computation and Visualization of Geometric Structures + (The Geometry Center) + University of Minnesota + + email: qhull@qhull.org + +This software includes Qhull from C.B. Barber and The Geometry Center. +Qhull is copyrighted as noted above. Qhull is free software and may +be obtained via http from www.qhull.org. It may be freely copied, modified, +and redistributed under the following conditions: + +1. All copyright notices must remain intact in all files. + +2. A copy of this text file must be distributed along with any copies + of Qhull that you redistribute; this includes copies that you have + modified, or copies of programs or other software products that + include Qhull. + +3. If you modify Qhull, you must include a notice giving the + name of the person performing the modification, the date of + modification, and the reason for such modification. + +4. When distributing modified versions of Qhull, or other software + products that include Qhull, you must provide notice that the original + source code may be obtained as noted above. + +5. There is no warranty or other guarantee of fitness for Qhull, it is + provided solely "as is". Bug reports or fixes may be sent to + qhull_bug@qhull.org; the authors may or may not act on them as + they desire. 
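The entries that follow add plain-text fixtures for the scipy.spatial test suite: cdist-X1.txt (10 rows of 23 coordinates), cdist-X2.txt (20 rows of 23), a degenerate point set, and the classic 150-row iris table. As a hedged sketch of how fixtures of this shape are consumed (the file paths come from the diff; the loading code is illustrative, not the test suite's own):

# Illustrative only: load the two fixture matrices and compute all
# pairwise Euclidean distances between their rows.
import numpy as np
from scipy.spatial.distance import cdist

X1 = np.loadtxt("voice_bridge/scipy/spatial/tests/data/cdist-X1.txt")  # shape (10, 23)
X2 = np.loadtxt("voice_bridge/scipy/spatial/tests/data/cdist-X2.txt")  # shape (20, 23)

D = cdist(X1, X2, metric="euclidean")  # shape (10, 20); D[i, j] = ||X1[i] - X2[j]||
print(D.shape, D.min(), D.max())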
diff --git a/voice_bridge/scipy/spatial/tests/data/cdist-X1.txt b/voice_bridge/scipy/spatial/tests/data/cdist-X1.txt new file mode 100644 index 0000000000000000000000000000000000000000..833d5bdf2a344f585c5f34faa3e22716b1aa363c --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/cdist-X1.txt @@ -0,0 +1,10 @@ +1.147593763490969421e-01 8.926156143344999849e-01 1.437758624645746330e-02 1.803435962879929022e-02 5.533046214065578949e-01 5.554315640747428118e-01 4.497546637814608950e-02 4.438089247948049376e-01 7.984582810220538507e-01 2.752880789161644692e-01 1.344667112315823809e-01 9.230479561452992199e-01 6.040471462941819913e-01 3.797251652770228247e-01 4.316042735592399149e-01 5.312356915348823705e-01 4.348143005129563310e-01 3.111531488508799681e-01 9.531194313908697424e-04 8.212995023500069269e-02 6.689953269869852726e-01 9.914864535288493430e-01 8.037556036341153565e-01 +9.608925123801395074e-01 2.974451233678974127e-01 9.001110330654185088e-01 5.824163330415995654e-01 7.308574928293812834e-01 2.276154562412870952e-01 7.306791076039623745e-01 8.677244866905511333e-01 9.160806456176984192e-01 6.157216959991280714e-01 5.149053524695440531e-01 3.056427344890983999e-01 9.790557366933895223e-01 4.484995861076724877e-01 4.776550391081165747e-01 7.210436977670631187e-01 9.136399501661039979e-01 4.260275733550000776e-02 5.943900041968954717e-01 3.864571606342745991e-01 9.442027665110838131e-01 4.779949058608601309e-02 6.107551944250865228e-01 +3.297286578103622023e-01 5.980207401936733502e-01 3.673301293561567205e-01 2.585830520887681949e-01 4.660558746104259686e-01 6.083795956610364986e-01 4.535206368070313632e-01 6.873989778785424276e-01 5.130152688495458468e-01 7.665877846542720198e-01 3.444402973525138023e-01 3.583658123644906102e-02 7.924818220986856732e-01 8.746685720522412444e-01 3.010105569182431884e-01 6.012239357385538163e-01 6.233737362204671006e-01 4.830438698668915176e-01 2.317286885842551047e-02 7.585989958123050547e-01 7.108257632278830451e-01 1.551024884178199281e-01 2.665485998155288083e-01 +2.456278068903017253e-02 4.148739837711815648e-01 1.986372227934196655e-01 6.920408530298168825e-01 1.003067576685774398e-01 7.421560456480125190e-01 1.808453980608998313e-01 4.251297882537475870e-01 6.773002683522370004e-01 4.084108792570182445e-01 7.462888013191590897e-01 8.069930220529277776e-01 9.211110587681808903e-01 4.141491046181076108e-01 7.486318689260342829e-01 9.515405507589296263e-01 4.634288892577109742e-03 8.027593488166355762e-01 3.010346805217798405e-01 8.663248877242523127e-01 2.479968181181605447e-01 5.619851096054278017e-01 3.903886764590250857e-01 +7.122019976035700584e-01 6.188878051047785878e-01 7.290897087051201320e-01 6.334802157757637442e-01 5.523084734954342156e-01 5.614937129563645213e-01 2.496741051791574462e-01 5.972227939599233926e-01 1.786590597761109622e-01 2.609525984850900038e-01 7.210438943286010538e-01 2.211429064605652250e-01 9.140497572472672250e-02 1.430242193668443962e-01 7.856446942916397447e-01 4.635256358156553125e-01 5.278744289813760426e-01 3.702808015407184072e-01 5.527073830480792038e-01 6.370732917599846168e-01 9.953487928925482953e-01 3.021789770611936765e-01 3.354901923998221402e-02 +6.509638560895427695e-01 8.387598220902757751e-01 7.761375971745763103e-01 1.481627639227802717e-01 3.529474982902305324e-01 4.883093646287851586e-01 9.652923033658690199e-01 9.500680513565308294e-01 3.061885005078281985e-01 7.271902818906019750e-01 2.358962978196710303e-03 7.359889703223099211e-01 8.988893768074724955e-01 4.135279653937307121e-02 
8.516441856688283796e-01 4.889597623270667270e-01 5.575909822114655245e-01 9.010853652261575641e-01 2.912844516556202246e-01 9.088759383368658629e-01 8.104351227460024898e-01 8.080695436776826890e-01 1.430530913253185155e-01 +8.048001196608134400e-01 3.066089444418462762e-02 9.021887554292090661e-01 6.154331491807940591e-02 1.378912575206647784e-02 5.775720193142440673e-01 1.219298963069791464e-01 1.883270243412101808e-01 5.569262398688379356e-02 8.964817777510125651e-02 7.977092785346929782e-01 4.878149375226197293e-01 4.511973131518809410e-02 1.858690046801604323e-01 6.947686471083162063e-01 5.884058794291086025e-01 8.638884676612634816e-01 3.855470871341656336e-01 3.495049047300468059e-01 2.767740932353948136e-01 4.731087031714035218e-01 6.679001673437914288e-01 7.502944200696660682e-01 +6.527328264244687261e-01 8.289483383553154505e-01 9.179741348282299818e-01 1.065639864466713105e-01 6.253616929058514184e-01 5.927750325266062381e-01 3.039157425463192563e-01 2.452766763359194302e-01 6.514027700704632107e-01 5.529218485487964463e-01 4.941158239308394151e-01 6.605306467722642516e-01 2.273688037050677346e-01 4.282616592244774534e-01 2.956128257930247250e-01 1.154803628237965896e-01 9.228220410235263849e-01 6.663525307676617659e-01 1.908852615936970087e-01 9.921383408926374159e-01 4.988716450388516188e-01 1.014900352736023414e-01 3.363930180244284474e-01 +2.914369076275757919e-01 5.196673601143533272e-01 7.420144907858341465e-01 1.768984185504740569e-01 5.296766993228564369e-01 5.922023566159900776e-01 5.965161262020234334e-01 3.810272333046110793e-01 8.368797246118340194e-01 7.896422363801189892e-01 9.655797561098209414e-01 4.430034032346981121e-01 2.780869795706976122e-01 3.047310845416009162e-01 8.051138863500326703e-01 6.731468634690835895e-01 4.743383036815584930e-01 9.530709614322225853e-01 7.753587619850917934e-01 2.801137109357491051e-01 6.182543660889736614e-01 5.005218857766725593e-01 9.071447804755052857e-01 +2.075071644012620453e-01 4.834950086973934802e-01 3.037011473860764532e-01 6.476084284887700937e-01 8.107195771564194020e-01 7.869075869075803364e-01 6.851234019375299633e-01 3.544187468104398331e-02 4.847673235908021017e-01 5.690262846164507726e-01 1.663354142616256803e-01 9.692796809752548537e-01 4.133441725866372485e-01 6.729167604487583665e-01 3.998813427407297283e-01 8.272617414104491695e-01 2.129248316324727774e-01 6.517004761357130249e-01 7.363013506605019520e-01 4.072375306356985636e-01 4.463336683526665238e-01 5.485059309728204102e-01 1.981745754527846071e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/cdist-X2.txt b/voice_bridge/scipy/spatial/tests/data/cdist-X2.txt new file mode 100644 index 0000000000000000000000000000000000000000..fc3ea19674ee36856446c75df98b8c17c53ca51f --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/cdist-X2.txt @@ -0,0 +1,20 @@ +7.680465556300619667e-02 4.675022344069014180e-01 8.955498989131543963e-01 3.816236071436276411e-01 1.109030077070989329e-01 2.318928815459808668e-02 7.477394240984251983e-01 1.202289789304434864e-01 8.007290497575981769e-01 6.795195698871731027e-01 6.568225762396605605e-01 2.231475263228478445e-01 7.064624077661341151e-02 1.081656666815267176e-02 1.592069359090128033e-01 1.363392203645097389e-01 9.277020735447568667e-01 8.103136564528209407e-01 5.229467676276455812e-02 7.708020259874025504e-01 6.527954747473352359e-02 5.516397414886525796e-01 3.653371861367954443e-01 +8.144399106025798085e-01 7.731852525462976633e-01 6.909477620673205589e-01 9.696063817000286633e-01 4.297887511677249694e-01 
6.989600553425188156e-01 7.310201335033380543e-01 3.135256147868910048e-01 5.715578037275241829e-01 3.935000744675094531e-01 2.057715781268398825e-01 5.892508589665171881e-01 8.512951599236765476e-01 9.569808799061578775e-01 6.164885878024699561e-01 4.714185430004367294e-01 6.128831737628155363e-01 6.641799309623502845e-01 6.001985185338730711e-01 4.231922889723856995e-01 7.605249308075449077e-01 1.064530958018087281e-01 6.306470691957204444e-01 +4.265470127256254518e-01 5.933766716280767239e-01 3.698589270536845053e-02 2.173799740537294412e-01 3.032679325475639009e-01 4.271831790058847611e-01 1.828944535901013690e-01 4.772333422710156592e-01 2.564773455194128138e-01 7.120329875362141347e-01 8.952243430110462530e-01 1.808777012183288013e-01 3.612151871458374464e-01 3.960999167923041631e-01 1.821669970670747318e-02 8.835474857189200559e-01 1.353104648821573663e-01 3.457291739160937016e-01 1.126467375304566199e-01 4.107293162402323450e-01 4.051719311053743056e-01 4.007382985250427243e-01 1.286905671428811848e-01 +2.910657003883979632e-01 9.616259180685315933e-03 2.033032441536681834e-01 1.096599110293863255e-01 4.191101704605176836e-01 5.462131536027151624e-01 8.393047907010142694e-01 9.046805198676335369e-01 7.009863472176891541e-01 2.508215985039629059e-01 6.754410796667598138e-01 6.740895474032024826e-01 1.358993708621679675e-01 8.219861775211464439e-01 6.322220445623235596e-01 2.766813559002430090e-01 6.575983861590951607e-01 9.515869708336625044e-01 8.654526462353933081e-01 3.450245117834797037e-01 5.649032890631299209e-01 4.717687914789682191e-01 3.296483580510030098e-01 +9.172477457635394016e-01 3.057396583041891436e-01 7.335332344225760082e-01 8.370236206345178509e-01 3.765464253115927695e-01 5.089680319287778199e-01 1.202325719268168003e-01 9.717771065272349240e-01 5.907820104019682050e-01 9.809211614977710880e-01 9.064285003671219698e-01 8.848841466121748489e-01 2.043407730734815297e-01 9.157600394927275511e-01 4.532260315147775831e-01 4.241077335005828397e-01 1.751730149568804240e-01 4.090412146081819911e-01 3.632197861847064058e-02 5.832539334970230360e-01 4.041848151536805434e-01 3.603643989086504629e-01 1.838411383882069261e-01 +2.508806403290032572e-01 4.381403985282813496e-01 4.694787405018008286e-02 6.353900562024634713e-01 1.200813444244532846e-01 6.072397042913001419e-01 9.937255904754030977e-01 4.916670237677555066e-01 3.473845913923001572e-01 3.526875922864345370e-01 5.448595548197197047e-01 2.245096010156972799e-01 9.003258279804994269e-01 3.534560469735994470e-01 2.989266066346342177e-01 4.621024982808636938e-01 9.626538866576676012e-01 9.791401720716153001e-01 7.138514287330390840e-01 9.832862333928654719e-01 3.233999591031431198e-01 5.406467224926423398e-01 9.581890295057201579e-01 +5.210583601680578436e-01 4.598159993059653949e-01 2.111497132057748027e-01 5.949977700916546652e-01 6.342618461422359077e-01 9.888228769705599275e-01 6.096770711536318998e-01 7.548431368960863974e-01 7.490858664860100546e-01 3.186213496546415058e-01 7.895687083231245351e-01 4.178326793268141159e-01 8.095818334534051752e-01 7.886271673523481684e-01 4.038905626506847923e-01 3.652649247094948981e-01 8.267205959224892542e-01 6.433617243328785262e-01 3.117681563249452559e-01 9.675995575054980868e-01 3.675673836358472890e-01 5.863757289184046151e-01 9.099029857959717305e-02 +4.024573981231733821e-01 3.578997554002771864e-01 3.519299868071553705e-01 7.417747693762357653e-01 2.963713903285800644e-01 9.602967989298948348e-01 3.811392331739601458e-01 5.493237898295448840e-01 
6.835113342793640578e-01 2.304506220807415184e-01 3.727299857731285471e-01 5.450263991912108752e-01 6.951521210987908761e-01 6.474582745861203747e-01 6.316089475403589004e-01 5.672043967425510758e-02 9.034937506977609445e-01 2.332567550780038079e-01 1.096955741449157085e-02 8.870663813493575578e-01 4.384385452180562526e-01 7.100898998169548060e-01 3.245358176196319056e-01 +9.162009194452818139e-01 5.572224742426723498e-02 3.445910686865658601e-01 9.683564008127462097e-01 9.375063149031520604e-01 9.128188852869822956e-02 9.613605414326487075e-01 5.298598697556915482e-01 6.724799695520149445e-01 1.269103938571825019e-02 1.008406153387807480e-01 8.951105272379104028e-01 1.585460318853607609e-01 6.739986455059543413e-01 5.345419321702655768e-01 6.248843899572337213e-01 3.050288488994817859e-01 1.423645553465189284e-01 1.802121190541096096e-01 9.474646822694763326e-01 2.345716438587298613e-01 9.688281784764296578e-01 1.845165243240991515e-01 +2.548297646910531178e-01 2.580877375379494465e-01 1.355482532666937301e-01 6.478812986505504412e-01 9.971695982152032345e-01 2.606721082477282403e-01 5.483439686378906996e-01 4.409612606704470528e-01 4.396442074915688503e-01 7.414262832597111608e-01 7.308840725375539416e-01 8.072095530497225280e-02 6.829509968656330976e-01 5.700030854230387911e-01 3.801845336730320657e-01 2.481059916867158766e-01 3.977295094395927322e-03 5.749480512407895150e-01 4.112033136603401307e-01 8.676159710377848722e-01 9.062646588480167686e-01 3.326691167317923359e-01 8.498307982774666591e-01 +4.464338109330643345e-01 8.546516760817471914e-01 7.384800352329814466e-01 3.692485164984804502e-02 2.915662689505471583e-02 9.010049994217171898e-01 8.622900253010918892e-01 9.786230638032608065e-01 6.546824077297251909e-01 6.342297560006789903e-01 2.230339826582647955e-01 7.658846744185553446e-01 4.603043831539479491e-01 2.017100469861691225e-01 4.891590639893540482e-01 1.937140918314912419e-01 8.161582138652878626e-01 5.597293607114051106e-02 8.423261093326828153e-02 5.105392204475533990e-02 8.234193902673621057e-01 1.784268309975372002e-01 9.118997881986501408e-02 +8.588746913421980711e-01 1.479641118621310980e-02 1.375875301146138874e-01 7.533888774725254756e-01 5.782592791549248101e-01 9.128573037619659436e-01 1.831275762880391067e-01 3.471382864827737835e-01 4.859524740929310749e-02 8.955146541561730400e-01 4.787220791101074457e-01 4.222803577759057791e-01 8.469923964908064873e-01 6.300290047587608910e-02 1.020873237837905956e-01 3.585612487182909813e-02 6.320107119904569970e-01 5.891245970008752719e-01 1.104698053665007507e-01 4.233226558073774903e-01 4.432217054386708988e-01 2.864765416628194394e-01 2.489777211814803159e-02 +5.343810659756068615e-01 4.829076396403546578e-01 8.364480888953172988e-01 8.931374995414760321e-01 6.034161442354715188e-01 3.578336000768178593e-03 4.100579775972763574e-01 3.968667908067096128e-01 5.897163653686778861e-01 3.003241263928478899e-01 2.520935203143799264e-01 3.112129371563532310e-02 9.052865295974613646e-01 1.172285124002711010e-01 4.840001666149388315e-01 3.424620676348436588e-01 5.526057133826853818e-01 6.346139530261846184e-01 5.747945930485597321e-01 1.389915612177697879e-01 2.413801217666421417e-01 7.829900796662081497e-01 7.213528084845653998e-01 +9.384509283406079483e-01 6.303019601671526750e-01 1.787921522728125323e-01 1.556003868047917127e-02 5.662397078816850948e-01 3.437473614806091371e-01 8.615844972800188462e-01 7.624380237306396246e-01 1.096468347898514883e-01 1.276566836610887323e-01 8.479188493443535757e-01 
3.634713454428405432e-01 7.478112314318967613e-01 9.856395696968375253e-01 6.250293654177319080e-02 1.919327272501809567e-01 1.415594476031050153e-01 7.224057351041784925e-01 8.452145259310355208e-01 5.434318833772002755e-01 5.177620959731277228e-02 3.358977598185840518e-01 2.542654881527960375e-01 +4.800909104006243489e-01 3.651345393613150137e-01 3.657093052788148446e-01 8.579662326651369408e-01 5.787694361240260932e-01 6.491966196891312268e-01 3.252508517294879775e-01 8.639694334693422961e-01 3.028097078756678551e-01 6.295814666338699350e-01 7.305627351548695803e-01 6.975931849120264872e-03 8.321205159004851915e-01 2.681809305821257761e-01 3.628869474597150591e-01 9.598981434716586936e-01 5.947913523332928332e-01 7.794864238003402779e-01 2.819511239444029149e-01 5.134200958476284882e-01 7.284684743064278045e-01 3.099571109539331903e-01 1.502222882866774967e-01 +2.463382654375219083e-01 4.465700737264240994e-01 7.180855317941433613e-01 5.056099420785193921e-01 6.182117344332578313e-01 2.370453793561340117e-01 9.831748018047525850e-01 6.397098184531551102e-01 8.260469782208745837e-02 7.474671691560941245e-01 9.963429983418570224e-02 5.450078811081275898e-01 5.370188678062637333e-02 2.774024442708808991e-01 2.082643088545442778e-01 2.704155352788065736e-01 7.225035580445194894e-01 4.866791976239246420e-01 1.357043111201584606e-01 7.911335827987711067e-01 7.278977102006007893e-01 6.880892094410231419e-01 1.029231496520791600e-01 +6.901796117735281566e-01 1.558248977395644275e-01 4.241818789360329855e-01 5.055658246392458199e-01 1.756288758075611467e-01 4.215083703818177652e-01 7.809231602323289945e-01 1.170053878686481141e-01 6.497026323614403243e-01 5.733120641440232479e-01 4.407703406152092551e-01 5.608677124532297498e-01 7.471045703286000039e-01 3.334604336022076732e-01 8.927208811415126011e-01 9.794565286182396191e-01 9.621542824973521313e-01 3.945825239405253981e-01 8.338963875792834157e-01 9.310552325082104286e-01 7.688283033784242271e-01 3.798823731047119567e-01 1.459993613028365278e-02 +7.848623555505630511e-01 2.681039365355797344e-03 7.833208051794043891e-01 8.184381915171493604e-01 4.682581645582317709e-01 2.391069309436419932e-01 1.765377537168698607e-01 9.863494676539893424e-01 4.378412300863872009e-01 7.494505491149090481e-01 1.942180356195394308e-01 9.981402467222395547e-01 7.992190944052800505e-01 1.350875702852057936e-01 4.950149186748543650e-01 7.243422481248201761e-01 3.544596746353472216e-01 8.320192561472177228e-01 9.776840296475269865e-01 7.733852731914863110e-01 2.305732998099923048e-01 9.746878189802981041e-01 7.747723331200035979e-01 +6.521099013127149568e-01 5.452399443648201505e-01 8.146707517183656710e-01 3.827256063695345656e-01 7.954832091744263867e-01 7.834427643148527132e-01 9.661317930643520402e-02 9.215673965718058636e-01 4.914305728788055383e-01 4.105628408027649501e-01 9.844647830893304974e-02 3.974831165301851987e-01 3.857608898053827007e-01 5.520210781401946321e-01 3.445787541654143915e-03 4.552922057017416702e-01 7.456544561760444223e-01 4.753985092154335845e-01 2.821385239833401615e-01 7.560136035104459973e-01 8.453142510471420845e-01 6.679627143276523071e-01 6.910882868284401459e-01 +8.526493480446283302e-01 1.183917973068240315e-01 6.163988861865119517e-01 5.751899460059114455e-01 1.638797964925038375e-01 8.214597298784013235e-01 5.424670654187370156e-01 1.806631819658732763e-01 9.268107278221827672e-01 4.127397378597359445e-01 7.529877485901653733e-01 1.714251090083847018e-01 2.601487784245806179e-01 2.028326156742237263e-01 
5.299879450122358948e-01 7.587877062981395193e-01 4.070738595375062996e-01 3.546903049793261875e-01 8.695365138547607176e-01 1.447085661525142619e-01 3.193366245820845606e-01 8.797841086211429795e-01 2.666562188639977071e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/degenerate_pointset.npz b/voice_bridge/scipy/spatial/tests/data/degenerate_pointset.npz new file mode 100644 index 0000000000000000000000000000000000000000..4f22bd3a3c941a683747944a0f12c7914f4b3f07 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/degenerate_pointset.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:048abc1ddd924bf2d4d1f216015552ed9431f9e99546fbf382768eda58788175 +size 22548 diff --git a/voice_bridge/scipy/spatial/tests/data/iris.txt b/voice_bridge/scipy/spatial/tests/data/iris.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d78390c2596beb41b1abff651a729e4e964c36e --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/iris.txt @@ -0,0 +1,150 @@ +5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.700000000000000178e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01 +4.599999999999999645e+00 3.100000000000000089e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.600000000000000089e+00 1.399999999999999911e+00 2.000000000000000111e-01 +5.400000000000000355e+00 3.899999999999999911e+00 1.699999999999999956e+00 4.000000000000000222e-01 +4.599999999999999645e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.999999999999999889e-01 +5.000000000000000000e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01 +4.400000000000000355e+00 2.899999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 +5.400000000000000355e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01 +4.799999999999999822e+00 3.399999999999999911e+00 1.600000000000000089e+00 2.000000000000000111e-01 +4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 1.000000000000000056e-01 +4.299999999999999822e+00 3.000000000000000000e+00 1.100000000000000089e+00 1.000000000000000056e-01 +5.799999999999999822e+00 4.000000000000000000e+00 1.199999999999999956e+00 2.000000000000000111e-01 +5.700000000000000178e+00 4.400000000000000355e+00 1.500000000000000000e+00 4.000000000000000222e-01 +5.400000000000000355e+00 3.899999999999999911e+00 1.300000000000000044e+00 4.000000000000000222e-01 +5.099999999999999645e+00 3.500000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01 +5.700000000000000178e+00 3.799999999999999822e+00 1.699999999999999956e+00 2.999999999999999889e-01 +5.099999999999999645e+00 3.799999999999999822e+00 1.500000000000000000e+00 2.999999999999999889e-01 +5.400000000000000355e+00 3.399999999999999911e+00 1.699999999999999956e+00 2.000000000000000111e-01 +5.099999999999999645e+00 3.700000000000000178e+00 1.500000000000000000e+00 4.000000000000000222e-01 +4.599999999999999645e+00 3.600000000000000089e+00 1.000000000000000000e+00 2.000000000000000111e-01 +5.099999999999999645e+00 3.299999999999999822e+00 1.699999999999999956e+00 5.000000000000000000e-01 +4.799999999999999822e+00 3.399999999999999911e+00 1.899999999999999911e+00 2.000000000000000111e-01 
+5.000000000000000000e+00 3.000000000000000000e+00 1.600000000000000089e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.399999999999999911e+00 1.600000000000000089e+00 4.000000000000000222e-01 +5.200000000000000178e+00 3.500000000000000000e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.200000000000000178e+00 3.399999999999999911e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.700000000000000178e+00 3.200000000000000178e+00 1.600000000000000089e+00 2.000000000000000111e-01 +4.799999999999999822e+00 3.100000000000000089e+00 1.600000000000000089e+00 2.000000000000000111e-01 +5.400000000000000355e+00 3.399999999999999911e+00 1.500000000000000000e+00 4.000000000000000222e-01 +5.200000000000000178e+00 4.099999999999999645e+00 1.500000000000000000e+00 1.000000000000000056e-01 +5.500000000000000000e+00 4.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 +5.000000000000000000e+00 3.200000000000000178e+00 1.199999999999999956e+00 2.000000000000000111e-01 +5.500000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01 +4.900000000000000355e+00 3.100000000000000089e+00 1.500000000000000000e+00 1.000000000000000056e-01 +4.400000000000000355e+00 3.000000000000000000e+00 1.300000000000000044e+00 2.000000000000000111e-01 +5.099999999999999645e+00 3.399999999999999911e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.500000000000000000e+00 1.300000000000000044e+00 2.999999999999999889e-01 +4.500000000000000000e+00 2.299999999999999822e+00 1.300000000000000044e+00 2.999999999999999889e-01 +4.400000000000000355e+00 3.200000000000000178e+00 1.300000000000000044e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.500000000000000000e+00 1.600000000000000089e+00 5.999999999999999778e-01 +5.099999999999999645e+00 3.799999999999999822e+00 1.899999999999999911e+00 4.000000000000000222e-01 +4.799999999999999822e+00 3.000000000000000000e+00 1.399999999999999911e+00 2.999999999999999889e-01 +5.099999999999999645e+00 3.799999999999999822e+00 1.600000000000000089e+00 2.000000000000000111e-01 +4.599999999999999645e+00 3.200000000000000178e+00 1.399999999999999911e+00 2.000000000000000111e-01 +5.299999999999999822e+00 3.700000000000000178e+00 1.500000000000000000e+00 2.000000000000000111e-01 +5.000000000000000000e+00 3.299999999999999822e+00 1.399999999999999911e+00 2.000000000000000111e-01 +7.000000000000000000e+00 3.200000000000000178e+00 4.700000000000000178e+00 1.399999999999999911e+00 +6.400000000000000355e+00 3.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00 +6.900000000000000355e+00 3.100000000000000089e+00 4.900000000000000355e+00 1.500000000000000000e+00 +5.500000000000000000e+00 2.299999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00 +6.500000000000000000e+00 2.799999999999999822e+00 4.599999999999999645e+00 1.500000000000000000e+00 +5.700000000000000178e+00 2.799999999999999822e+00 4.500000000000000000e+00 1.300000000000000044e+00 +6.299999999999999822e+00 3.299999999999999822e+00 4.700000000000000178e+00 1.600000000000000089e+00 +4.900000000000000355e+00 2.399999999999999911e+00 3.299999999999999822e+00 1.000000000000000000e+00 +6.599999999999999645e+00 2.899999999999999911e+00 4.599999999999999645e+00 1.300000000000000044e+00 +5.200000000000000178e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.399999999999999911e+00 
+5.000000000000000000e+00 2.000000000000000000e+00 3.500000000000000000e+00 1.000000000000000000e+00 +5.900000000000000355e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.500000000000000000e+00 +6.000000000000000000e+00 2.200000000000000178e+00 4.000000000000000000e+00 1.000000000000000000e+00 +6.099999999999999645e+00 2.899999999999999911e+00 4.700000000000000178e+00 1.399999999999999911e+00 +5.599999999999999645e+00 2.899999999999999911e+00 3.600000000000000089e+00 1.300000000000000044e+00 +6.700000000000000178e+00 3.100000000000000089e+00 4.400000000000000355e+00 1.399999999999999911e+00 +5.599999999999999645e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00 +5.799999999999999822e+00 2.700000000000000178e+00 4.099999999999999645e+00 1.000000000000000000e+00 +6.200000000000000178e+00 2.200000000000000178e+00 4.500000000000000000e+00 1.500000000000000000e+00 +5.599999999999999645e+00 2.500000000000000000e+00 3.899999999999999911e+00 1.100000000000000089e+00 +5.900000000000000355e+00 3.200000000000000178e+00 4.799999999999999822e+00 1.800000000000000044e+00 +6.099999999999999645e+00 2.799999999999999822e+00 4.000000000000000000e+00 1.300000000000000044e+00 +6.299999999999999822e+00 2.500000000000000000e+00 4.900000000000000355e+00 1.500000000000000000e+00 +6.099999999999999645e+00 2.799999999999999822e+00 4.700000000000000178e+00 1.199999999999999956e+00 +6.400000000000000355e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00 +6.599999999999999645e+00 3.000000000000000000e+00 4.400000000000000355e+00 1.399999999999999911e+00 +6.799999999999999822e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.399999999999999911e+00 +6.700000000000000178e+00 3.000000000000000000e+00 5.000000000000000000e+00 1.699999999999999956e+00 +6.000000000000000000e+00 2.899999999999999911e+00 4.500000000000000000e+00 1.500000000000000000e+00 +5.700000000000000178e+00 2.600000000000000089e+00 3.500000000000000000e+00 1.000000000000000000e+00 +5.500000000000000000e+00 2.399999999999999911e+00 3.799999999999999822e+00 1.100000000000000089e+00 +5.500000000000000000e+00 2.399999999999999911e+00 3.700000000000000178e+00 1.000000000000000000e+00 +5.799999999999999822e+00 2.700000000000000178e+00 3.899999999999999911e+00 1.199999999999999956e+00 +6.000000000000000000e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.600000000000000089e+00 +5.400000000000000355e+00 3.000000000000000000e+00 4.500000000000000000e+00 1.500000000000000000e+00 +6.000000000000000000e+00 3.399999999999999911e+00 4.500000000000000000e+00 1.600000000000000089e+00 +6.700000000000000178e+00 3.100000000000000089e+00 4.700000000000000178e+00 1.500000000000000000e+00 +6.299999999999999822e+00 2.299999999999999822e+00 4.400000000000000355e+00 1.300000000000000044e+00 +5.599999999999999645e+00 3.000000000000000000e+00 4.099999999999999645e+00 1.300000000000000044e+00 +5.500000000000000000e+00 2.500000000000000000e+00 4.000000000000000000e+00 1.300000000000000044e+00 +5.500000000000000000e+00 2.600000000000000089e+00 4.400000000000000355e+00 1.199999999999999956e+00 +6.099999999999999645e+00 3.000000000000000000e+00 4.599999999999999645e+00 1.399999999999999911e+00 +5.799999999999999822e+00 2.600000000000000089e+00 4.000000000000000000e+00 1.199999999999999956e+00 +5.000000000000000000e+00 2.299999999999999822e+00 3.299999999999999822e+00 1.000000000000000000e+00 +5.599999999999999645e+00 2.700000000000000178e+00 4.200000000000000178e+00 1.300000000000000044e+00 
+5.700000000000000178e+00 3.000000000000000000e+00 4.200000000000000178e+00 1.199999999999999956e+00 +5.700000000000000178e+00 2.899999999999999911e+00 4.200000000000000178e+00 1.300000000000000044e+00 +6.200000000000000178e+00 2.899999999999999911e+00 4.299999999999999822e+00 1.300000000000000044e+00 +5.099999999999999645e+00 2.500000000000000000e+00 3.000000000000000000e+00 1.100000000000000089e+00 +5.700000000000000178e+00 2.799999999999999822e+00 4.099999999999999645e+00 1.300000000000000044e+00 +6.299999999999999822e+00 3.299999999999999822e+00 6.000000000000000000e+00 2.500000000000000000e+00 +5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00 +7.099999999999999645e+00 3.000000000000000000e+00 5.900000000000000355e+00 2.100000000000000089e+00 +6.299999999999999822e+00 2.899999999999999911e+00 5.599999999999999645e+00 1.800000000000000044e+00 +6.500000000000000000e+00 3.000000000000000000e+00 5.799999999999999822e+00 2.200000000000000178e+00 +7.599999999999999645e+00 3.000000000000000000e+00 6.599999999999999645e+00 2.100000000000000089e+00 +4.900000000000000355e+00 2.500000000000000000e+00 4.500000000000000000e+00 1.699999999999999956e+00 +7.299999999999999822e+00 2.899999999999999911e+00 6.299999999999999822e+00 1.800000000000000044e+00 +6.700000000000000178e+00 2.500000000000000000e+00 5.799999999999999822e+00 1.800000000000000044e+00 +7.200000000000000178e+00 3.600000000000000089e+00 6.099999999999999645e+00 2.500000000000000000e+00 +6.500000000000000000e+00 3.200000000000000178e+00 5.099999999999999645e+00 2.000000000000000000e+00 +6.400000000000000355e+00 2.700000000000000178e+00 5.299999999999999822e+00 1.899999999999999911e+00 +6.799999999999999822e+00 3.000000000000000000e+00 5.500000000000000000e+00 2.100000000000000089e+00 +5.700000000000000178e+00 2.500000000000000000e+00 5.000000000000000000e+00 2.000000000000000000e+00 +5.799999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 2.399999999999999911e+00 +6.400000000000000355e+00 3.200000000000000178e+00 5.299999999999999822e+00 2.299999999999999822e+00 +6.500000000000000000e+00 3.000000000000000000e+00 5.500000000000000000e+00 1.800000000000000044e+00 +7.700000000000000178e+00 3.799999999999999822e+00 6.700000000000000178e+00 2.200000000000000178e+00 +7.700000000000000178e+00 2.600000000000000089e+00 6.900000000000000355e+00 2.299999999999999822e+00 +6.000000000000000000e+00 2.200000000000000178e+00 5.000000000000000000e+00 1.500000000000000000e+00 +6.900000000000000355e+00 3.200000000000000178e+00 5.700000000000000178e+00 2.299999999999999822e+00 +5.599999999999999645e+00 2.799999999999999822e+00 4.900000000000000355e+00 2.000000000000000000e+00 +7.700000000000000178e+00 2.799999999999999822e+00 6.700000000000000178e+00 2.000000000000000000e+00 +6.299999999999999822e+00 2.700000000000000178e+00 4.900000000000000355e+00 1.800000000000000044e+00 +6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.100000000000000089e+00 +7.200000000000000178e+00 3.200000000000000178e+00 6.000000000000000000e+00 1.800000000000000044e+00 +6.200000000000000178e+00 2.799999999999999822e+00 4.799999999999999822e+00 1.800000000000000044e+00 +6.099999999999999645e+00 3.000000000000000000e+00 4.900000000000000355e+00 1.800000000000000044e+00 +6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.100000000000000089e+00 +7.200000000000000178e+00 3.000000000000000000e+00 5.799999999999999822e+00 1.600000000000000089e+00 
+7.400000000000000355e+00 2.799999999999999822e+00 6.099999999999999645e+00 1.899999999999999911e+00 +7.900000000000000355e+00 3.799999999999999822e+00 6.400000000000000355e+00 2.000000000000000000e+00 +6.400000000000000355e+00 2.799999999999999822e+00 5.599999999999999645e+00 2.200000000000000178e+00 +6.299999999999999822e+00 2.799999999999999822e+00 5.099999999999999645e+00 1.500000000000000000e+00 +6.099999999999999645e+00 2.600000000000000089e+00 5.599999999999999645e+00 1.399999999999999911e+00 +7.700000000000000178e+00 3.000000000000000000e+00 6.099999999999999645e+00 2.299999999999999822e+00 +6.299999999999999822e+00 3.399999999999999911e+00 5.599999999999999645e+00 2.399999999999999911e+00 +6.400000000000000355e+00 3.100000000000000089e+00 5.500000000000000000e+00 1.800000000000000044e+00 +6.000000000000000000e+00 3.000000000000000000e+00 4.799999999999999822e+00 1.800000000000000044e+00 +6.900000000000000355e+00 3.100000000000000089e+00 5.400000000000000355e+00 2.100000000000000089e+00 +6.700000000000000178e+00 3.100000000000000089e+00 5.599999999999999645e+00 2.399999999999999911e+00 +6.900000000000000355e+00 3.100000000000000089e+00 5.099999999999999645e+00 2.299999999999999822e+00 +5.799999999999999822e+00 2.700000000000000178e+00 5.099999999999999645e+00 1.899999999999999911e+00 +6.799999999999999822e+00 3.200000000000000178e+00 5.900000000000000355e+00 2.299999999999999822e+00 +6.700000000000000178e+00 3.299999999999999822e+00 5.700000000000000178e+00 2.500000000000000000e+00 +6.700000000000000178e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.299999999999999822e+00 +6.299999999999999822e+00 2.500000000000000000e+00 5.000000000000000000e+00 1.899999999999999911e+00 +6.500000000000000000e+00 3.000000000000000000e+00 5.200000000000000178e+00 2.000000000000000000e+00 +6.200000000000000178e+00 3.399999999999999911e+00 5.400000000000000355e+00 2.299999999999999822e+00 +5.900000000000000355e+00 3.000000000000000000e+00 5.099999999999999645e+00 1.800000000000000044e+00 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-boolean-inp.txt b/voice_bridge/scipy/spatial/tests/data/pdist-boolean-inp.txt new file mode 100644 index 0000000000000000000000000000000000000000..0636cc9f4590f2e960f7136d83a5d2cb07c56536 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-boolean-inp.txt @@ -0,0 +1,20 @@ +1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 +0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 
1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 
1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 
1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 +0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 +1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 +0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 
[remaining 0/1 rows of the preceding boolean test-data matrix omitted]
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0aff1267ca7fd6f41d38c2273540bb771e9cbd0c
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-chebyshev-ml-iris.txt
@@ -0,0 +1 @@
+ 5.0000000e-01 4.0000000e-01 5.0000000e-01 1.0000000e-01 [remainder of the single-line condensed Chebyshev pairwise-distance matrix for the iris test data omitted]
2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 1.1000000e+00 1.0000000e+00 7.0000000e-01 1.1000000e+00 1.2000000e+00 8.0000000e-01 7.0000000e-01 1.9000000e+00 1.1000000e+00 7.0000000e-01 5.0000000e-01 1.2000000e+00 4.0000000e-01 1.0000000e+00 5.0000000e-01 9.0000000e-01 3.3000000e+00 3.1000000e+00 3.5000000e+00 2.6000000e+00 3.2000000e+00 3.1000000e+00 3.3000000e+00 1.9000000e+00 3.2000000e+00 2.5000000e+00 2.2000000e+00 2.8000000e+00 2.6000000e+00 3.3000000e+00 2.2000000e+00 3.0000000e+00 3.1000000e+00 2.7000000e+00 3.1000000e+00 2.5000000e+00 3.4000000e+00 2.6000000e+00 3.5000000e+00 3.3000000e+00 2.9000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 2.1000000e+00 2.4000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 3.0000000e+00 3.2000000e+00 2.6000000e+00 1.9000000e+00 2.8000000e+00 2.8000000e+00 2.8000000e+00 2.9000000e+00 1.7000000e+00 2.7000000e+00 4.6000000e+00 3.7000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.2000000e+00 3.1000000e+00 4.9000000e+00 4.4000000e+00 4.7000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 5.3000000e+00 5.5000000e+00 3.6000000e+00 4.3000000e+00 3.5000000e+00 5.3000000e+00 3.5000000e+00 4.3000000e+00 4.6000000e+00 3.4000000e+00 3.5000000e+00 4.2000000e+00 4.4000000e+00 4.7000000e+00 5.0000000e+00 4.2000000e+00 3.7000000e+00 4.2000000e+00 4.7000000e+00 4.2000000e+00 4.1000000e+00 3.4000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.7000000e+00 4.5000000e+00 4.3000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.7000000e+00 3.0000000e-01 6.0000000e-01 0.0000000e+00 5.0000000e-01 3.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 2.0000000e-01 7.0000000e-01 3.0000000e-01 6.0000000e-01 2.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 
3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 5.0000000e-01 3.0000000e-01 6.0000000e-01 3.0000000e-01 3.0000000e-01 9.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 2.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 2.0000000e-01 3.5000000e+00 3.3000000e+00 3.7000000e+00 2.8000000e+00 3.4000000e+00 3.3000000e+00 3.5000000e+00 2.1000000e+00 3.4000000e+00 2.7000000e+00 2.3000000e+00 3.0000000e+00 2.8000000e+00 3.5000000e+00 2.4000000e+00 3.2000000e+00 3.3000000e+00 2.9000000e+00 3.3000000e+00 2.7000000e+00 3.6000000e+00 2.8000000e+00 3.7000000e+00 3.5000000e+00 3.1000000e+00 3.2000000e+00 3.6000000e+00 3.8000000e+00 3.3000000e+00 2.3000000e+00 2.6000000e+00 2.5000000e+00 2.7000000e+00 3.9000000e+00 3.3000000e+00 3.3000000e+00 3.5000000e+00 3.2000000e+00 2.9000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.8000000e+00 2.1000000e+00 3.0000000e+00 3.0000000e+00 3.0000000e+00 3.1000000e+00 1.8000000e+00 2.9000000e+00 4.8000000e+00 3.9000000e+00 4.7000000e+00 4.4000000e+00 4.6000000e+00 5.4000000e+00 3.3000000e+00 5.1000000e+00 4.6000000e+00 4.9000000e+00 3.9000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.9000000e+00 4.1000000e+00 4.3000000e+00 5.5000000e+00 5.7000000e+00 3.8000000e+00 4.5000000e+00 3.7000000e+00 5.5000000e+00 3.7000000e+00 4.5000000e+00 4.8000000e+00 3.6000000e+00 3.7000000e+00 4.4000000e+00 4.6000000e+00 4.9000000e+00 5.2000000e+00 4.4000000e+00 3.9000000e+00 4.4000000e+00 4.9000000e+00 4.4000000e+00 4.3000000e+00 3.6000000e+00 4.2000000e+00 4.4000000e+00 3.9000000e+00 3.9000000e+00 4.7000000e+00 4.5000000e+00 4.0000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.9000000e+00 6.0000000e-01 1.1000000e+00 4.0000000e-01 5.0000000e-01 1.2000000e+00 1.1000000e+00 5.0000000e-01 6.0000000e-01 7.0000000e-01 4.0000000e-01 9.0000000e-01 2.0000000e-01 5.0000000e-01 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 
3.9000000e+00 4.1000000e+00 3.8000000e+00 5.0000000e-01 3.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 2.0000000e-01 7.0000000e-01 3.0000000e-01 6.0000000e-01 2.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 7.0000000e-01 6.0000000e-01 7.0000000e-01 2.0000000e-01 6.0000000e-01 8.0000000e-01 4.0000000e-01 8.0000000e-01 2.0000000e-01 9.0000000e-01 6.0000000e-01 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.8000000e+00 2.0000000e-01 1.1000000e+00 7.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 5.0000000e-01 3.0000000e-01 1.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 
2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 1.2000000e+00 6.0000000e-01 3.0000000e-01 6.0000000e-01 5.0000000e-01 3.0000000e-01 4.0000000e-01 3.0000000e-01 2.0000000e-01 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.8000000e+00 9.0000000e-01 1.2000000e+00 1.5000000e+00 7.0000000e-01 1.5000000e+00 9.0000000e-01 1.4000000e+00 1.0000000e+00 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 
4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.8000000e+00 6.0000000e-01 7.0000000e-01 4.0000000e-01 7.0000000e-01 2.0000000e-01 9.0000000e-01 6.0000000e-01 3.4000000e+00 3.2000000e+00 3.6000000e+00 2.7000000e+00 3.3000000e+00 3.2000000e+00 3.4000000e+00 2.0000000e+00 3.3000000e+00 2.6000000e+00 2.2000000e+00 2.9000000e+00 2.7000000e+00 3.4000000e+00 2.3000000e+00 3.1000000e+00 3.2000000e+00 2.8000000e+00 3.2000000e+00 2.6000000e+00 3.5000000e+00 2.7000000e+00 3.6000000e+00 3.4000000e+00 3.0000000e+00 3.1000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 2.2000000e+00 2.5000000e+00 2.4000000e+00 2.6000000e+00 3.8000000e+00 3.2000000e+00 3.2000000e+00 3.4000000e+00 3.1000000e+00 2.8000000e+00 2.7000000e+00 3.1000000e+00 3.3000000e+00 2.7000000e+00 2.0000000e+00 2.9000000e+00 2.9000000e+00 2.9000000e+00 3.0000000e+00 1.7000000e+00 2.8000000e+00 4.7000000e+00 3.8000000e+00 4.6000000e+00 4.3000000e+00 4.5000000e+00 5.3000000e+00 3.2000000e+00 5.0000000e+00 4.5000000e+00 4.8000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.8000000e+00 4.0000000e+00 4.2000000e+00 5.4000000e+00 5.6000000e+00 3.7000000e+00 4.4000000e+00 3.6000000e+00 5.4000000e+00 3.6000000e+00 4.4000000e+00 4.7000000e+00 3.5000000e+00 3.6000000e+00 4.3000000e+00 4.5000000e+00 4.8000000e+00 5.1000000e+00 4.3000000e+00 3.8000000e+00 4.3000000e+00 4.8000000e+00 4.3000000e+00 4.2000000e+00 3.5000000e+00 4.1000000e+00 4.3000000e+00 3.8000000e+00 3.8000000e+00 4.6000000e+00 4.4000000e+00 3.9000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.8000000e+00 3.0000000e-01 5.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 3.1000000e+00 2.9000000e+00 3.3000000e+00 2.4000000e+00 3.0000000e+00 2.9000000e+00 3.1000000e+00 1.7000000e+00 3.0000000e+00 2.3000000e+00 1.9000000e+00 2.6000000e+00 2.4000000e+00 3.1000000e+00 2.0000000e+00 2.8000000e+00 2.9000000e+00 2.5000000e+00 2.9000000e+00 2.3000000e+00 3.2000000e+00 2.4000000e+00 3.3000000e+00 3.1000000e+00 2.7000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.9000000e+00 1.9000000e+00 2.2000000e+00 2.1000000e+00 2.3000000e+00 3.5000000e+00 2.9000000e+00 2.9000000e+00 3.1000000e+00 2.8000000e+00 2.5000000e+00 2.4000000e+00 2.8000000e+00 3.0000000e+00 2.4000000e+00 1.7000000e+00 2.6000000e+00 2.6000000e+00 2.6000000e+00 2.7000000e+00 1.4000000e+00 2.5000000e+00 4.4000000e+00 3.5000000e+00 4.3000000e+00 4.0000000e+00 4.2000000e+00 5.0000000e+00 2.9000000e+00 4.7000000e+00 4.2000000e+00 4.5000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 5.1000000e+00 5.3000000e+00 3.4000000e+00 4.1000000e+00 3.3000000e+00 5.1000000e+00 3.3000000e+00 4.1000000e+00 4.4000000e+00 3.2000000e+00 3.3000000e+00 4.0000000e+00 4.2000000e+00 4.5000000e+00 4.8000000e+00 4.0000000e+00 3.5000000e+00 4.0000000e+00 4.5000000e+00 4.0000000e+00 3.9000000e+00 3.2000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.5000000e+00 
4.3000000e+00 4.1000000e+00 3.6000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.5000000e+00 8.0000000e-01 3.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 2.8000000e+00 2.6000000e+00 3.0000000e+00 2.1000000e+00 2.7000000e+00 2.6000000e+00 2.8000000e+00 1.4000000e+00 2.7000000e+00 2.0000000e+00 1.8000000e+00 2.3000000e+00 2.1000000e+00 2.8000000e+00 1.7000000e+00 2.5000000e+00 2.6000000e+00 2.2000000e+00 2.6000000e+00 2.0000000e+00 2.9000000e+00 2.1000000e+00 3.0000000e+00 2.8000000e+00 2.4000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.6000000e+00 1.6000000e+00 1.9000000e+00 1.8000000e+00 2.0000000e+00 3.2000000e+00 2.6000000e+00 2.6000000e+00 2.8000000e+00 2.5000000e+00 2.2000000e+00 2.1000000e+00 2.5000000e+00 2.7000000e+00 2.1000000e+00 1.5000000e+00 2.3000000e+00 2.3000000e+00 2.3000000e+00 2.4000000e+00 1.3000000e+00 2.2000000e+00 4.1000000e+00 3.2000000e+00 4.0000000e+00 3.7000000e+00 3.9000000e+00 4.7000000e+00 2.6000000e+00 4.4000000e+00 3.9000000e+00 4.2000000e+00 3.2000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 3.2000000e+00 3.4000000e+00 3.6000000e+00 4.8000000e+00 5.0000000e+00 3.1000000e+00 3.8000000e+00 3.0000000e+00 4.8000000e+00 3.0000000e+00 3.8000000e+00 4.1000000e+00 2.9000000e+00 3.0000000e+00 3.7000000e+00 3.9000000e+00 4.2000000e+00 4.5000000e+00 3.7000000e+00 3.2000000e+00 3.7000000e+00 4.2000000e+00 3.7000000e+00 3.6000000e+00 2.9000000e+00 3.5000000e+00 3.7000000e+00 3.2000000e+00 3.2000000e+00 4.0000000e+00 3.8000000e+00 3.3000000e+00 3.1000000e+00 3.3000000e+00 3.5000000e+00 3.2000000e+00 8.0000000e-01 2.0000000e-01 7.0000000e-01 3.0000000e-01 3.3000000e+00 3.1000000e+00 3.5000000e+00 2.6000000e+00 3.2000000e+00 3.1000000e+00 3.3000000e+00 1.9000000e+00 3.2000000e+00 2.5000000e+00 2.1000000e+00 2.8000000e+00 2.6000000e+00 3.3000000e+00 2.2000000e+00 3.0000000e+00 3.1000000e+00 2.7000000e+00 3.1000000e+00 2.5000000e+00 3.4000000e+00 2.6000000e+00 3.5000000e+00 3.3000000e+00 2.9000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 2.1000000e+00 2.4000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 3.0000000e+00 3.2000000e+00 2.6000000e+00 1.9000000e+00 2.8000000e+00 2.8000000e+00 2.8000000e+00 2.9000000e+00 1.6000000e+00 2.7000000e+00 4.6000000e+00 3.7000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.2000000e+00 3.1000000e+00 4.9000000e+00 4.4000000e+00 4.7000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 5.3000000e+00 5.5000000e+00 3.6000000e+00 4.3000000e+00 3.5000000e+00 5.3000000e+00 3.5000000e+00 4.3000000e+00 4.6000000e+00 3.4000000e+00 3.5000000e+00 4.2000000e+00 4.4000000e+00 4.7000000e+00 5.0000000e+00 4.2000000e+00 3.7000000e+00 4.2000000e+00 4.7000000e+00 4.2000000e+00 4.1000000e+00 3.4000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.7000000e+00 4.5000000e+00 4.3000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.7000000e+00 6.0000000e-01 2.0000000e-01 5.0000000e-01 3.1000000e+00 2.9000000e+00 3.3000000e+00 2.4000000e+00 3.0000000e+00 2.9000000e+00 3.1000000e+00 1.7000000e+00 3.0000000e+00 2.3000000e+00 1.9000000e+00 2.6000000e+00 2.4000000e+00 3.1000000e+00 2.0000000e+00 2.8000000e+00 2.9000000e+00 2.5000000e+00 2.9000000e+00 2.3000000e+00 3.2000000e+00 2.4000000e+00 3.3000000e+00 3.1000000e+00 2.7000000e+00 2.8000000e+00 3.2000000e+00 3.4000000e+00 2.9000000e+00 1.9000000e+00 2.2000000e+00 2.1000000e+00 2.3000000e+00 3.5000000e+00 
2.9000000e+00 2.9000000e+00 3.1000000e+00 2.8000000e+00 2.5000000e+00 2.4000000e+00 2.8000000e+00 3.0000000e+00 2.4000000e+00 1.7000000e+00 2.6000000e+00 2.6000000e+00 2.6000000e+00 2.7000000e+00 1.4000000e+00 2.5000000e+00 4.4000000e+00 3.5000000e+00 4.3000000e+00 4.0000000e+00 4.2000000e+00 5.0000000e+00 2.9000000e+00 4.7000000e+00 4.2000000e+00 4.5000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.4000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 5.1000000e+00 5.3000000e+00 3.4000000e+00 4.1000000e+00 3.3000000e+00 5.1000000e+00 3.3000000e+00 4.1000000e+00 4.4000000e+00 3.2000000e+00 3.3000000e+00 4.0000000e+00 4.2000000e+00 4.5000000e+00 4.8000000e+00 4.0000000e+00 3.5000000e+00 4.0000000e+00 4.5000000e+00 4.0000000e+00 3.9000000e+00 3.2000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.5000000e+00 4.3000000e+00 4.1000000e+00 3.6000000e+00 3.4000000e+00 3.6000000e+00 3.8000000e+00 3.5000000e+00 7.0000000e-01 4.0000000e-01 3.3000000e+00 3.1000000e+00 3.5000000e+00 2.6000000e+00 3.2000000e+00 3.1000000e+00 3.3000000e+00 1.9000000e+00 3.2000000e+00 2.5000000e+00 2.1000000e+00 2.8000000e+00 2.6000000e+00 3.3000000e+00 2.2000000e+00 3.0000000e+00 3.1000000e+00 2.7000000e+00 3.1000000e+00 2.5000000e+00 3.4000000e+00 2.6000000e+00 3.5000000e+00 3.3000000e+00 2.9000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 2.1000000e+00 2.4000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 3.0000000e+00 3.2000000e+00 2.6000000e+00 1.9000000e+00 2.8000000e+00 2.8000000e+00 2.8000000e+00 2.9000000e+00 1.6000000e+00 2.7000000e+00 4.6000000e+00 3.7000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.2000000e+00 3.1000000e+00 4.9000000e+00 4.4000000e+00 4.7000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 5.3000000e+00 5.5000000e+00 3.6000000e+00 4.3000000e+00 3.5000000e+00 5.3000000e+00 3.5000000e+00 4.3000000e+00 4.6000000e+00 3.4000000e+00 3.5000000e+00 4.2000000e+00 4.4000000e+00 4.7000000e+00 5.0000000e+00 4.2000000e+00 3.7000000e+00 4.2000000e+00 4.7000000e+00 4.2000000e+00 4.1000000e+00 3.4000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.7000000e+00 4.5000000e+00 4.3000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.7000000e+00 4.0000000e-01 3.2000000e+00 3.0000000e+00 3.4000000e+00 2.5000000e+00 3.1000000e+00 3.0000000e+00 3.2000000e+00 1.8000000e+00 3.1000000e+00 2.4000000e+00 2.0000000e+00 2.7000000e+00 2.5000000e+00 3.2000000e+00 2.1000000e+00 2.9000000e+00 3.0000000e+00 2.6000000e+00 3.0000000e+00 2.4000000e+00 3.3000000e+00 2.5000000e+00 3.4000000e+00 3.2000000e+00 2.8000000e+00 2.9000000e+00 3.3000000e+00 3.5000000e+00 3.0000000e+00 2.0000000e+00 2.3000000e+00 2.2000000e+00 2.4000000e+00 3.6000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.9000000e+00 2.6000000e+00 2.5000000e+00 2.9000000e+00 3.1000000e+00 2.5000000e+00 1.8000000e+00 2.7000000e+00 2.7000000e+00 2.7000000e+00 2.8000000e+00 1.5000000e+00 2.6000000e+00 4.5000000e+00 3.6000000e+00 4.4000000e+00 4.1000000e+00 4.3000000e+00 5.1000000e+00 3.0000000e+00 4.8000000e+00 4.3000000e+00 4.6000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.5000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 5.2000000e+00 5.4000000e+00 3.5000000e+00 4.2000000e+00 3.4000000e+00 5.2000000e+00 3.4000000e+00 4.2000000e+00 4.5000000e+00 3.3000000e+00 3.4000000e+00 4.1000000e+00 4.3000000e+00 4.6000000e+00 4.9000000e+00 4.1000000e+00 3.6000000e+00 
4.1000000e+00 4.6000000e+00 4.1000000e+00 4.0000000e+00 3.3000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.6000000e+00 4.4000000e+00 4.2000000e+00 3.7000000e+00 3.5000000e+00 3.7000000e+00 3.9000000e+00 3.6000000e+00 3.3000000e+00 3.1000000e+00 3.5000000e+00 2.6000000e+00 3.2000000e+00 3.1000000e+00 3.3000000e+00 1.9000000e+00 3.2000000e+00 2.5000000e+00 2.1000000e+00 2.8000000e+00 2.6000000e+00 3.3000000e+00 2.2000000e+00 3.0000000e+00 3.1000000e+00 2.7000000e+00 3.1000000e+00 2.5000000e+00 3.4000000e+00 2.6000000e+00 3.5000000e+00 3.3000000e+00 2.9000000e+00 3.0000000e+00 3.4000000e+00 3.6000000e+00 3.1000000e+00 2.1000000e+00 2.4000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 3.0000000e+00 3.2000000e+00 2.6000000e+00 1.9000000e+00 2.8000000e+00 2.8000000e+00 2.8000000e+00 2.9000000e+00 1.6000000e+00 2.7000000e+00 4.6000000e+00 3.7000000e+00 4.5000000e+00 4.2000000e+00 4.4000000e+00 5.2000000e+00 3.1000000e+00 4.9000000e+00 4.4000000e+00 4.7000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 3.6000000e+00 3.7000000e+00 3.9000000e+00 4.1000000e+00 5.3000000e+00 5.5000000e+00 3.6000000e+00 4.3000000e+00 3.5000000e+00 5.3000000e+00 3.5000000e+00 4.3000000e+00 4.6000000e+00 3.4000000e+00 3.5000000e+00 4.2000000e+00 4.4000000e+00 4.7000000e+00 5.0000000e+00 4.2000000e+00 3.7000000e+00 4.2000000e+00 4.7000000e+00 4.2000000e+00 4.1000000e+00 3.4000000e+00 4.0000000e+00 4.2000000e+00 3.7000000e+00 3.7000000e+00 4.5000000e+00 4.3000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.0000000e+00 3.7000000e+00 6.0000000e-01 2.0000000e-01 1.5000000e+00 5.0000000e-01 1.3000000e+00 7.0000000e-01 2.1000000e+00 4.0000000e-01 1.8000000e+00 2.0000000e+00 1.1000000e+00 1.0000000e+00 9.0000000e-01 1.4000000e+00 3.0000000e-01 1.4000000e+00 1.2000000e+00 1.0000000e+00 1.4000000e+00 1.1000000e+00 9.0000000e-01 7.0000000e-01 9.0000000e-01 6.0000000e-01 4.0000000e-01 4.0000000e-01 3.0000000e-01 1.0000000e+00 1.3000000e+00 1.5000000e+00 1.5000000e+00 1.2000000e+00 1.0000000e+00 1.6000000e+00 1.0000000e+00 3.0000000e-01 9.0000000e-01 1.4000000e+00 1.5000000e+00 1.5000000e+00 9.0000000e-01 1.2000000e+00 2.0000000e+00 1.4000000e+00 1.3000000e+00 1.3000000e+00 8.0000000e-01 1.9000000e+00 1.3000000e+00 1.3000000e+00 1.2000000e+00 1.2000000e+00 9.0000000e-01 1.1000000e+00 1.9000000e+00 2.1000000e+00 1.6000000e+00 1.1000000e+00 1.4000000e+00 6.0000000e-01 6.0000000e-01 8.0000000e-01 1.3000000e+00 1.2000000e+00 9.0000000e-01 8.0000000e-01 2.0000000e+00 2.2000000e+00 1.0000000e+00 1.0000000e+00 1.4000000e+00 2.0000000e+00 7.0000000e-01 1.0000000e+00 1.3000000e+00 8.0000000e-01 9.0000000e-01 9.0000000e-01 1.1000000e+00 1.4000000e+00 1.7000000e+00 9.0000000e-01 7.0000000e-01 9.0000000e-01 1.4000000e+00 1.0000000e+00 8.0000000e-01 1.0000000e+00 7.0000000e-01 1.0000000e+00 9.0000000e-01 1.2000000e+00 1.2000000e+00 1.1000000e+00 9.0000000e-01 7.0000000e-01 6.0000000e-01 9.0000000e-01 1.1000000e+00 5.0000000e-01 9.0000000e-01 4.0000000e-01 7.0000000e-01 2.0000000e-01 1.5000000e+00 3.0000000e-01 1.2000000e+00 1.4000000e+00 5.0000000e-01 1.0000000e+00 3.0000000e-01 9.0000000e-01 3.0000000e-01 8.0000000e-01 6.0000000e-01 1.0000000e+00 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 4.0000000e-01 3.0000000e-01 2.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 1.0000000e+00 9.0000000e-01 9.0000000e-01 6.0000000e-01 6.0000000e-01 1.0000000e+00 4.0000000e-01 3.0000000e-01 9.0000000e-01 8.0000000e-01 9.0000000e-01 
9.0000000e-01 3.0000000e-01 6.0000000e-01 1.4000000e+00 8.0000000e-01 7.0000000e-01 7.0000000e-01 3.0000000e-01 1.5000000e+00 7.0000000e-01 1.5000000e+00 6.0000000e-01 1.4000000e+00 1.1000000e+00 1.3000000e+00 2.1000000e+00 1.5000000e+00 1.8000000e+00 1.3000000e+00 1.6000000e+00 6.0000000e-01 8.0000000e-01 1.0000000e+00 7.0000000e-01 9.0000000e-01 8.0000000e-01 1.0000000e+00 2.2000000e+00 2.4000000e+00 1.0000000e+00 1.2000000e+00 8.0000000e-01 2.2000000e+00 5.0000000e-01 1.2000000e+00 1.5000000e+00 4.0000000e-01 4.0000000e-01 1.1000000e+00 1.3000000e+00 1.6000000e+00 1.9000000e+00 1.1000000e+00 6.0000000e-01 1.1000000e+00 1.6000000e+00 1.1000000e+00 1.0000000e+00 4.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 6.0000000e-01 1.4000000e+00 1.2000000e+00 8.0000000e-01 7.0000000e-01 7.0000000e-01 9.0000000e-01 6.0000000e-01 1.4000000e+00 4.0000000e-01 1.2000000e+00 6.0000000e-01 2.0000000e+00 3.0000000e-01 1.7000000e+00 1.9000000e+00 1.0000000e+00 9.0000000e-01 8.0000000e-01 1.3000000e+00 5.0000000e-01 1.3000000e+00 1.1000000e+00 9.0000000e-01 1.3000000e+00 1.0000000e+00 9.0000000e-01 6.0000000e-01 8.0000000e-01 6.0000000e-01 5.0000000e-01 3.0000000e-01 2.0000000e-01 9.0000000e-01 1.4000000e+00 1.4000000e+00 1.4000000e+00 1.1000000e+00 9.0000000e-01 1.5000000e+00 9.0000000e-01 2.0000000e-01 8.0000000e-01 1.3000000e+00 1.4000000e+00 1.4000000e+00 8.0000000e-01 1.1000000e+00 1.9000000e+00 1.3000000e+00 1.2000000e+00 1.2000000e+00 7.0000000e-01 1.9000000e+00 1.2000000e+00 1.1000000e+00 1.1000000e+00 1.0000000e+00 7.0000000e-01 9.0000000e-01 1.7000000e+00 2.0000000e+00 1.4000000e+00 9.0000000e-01 1.2000000e+00 5.0000000e-01 5.0000000e-01 6.0000000e-01 1.2000000e+00 1.1000000e+00 8.0000000e-01 6.0000000e-01 1.8000000e+00 2.0000000e+00 9.0000000e-01 8.0000000e-01 1.3000000e+00 1.8000000e+00 6.0000000e-01 8.0000000e-01 1.1000000e+00 7.0000000e-01 8.0000000e-01 7.0000000e-01 9.0000000e-01 1.2000000e+00 1.5000000e+00 7.0000000e-01 6.0000000e-01 8.0000000e-01 1.2000000e+00 9.0000000e-01 6.0000000e-01 9.0000000e-01 6.0000000e-01 9.0000000e-01 8.0000000e-01 1.1000000e+00 1.0000000e+00 1.0000000e+00 8.0000000e-01 6.0000000e-01 5.0000000e-01 8.0000000e-01 1.0000000e+00 1.0000000e+00 5.0000000e-01 1.0000000e+00 7.0000000e-01 1.1000000e+00 4.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 1.2000000e+00 7.0000000e-01 4.0000000e-01 7.0000000e-01 2.0000000e-01 9.0000000e-01 6.0000000e-01 9.0000000e-01 7.0000000e-01 9.0000000e-01 1.1000000e+00 1.3000000e+00 1.2000000e+00 6.0000000e-01 5.0000000e-01 2.0000000e-01 3.0000000e-01 4.0000000e-01 1.1000000e+00 7.0000000e-01 1.1000000e+00 1.2000000e+00 8.0000000e-01 7.0000000e-01 2.0000000e-01 4.0000000e-01 7.0000000e-01 3.0000000e-01 7.0000000e-01 4.0000000e-01 7.0000000e-01 6.0000000e-01 7.0000000e-01 1.0000000e+00 5.0000000e-01 2.0000000e+00 1.1000000e+00 1.9000000e+00 1.6000000e+00 1.8000000e+00 2.6000000e+00 6.0000000e-01 2.3000000e+00 1.8000000e+00 2.1000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 1.0000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 2.7000000e+00 2.9000000e+00 1.0000000e+00 1.7000000e+00 9.0000000e-01 2.7000000e+00 9.0000000e-01 1.7000000e+00 2.0000000e+00 8.0000000e-01 9.0000000e-01 1.6000000e+00 1.8000000e+00 2.1000000e+00 2.4000000e+00 1.6000000e+00 1.1000000e+00 1.6000000e+00 2.2000000e+00 1.6000000e+00 1.5000000e+00 8.0000000e-01 1.4000000e+00 1.6000000e+00 1.4000000e+00 1.1000000e+00 1.9000000e+00 1.7000000e+00 1.2000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 1.1000000e+00 
8.0000000e-01 5.0000000e-01 1.6000000e+00 2.0000000e-01 1.3000000e+00 1.5000000e+00 6.0000000e-01 6.0000000e-01 4.0000000e-01 1.0000000e+00 3.0000000e-01 9.0000000e-01 7.0000000e-01 6.0000000e-01 9.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 4.0000000e-01 3.0000000e-01 2.0000000e-01 3.0000000e-01 4.0000000e-01 5.0000000e-01 1.1000000e+00 1.0000000e+00 1.0000000e+00 7.0000000e-01 5.0000000e-01 1.1000000e+00 6.0000000e-01 3.0000000e-01 5.0000000e-01 9.0000000e-01 1.0000000e+00 1.0000000e+00 4.0000000e-01 7.0000000e-01 1.5000000e+00 9.0000000e-01 8.0000000e-01 8.0000000e-01 3.0000000e-01 1.6000000e+00 8.0000000e-01 1.4000000e+00 7.0000000e-01 1.3000000e+00 1.0000000e+00 1.2000000e+00 2.0000000e+00 1.6000000e+00 1.7000000e+00 1.2000000e+00 1.5000000e+00 5.0000000e-01 7.0000000e-01 9.0000000e-01 8.0000000e-01 9.0000000e-01 8.0000000e-01 9.0000000e-01 2.1000000e+00 2.3000000e+00 6.0000000e-01 1.1000000e+00 9.0000000e-01 2.1000000e+00 3.0000000e-01 1.1000000e+00 1.4000000e+00 3.0000000e-01 4.0000000e-01 1.0000000e+00 1.2000000e+00 1.5000000e+00 1.8000000e+00 1.0000000e+00 5.0000000e-01 1.0000000e+00 1.5000000e+00 1.0000000e+00 9.0000000e-01 5.0000000e-01 8.0000000e-01 1.0000000e+00 8.0000000e-01 7.0000000e-01 1.3000000e+00 1.1000000e+00 8.0000000e-01 4.0000000e-01 6.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 1.2000000e+00 9.0000000e-01 6.0000000e-01 1.0000000e+00 3.0000000e-01 6.0000000e-01 4.0000000e-01 9.0000000e-01 1.0000000e+00 2.0000000e-01 4.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 5.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 9.0000000e-01 1.1000000e+00 1.0000000e+00 3.0000000e-01 1.0000000e+00 7.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 6.0000000e-01 1.0000000e+00 6.0000000e-01 4.0000000e-01 5.0000000e-01 2.0000000e-01 4.0000000e-01 5.0000000e-01 1.2000000e+00 3.0000000e-01 3.0000000e-01 3.0000000e-01 5.0000000e-01 1.5000000e+00 4.0000000e-01 1.5000000e+00 6.0000000e-01 1.4000000e+00 1.1000000e+00 1.3000000e+00 2.1000000e+00 8.0000000e-01 1.8000000e+00 1.3000000e+00 1.6000000e+00 8.0000000e-01 8.0000000e-01 1.1000000e+00 7.0000000e-01 1.1000000e+00 1.0000000e+00 1.0000000e+00 2.2000000e+00 2.4000000e+00 6.0000000e-01 1.2000000e+00 7.0000000e-01 2.2000000e+00 6.0000000e-01 1.2000000e+00 1.5000000e+00 5.0000000e-01 5.0000000e-01 1.1000000e+00 1.5000000e+00 1.7000000e+00 2.2000000e+00 1.1000000e+00 6.0000000e-01 1.1000000e+00 2.0000000e+00 1.1000000e+00 1.0000000e+00 5.0000000e-01 1.2000000e+00 1.1000000e+00 1.2000000e+00 6.0000000e-01 1.4000000e+00 1.2000000e+00 1.0000000e+00 6.0000000e-01 8.0000000e-01 1.0000000e+00 6.0000000e-01 1.4000000e+00 4.0000000e-01 1.1000000e+00 1.3000000e+00 5.0000000e-01 1.1000000e+00 4.0000000e-01 1.1000000e+00 4.0000000e-01 7.0000000e-01 6.0000000e-01 1.1000000e+00 8.0000000e-01 4.0000000e-01 7.0000000e-01 8.0000000e-01 5.0000000e-01 4.0000000e-01 3.0000000e-01 5.0000000e-01 4.0000000e-01 4.0000000e-01 1.2000000e+00 9.0000000e-01 1.0000000e+00 8.0000000e-01 6.0000000e-01 9.0000000e-01 3.0000000e-01 4.0000000e-01 1.0000000e+00 7.0000000e-01 8.0000000e-01 8.0000000e-01 3.0000000e-01 7.0000000e-01 1.4000000e+00 7.0000000e-01 6.0000000e-01 6.0000000e-01 4.0000000e-01 1.7000000e+00 6.0000000e-01 1.3000000e+00 6.0000000e-01 1.2000000e+00 9.0000000e-01 1.1000000e+00 1.9000000e+00 1.4000000e+00 1.6000000e+00 1.1000000e+00 1.4000000e+00 4.0000000e-01 6.0000000e-01 8.0000000e-01 8.0000000e-01 8.0000000e-01 7.0000000e-01 8.0000000e-01 2.0000000e+00 2.2000000e+00 1.1000000e+00 1.0000000e+00 
7.0000000e-01 2.0000000e+00 6.0000000e-01 1.0000000e+00 1.3000000e+00 5.0000000e-01 3.0000000e-01 9.0000000e-01 1.1000000e+00 1.4000000e+00 1.7000000e+00 9.0000000e-01 5.0000000e-01 9.0000000e-01 1.4000000e+00 9.0000000e-01 8.0000000e-01 3.0000000e-01 7.0000000e-01 9.0000000e-01 7.0000000e-01 6.0000000e-01 1.2000000e+00 1.0000000e+00 7.0000000e-01 8.0000000e-01 5.0000000e-01 7.0000000e-01 4.0000000e-01 1.7000000e+00 6.0000000e-01 4.0000000e-01 1.0000000e+00 1.1000000e+00 1.4000000e+00 7.0000000e-01 1.8000000e+00 1.2000000e+00 9.0000000e-01 1.3000000e+00 7.0000000e-01 1.5000000e+00 1.2000000e+00 1.6000000e+00 1.4000000e+00 1.5000000e+00 1.7000000e+00 1.9000000e+00 1.8000000e+00 1.2000000e+00 8.0000000e-01 6.0000000e-01 6.0000000e-01 9.0000000e-01 1.8000000e+00 1.2000000e+00 1.2000000e+00 1.8000000e+00 1.4000000e+00 8.0000000e-01 7.0000000e-01 1.1000000e+00 1.3000000e+00 9.0000000e-01 1.0000000e-01 9.0000000e-01 9.0000000e-01 9.0000000e-01 1.3000000e+00 3.0000000e-01 8.0000000e-01 2.7000000e+00 1.8000000e+00 2.6000000e+00 2.3000000e+00 2.5000000e+00 3.3000000e+00 1.2000000e+00 3.0000000e+00 2.5000000e+00 2.8000000e+00 1.8000000e+00 2.0000000e+00 2.2000000e+00 1.7000000e+00 1.8000000e+00 2.0000000e+00 2.2000000e+00 3.4000000e+00 3.6000000e+00 1.7000000e+00 2.4000000e+00 1.6000000e+00 3.4000000e+00 1.6000000e+00 2.4000000e+00 2.7000000e+00 1.5000000e+00 1.6000000e+00 2.3000000e+00 2.5000000e+00 2.8000000e+00 3.1000000e+00 2.3000000e+00 1.8000000e+00 2.3000000e+00 2.8000000e+00 2.3000000e+00 2.2000000e+00 1.5000000e+00 2.1000000e+00 2.3000000e+00 2.0000000e+00 1.8000000e+00 2.6000000e+00 2.4000000e+00 1.9000000e+00 1.7000000e+00 1.9000000e+00 2.1000000e+00 1.8000000e+00 1.4000000e+00 1.6000000e+00 7.0000000e-01 7.0000000e-01 5.0000000e-01 1.0000000e+00 2.0000000e-01 1.0000000e+00 8.0000000e-01 7.0000000e-01 1.0000000e+00 7.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 3.0000000e-01 2.0000000e-01 2.0000000e-01 4.0000000e-01 6.0000000e-01 1.1000000e+00 1.1000000e+00 1.1000000e+00 8.0000000e-01 6.0000000e-01 1.2000000e+00 6.0000000e-01 2.0000000e-01 6.0000000e-01 1.0000000e+00 1.1000000e+00 1.1000000e+00 5.0000000e-01 8.0000000e-01 1.6000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 4.0000000e-01 1.6000000e+00 9.0000000e-01 1.4000000e+00 8.0000000e-01 1.3000000e+00 1.0000000e+00 1.2000000e+00 2.0000000e+00 1.7000000e+00 1.7000000e+00 1.2000000e+00 1.5000000e+00 7.0000000e-01 7.0000000e-01 9.0000000e-01 9.0000000e-01 1.1000000e+00 1.0000000e+00 9.0000000e-01 2.1000000e+00 2.3000000e+00 7.0000000e-01 1.1000000e+00 1.0000000e+00 2.1000000e+00 5.0000000e-01 1.1000000e+00 1.4000000e+00 5.0000000e-01 5.0000000e-01 1.0000000e+00 1.2000000e+00 1.5000000e+00 1.8000000e+00 1.0000000e+00 5.0000000e-01 1.0000000e+00 1.5000000e+00 1.1000000e+00 9.0000000e-01 6.0000000e-01 8.0000000e-01 1.1000000e+00 1.0000000e+00 8.0000000e-01 1.3000000e+00 1.2000000e+00 1.0000000e+00 6.0000000e-01 7.0000000e-01 1.0000000e+00 7.0000000e-01 7.0000000e-01 7.0000000e-01 8.0000000e-01 9.0000000e-01 4.0000000e-01 1.5000000e+00 6.0000000e-01 6.0000000e-01 1.0000000e+00 4.0000000e-01 9.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 1.2000000e+00 1.4000000e+00 1.6000000e+00 1.5000000e+00 8.0000000e-01 5.0000000e-01 3.0000000e-01 4.0000000e-01 6.0000000e-01 1.2000000e+00 6.0000000e-01 8.0000000e-01 1.5000000e+00 1.1000000e+00 4.0000000e-01 3.0000000e-01 5.0000000e-01 9.0000000e-01 6.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 5.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 2.1000000e+00 
1.2000000e+00 2.0000000e+00 1.7000000e+00 1.9000000e+00 2.7000000e+00 6.0000000e-01 2.4000000e+00 1.9000000e+00 2.2000000e+00 1.3000000e+00 1.4000000e+00 1.6000000e+00 1.1000000e+00 1.2000000e+00 1.4000000e+00 1.6000000e+00 2.8000000e+00 3.0000000e+00 1.1000000e+00 1.8000000e+00 1.0000000e+00 2.8000000e+00 1.1000000e+00 1.8000000e+00 2.1000000e+00 1.0000000e+00 1.0000000e+00 1.7000000e+00 2.0000000e+00 2.2000000e+00 2.7000000e+00 1.7000000e+00 1.2000000e+00 1.7000000e+00 2.5000000e+00 1.7000000e+00 1.6000000e+00 9.0000000e-01 1.7000000e+00 1.7000000e+00 1.7000000e+00 1.2000000e+00 2.0000000e+00 1.8000000e+00 1.5000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 1.2000000e+00 1.0000000e+00 1.0000000e+00 1.2000000e+00 9.0000000e-01 1.7000000e+00 1.0000000e+00 8.0000000e-01 1.2000000e+00 6.0000000e-01 1.3000000e+00 1.1000000e+00 1.4000000e+00 1.2000000e+00 1.4000000e+00 1.6000000e+00 1.8000000e+00 1.7000000e+00 1.0000000e+00 7.0000000e-01 5.0000000e-01 5.0000000e-01 8.0000000e-01 1.6000000e+00 1.0000000e+00 1.4000000e+00 1.7000000e+00 1.3000000e+00 1.0000000e+00 5.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 3.0000000e-01 7.0000000e-01 1.0000000e+00 9.0000000e-01 1.2000000e+00 5.0000000e-01 8.0000000e-01 2.5000000e+00 1.6000000e+00 2.4000000e+00 2.1000000e+00 2.3000000e+00 3.1000000e+00 1.0000000e+00 2.8000000e+00 2.3000000e+00 2.6000000e+00 1.6000000e+00 1.8000000e+00 2.0000000e+00 1.5000000e+00 1.6000000e+00 1.8000000e+00 2.0000000e+00 3.2000000e+00 3.4000000e+00 1.5000000e+00 2.2000000e+00 1.4000000e+00 3.2000000e+00 1.4000000e+00 2.2000000e+00 2.5000000e+00 1.3000000e+00 1.4000000e+00 2.1000000e+00 2.3000000e+00 2.6000000e+00 2.9000000e+00 2.1000000e+00 1.6000000e+00 2.1000000e+00 2.7000000e+00 2.1000000e+00 2.0000000e+00 1.3000000e+00 1.9000000e+00 2.1000000e+00 1.9000000e+00 1.6000000e+00 2.4000000e+00 2.2000000e+00 1.7000000e+00 1.5000000e+00 1.7000000e+00 1.9000000e+00 1.6000000e+00 8.0000000e-01 5.0000000e-01 6.0000000e-01 8.0000000e-01 3.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 6.0000000e-01 2.0000000e-01 7.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 9.0000000e-01 8.0000000e-01 3.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 7.0000000e-01 3.0000000e-01 5.0000000e-01 4.0000000e-01 4.0000000e-01 4.0000000e-01 9.0000000e-01 3.0000000e-01 3.0000000e-01 2.0000000e-01 3.0000000e-01 1.2000000e+00 2.0000000e-01 1.8000000e+00 9.0000000e-01 1.7000000e+00 1.4000000e+00 1.6000000e+00 2.4000000e+00 1.0000000e+00 2.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 1.1000000e+00 1.3000000e+00 8.0000000e-01 9.0000000e-01 1.1000000e+00 1.3000000e+00 2.5000000e+00 2.7000000e+00 8.0000000e-01 1.5000000e+00 7.0000000e-01 2.5000000e+00 7.0000000e-01 1.5000000e+00 1.8000000e+00 6.0000000e-01 7.0000000e-01 1.4000000e+00 1.6000000e+00 1.9000000e+00 2.2000000e+00 1.4000000e+00 9.0000000e-01 1.4000000e+00 1.9000000e+00 1.4000000e+00 1.3000000e+00 6.0000000e-01 1.2000000e+00 1.4000000e+00 1.0000000e+00 9.0000000e-01 1.7000000e+00 1.5000000e+00 1.0000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 9.0000000e-01 7.0000000e-01 7.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 1.0000000e+00 6.0000000e-01 9.0000000e-01 7.0000000e-01 7.0000000e-01 8.0000000e-01 8.0000000e-01 1.0000000e+00 7.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 1.1000000e+00 8.0000000e-01 1.2000000e+00 9.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 
5.0000000e-01 8.0000000e-01 4.0000000e-01 1.0000000e+00 5.0000000e-01 8.0000000e-01 7.0000000e-01 7.0000000e-01 1.0000000e+00 6.0000000e-01 2.0000000e+00 1.1000000e+00 1.9000000e+00 1.6000000e+00 1.8000000e+00 2.6000000e+00 1.1000000e+00 2.3000000e+00 1.8000000e+00 2.1000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 1.0000000e+00 1.4000000e+00 1.3000000e+00 1.5000000e+00 2.7000000e+00 2.9000000e+00 1.0000000e+00 1.7000000e+00 1.0000000e+00 2.7000000e+00 9.0000000e-01 1.7000000e+00 2.0000000e+00 8.0000000e-01 9.0000000e-01 1.6000000e+00 1.8000000e+00 2.1000000e+00 2.4000000e+00 1.6000000e+00 1.1000000e+00 1.6000000e+00 2.1000000e+00 1.6000000e+00 1.5000000e+00 8.0000000e-01 1.4000000e+00 1.6000000e+00 1.3000000e+00 1.1000000e+00 1.9000000e+00 1.7000000e+00 1.3000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 1.1000000e+00 1.1000000e+00 6.0000000e-01 5.0000000e-01 6.0000000e-01 7.0000000e-01 8.0000000e-01 4.0000000e-01 7.0000000e-01 4.0000000e-01 2.0000000e-01 4.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 2.0000000e-01 1.2000000e+00 9.0000000e-01 1.0000000e+00 8.0000000e-01 4.0000000e-01 7.0000000e-01 5.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 7.0000000e-01 6.0000000e-01 1.0000000e-01 7.0000000e-01 1.4000000e+00 5.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 1.7000000e+00 6.0000000e-01 1.3000000e+00 5.0000000e-01 1.2000000e+00 9.0000000e-01 1.1000000e+00 1.9000000e+00 1.2000000e+00 1.6000000e+00 1.1000000e+00 1.4000000e+00 6.0000000e-01 6.0000000e-01 8.0000000e-01 6.0000000e-01 1.0000000e+00 9.0000000e-01 8.0000000e-01 2.0000000e+00 2.2000000e+00 7.0000000e-01 1.0000000e+00 6.0000000e-01 2.0000000e+00 4.0000000e-01 1.0000000e+00 1.3000000e+00 4.0000000e-01 4.0000000e-01 9.0000000e-01 1.1000000e+00 1.4000000e+00 1.8000000e+00 9.0000000e-01 4.0000000e-01 9.0000000e-01 1.6000000e+00 1.0000000e+00 8.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 1.2000000e+00 1.1000000e+00 9.0000000e-01 5.0000000e-01 6.0000000e-01 9.0000000e-01 4.0000000e-01 1.1000000e+00 9.0000000e-01 5.0000000e-01 9.0000000e-01 4.0000000e-01 1.2000000e+00 5.0000000e-01 1.3000000e+00 1.1000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 1.4000000e+00 9.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 3.0000000e-01 1.5000000e+00 9.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e+00 4.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 7.0000000e-01 6.0000000e-01 5.0000000e-01 2.4000000e+00 1.5000000e+00 2.3000000e+00 2.0000000e+00 2.2000000e+00 3.0000000e+00 9.0000000e-01 2.7000000e+00 2.2000000e+00 2.5000000e+00 1.5000000e+00 1.7000000e+00 1.9000000e+00 1.4000000e+00 1.5000000e+00 1.7000000e+00 1.9000000e+00 3.1000000e+00 3.3000000e+00 1.4000000e+00 2.1000000e+00 1.3000000e+00 3.1000000e+00 1.3000000e+00 2.1000000e+00 2.4000000e+00 1.2000000e+00 1.3000000e+00 2.0000000e+00 2.2000000e+00 2.5000000e+00 2.8000000e+00 2.0000000e+00 1.5000000e+00 2.0000000e+00 2.5000000e+00 2.0000000e+00 1.9000000e+00 1.2000000e+00 1.8000000e+00 2.0000000e+00 1.5000000e+00 1.5000000e+00 2.3000000e+00 2.1000000e+00 1.6000000e+00 1.4000000e+00 1.6000000e+00 1.8000000e+00 1.5000000e+00 1.1000000e+00 9.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 1.0000000e-01 4.0000000e-01 6.0000000e-01 7.0000000e-01 1.0000000e+00 1.2000000e+00 1.2000000e+00 9.0000000e-01 7.0000000e-01 1.3000000e+00 7.0000000e-01 3.0000000e-01 8.0000000e-01 
1.1000000e+00 1.2000000e+00 1.2000000e+00 6.0000000e-01 9.0000000e-01 1.7000000e+00 1.1000000e+00 1.0000000e+00 1.0000000e+00 5.0000000e-01 1.6000000e+00 1.0000000e+00 1.6000000e+00 9.0000000e-01 1.5000000e+00 1.2000000e+00 1.4000000e+00 2.2000000e+00 1.8000000e+00 1.9000000e+00 1.4000000e+00 1.7000000e+00 7.0000000e-01 9.0000000e-01 1.1000000e+00 1.0000000e+00 1.0000000e+00 9.0000000e-01 1.1000000e+00 2.3000000e+00 2.5000000e+00 9.0000000e-01 1.3000000e+00 1.1000000e+00 2.3000000e+00 5.0000000e-01 1.3000000e+00 1.6000000e+00 5.0000000e-01 6.0000000e-01 1.2000000e+00 1.4000000e+00 1.7000000e+00 2.0000000e+00 1.2000000e+00 7.0000000e-01 1.2000000e+00 1.7000000e+00 1.2000000e+00 1.1000000e+00 7.0000000e-01 1.0000000e+00 1.2000000e+00 9.0000000e-01 9.0000000e-01 1.5000000e+00 1.3000000e+00 9.0000000e-01 6.0000000e-01 8.0000000e-01 1.0000000e+00 8.0000000e-01 5.0000000e-01 8.0000000e-01 6.0000000e-01 3.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 8.0000000e-01 1.0000000e+00 1.2000000e+00 1.1000000e+00 4.0000000e-01 1.0000000e+00 7.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 2.0000000e-01 4.0000000e-01 1.1000000e+00 7.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 5.0000000e-01 1.2000000e+00 3.0000000e-01 3.0000000e-01 3.0000000e-01 6.0000000e-01 1.5000000e+00 4.0000000e-01 1.5000000e+00 6.0000000e-01 1.5000000e+00 1.1000000e+00 1.3000000e+00 2.1000000e+00 7.0000000e-01 1.8000000e+00 1.3000000e+00 1.6000000e+00 9.0000000e-01 8.0000000e-01 1.2000000e+00 5.0000000e-01 9.0000000e-01 8.0000000e-01 1.0000000e+00 2.2000000e+00 2.4000000e+00 8.0000000e-01 1.3000000e+00 5.0000000e-01 2.2000000e+00 7.0000000e-01 1.2000000e+00 1.6000000e+00 6.0000000e-01 5.0000000e-01 1.1000000e+00 1.6000000e+00 1.8000000e+00 2.3000000e+00 1.1000000e+00 7.0000000e-01 1.1000000e+00 2.1000000e+00 1.1000000e+00 1.0000000e+00 4.0000000e-01 1.3000000e+00 1.1000000e+00 1.3000000e+00 6.0000000e-01 1.4000000e+00 1.2000000e+00 1.1000000e+00 7.0000000e-01 9.0000000e-01 9.0000000e-01 6.0000000e-01 5.0000000e-01 2.0000000e-01 8.0000000e-01 3.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 6.0000000e-01 3.0000000e-01 4.0000000e-01 2.0000000e-01 1.0000000e+00 5.0000000e-01 7.0000000e-01 9.0000000e-01 5.0000000e-01 3.0000000e-01 3.0000000e-01 3.0000000e-01 5.0000000e-01 2.0000000e-01 8.0000000e-01 3.0000000e-01 3.0000000e-01 3.0000000e-01 4.0000000e-01 1.1000000e+00 3.0000000e-01 1.9000000e+00 1.0000000e+00 1.8000000e+00 1.5000000e+00 1.7000000e+00 2.5000000e+00 9.0000000e-01 2.2000000e+00 1.7000000e+00 2.0000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 1.0000000e+00 1.4000000e+00 1.3000000e+00 1.4000000e+00 2.6000000e+00 2.8000000e+00 9.0000000e-01 1.6000000e+00 1.0000000e+00 2.6000000e+00 8.0000000e-01 1.6000000e+00 1.9000000e+00 8.0000000e-01 8.0000000e-01 1.5000000e+00 1.7000000e+00 2.0000000e+00 2.3000000e+00 1.5000000e+00 1.0000000e+00 1.5000000e+00 2.0000000e+00 1.5000000e+00 1.4000000e+00 8.0000000e-01 1.3000000e+00 1.5000000e+00 1.3000000e+00 1.0000000e+00 1.8000000e+00 1.6000000e+00 1.3000000e+00 9.0000000e-01 1.1000000e+00 1.3000000e+00 1.0000000e+00 6.0000000e-01 1.0000000e+00 6.0000000e-01 4.0000000e-01 6.0000000e-01 7.0000000e-01 8.0000000e-01 6.0000000e-01 8.0000000e-01 7.0000000e-01 1.0000000e+00 7.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 8.0000000e-01 1.2000000e+00 9.0000000e-01 2.0000000e-01 8.0000000e-01 7.0000000e-01 7.0000000e-01 8.0000000e-01 5.0000000e-01 1.2000000e+00 6.0000000e-01 
8.0000000e-01 7.0000000e-01 7.0000000e-01 1.5000000e+00 6.0000000e-01 1.5000000e+00 6.0000000e-01 1.4000000e+00 1.1000000e+00 1.3000000e+00 2.1000000e+00 1.3000000e+00 1.8000000e+00 1.3000000e+00 1.6000000e+00 1.0000000e+00 8.0000000e-01 1.0000000e+00 5.0000000e-01 9.0000000e-01 1.0000000e+00 1.0000000e+00 2.2000000e+00 2.4000000e+00 5.0000000e-01 1.2000000e+00 6.0000000e-01 2.2000000e+00 5.0000000e-01 1.2000000e+00 1.5000000e+00 6.0000000e-01 8.0000000e-01 1.1000000e+00 1.3000000e+00 1.6000000e+00 1.9000000e+00 1.1000000e+00 6.0000000e-01 1.1000000e+00 1.6000000e+00 1.2000000e+00 1.0000000e+00 8.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 6.0000000e-01 1.4000000e+00 1.2000000e+00 8.0000000e-01 5.0000000e-01 8.0000000e-01 1.2000000e+00 8.0000000e-01 9.0000000e-01 5.0000000e-01 1.0000000e+00 8.0000000e-01 8.0000000e-01 1.0000000e+00 1.2000000e+00 1.1000000e+00 6.0000000e-01 4.0000000e-01 1.0000000e-01 2.0000000e-01 2.0000000e-01 1.2000000e+00 6.0000000e-01 9.0000000e-01 1.1000000e+00 7.0000000e-01 5.0000000e-01 2.0000000e-01 5.0000000e-01 7.0000000e-01 2.0000000e-01 6.0000000e-01 3.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 9.0000000e-01 3.0000000e-01 2.1000000e+00 1.2000000e+00 2.0000000e+00 1.7000000e+00 1.9000000e+00 2.7000000e+00 7.0000000e-01 2.4000000e+00 1.9000000e+00 2.2000000e+00 1.2000000e+00 1.4000000e+00 1.6000000e+00 1.1000000e+00 1.3000000e+00 1.4000000e+00 1.6000000e+00 2.8000000e+00 3.0000000e+00 1.1000000e+00 1.8000000e+00 1.0000000e+00 2.8000000e+00 1.0000000e+00 1.8000000e+00 2.1000000e+00 9.0000000e-01 1.0000000e+00 1.7000000e+00 1.9000000e+00 2.2000000e+00 2.5000000e+00 1.7000000e+00 1.2000000e+00 1.7000000e+00 2.2000000e+00 1.7000000e+00 1.6000000e+00 9.0000000e-01 1.5000000e+00 1.7000000e+00 1.3000000e+00 1.2000000e+00 2.0000000e+00 1.8000000e+00 1.3000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 1.2000000e+00 8.0000000e-01 7.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 9.0000000e-01 8.0000000e-01 3.0000000e-01 1.3000000e+00 1.0000000e+00 1.1000000e+00 9.0000000e-01 5.0000000e-01 5.0000000e-01 3.0000000e-01 8.0000000e-01 9.0000000e-01 7.0000000e-01 8.0000000e-01 6.0000000e-01 4.0000000e-01 8.0000000e-01 1.5000000e+00 6.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 1.8000000e+00 7.0000000e-01 1.2000000e+00 5.0000000e-01 1.2000000e+00 8.0000000e-01 1.0000000e+00 1.8000000e+00 1.0000000e+00 1.5000000e+00 1.0000000e+00 1.3000000e+00 6.0000000e-01 5.0000000e-01 9.0000000e-01 7.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 1.9000000e+00 2.1000000e+00 1.0000000e+00 1.0000000e+00 4.0000000e-01 1.9000000e+00 5.0000000e-01 9.0000000e-01 1.3000000e+00 4.0000000e-01 2.0000000e-01 8.0000000e-01 1.3000000e+00 1.5000000e+00 2.0000000e+00 8.0000000e-01 4.0000000e-01 8.0000000e-01 1.8000000e+00 8.0000000e-01 7.0000000e-01 2.0000000e-01 1.0000000e+00 8.0000000e-01 1.0000000e+00 5.0000000e-01 1.1000000e+00 9.0000000e-01 8.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 9.0000000e-01 7.0000000e-01 3.0000000e-01 5.0000000e-01 8.0000000e-01 1.0000000e+00 5.0000000e-01 5.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 1.1000000e+00 7.0000000e-01 6.0000000e-01 7.0000000e-01 5.0000000e-01 5.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 1.1000000e+00 5.0000000e-01 4.0000000e-01 4.0000000e-01 3.0000000e-01 1.0000000e+00 4.0000000e-01 2.0000000e+00 1.1000000e+00 1.9000000e+00 1.6000000e+00 1.8000000e+00 2.6000000e+00 1.2000000e+00 2.3000000e+00 1.8000000e+00 2.1000000e+00 1.1000000e+00 
1.3000000e+00 1.5000000e+00 1.0000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 2.7000000e+00 2.9000000e+00 1.0000000e+00 1.7000000e+00 9.0000000e-01 2.7000000e+00 9.0000000e-01 1.7000000e+00 2.0000000e+00 8.0000000e-01 9.0000000e-01 1.6000000e+00 1.8000000e+00 2.1000000e+00 2.4000000e+00 1.6000000e+00 1.1000000e+00 1.6000000e+00 2.1000000e+00 1.6000000e+00 1.5000000e+00 8.0000000e-01 1.4000000e+00 1.6000000e+00 1.1000000e+00 1.1000000e+00 1.9000000e+00 1.7000000e+00 1.2000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 1.1000000e+00 3.0000000e-01 6.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 1.4000000e+00 1.1000000e+00 1.2000000e+00 1.0000000e+00 3.0000000e-01 9.0000000e-01 9.0000000e-01 6.0000000e-01 5.0000000e-01 8.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 9.0000000e-01 1.6000000e+00 7.0000000e-01 7.0000000e-01 7.0000000e-01 6.0000000e-01 1.9000000e+00 8.0000000e-01 1.1000000e+00 5.0000000e-01 1.0000000e+00 7.0000000e-01 9.0000000e-01 1.7000000e+00 1.4000000e+00 1.4000000e+00 9.0000000e-01 1.2000000e+00 7.0000000e-01 4.0000000e-01 6.0000000e-01 6.0000000e-01 9.0000000e-01 8.0000000e-01 6.0000000e-01 1.8000000e+00 2.0000000e+00 3.0000000e-01 8.0000000e-01 7.0000000e-01 1.8000000e+00 3.0000000e-01 8.0000000e-01 1.1000000e+00 3.0000000e-01 5.0000000e-01 7.0000000e-01 9.0000000e-01 1.2000000e+00 1.6000000e+00 7.0000000e-01 3.0000000e-01 7.0000000e-01 1.4000000e+00 9.0000000e-01 6.0000000e-01 5.0000000e-01 6.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 1.0000000e+00 1.0000000e+00 8.0000000e-01 4.0000000e-01 5.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 3.0000000e-01 1.2000000e+00 9.0000000e-01 1.0000000e+00 8.0000000e-01 4.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 6.0000000e-01 7.0000000e-01 6.0000000e-01 2.0000000e-01 7.0000000e-01 1.4000000e+00 5.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 1.7000000e+00 6.0000000e-01 1.3000000e+00 7.0000000e-01 1.2000000e+00 9.0000000e-01 1.1000000e+00 1.9000000e+00 1.2000000e+00 1.6000000e+00 1.1000000e+00 1.4000000e+00 8.0000000e-01 7.0000000e-01 9.0000000e-01 8.0000000e-01 1.2000000e+00 1.1000000e+00 8.0000000e-01 2.0000000e+00 2.2000000e+00 6.0000000e-01 1.1000000e+00 8.0000000e-01 2.0000000e+00 6.0000000e-01 1.0000000e+00 1.3000000e+00 6.0000000e-01 6.0000000e-01 9.0000000e-01 1.1000000e+00 1.4000000e+00 1.8000000e+00 1.0000000e+00 4.0000000e-01 9.0000000e-01 1.6000000e+00 1.2000000e+00 8.0000000e-01 6.0000000e-01 9.0000000e-01 1.2000000e+00 1.1000000e+00 7.0000000e-01 1.2000000e+00 1.3000000e+00 1.1000000e+00 7.0000000e-01 8.0000000e-01 1.1000000e+00 6.0000000e-01 2.0000000e-01 5.0000000e-01 7.0000000e-01 4.0000000e-01 8.0000000e-01 9.0000000e-01 9.0000000e-01 6.0000000e-01 8.0000000e-01 1.0000000e+00 5.0000000e-01 4.0000000e-01 6.0000000e-01 8.0000000e-01 9.0000000e-01 9.0000000e-01 3.0000000e-01 6.0000000e-01 1.4000000e+00 8.0000000e-01 7.0000000e-01 7.0000000e-01 2.0000000e-01 1.3000000e+00 7.0000000e-01 1.7000000e+00 8.0000000e-01 1.6000000e+00 1.3000000e+00 1.5000000e+00 2.3000000e+00 1.5000000e+00 2.0000000e+00 1.5000000e+00 1.8000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 7.0000000e-01 1.1000000e+00 1.0000000e+00 1.2000000e+00 2.4000000e+00 2.6000000e+00 7.0000000e-01 1.4000000e+00 8.0000000e-01 2.4000000e+00 6.0000000e-01 1.4000000e+00 1.7000000e+00 5.0000000e-01 6.0000000e-01 1.3000000e+00 1.5000000e+00 1.8000000e+00 2.1000000e+00 1.3000000e+00 8.0000000e-01 1.3000000e+00 1.8000000e+00 
1.3000000e+00 1.2000000e+00 5.0000000e-01 1.1000000e+00 1.3000000e+00 1.0000000e+00 8.0000000e-01 1.6000000e+00 1.4000000e+00 1.0000000e+00 7.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 4.0000000e-01 6.0000000e-01 6.0000000e-01 9.0000000e-01 1.1000000e+00 1.1000000e+00 8.0000000e-01 7.0000000e-01 1.2000000e+00 6.0000000e-01 3.0000000e-01 7.0000000e-01 1.0000000e+00 1.1000000e+00 1.1000000e+00 5.0000000e-01 8.0000000e-01 1.6000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 4.0000000e-01 1.5000000e+00 9.0000000e-01 1.6000000e+00 8.0000000e-01 1.5000000e+00 1.2000000e+00 1.4000000e+00 2.2000000e+00 1.7000000e+00 1.9000000e+00 1.4000000e+00 1.7000000e+00 7.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 1.0000000e+00 9.0000000e-01 1.1000000e+00 2.3000000e+00 2.5000000e+00 8.0000000e-01 1.3000000e+00 1.0000000e+00 2.3000000e+00 5.0000000e-01 1.3000000e+00 1.6000000e+00 4.0000000e-01 5.0000000e-01 1.2000000e+00 1.4000000e+00 1.7000000e+00 2.0000000e+00 1.2000000e+00 7.0000000e-01 1.2000000e+00 1.7000000e+00 1.2000000e+00 1.1000000e+00 6.0000000e-01 1.0000000e+00 1.2000000e+00 9.0000000e-01 8.0000000e-01 1.5000000e+00 1.3000000e+00 9.0000000e-01 6.0000000e-01 8.0000000e-01 1.0000000e+00 7.0000000e-01 3.0000000e-01 8.0000000e-01 1.3000000e+00 1.3000000e+00 1.3000000e+00 1.0000000e+00 8.0000000e-01 1.4000000e+00 8.0000000e-01 3.0000000e-01 5.0000000e-01 1.2000000e+00 1.3000000e+00 1.3000000e+00 7.0000000e-01 1.0000000e+00 1.8000000e+00 1.2000000e+00 1.1000000e+00 1.1000000e+00 6.0000000e-01 1.8000000e+00 1.1000000e+00 1.2000000e+00 1.0000000e+00 1.1000000e+00 8.0000000e-01 1.0000000e+00 1.8000000e+00 1.9000000e+00 1.5000000e+00 1.0000000e+00 1.3000000e+00 6.0000000e-01 5.0000000e-01 7.0000000e-01 1.1000000e+00 1.0000000e+00 9.0000000e-01 7.0000000e-01 1.9000000e+00 2.1000000e+00 8.0000000e-01 9.0000000e-01 1.2000000e+00 1.9000000e+00 5.0000000e-01 9.0000000e-01 1.2000000e+00 6.0000000e-01 7.0000000e-01 8.0000000e-01 1.0000000e+00 1.3000000e+00 1.6000000e+00 8.0000000e-01 5.0000000e-01 8.0000000e-01 1.3000000e+00 1.0000000e+00 7.0000000e-01 8.0000000e-01 7.0000000e-01 1.0000000e+00 9.0000000e-01 1.0000000e+00 1.1000000e+00 1.1000000e+00 9.0000000e-01 5.0000000e-01 6.0000000e-01 9.0000000e-01 9.0000000e-01 7.0000000e-01 1.5000000e+00 1.2000000e+00 1.3000000e+00 1.1000000e+00 7.0000000e-01 1.3000000e+00 7.0000000e-01 3.0000000e-01 7.0000000e-01 1.1000000e+00 1.2000000e+00 1.2000000e+00 6.0000000e-01 1.0000000e+00 1.7000000e+00 1.1000000e+00 1.0000000e+00 1.0000000e+00 7.0000000e-01 2.0000000e+00 1.0000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 6.0000000e-01 8.0000000e-01 1.6000000e+00 1.8000000e+00 1.3000000e+00 8.0000000e-01 1.1000000e+00 3.0000000e-01 3.0000000e-01 5.0000000e-01 1.0000000e+00 9.0000000e-01 6.0000000e-01 5.0000000e-01 1.7000000e+00 1.9000000e+00 8.0000000e-01 7.0000000e-01 1.1000000e+00 1.7000000e+00 4.0000000e-01 7.0000000e-01 1.0000000e+00 5.0000000e-01 6.0000000e-01 6.0000000e-01 8.0000000e-01 1.1000000e+00 1.4000000e+00 6.0000000e-01 4.0000000e-01 6.0000000e-01 1.1000000e+00 7.0000000e-01 5.0000000e-01 7.0000000e-01 4.0000000e-01 7.0000000e-01 6.0000000e-01 9.0000000e-01 9.0000000e-01 8.0000000e-01 6.0000000e-01 5.0000000e-01 3.0000000e-01 6.0000000e-01 8.0000000e-01 1.0000000e+00 7.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 5.0000000e-01 1.0000000e-01 5.0000000e-01 1.2000000e+00 4.0000000e-01 3.0000000e-01 3.0000000e-01 2.0000000e-01 1.5000000e+00 
4.0000000e-01 1.5000000e+00 6.0000000e-01 1.4000000e+00 1.1000000e+00 1.3000000e+00 2.1000000e+00 1.1000000e+00 1.8000000e+00 1.3000000e+00 1.6000000e+00 6.0000000e-01 8.0000000e-01 1.0000000e+00 5.0000000e-01 9.0000000e-01 8.0000000e-01 1.0000000e+00 2.2000000e+00 2.4000000e+00 7.0000000e-01 1.2000000e+00 5.0000000e-01 2.2000000e+00 4.0000000e-01 1.2000000e+00 1.5000000e+00 3.0000000e-01 4.0000000e-01 1.1000000e+00 1.3000000e+00 1.6000000e+00 1.9000000e+00 1.1000000e+00 6.0000000e-01 1.1000000e+00 1.7000000e+00 1.1000000e+00 1.0000000e+00 3.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 6.0000000e-01 1.4000000e+00 1.2000000e+00 8.0000000e-01 5.0000000e-01 7.0000000e-01 9.0000000e-01 6.0000000e-01 3.0000000e-01 2.0000000e-01 4.0000000e-01 1.6000000e+00 1.0000000e+00 1.0000000e+00 1.2000000e+00 9.0000000e-01 6.0000000e-01 5.0000000e-01 9.0000000e-01 1.1000000e+00 5.0000000e-01 7.0000000e-01 7.0000000e-01 7.0000000e-01 7.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 2.5000000e+00 1.6000000e+00 2.4000000e+00 2.1000000e+00 2.3000000e+00 3.1000000e+00 1.0000000e+00 2.8000000e+00 2.3000000e+00 2.6000000e+00 1.6000000e+00 1.8000000e+00 2.0000000e+00 1.5000000e+00 1.6000000e+00 1.8000000e+00 2.0000000e+00 3.2000000e+00 3.4000000e+00 1.5000000e+00 2.2000000e+00 1.4000000e+00 3.2000000e+00 1.4000000e+00 2.2000000e+00 2.5000000e+00 1.3000000e+00 1.4000000e+00 2.1000000e+00 2.3000000e+00 2.6000000e+00 2.9000000e+00 2.1000000e+00 1.6000000e+00 2.1000000e+00 2.6000000e+00 2.1000000e+00 2.0000000e+00 1.3000000e+00 1.9000000e+00 2.1000000e+00 1.6000000e+00 1.6000000e+00 2.4000000e+00 2.2000000e+00 1.7000000e+00 1.5000000e+00 1.7000000e+00 1.9000000e+00 1.6000000e+00 1.0000000e-01 3.0000000e-01 1.3000000e+00 7.0000000e-01 1.0000000e+00 1.2000000e+00 8.0000000e-01 6.0000000e-01 2.0000000e-01 6.0000000e-01 8.0000000e-01 3.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 8.0000000e-01 4.0000000e-01 2.2000000e+00 1.3000000e+00 2.1000000e+00 1.8000000e+00 2.0000000e+00 2.8000000e+00 7.0000000e-01 2.5000000e+00 2.0000000e+00 2.3000000e+00 1.3000000e+00 1.5000000e+00 1.7000000e+00 1.2000000e+00 1.3000000e+00 1.5000000e+00 1.7000000e+00 2.9000000e+00 3.1000000e+00 1.2000000e+00 1.9000000e+00 1.1000000e+00 2.9000000e+00 1.1000000e+00 1.9000000e+00 2.2000000e+00 1.0000000e+00 1.1000000e+00 1.8000000e+00 2.0000000e+00 2.3000000e+00 2.6000000e+00 1.8000000e+00 1.3000000e+00 1.8000000e+00 2.3000000e+00 1.8000000e+00 1.7000000e+00 1.0000000e+00 1.6000000e+00 1.8000000e+00 1.4000000e+00 1.3000000e+00 2.1000000e+00 1.9000000e+00 1.4000000e+00 1.2000000e+00 1.4000000e+00 1.6000000e+00 1.3000000e+00 3.0000000e-01 1.4000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 8.0000000e-01 6.0000000e-01 3.0000000e-01 7.0000000e-01 9.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 7.0000000e-01 4.0000000e-01 2.3000000e+00 1.4000000e+00 2.2000000e+00 1.9000000e+00 2.1000000e+00 2.9000000e+00 8.0000000e-01 2.6000000e+00 2.1000000e+00 2.4000000e+00 1.4000000e+00 1.6000000e+00 1.8000000e+00 1.3000000e+00 1.4000000e+00 1.6000000e+00 1.8000000e+00 3.0000000e+00 3.2000000e+00 1.3000000e+00 2.0000000e+00 1.2000000e+00 3.0000000e+00 1.2000000e+00 2.0000000e+00 2.3000000e+00 1.1000000e+00 1.2000000e+00 1.9000000e+00 2.1000000e+00 2.4000000e+00 2.7000000e+00 1.9000000e+00 1.4000000e+00 1.9000000e+00 2.4000000e+00 1.9000000e+00 1.8000000e+00 1.1000000e+00 1.7000000e+00 1.9000000e+00 1.4000000e+00 1.4000000e+00 2.2000000e+00 2.0000000e+00 
1.5000000e+00 1.3000000e+00 1.5000000e+00 1.7000000e+00 1.4000000e+00 1.2000000e+00 6.0000000e-01 7.0000000e-01 9.0000000e-01 5.0000000e-01 3.0000000e-01 3.0000000e-01 5.0000000e-01 7.0000000e-01 1.0000000e-01 8.0000000e-01 3.0000000e-01 3.0000000e-01 3.0000000e-01 4.0000000e-01 9.0000000e-01 2.0000000e-01 2.1000000e+00 1.2000000e+00 2.0000000e+00 1.7000000e+00 1.9000000e+00 2.7000000e+00 9.0000000e-01 2.4000000e+00 1.9000000e+00 2.2000000e+00 1.2000000e+00 1.4000000e+00 1.6000000e+00 1.1000000e+00 1.2000000e+00 1.4000000e+00 1.6000000e+00 2.8000000e+00 3.0000000e+00 1.1000000e+00 1.8000000e+00 1.0000000e+00 2.8000000e+00 1.0000000e+00 1.8000000e+00 2.1000000e+00 9.0000000e-01 1.0000000e+00 1.7000000e+00 1.9000000e+00 2.2000000e+00 2.5000000e+00 1.7000000e+00 1.2000000e+00 1.7000000e+00 2.2000000e+00 1.7000000e+00 1.6000000e+00 9.0000000e-01 1.5000000e+00 1.7000000e+00 1.2000000e+00 1.2000000e+00 2.0000000e+00 1.8000000e+00 1.3000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 1.2000000e+00 6.0000000e-01 7.0000000e-01 7.0000000e-01 7.0000000e-01 1.0000000e+00 1.1000000e+00 7.0000000e-01 5.0000000e-01 1.1000000e+00 1.8000000e+00 9.0000000e-01 9.0000000e-01 9.0000000e-01 8.0000000e-01 2.1000000e+00 1.0000000e+00 9.0000000e-01 3.0000000e-01 1.1000000e+00 5.0000000e-01 7.0000000e-01 1.6000000e+00 1.1000000e+00 1.3000000e+00 7.0000000e-01 1.2000000e+00 5.0000000e-01 4.0000000e-01 8.0000000e-01 4.0000000e-01 8.0000000e-01 7.0000000e-01 5.0000000e-01 1.7000000e+00 1.8000000e+00 5.0000000e-01 9.0000000e-01 4.0000000e-01 1.7000000e+00 3.0000000e-01 7.0000000e-01 1.2000000e+00 3.0000000e-01 3.0000000e-01 5.0000000e-01 1.2000000e+00 1.4000000e+00 1.9000000e+00 6.0000000e-01 3.0000000e-01 5.0000000e-01 1.7000000e+00 8.0000000e-01 4.0000000e-01 3.0000000e-01 9.0000000e-01 8.0000000e-01 9.0000000e-01 3.0000000e-01 8.0000000e-01 9.0000000e-01 7.0000000e-01 3.0000000e-01 5.0000000e-01 7.0000000e-01 3.0000000e-01 6.0000000e-01 1.3000000e+00 9.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 7.0000000e-01 5.0000000e-01 1.2000000e+00 3.0000000e-01 3.0000000e-01 3.0000000e-01 8.0000000e-01 1.5000000e+00 4.0000000e-01 1.5000000e+00 6.0000000e-01 1.7000000e+00 1.1000000e+00 1.3000000e+00 2.2000000e+00 5.0000000e-01 1.9000000e+00 1.3000000e+00 1.8000000e+00 1.1000000e+00 1.0000000e+00 1.4000000e+00 5.0000000e-01 9.0000000e-01 1.0000000e+00 1.1000000e+00 2.3000000e+00 2.4000000e+00 8.0000000e-01 1.5000000e+00 5.0000000e-01 2.3000000e+00 9.0000000e-01 1.3000000e+00 1.8000000e+00 8.0000000e-01 7.0000000e-01 1.1000000e+00 1.8000000e+00 2.0000000e+00 2.5000000e+00 1.1000000e+00 9.0000000e-01 1.1000000e+00 2.3000000e+00 1.1000000e+00 1.0000000e+00 6.0000000e-01 1.5000000e+00 1.3000000e+00 1.5000000e+00 6.0000000e-01 1.4000000e+00 1.3000000e+00 1.3000000e+00 9.0000000e-01 1.1000000e+00 9.0000000e-01 6.0000000e-01 7.0000000e-01 1.1000000e+00 4.0000000e-01 9.0000000e-01 8.0000000e-01 4.0000000e-01 8.0000000e-01 1.2000000e+00 7.0000000e-01 4.0000000e-01 5.0000000e-01 5.0000000e-01 1.5000000e+00 6.0000000e-01 1.5000000e+00 7.0000000e-01 1.4000000e+00 1.1000000e+00 1.3000000e+00 2.1000000e+00 1.1000000e+00 1.8000000e+00 1.3000000e+00 1.6000000e+00 6.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 8.0000000e-01 8.0000000e-01 1.0000000e+00 2.2000000e+00 2.4000000e+00 1.2000000e+00 1.2000000e+00 6.0000000e-01 2.2000000e+00 7.0000000e-01 1.2000000e+00 1.5000000e+00 6.0000000e-01 4.0000000e-01 1.1000000e+00 1.3000000e+00 1.6000000e+00 1.9000000e+00 1.1000000e+00 6.0000000e-01 1.1000000e+00 1.7000000e+00 
1.1000000e+00 1.0000000e+00 4.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 7.0000000e-01 1.4000000e+00 1.2000000e+00 7.0000000e-01 9.0000000e-01 7.0000000e-01 9.0000000e-01 6.0000000e-01 8.0000000e-01 1.1000000e+00 1.2000000e+00 1.2000000e+00 6.0000000e-01 9.0000000e-01 1.7000000e+00 1.1000000e+00 1.0000000e+00 1.0000000e+00 5.0000000e-01 1.7000000e+00 1.0000000e+00 1.3000000e+00 9.0000000e-01 1.2000000e+00 9.0000000e-01 1.1000000e+00 1.9000000e+00 1.8000000e+00 1.6000000e+00 1.1000000e+00 1.4000000e+00 5.0000000e-01 6.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 8.0000000e-01 8.0000000e-01 2.0000000e+00 2.2000000e+00 9.0000000e-01 1.0000000e+00 1.1000000e+00 2.0000000e+00 4.0000000e-01 1.0000000e+00 1.3000000e+00 5.0000000e-01 6.0000000e-01 9.0000000e-01 1.1000000e+00 1.4000000e+00 1.7000000e+00 9.0000000e-01 4.0000000e-01 9.0000000e-01 1.4000000e+00 9.0000000e-01 8.0000000e-01 7.0000000e-01 7.0000000e-01 9.0000000e-01 8.0000000e-01 9.0000000e-01 1.2000000e+00 1.0000000e+00 8.0000000e-01 6.0000000e-01 5.0000000e-01 8.0000000e-01 8.0000000e-01 7.0000000e-01 8.0000000e-01 8.0000000e-01 7.0000000e-01 5.0000000e-01 1.3000000e+00 7.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 1.4000000e+00 6.0000000e-01 1.6000000e+00 7.0000000e-01 1.5000000e+00 1.2000000e+00 1.4000000e+00 2.2000000e+00 1.4000000e+00 1.9000000e+00 1.4000000e+00 1.7000000e+00 9.0000000e-01 9.0000000e-01 1.1000000e+00 7.0000000e-01 1.1000000e+00 1.0000000e+00 1.1000000e+00 2.3000000e+00 2.5000000e+00 6.0000000e-01 1.3000000e+00 7.0000000e-01 2.3000000e+00 5.0000000e-01 1.3000000e+00 1.6000000e+00 5.0000000e-01 7.0000000e-01 1.2000000e+00 1.4000000e+00 1.7000000e+00 2.0000000e+00 1.2000000e+00 7.0000000e-01 1.2000000e+00 1.7000000e+00 1.2000000e+00 1.1000000e+00 7.0000000e-01 1.0000000e+00 1.2000000e+00 1.0000000e+00 7.0000000e-01 1.5000000e+00 1.3000000e+00 1.0000000e+00 6.0000000e-01 8.0000000e-01 1.1000000e+00 7.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 3.0000000e-01 1.0000000e-01 1.0000000e-01 6.0000000e-01 1.1000000e+00 2.0000000e-01 1.9000000e+00 1.0000000e+00 1.8000000e+00 1.5000000e+00 1.7000000e+00 2.5000000e+00 7.0000000e-01 2.2000000e+00 1.7000000e+00 2.0000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 9.0000000e-01 1.1000000e+00 1.2000000e+00 1.4000000e+00 2.6000000e+00 2.8000000e+00 9.0000000e-01 1.6000000e+00 8.0000000e-01 2.6000000e+00 8.0000000e-01 1.6000000e+00 1.9000000e+00 7.0000000e-01 8.0000000e-01 1.5000000e+00 1.7000000e+00 2.0000000e+00 2.3000000e+00 1.5000000e+00 1.0000000e+00 1.5000000e+00 2.1000000e+00 1.5000000e+00 1.4000000e+00 7.0000000e-01 1.3000000e+00 1.5000000e+00 1.3000000e+00 1.0000000e+00 1.8000000e+00 1.6000000e+00 1.1000000e+00 9.0000000e-01 1.1000000e+00 1.3000000e+00 1.0000000e+00 4.0000000e-01 6.0000000e-01 3.0000000e-01 7.0000000e-01 2.0000000e-01 5.0000000e-01 4.0000000e-01 7.0000000e-01 1.0000000e+00 3.0000000e-01 2.0000000e+00 1.1000000e+00 1.9000000e+00 1.6000000e+00 1.8000000e+00 2.6000000e+00 6.0000000e-01 2.3000000e+00 1.8000000e+00 2.1000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 1.0000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 2.7000000e+00 2.9000000e+00 1.0000000e+00 1.7000000e+00 9.0000000e-01 2.7000000e+00 9.0000000e-01 1.7000000e+00 2.0000000e+00 8.0000000e-01 9.0000000e-01 1.6000000e+00 1.8000000e+00 2.1000000e+00 2.4000000e+00 1.6000000e+00 1.1000000e+00 1.6000000e+00 2.2000000e+00 1.6000000e+00 1.5000000e+00 8.0000000e-01 1.4000000e+00 1.6000000e+00 1.4000000e+00 1.1000000e+00 
1.9000000e+00 1.7000000e+00 1.2000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 1.1000000e+00 6.0000000e-01 4.0000000e-01 1.1000000e+00 2.0000000e-01 4.0000000e-01 3.0000000e-01 7.0000000e-01 1.4000000e+00 3.0000000e-01 1.6000000e+00 7.0000000e-01 1.6000000e+00 1.2000000e+00 1.4000000e+00 2.2000000e+00 6.0000000e-01 1.9000000e+00 1.4000000e+00 1.7000000e+00 1.0000000e+00 9.0000000e-01 1.3000000e+00 8.0000000e-01 1.2000000e+00 1.1000000e+00 1.1000000e+00 2.3000000e+00 2.5000000e+00 6.0000000e-01 1.4000000e+00 8.0000000e-01 2.3000000e+00 8.0000000e-01 1.3000000e+00 1.7000000e+00 7.0000000e-01 6.0000000e-01 1.2000000e+00 1.7000000e+00 1.9000000e+00 2.4000000e+00 1.2000000e+00 8.0000000e-01 1.2000000e+00 2.2000000e+00 1.2000000e+00 1.1000000e+00 6.0000000e-01 1.4000000e+00 1.2000000e+00 1.4000000e+00 7.0000000e-01 1.5000000e+00 1.3000000e+00 1.2000000e+00 8.0000000e-01 1.0000000e+00 1.1000000e+00 7.0000000e-01 6.0000000e-01 1.3000000e+00 5.0000000e-01 4.0000000e-01 4.0000000e-01 3.0000000e-01 1.6000000e+00 5.0000000e-01 1.4000000e+00 5.0000000e-01 1.3000000e+00 1.0000000e+00 1.2000000e+00 2.0000000e+00 1.2000000e+00 1.7000000e+00 1.2000000e+00 1.5000000e+00 6.0000000e-01 7.0000000e-01 9.0000000e-01 6.0000000e-01 1.0000000e+00 9.0000000e-01 9.0000000e-01 2.1000000e+00 2.3000000e+00 8.0000000e-01 1.1000000e+00 6.0000000e-01 2.1000000e+00 4.0000000e-01 1.1000000e+00 1.4000000e+00 4.0000000e-01 4.0000000e-01 1.0000000e+00 1.2000000e+00 1.5000000e+00 1.8000000e+00 1.0000000e+00 5.0000000e-01 1.0000000e+00 1.6000000e+00 1.0000000e+00 9.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 1.3000000e+00 1.1000000e+00 9.0000000e-01 5.0000000e-01 6.0000000e-01 9.0000000e-01 5.0000000e-01 8.0000000e-01 2.0000000e-01 4.0000000e-01 3.0000000e-01 4.0000000e-01 1.0000000e+00 2.0000000e-01 2.0000000e+00 1.1000000e+00 1.9000000e+00 1.6000000e+00 1.8000000e+00 2.6000000e+00 9.0000000e-01 2.3000000e+00 1.8000000e+00 2.1000000e+00 1.1000000e+00 1.3000000e+00 1.5000000e+00 1.0000000e+00 1.2000000e+00 1.3000000e+00 1.5000000e+00 2.7000000e+00 2.9000000e+00 1.0000000e+00 1.7000000e+00 9.0000000e-01 2.7000000e+00 9.0000000e-01 1.7000000e+00 2.0000000e+00 8.0000000e-01 9.0000000e-01 1.6000000e+00 1.8000000e+00 2.1000000e+00 2.4000000e+00 1.6000000e+00 1.1000000e+00 1.6000000e+00 2.1000000e+00 1.6000000e+00 1.5000000e+00 8.0000000e-01 1.4000000e+00 1.6000000e+00 1.1000000e+00 1.1000000e+00 1.9000000e+00 1.7000000e+00 1.2000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 1.1000000e+00 9.0000000e-01 9.0000000e-01 9.0000000e-01 1.2000000e+00 3.0000000e-01 8.0000000e-01 2.7000000e+00 1.8000000e+00 2.6000000e+00 2.3000000e+00 2.5000000e+00 3.3000000e+00 1.2000000e+00 3.0000000e+00 2.5000000e+00 2.8000000e+00 1.8000000e+00 2.0000000e+00 2.2000000e+00 1.7000000e+00 1.8000000e+00 2.0000000e+00 2.2000000e+00 3.4000000e+00 3.6000000e+00 1.7000000e+00 2.4000000e+00 1.6000000e+00 3.4000000e+00 1.6000000e+00 2.4000000e+00 2.7000000e+00 1.5000000e+00 1.6000000e+00 2.3000000e+00 2.5000000e+00 2.8000000e+00 3.1000000e+00 2.3000000e+00 1.8000000e+00 2.3000000e+00 2.8000000e+00 2.3000000e+00 2.2000000e+00 1.5000000e+00 2.1000000e+00 2.3000000e+00 1.9000000e+00 1.8000000e+00 2.6000000e+00 2.4000000e+00 1.9000000e+00 1.7000000e+00 1.9000000e+00 2.1000000e+00 1.8000000e+00 3.0000000e-01 2.0000000e-01 6.0000000e-01 1.2000000e+00 1.0000000e-01 1.8000000e+00 9.0000000e-01 1.7000000e+00 1.4000000e+00 1.6000000e+00 2.4000000e+00 7.0000000e-01 2.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 
1.1000000e+00 1.3000000e+00 8.0000000e-01 1.1000000e+00 1.1000000e+00 1.3000000e+00 2.5000000e+00 2.7000000e+00 8.0000000e-01 1.5000000e+00 7.0000000e-01 2.5000000e+00 7.0000000e-01 1.5000000e+00 1.8000000e+00 6.0000000e-01 7.0000000e-01 1.4000000e+00 1.6000000e+00 1.9000000e+00 2.3000000e+00 1.4000000e+00 9.0000000e-01 1.4000000e+00 2.1000000e+00 1.4000000e+00 1.3000000e+00 6.0000000e-01 1.3000000e+00 1.4000000e+00 1.3000000e+00 9.0000000e-01 1.7000000e+00 1.5000000e+00 1.1000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 9.0000000e-01 1.0000000e-01 5.0000000e-01 1.2000000e+00 2.0000000e-01 1.8000000e+00 9.0000000e-01 1.7000000e+00 1.4000000e+00 1.6000000e+00 2.4000000e+00 8.0000000e-01 2.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 1.1000000e+00 1.3000000e+00 8.0000000e-01 1.2000000e+00 1.1000000e+00 1.3000000e+00 2.5000000e+00 2.7000000e+00 8.0000000e-01 1.5000000e+00 8.0000000e-01 2.5000000e+00 7.0000000e-01 1.5000000e+00 1.8000000e+00 6.0000000e-01 7.0000000e-01 1.4000000e+00 1.6000000e+00 1.9000000e+00 2.2000000e+00 1.4000000e+00 9.0000000e-01 1.4000000e+00 2.0000000e+00 1.4000000e+00 1.3000000e+00 6.0000000e-01 1.2000000e+00 1.4000000e+00 1.2000000e+00 9.0000000e-01 1.7000000e+00 1.5000000e+00 1.1000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 9.0000000e-01 5.0000000e-01 1.2000000e+00 1.0000000e-01 1.8000000e+00 9.0000000e-01 1.7000000e+00 1.4000000e+00 1.6000000e+00 2.4000000e+00 8.0000000e-01 2.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 1.1000000e+00 1.3000000e+00 8.0000000e-01 1.1000000e+00 1.1000000e+00 1.3000000e+00 2.5000000e+00 2.7000000e+00 8.0000000e-01 1.5000000e+00 7.0000000e-01 2.5000000e+00 7.0000000e-01 1.5000000e+00 1.8000000e+00 6.0000000e-01 7.0000000e-01 1.4000000e+00 1.6000000e+00 1.9000000e+00 2.2000000e+00 1.4000000e+00 9.0000000e-01 1.4000000e+00 2.0000000e+00 1.4000000e+00 1.3000000e+00 6.0000000e-01 1.2000000e+00 1.4000000e+00 1.2000000e+00 9.0000000e-01 1.7000000e+00 1.5000000e+00 1.0000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 9.0000000e-01 1.3000000e+00 5.0000000e-01 1.7000000e+00 8.0000000e-01 1.6000000e+00 1.3000000e+00 1.5000000e+00 2.3000000e+00 1.3000000e+00 2.0000000e+00 1.5000000e+00 1.8000000e+00 8.0000000e-01 1.0000000e+00 1.2000000e+00 7.0000000e-01 1.1000000e+00 1.0000000e+00 1.2000000e+00 2.4000000e+00 2.6000000e+00 7.0000000e-01 1.4000000e+00 7.0000000e-01 2.4000000e+00 6.0000000e-01 1.4000000e+00 1.7000000e+00 5.0000000e-01 6.0000000e-01 1.3000000e+00 1.5000000e+00 1.8000000e+00 2.1000000e+00 1.3000000e+00 8.0000000e-01 1.3000000e+00 1.8000000e+00 1.3000000e+00 1.2000000e+00 5.0000000e-01 1.1000000e+00 1.3000000e+00 1.0000000e+00 8.0000000e-01 1.6000000e+00 1.4000000e+00 1.0000000e+00 7.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 1.1000000e+00 3.0000000e+00 2.1000000e+00 2.9000000e+00 2.6000000e+00 2.8000000e+00 3.6000000e+00 1.5000000e+00 3.3000000e+00 2.8000000e+00 3.1000000e+00 2.1000000e+00 2.3000000e+00 2.5000000e+00 2.0000000e+00 2.1000000e+00 2.3000000e+00 2.5000000e+00 3.7000000e+00 3.9000000e+00 2.0000000e+00 2.7000000e+00 1.9000000e+00 3.7000000e+00 1.9000000e+00 2.7000000e+00 3.0000000e+00 1.8000000e+00 1.9000000e+00 2.6000000e+00 2.8000000e+00 3.1000000e+00 3.4000000e+00 2.6000000e+00 2.1000000e+00 2.6000000e+00 3.1000000e+00 2.6000000e+00 2.5000000e+00 1.8000000e+00 2.4000000e+00 2.6000000e+00 2.1000000e+00 2.1000000e+00 2.9000000e+00 2.7000000e+00 2.2000000e+00 2.0000000e+00 2.2000000e+00 2.4000000e+00 2.1000000e+00 1.9000000e+00 1.0000000e+00 1.8000000e+00 1.5000000e+00 
1.7000000e+00 2.5000000e+00 8.0000000e-01 2.2000000e+00 1.7000000e+00 2.0000000e+00 1.0000000e+00 1.2000000e+00 1.4000000e+00 9.0000000e-01 1.1000000e+00 1.2000000e+00 1.4000000e+00 2.6000000e+00 2.8000000e+00 9.0000000e-01 1.6000000e+00 8.0000000e-01 2.6000000e+00 8.0000000e-01 1.6000000e+00 1.9000000e+00 7.0000000e-01 8.0000000e-01 1.5000000e+00 1.7000000e+00 2.0000000e+00 2.3000000e+00 1.5000000e+00 1.0000000e+00 1.5000000e+00 2.0000000e+00 1.5000000e+00 1.4000000e+00 7.0000000e-01 1.3000000e+00 1.5000000e+00 1.2000000e+00 1.0000000e+00 1.8000000e+00 1.6000000e+00 1.1000000e+00 9.0000000e-01 1.1000000e+00 1.3000000e+00 1.0000000e+00 9.0000000e-01 8.0000000e-01 7.0000000e-01 3.0000000e-01 1.3000000e+00 1.5000000e+00 1.0000000e+00 8.0000000e-01 9.0000000e-01 9.0000000e-01 7.0000000e-01 5.0000000e-01 1.0000000e+00 9.0000000e-01 7.0000000e-01 7.0000000e-01 1.4000000e+00 1.4000000e+00 1.1000000e+00 6.0000000e-01 1.1000000e+00 1.4000000e+00 1.1000000e+00 4.0000000e-01 9.0000000e-01 1.2000000e+00 1.1000000e+00 5.0000000e-01 9.0000000e-01 1.1000000e+00 1.6000000e+00 5.0000000e-01 1.0000000e+00 1.1000000e+00 1.4000000e+00 4.0000000e-01 7.0000000e-01 1.2000000e+00 6.0000000e-01 4.0000000e-01 9.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e+00 8.0000000e-01 6.0000000e-01 9.0000000e-01 1.3000000e+00 5.0000000e-01 7.0000000e-01 1.8000000e+00 9.0000000e-01 1.5000000e+00 9.0000000e-01 1.4000000e+00 7.0000000e-01 6.0000000e-01 1.0000000e+00 2.0000000e-01 5.0000000e-01 6.0000000e-01 7.0000000e-01 1.9000000e+00 1.9000000e+00 5.0000000e-01 1.1000000e+00 2.0000000e-01 1.9000000e+00 5.0000000e-01 9.0000000e-01 1.4000000e+00 4.0000000e-01 3.0000000e-01 6.0000000e-01 1.4000000e+00 1.6000000e+00 2.1000000e+00 6.0000000e-01 5.0000000e-01 5.0000000e-01 1.9000000e+00 7.0000000e-01 6.0000000e-01 3.0000000e-01 1.1000000e+00 9.0000000e-01 1.1000000e+00 0.0000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 5.0000000e-01 7.0000000e-01 7.0000000e-01 3.0000000e-01 8.0000000e-01 6.0000000e-01 7.0000000e-01 2.2000000e+00 4.0000000e-01 5.0000000e-01 6.0000000e-01 8.0000000e-01 7.0000000e-01 4.0000000e-01 1.4000000e+00 1.3000000e+00 7.0000000e-01 6.0000000e-01 8.0000000e-01 1.0000000e+00 1.1000000e+00 2.0000000e-01 1.5000000e+00 8.0000000e-01 1.0000000e+00 4.0000000e-01 3.0000000e-01 1.1000000e+00 1.0000000e+00 7.0000000e-01 5.0000000e-01 3.0000000e-01 8.0000000e-01 7.0000000e-01 8.0000000e-01 1.0000000e+00 6.0000000e-01 8.0000000e-01 7.0000000e-01 1.1000000e+00 5.0000000e-01 4.0000000e-01 8.0000000e-01 1.3000000e+00 3.0000000e-01 4.0000000e-01 7.0000000e-01 9.0000000e-01 7.0000000e-01 9.0000000e-01 1.2000000e+00 4.0000000e-01 1.3000000e+00 1.4000000e+00 1.0000000e+00 4.0000000e-01 9.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 2.0000000e-01 1.4000000e+00 1.4000000e+00 7.0000000e-01 6.0000000e-01 7.0000000e-01 1.4000000e+00 7.0000000e-01 4.0000000e-01 9.0000000e-01 8.0000000e-01 7.0000000e-01 3.0000000e-01 9.0000000e-01 1.1000000e+00 1.6000000e+00 4.0000000e-01 5.0000000e-01 4.0000000e-01 1.4000000e+00 6.0000000e-01 2.0000000e-01 8.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 5.0000000e-01 1.1000000e+00 1.6000000e+00 8.0000000e-01 5.0000000e-01 7.0000000e-01 7.0000000e-01 5.0000000e-01 3.0000000e-01 8.0000000e-01 7.0000000e-01 5.0000000e-01 4.0000000e-01 1.2000000e+00 1.2000000e+00 8.0000000e-01 4.0000000e-01 9.0000000e-01 
1.2000000e+00 9.0000000e-01 3.0000000e-01 7.0000000e-01 1.0000000e+00 9.0000000e-01 2.0000000e-01 7.0000000e-01 9.0000000e-01 1.4000000e+00 2.0000000e-01 7.0000000e-01 8.0000000e-01 1.2000000e+00 4.0000000e-01 4.0000000e-01 1.0000000e+00 4.0000000e-01 2.0000000e-01 7.0000000e-01 7.0000000e-01 3.0000000e-01 3.0000000e-01 6.0000000e-01 8.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 2.7000000e+00 3.0000000e-01 9.0000000e-01 6.0000000e-01 1.5000000e+00 1.3000000e+00 1.1000000e+00 1.9000000e+00 1.8000000e+00 1.3000000e+00 1.1000000e+00 8.0000000e-01 4.0000000e-01 1.6000000e+00 9.0000000e-01 2.0000000e+00 2.0000000e-01 1.7000000e+00 9.0000000e-01 6.0000000e-01 1.8000000e+00 1.7000000e+00 1.2000000e+00 8.0000000e-01 5.0000000e-01 8.0000000e-01 1.2000000e+00 1.5000000e+00 1.5000000e+00 5.0000000e-01 1.3000000e+00 1.2000000e+00 1.8000000e+00 1.2000000e+00 1.0000000e+00 1.5000000e+00 1.8000000e+00 8.0000000e-01 9.0000000e-01 1.4000000e+00 1.6000000e+00 1.4000000e+00 1.4000000e+00 1.7000000e+00 2.4000000e+00 1.8000000e+00 2.3000000e+00 1.6000000e+00 1.5000000e+00 1.9000000e+00 8.0000000e-01 9.0000000e-01 1.5000000e+00 1.6000000e+00 2.8000000e+00 2.8000000e+00 1.1000000e+00 2.0000000e+00 7.0000000e-01 2.8000000e+00 1.4000000e+00 1.8000000e+00 2.3000000e+00 1.3000000e+00 1.2000000e+00 1.5000000e+00 2.3000000e+00 2.5000000e+00 3.0000000e+00 1.5000000e+00 1.4000000e+00 1.2000000e+00 2.8000000e+00 1.4000000e+00 1.5000000e+00 1.1000000e+00 2.0000000e+00 1.8000000e+00 2.0000000e+00 9.0000000e-01 1.9000000e+00 1.8000000e+00 1.8000000e+00 1.4000000e+00 1.6000000e+00 1.3000000e+00 1.0000000e+00 6.0000000e-01 7.0000000e-01 1.2000000e+00 1.0000000e+00 8.0000000e-01 1.6000000e+00 1.5000000e+00 1.0000000e+00 8.0000000e-01 9.0000000e-01 6.0000000e-01 1.3000000e+00 6.0000000e-01 1.7000000e+00 4.0000000e-01 1.4000000e+00 6.0000000e-01 3.0000000e-01 1.5000000e+00 1.4000000e+00 9.0000000e-01 5.0000000e-01 2.0000000e-01 9.0000000e-01 9.0000000e-01 1.2000000e+00 1.2000000e+00 5.0000000e-01 1.0000000e+00 9.0000000e-01 1.5000000e+00 9.0000000e-01 7.0000000e-01 1.2000000e+00 1.5000000e+00 5.0000000e-01 7.0000000e-01 1.1000000e+00 1.3000000e+00 1.1000000e+00 1.1000000e+00 1.4000000e+00 1.1000000e+00 7.0000000e-01 5.0000000e-01 5.0000000e-01 1.0000000e+00 9.0000000e-01 7.0000000e-01 5.0000000e-01 1.3000000e+00 1.1000000e+00 8.0000000e-01 7.0000000e-01 1.1000000e+00 1.0000000e+00 9.0000000e-01 8.0000000e-01 7.0000000e-01 1.0000000e+00 9.0000000e-01 3.0000000e-01 5.0000000e-01 7.0000000e-01 1.3000000e+00 4.0000000e-01 7.0000000e-01 6.0000000e-01 1.0000000e+00 9.0000000e-01 6.0000000e-01 1.0000000e+00 6.0000000e-01 6.0000000e-01 7.0000000e-01 9.0000000e-01 7.0000000e-01 8.0000000e-01 6.0000000e-01 8.0000000e-01 6.0000000e-01 9.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 6.0000000e-01 1.5000000e+00 1.4000000e+00 8.0000000e-01 7.0000000e-01 6.0000000e-01 1.0000000e+00 1.4000000e+00 4.0000000e-01 1.6000000e+00 8.0000000e-01 1.2000000e+00 5.0000000e-01 7.0000000e-01 1.3000000e+00 1.2000000e+00 8.0000000e-01 9.0000000e-01 8.0000000e-01 7.0000000e-01 8.0000000e-01 1.0000000e+00 1.1000000e+00 6.0000000e-01 9.0000000e-01 8.0000000e-01 1.3000000e+00 7.0000000e-01 5.0000000e-01 1.0000000e+00 1.4000000e+00 4.0000000e-01 5.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 1.0000000e+00 1.3000000e+00 5.0000000e-01 4.0000000e-01 8.0000000e-01 7.0000000e-01 3.0000000e-01 4.0000000e-01 1.6000000e+00 1.8000000e+00 1.0000000e+00 6.0000000e-01 9.0000000e-01 1.6000000e+00 5.0000000e-01 6.0000000e-01 9.0000000e-01 
4.0000000e-01 4.0000000e-01 5.0000000e-01 7.0000000e-01 1.0000000e+00 1.4000000e+00 5.0000000e-01 5.0000000e-01 6.0000000e-01 1.2000000e+00 5.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 7.0000000e-01 8.0000000e-01 6.0000000e-01 3.0000000e-01 7.0000000e-01 2.0000000e-01 3.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 6.0000000e-01 5.0000000e-01 3.0000000e-01 1.4000000e+00 1.6000000e+00 5.0000000e-01 5.0000000e-01 8.0000000e-01 1.4000000e+00 4.0000000e-01 6.0000000e-01 8.0000000e-01 5.0000000e-01 4.0000000e-01 3.0000000e-01 8.0000000e-01 1.0000000e+00 1.5000000e+00 3.0000000e-01 4.0000000e-01 5.0000000e-01 1.3000000e+00 7.0000000e-01 4.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 6.0000000e-01 6.0000000e-01 6.0000000e-01 4.0000000e-01 3.0000000e-01 3.0000000e-01 7.0000000e-01 5.0000000e-01 1.1000000e+00 1.0000000e+00 4.0000000e-01 3.0000000e-01 1.2000000e+00 1.4000000e+00 8.0000000e-01 2.0000000e-01 1.2000000e+00 1.2000000e+00 6.0000000e-01 3.0000000e-01 5.0000000e-01 7.0000000e-01 7.0000000e-01 4.0000000e-01 5.0000000e-01 6.0000000e-01 1.1000000e+00 4.0000000e-01 6.0000000e-01 7.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e-01 3.0000000e-01 4.0000000e-01 1.0000000e+00 4.0000000e-01 4.0000000e-01 3.0000000e-01 5.0000000e-01 3.0000000e-01 6.0000000e-01 9.0000000e-01 4.0000000e-01 7.0000000e-01 8.0000000e-01 2.0000000e+00 2.0000000e+00 5.0000000e-01 1.2000000e+00 3.0000000e-01 2.0000000e+00 6.0000000e-01 1.0000000e+00 1.5000000e+00 5.0000000e-01 5.0000000e-01 7.0000000e-01 1.5000000e+00 1.7000000e+00 2.2000000e+00 7.0000000e-01 6.0000000e-01 6.0000000e-01 2.0000000e+00 9.0000000e-01 7.0000000e-01 5.0000000e-01 1.2000000e+00 1.0000000e+00 1.2000000e+00 2.0000000e-01 1.1000000e+00 1.0000000e+00 1.0000000e+00 6.0000000e-01 8.0000000e-01 9.0000000e-01 5.0000000e-01 6.0000000e-01 7.0000000e-01 1.9000000e+00 1.9000000e+00 9.0000000e-01 1.1000000e+00 4.0000000e-01 1.9000000e+00 6.0000000e-01 9.0000000e-01 1.4000000e+00 6.0000000e-01 6.0000000e-01 6.0000000e-01 1.4000000e+00 1.6000000e+00 2.1000000e+00 6.0000000e-01 9.0000000e-01 1.0000000e+00 1.9000000e+00 6.0000000e-01 6.0000000e-01 6.0000000e-01 1.1000000e+00 9.0000000e-01 1.1000000e+00 5.0000000e-01 1.0000000e+00 9.0000000e-01 9.0000000e-01 5.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 5.0000000e-01 1.4000000e+00 1.6000000e+00 1.0000000e+00 5.0000000e-01 8.0000000e-01 1.4000000e+00 5.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 4.0000000e-01 8.0000000e-01 1.0000000e+00 1.5000000e+00 4.0000000e-01 8.0000000e-01 9.0000000e-01 1.3000000e+00 3.0000000e-01 5.0000000e-01 5.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 6.0000000e-01 4.0000000e-01 3.0000000e-01 7.0000000e-01 3.0000000e-01 2.0000000e-01 5.0000000e-01 1.2000000e+00 1.4000000e+00 8.0000000e-01 5.0000000e-01 9.0000000e-01 1.2000000e+00 6.0000000e-01 3.0000000e-01 7.0000000e-01 7.0000000e-01 6.0000000e-01 3.0000000e-01 7.0000000e-01 9.0000000e-01 1.4000000e+00 4.0000000e-01 4.0000000e-01 4.0000000e-01 1.2000000e+00 6.0000000e-01 1.0000000e-01 7.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 1.2000000e+00 1.7000000e+00 1.0000000e+00 2.1000000e+00 1.0000000e+00 1.8000000e+00 1.0000000e+00 7.0000000e-01 1.9000000e+00 1.8000000e+00 1.3000000e+00 9.0000000e-01 1.0000000e+00 3.0000000e-01 1.3000000e+00 1.6000000e+00 
1.6000000e+00 8.0000000e-01 1.4000000e+00 1.3000000e+00 1.9000000e+00 1.3000000e+00 1.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 1.0000000e+00 1.5000000e+00 1.7000000e+00 1.5000000e+00 1.5000000e+00 1.8000000e+00 1.9000000e+00 1.2000000e+00 2.1000000e+00 3.0000000e-01 2.0000000e+00 1.2000000e+00 9.0000000e-01 2.1000000e+00 2.0000000e+00 1.3000000e+00 1.1000000e+00 8.0000000e-01 1.2000000e+00 1.3000000e+00 1.8000000e+00 1.6000000e+00 8.0000000e-01 1.4000000e+00 1.4000000e+00 2.1000000e+00 1.5000000e+00 1.3000000e+00 1.8000000e+00 1.9000000e+00 1.0000000e+00 1.2000000e+00 1.7000000e+00 1.9000000e+00 1.7000000e+00 1.5000000e+00 1.8000000e+00 1.0000000e+00 6.0000000e-01 1.7000000e+00 5.0000000e-01 1.1000000e+00 1.2000000e+00 6.0000000e-01 8.0000000e-01 6.0000000e-01 1.2000000e+00 1.4000000e+00 1.9000000e+00 7.0000000e-01 6.0000000e-01 6.0000000e-01 1.7000000e+00 1.2000000e+00 9.0000000e-01 8.0000000e-01 9.0000000e-01 9.0000000e-01 9.0000000e-01 5.0000000e-01 1.0000000e+00 1.1000000e+00 8.0000000e-01 4.0000000e-01 8.0000000e-01 1.2000000e+00 8.0000000e-01 1.3000000e+00 1.0000000e+00 8.0000000e-01 2.0000000e-01 5.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 1.0000000e+00 5.0000000e-01 8.0000000e-01 9.0000000e-01 8.0000000e-01 6.0000000e-01 5.0000000e-01 9.0000000e-01 3.0000000e-01 2.0000000e-01 6.0000000e-01 1.1000000e+00 2.0000000e-01 2.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 7.0000000e-01 1.0000000e+00 2.1000000e+00 7.0000000e-01 1.1000000e+00 1.6000000e+00 6.0000000e-01 5.0000000e-01 8.0000000e-01 1.6000000e+00 1.8000000e+00 2.3000000e+00 8.0000000e-01 7.0000000e-01 7.0000000e-01 2.1000000e+00 7.0000000e-01 8.0000000e-01 4.0000000e-01 1.3000000e+00 1.1000000e+00 1.3000000e+00 2.0000000e-01 1.2000000e+00 1.1000000e+00 1.1000000e+00 7.0000000e-01 9.0000000e-01 6.0000000e-01 3.0000000e-01 1.8000000e+00 1.0000000e+00 7.0000000e-01 1.9000000e+00 1.8000000e+00 1.3000000e+00 9.0000000e-01 6.0000000e-01 1.0000000e+00 1.3000000e+00 1.6000000e+00 1.6000000e+00 6.0000000e-01 1.4000000e+00 1.3000000e+00 1.9000000e+00 1.3000000e+00 1.1000000e+00 1.6000000e+00 1.9000000e+00 9.0000000e-01 1.0000000e+00 1.5000000e+00 1.7000000e+00 1.5000000e+00 1.5000000e+00 1.8000000e+00 8.0000000e-01 1.1000000e+00 1.0000000e-01 3.0000000e-01 7.0000000e-01 9.0000000e-01 1.2000000e+00 1.6000000e+00 7.0000000e-01 3.0000000e-01 7.0000000e-01 1.4000000e+00 7.0000000e-01 6.0000000e-01 3.0000000e-01 6.0000000e-01 7.0000000e-01 6.0000000e-01 5.0000000e-01 1.0000000e+00 8.0000000e-01 5.0000000e-01 2.0000000e-01 3.0000000e-01 7.0000000e-01 4.0000000e-01 5.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 1.2000000e+00 5.0000000e-01 6.0000000e-01 7.0000000e-01 1.0000000e+00 4.0000000e-01 3.0000000e-01 9.0000000e-01 3.0000000e-01 3.0000000e-01 6.0000000e-01 9.0000000e-01 2.0000000e-01 4.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 8.0000000e-01 1.2000000e+00 1.1000000e+00 8.0000000e-01 2.0000000e-01 4.0000000e-01 7.0000000e-01 8.0000000e-01 9.0000000e-01 1.1000000e+00 5.0000000e-01 9.0000000e-01 8.0000000e-01 1.2000000e+00 6.0000000e-01 6.0000000e-01 9.0000000e-01 1.4000000e+00 5.0000000e-01 7.0000000e-01 8.0000000e-01 1.0000000e+00 8.0000000e-01 1.0000000e+00 1.3000000e+00 2.0000000e-01 8.0000000e-01 1.0000000e+00 1.3000000e+00 1.7000000e+00 8.0000000e-01 3.0000000e-01 8.0000000e-01 1.5000000e+00 8.0000000e-01 7.0000000e-01 2.0000000e-01 7.0000000e-01 8.0000000e-01 7.0000000e-01 4.0000000e-01 1.1000000e+00 
9.0000000e-01 5.0000000e-01 3.0000000e-01 4.0000000e-01 6.0000000e-01 3.0000000e-01 7.0000000e-01 1.1000000e+00 1.3000000e+00 1.8000000e+00 7.0000000e-01 3.0000000e-01 7.0000000e-01 1.6000000e+00 7.0000000e-01 6.0000000e-01 1.0000000e-01 8.0000000e-01 7.0000000e-01 8.0000000e-01 3.0000000e-01 1.0000000e+00 8.0000000e-01 6.0000000e-01 5.0000000e-01 4.0000000e-01 5.0000000e-01 2.0000000e-01 8.0000000e-01 1.0000000e+00 1.5000000e+00 1.0000000e-01 6.0000000e-01 7.0000000e-01 1.3000000e+00 6.0000000e-01 3.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 3.0000000e-01 8.0000000e-01 8.0000000e-01 9.0000000e-01 1.1000000e+00 7.0000000e-01 9.0000000e-01 8.0000000e-01 1.2000000e+00 5.0000000e-01 8.0000000e-01 7.0000000e-01 1.4000000e+00 7.0000000e-01 9.0000000e-01 7.0000000e-01 9.0000000e-01 7.0000000e-01 1.0000000e+00 1.3000000e+00 1.0000000e+00 1.0000000e+00 1.1000000e+00 1.3000000e+00 4.0000000e-01 1.1000000e+00 1.0000000e+00 1.4000000e+00 7.0000000e-01 7.0000000e-01 1.0000000e+00 1.6000000e+00 6.0000000e-01 7.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 1.2000000e+00 1.5000000e+00 1.5000000e+00 1.6000000e+00 1.8000000e+00 8.0000000e-01 1.6000000e+00 1.5000000e+00 1.9000000e+00 1.0000000e+00 1.2000000e+00 1.3000000e+00 2.1000000e+00 1.1000000e+00 1.2000000e+00 1.2000000e+00 1.6000000e+00 1.4000000e+00 1.7000000e+00 2.0000000e+00 7.0000000e-01 8.0000000e-01 1.3000000e+00 6.0000000e-01 4.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01 5.0000000e-01 1.4000000e+00 9.0000000e-01 4.0000000e-01 3.0000000e-01 6.0000000e-01 9.0000000e-01 8.0000000e-01 5.0000000e-01 8.0000000e-01 1.0000000e+00 8.0000000e-01 4.0000000e-01 5.0000000e-01 8.0000000e-01 4.0000000e-01 1.6000000e+00 1.0000000e+00 5.0000000e-01 8.0000000e-01 8.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 9.0000000e-01 1.1000000e+00 9.0000000e-01 6.0000000e-01 6.0000000e-01 9.0000000e-01 5.0000000e-01 1.4000000e+00 1.3000000e+00 1.7000000e+00 8.0000000e-01 1.0000000e+00 1.0000000e+00 1.9000000e+00 9.0000000e-01 1.0000000e+00 1.0000000e+00 1.4000000e+00 1.2000000e+00 1.5000000e+00 1.8000000e+00 6.0000000e-01 8.0000000e-01 6.0000000e-01 4.0000000e-01 6.0000000e-01 7.0000000e-01 5.0000000e-01 4.0000000e-01 4.0000000e-01 9.0000000e-01 4.0000000e-01 2.0000000e-01 6.0000000e-01 7.0000000e-01 5.0000000e-01 6.0000000e-01 5.0000000e-01 6.0000000e-01 5.0000000e-01 7.0000000e-01 5.0000000e-01 6.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 9.0000000e-01 8.0000000e-01 9.0000000e-01 3.0000000e-01 1.1000000e+00 9.0000000e-01 7.0000000e-01 5.0000000e-01 5.0000000e-01 6.0000000e-01 3.0000000e-01 3.0000000e-01 3.0000000e-01 1.1000000e+00 5.0000000e-01 4.0000000e-01 2.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 1.0000000e+00 5.0000000e-01 9.0000000e-01 3.0000000e-01 2.0000000e-01 4.0000000e-01 6.0000000e-01 4.0000000e-01 5.0000000e-01 8.0000000e-01 1.1000000e+00 8.0000000e-01 6.0000000e-01 2.0000000e-01 6.0000000e-01 4.0000000e-01 7.0000000e-01 1.0000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 5.0000000e-01 7.0000000e-01 7.0000000e-01 3.0000000e-01 2.0000000e-01 7.0000000e-01 9.0000000e-01 7.0000000e-01 6.0000000e-01 9.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 5.0000000e-01 8.0000000e-01 
5.0000000e-01 9.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 5.0000000e-01
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-chebyshev-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-chebyshev-ml.txt
new file mode 100644
index 0000000000000000000000000000000000000000..786486295935319c03a60a349f03328c127935b9
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-chebyshev-ml.txt
@@ -0,0 +1 @@
+ 8.9084734e-01 9.3573853e-01 9.3507398e-01 9.6040691e-01 9.2918157e-01 9.6617342e-01 9.0430930e-01 9.5753424e-01 8.7106898e-01 9.2169905e-01 9.7401159e-01 8.9013416e-01 9.3956689e-01 9.0041896e-01 9.2588355e-01 9.3849417e-01 8.9713468e-01 9.1481804e-01 9.7500539e-01 9.0012586e-01 9.0962559e-01 8.5860091e-01 8.6981095e-01 8.9995771e-01 8.8070172e-01 9.1456657e-01 8.6711474e-01 9.2593917e-01 8.7560376e-01 8.5193121e-01 9.0898542e-01 8.7765302e-01 8.6555584e-01 8.6093485e-01 9.0447028e-01 8.7614405e-01 9.4803522e-01 8.4998062e-01 7.8398996e-01 8.9538612e-01 8.3902291e-01 9.9039470e-01 9.5480519e-01 8.9152195e-01 9.1623329e-01 7.9094921e-01 9.1777100e-01 9.8972335e-01 9.0429093e-01 8.7646362e-01 9.2136649e-01 9.7178177e-01 8.9610979e-01 9.4710327e-01 9.3612450e-01 9.0241499e-01 7.7992538e-01 8.7262126e-01 9.3325183e-01 8.5796531e-01 9.4267977e-01 6.7224167e-01 7.9568368e-01 8.6411267e-01 9.3311642e-01 9.0160114e-01 9.0698887e-01 8.5833256e-01 9.6902830e-01 9.5072298e-01 8.6808495e-01 9.7879599e-01 8.8060729e-01 8.2818573e-01 8.4366706e-01 8.4506700e-01 9.4532981e-01 9.1792306e-01 7.8917825e-01 9.8337805e-01 8.1751613e-01 9.3037855e-01 9.1618832e-01 8.6568874e-01 8.9751397e-01 8.7923710e-01 8.6814329e-01 9.0330164e-01 8.2426213e-01 9.4644643e-01 8.8431293e-01 8.8497426e-01 9.0633818e-01 9.5537161e-01 8.2167575e-01 8.7771053e-01 9.0681167e-01 8.7626143e-01 8.7463464e-01 9.8033940e-01 9.2920881e-01 9.5108549e-01 9.1287466e-01 8.0052218e-01 9.2409517e-01 8.8252650e-01 8.7873923e-01 9.2989402e-01 9.1985043e-01 9.6172646e-01 8.8223856e-01 9.4477822e-01 8.8310948e-01 9.4461306e-01 9.1875210e-01 9.1233363e-01 9.2124013e-01 9.5460897e-01 8.4640982e-01 9.0882657e-01 9.8169468e-01 9.7828355e-01 8.4150533e-01 8.6888923e-01 9.7138825e-01 8.7988144e-01 9.6720910e-01 8.9450147e-01 9.5331584e-01 8.8871809e-01 8.9736685e-01 8.6258146e-01 9.1331565e-01 9.0968870e-01 9.4833654e-01 9.0536967e-01 9.5099871e-01 8.0251958e-01 9.2526150e-01 9.8971957e-01 9.0340947e-01 9.4955892e-01 9.6838162e-01 8.7534901e-01 9.1178797e-01 9.2649154e-01 9.5260993e-01 9.3178143e-01 9.4943000e-01 8.7816171e-01 9.6506542e-01 8.3422958e-01 9.3443585e-01 9.3220084e-01 8.5706573e-01 8.4666325e-01 9.0474744e-01 9.1080644e-01 9.2406899e-01 8.7901768e-01 9.3265263e-01 9.5992829e-01 9.5696271e-01 9.1932272e-01 8.0937044e-01 9.0904917e-01 8.9516756e-01 9.4797906e-01 8.4159421e-01 9.6773901e-01 9.7099825e-01 9.6941820e-01 9.8174088e-01 9.7569951e-01 9.3655362e-01 8.4130333e-01 9.5994549e-01 8.4235414e-01 9.1429418e-01 9.3418117e-01 8.4600977e-01 8.8166496e-01 8.7594776e-01 8.8571112e-01 9.6308174e-01 9.5315927e-01 8.6997519e-01 8.9383032e-01 9.4686804e-01 9.4399596e-01
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6722928a4a4491c74d3fef5205276b532b15dcbf
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-cityblock-ml-iris.txt
@@ -0,0 +1 @@
+ 7.0000000e-01 8.0000000e-01 1.0000000e+00 2.0000000e-01 1.2000000e+00 7.0000000e-01 3.0000000e-01
1.3000000e+00 8.0000000e-01 6.0000000e-01 6.0000000e-01 9.0000000e-01 1.7000000e+00 1.4000000e+00 1.8000000e+00 1.0000000e+00 1.0000000e-01 1.3000000e+00 5.0000000e-01 7.0000000e-01 5.0000000e-01 1.0000000e+00 8.0000000e-01 9.0000000e-01 8.0000000e-01 6.0000000e-01 2.0000000e-01 2.0000000e-01 9.0000000e-01 9.0000000e-01 7.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 6.0000000e-01 5.0000000e-01 8.0000000e-01 1.3000000e+00 2.0000000e-01 3.0000000e-01 2.0000000e+00 1.1000000e+00 7.0000000e-01 1.0000000e+00 9.0000000e-01 5.0000000e-01 8.0000000e-01 5.0000000e-01 3.0000000e-01 6.7000000e+00 6.0000000e+00 7.0000000e+00 5.3000000e+00 6.6000000e+00 5.5000000e+00 6.1000000e+00 4.0000000e+00 6.4000000e+00 4.6000000e+00 4.5000000e+00 5.4000000e+00 5.6000000e+00 6.1000000e+00 4.4000000e+00 6.2000000e+00 5.4000000e+00 5.0000000e+00 6.8000000e+00 4.9000000e+00 6.1000000e+00 5.4000000e+00 7.0000000e+00 6.0000000e+00 5.9000000e+00 6.2000000e+00 7.0000000e+00 7.2000000e+00 5.9000000e+00 4.4000000e+00 4.8000000e+00 4.6000000e+00 5.0000000e+00 6.8000000e+00 5.2000000e+00 5.5000000e+00 6.6000000e+00 6.5000000e+00 4.8000000e+00 5.1000000e+00 5.3000000e+00 5.9000000e+00 5.2000000e+00 4.0000000e+00 5.2000000e+00 4.9000000e+00 5.1000000e+00 5.7000000e+00 3.5000000e+00 5.1000000e+00 8.3000000e+00 6.9000000e+00 8.9000000e+00 7.6000000e+00 8.3000000e+00 1.0100000e+01 5.8000000e+00 9.3000000e+00 8.6000000e+00 9.2000000e+00 7.2000000e+00 7.7000000e+00 8.2000000e+00 7.0000000e+00 7.3000000e+00 7.6000000e+00 7.6000000e+00 1.0200000e+01 1.1100000e+01 7.1000000e+00 8.5000000e+00 6.5000000e+00 1.0400000e+01 7.1000000e+00 8.0000000e+00 8.6000000e+00 6.8000000e+00 6.6000000e+00 8.1000000e+00 8.4000000e+00 9.4000000e+00 9.9000000e+00 8.2000000e+00 6.9000000e+00 7.3000000e+00 9.9000000e+00 7.7000000e+00 7.4000000e+00 6.4000000e+00 8.1000000e+00 8.4000000e+00 8.0000000e+00 6.9000000e+00 8.6000000e+00 8.4000000e+00 8.0000000e+00 7.5000000e+00 7.5000000e+00 7.3000000e+00 6.6000000e+00 5.0000000e-01 5.0000000e-01 7.0000000e-01 1.9000000e+00 8.0000000e-01 6.0000000e-01 6.0000000e-01 3.0000000e-01 1.3000000e+00 7.0000000e-01 2.0000000e-01 1.0000000e+00 2.1000000e+00 2.5000000e+00 1.7000000e+00 8.0000000e-01 2.0000000e+00 1.2000000e+00 1.2000000e+00 1.2000000e+00 1.3000000e+00 1.1000000e+00 1.0000000e+00 3.0000000e-01 9.0000000e-01 9.0000000e-01 7.0000000e-01 6.0000000e-01 4.0000000e-01 1.2000000e+00 1.6000000e+00 1.8000000e+00 3.0000000e-01 5.0000000e-01 1.2000000e+00 3.0000000e-01 6.0000000e-01 7.0000000e-01 8.0000000e-01 1.3000000e+00 8.0000000e-01 1.2000000e+00 1.7000000e+00 2.0000000e-01 1.2000000e+00 5.0000000e-01 1.2000000e+00 4.0000000e-01 6.8000000e+00 6.1000000e+00 6.9000000e+00 5.0000000e+00 6.3000000e+00 5.2000000e+00 6.4000000e+00 3.3000000e+00 6.1000000e+00 4.3000000e+00 4.0000000e+00 5.1000000e+00 5.3000000e+00 5.8000000e+00 4.1000000e+00 6.1000000e+00 5.1000000e+00 4.7000000e+00 6.5000000e+00 4.6000000e+00 6.2000000e+00 5.1000000e+00 6.7000000e+00 5.7000000e+00 5.6000000e+00 5.9000000e+00 6.7000000e+00 6.9000000e+00 5.6000000e+00 4.1000000e+00 4.5000000e+00 4.3000000e+00 4.7000000e+00 6.5000000e+00 4.9000000e+00 6.0000000e+00 6.5000000e+00 6.2000000e+00 4.5000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 4.9000000e+00 3.5000000e+00 4.9000000e+00 4.6000000e+00 4.8000000e+00 5.4000000e+00 3.2000000e+00 4.8000000e+00 8.6000000e+00 6.6000000e+00 8.6000000e+00 7.3000000e+00 8.0000000e+00 9.8000000e+00 5.1000000e+00 9.0000000e+00 8.3000000e+00 9.9000000e+00 7.3000000e+00 7.4000000e+00 7.9000000e+00 
6.7000000e+00 7.0000000e+00 7.7000000e+00 7.3000000e+00 1.0900000e+01 1.0800000e+01 6.8000000e+00 8.6000000e+00 6.2000000e+00 1.0100000e+01 6.8000000e+00 8.3000000e+00 8.7000000e+00 6.5000000e+00 6.3000000e+00 7.8000000e+00 8.1000000e+00 9.1000000e+00 1.0600000e+01 7.9000000e+00 6.6000000e+00 7.0000000e+00 9.6000000e+00 8.2000000e+00 7.3000000e+00 6.1000000e+00 8.0000000e+00 8.3000000e+00 7.9000000e+00 6.6000000e+00 8.7000000e+00 8.7000000e+00 7.7000000e+00 7.2000000e+00 7.2000000e+00 7.8000000e+00 6.3000000e+00 4.0000000e-01 8.0000000e-01 2.0000000e+00 5.0000000e-01 7.0000000e-01 7.0000000e-01 6.0000000e-01 1.4000000e+00 6.0000000e-01 5.0000000e-01 9.0000000e-01 2.0000000e+00 2.6000000e+00 1.6000000e+00 9.0000000e-01 2.1000000e+00 1.3000000e+00 1.3000000e+00 1.3000000e+00 8.0000000e-01 1.2000000e+00 9.0000000e-01 8.0000000e-01 1.0000000e+00 1.0000000e+00 8.0000000e-01 3.0000000e-01 5.0000000e-01 1.3000000e+00 1.7000000e+00 1.9000000e+00 6.0000000e-01 4.0000000e-01 1.1000000e+00 6.0000000e-01 5.0000000e-01 8.0000000e-01 7.0000000e-01 1.2000000e+00 3.0000000e-01 1.3000000e+00 1.8000000e+00 5.0000000e-01 1.3000000e+00 2.0000000e-01 1.3000000e+00 5.0000000e-01 6.9000000e+00 6.2000000e+00 7.2000000e+00 5.5000000e+00 6.8000000e+00 5.7000000e+00 6.5000000e+00 3.8000000e+00 6.6000000e+00 4.8000000e+00 4.5000000e+00 5.6000000e+00 5.8000000e+00 6.3000000e+00 4.6000000e+00 6.4000000e+00 5.6000000e+00 5.2000000e+00 7.0000000e+00 5.1000000e+00 6.3000000e+00 5.6000000e+00 7.2000000e+00 6.2000000e+00 6.1000000e+00 6.4000000e+00 7.2000000e+00 7.4000000e+00 6.1000000e+00 4.6000000e+00 5.0000000e+00 4.8000000e+00 5.2000000e+00 7.0000000e+00 5.4000000e+00 6.1000000e+00 6.8000000e+00 6.7000000e+00 5.0000000e+00 5.3000000e+00 5.5000000e+00 6.1000000e+00 5.4000000e+00 4.0000000e+00 5.4000000e+00 5.1000000e+00 5.3000000e+00 5.9000000e+00 3.7000000e+00 5.3000000e+00 8.7000000e+00 7.1000000e+00 9.1000000e+00 7.8000000e+00 8.5000000e+00 1.0300000e+01 5.6000000e+00 9.5000000e+00 8.8000000e+00 1.0000000e+01 7.4000000e+00 7.9000000e+00 8.4000000e+00 7.2000000e+00 7.5000000e+00 7.8000000e+00 7.8000000e+00 1.1000000e+01 1.1300000e+01 7.3000000e+00 8.7000000e+00 6.7000000e+00 1.0600000e+01 7.3000000e+00 8.4000000e+00 8.8000000e+00 7.0000000e+00 6.8000000e+00 8.3000000e+00 8.6000000e+00 9.6000000e+00 1.0700000e+01 8.4000000e+00 7.1000000e+00 7.5000000e+00 1.0100000e+01 8.3000000e+00 7.6000000e+00 6.6000000e+00 8.3000000e+00 8.6000000e+00 8.2000000e+00 7.1000000e+00 8.8000000e+00 8.8000000e+00 8.2000000e+00 7.7000000e+00 7.7000000e+00 7.9000000e+00 6.8000000e+00 1.0000000e+00 2.0000000e+00 5.0000000e-01 7.0000000e-01 5.0000000e-01 4.0000000e-01 1.4000000e+00 6.0000000e-01 5.0000000e-01 9.0000000e-01 2.4000000e+00 2.6000000e+00 2.0000000e+00 1.1000000e+00 2.1000000e+00 1.3000000e+00 1.3000000e+00 1.3000000e+00 1.0000000e+00 1.2000000e+00 9.0000000e-01 6.0000000e-01 1.0000000e+00 1.0000000e+00 1.0000000e+00 3.0000000e-01 3.0000000e-01 1.3000000e+00 1.7000000e+00 2.1000000e+00 4.0000000e-01 8.0000000e-01 1.5000000e+00 4.0000000e-01 5.0000000e-01 8.0000000e-01 1.1000000e+00 1.2000000e+00 5.0000000e-01 1.3000000e+00 1.8000000e+00 5.0000000e-01 1.3000000e+00 2.0000000e-01 1.3000000e+00 7.0000000e-01 6.9000000e+00 6.2000000e+00 7.0000000e+00 5.3000000e+00 6.6000000e+00 5.5000000e+00 6.5000000e+00 3.6000000e+00 6.4000000e+00 4.6000000e+00 4.3000000e+00 5.4000000e+00 5.6000000e+00 6.1000000e+00 4.4000000e+00 6.2000000e+00 5.4000000e+00 5.0000000e+00 6.8000000e+00 4.9000000e+00 6.3000000e+00 5.4000000e+00 7.0000000e+00 
[data omitted: a long run of whitespace-separated floating-point values in %e format (seven decimal places, values roughly 0.0 to 12.1), consistent with a precomputed pairwise-distance matrix shipped as plain-text test data. This span appears to belong to one of the data files added under voice_bridge/ by this diff, but its diff header and file path are not recoverable from the extracted text, so only this placeholder is kept.]
6.7000000e+00 5.6000000e+00 6.2000000e+00 4.1000000e+00 6.5000000e+00 4.7000000e+00 4.6000000e+00 5.5000000e+00 5.7000000e+00 6.2000000e+00 4.5000000e+00 6.3000000e+00 5.5000000e+00 5.1000000e+00 6.9000000e+00 5.0000000e+00 6.2000000e+00 5.5000000e+00 7.1000000e+00 6.1000000e+00 6.0000000e+00 6.3000000e+00 7.1000000e+00 7.3000000e+00 6.0000000e+00 4.5000000e+00 4.9000000e+00 4.7000000e+00 5.1000000e+00 6.9000000e+00 5.3000000e+00 5.6000000e+00 6.7000000e+00 6.6000000e+00 4.9000000e+00 5.2000000e+00 5.4000000e+00 6.0000000e+00 5.3000000e+00 4.1000000e+00 5.3000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 3.6000000e+00 5.2000000e+00 8.4000000e+00 7.0000000e+00 9.0000000e+00 7.7000000e+00 8.4000000e+00 1.0200000e+01 5.9000000e+00 9.4000000e+00 8.7000000e+00 9.1000000e+00 7.3000000e+00 7.8000000e+00 8.3000000e+00 7.1000000e+00 7.4000000e+00 7.7000000e+00 7.7000000e+00 9.7000000e+00 1.1200000e+01 7.2000000e+00 8.6000000e+00 6.6000000e+00 1.0500000e+01 7.2000000e+00 8.1000000e+00 8.7000000e+00 6.9000000e+00 6.7000000e+00 8.2000000e+00 8.5000000e+00 9.5000000e+00 9.4000000e+00 8.3000000e+00 7.0000000e+00 7.4000000e+00 1.0000000e+01 7.8000000e+00 7.5000000e+00 6.5000000e+00 8.2000000e+00 8.5000000e+00 8.1000000e+00 7.0000000e+00 8.7000000e+00 8.5000000e+00 8.1000000e+00 7.6000000e+00 7.6000000e+00 7.4000000e+00 6.7000000e+00 1.3000000e+00 5.0000000e-01 6.9000000e+00 6.2000000e+00 7.2000000e+00 5.5000000e+00 6.8000000e+00 5.7000000e+00 6.5000000e+00 3.8000000e+00 6.6000000e+00 4.8000000e+00 4.5000000e+00 5.6000000e+00 5.8000000e+00 6.3000000e+00 4.6000000e+00 6.4000000e+00 5.6000000e+00 5.2000000e+00 7.0000000e+00 5.1000000e+00 6.3000000e+00 5.6000000e+00 7.2000000e+00 6.2000000e+00 6.1000000e+00 6.4000000e+00 7.2000000e+00 7.4000000e+00 6.1000000e+00 4.6000000e+00 5.0000000e+00 4.8000000e+00 5.2000000e+00 7.0000000e+00 5.4000000e+00 6.1000000e+00 6.8000000e+00 6.7000000e+00 5.0000000e+00 5.3000000e+00 5.5000000e+00 6.1000000e+00 5.4000000e+00 4.0000000e+00 5.4000000e+00 5.1000000e+00 5.3000000e+00 5.9000000e+00 3.7000000e+00 5.3000000e+00 8.7000000e+00 7.1000000e+00 9.1000000e+00 7.8000000e+00 8.5000000e+00 1.0300000e+01 5.6000000e+00 9.5000000e+00 8.8000000e+00 1.0000000e+01 7.4000000e+00 7.9000000e+00 8.4000000e+00 7.2000000e+00 7.5000000e+00 7.8000000e+00 7.8000000e+00 1.1000000e+01 1.1300000e+01 7.3000000e+00 8.7000000e+00 6.7000000e+00 1.0600000e+01 7.3000000e+00 8.4000000e+00 8.8000000e+00 7.0000000e+00 6.8000000e+00 8.3000000e+00 8.6000000e+00 9.6000000e+00 1.0700000e+01 8.4000000e+00 7.1000000e+00 7.5000000e+00 1.0100000e+01 8.3000000e+00 7.6000000e+00 6.6000000e+00 8.3000000e+00 8.6000000e+00 8.2000000e+00 7.1000000e+00 8.8000000e+00 8.8000000e+00 8.2000000e+00 7.7000000e+00 7.7000000e+00 7.9000000e+00 6.8000000e+00 8.0000000e-01 6.6000000e+00 5.9000000e+00 6.9000000e+00 5.2000000e+00 6.5000000e+00 5.4000000e+00 6.0000000e+00 4.3000000e+00 6.3000000e+00 4.7000000e+00 4.8000000e+00 5.3000000e+00 5.5000000e+00 6.0000000e+00 4.3000000e+00 6.1000000e+00 5.3000000e+00 4.9000000e+00 6.7000000e+00 4.8000000e+00 6.0000000e+00 5.3000000e+00 6.9000000e+00 5.9000000e+00 5.8000000e+00 6.1000000e+00 6.9000000e+00 7.1000000e+00 5.8000000e+00 4.3000000e+00 4.7000000e+00 4.5000000e+00 4.9000000e+00 6.7000000e+00 5.1000000e+00 5.4000000e+00 6.5000000e+00 6.4000000e+00 4.7000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 5.1000000e+00 4.3000000e+00 5.1000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 3.8000000e+00 5.0000000e+00 8.2000000e+00 6.8000000e+00 8.8000000e+00 7.5000000e+00 
8.2000000e+00 1.0000000e+01 6.1000000e+00 9.2000000e+00 8.5000000e+00 8.9000000e+00 7.1000000e+00 7.6000000e+00 8.1000000e+00 6.9000000e+00 7.2000000e+00 7.5000000e+00 7.5000000e+00 9.7000000e+00 1.1000000e+01 7.0000000e+00 8.4000000e+00 6.4000000e+00 1.0300000e+01 7.0000000e+00 7.9000000e+00 8.5000000e+00 6.7000000e+00 6.5000000e+00 8.0000000e+00 8.3000000e+00 9.3000000e+00 9.4000000e+00 8.1000000e+00 6.8000000e+00 7.2000000e+00 9.8000000e+00 7.6000000e+00 7.3000000e+00 6.3000000e+00 8.0000000e+00 8.3000000e+00 7.9000000e+00 6.8000000e+00 8.5000000e+00 8.3000000e+00 7.9000000e+00 7.4000000e+00 7.4000000e+00 7.2000000e+00 6.5000000e+00 6.6000000e+00 5.9000000e+00 6.9000000e+00 5.2000000e+00 6.5000000e+00 5.4000000e+00 6.0000000e+00 3.7000000e+00 6.3000000e+00 4.5000000e+00 4.2000000e+00 5.3000000e+00 5.5000000e+00 6.0000000e+00 4.3000000e+00 6.1000000e+00 5.3000000e+00 4.9000000e+00 6.7000000e+00 4.8000000e+00 6.0000000e+00 5.3000000e+00 6.9000000e+00 5.9000000e+00 5.8000000e+00 6.1000000e+00 6.9000000e+00 7.1000000e+00 5.8000000e+00 4.3000000e+00 4.7000000e+00 4.5000000e+00 4.9000000e+00 6.7000000e+00 5.1000000e+00 5.6000000e+00 6.5000000e+00 6.4000000e+00 4.7000000e+00 5.0000000e+00 5.2000000e+00 5.8000000e+00 5.1000000e+00 3.7000000e+00 5.1000000e+00 4.8000000e+00 5.0000000e+00 5.6000000e+00 3.4000000e+00 5.0000000e+00 8.2000000e+00 6.8000000e+00 8.8000000e+00 7.5000000e+00 8.2000000e+00 1.0000000e+01 5.5000000e+00 9.2000000e+00 8.5000000e+00 9.5000000e+00 7.1000000e+00 7.6000000e+00 8.1000000e+00 6.9000000e+00 7.2000000e+00 7.5000000e+00 7.5000000e+00 1.0500000e+01 1.1000000e+01 7.0000000e+00 8.4000000e+00 6.4000000e+00 1.0300000e+01 7.0000000e+00 7.9000000e+00 8.5000000e+00 6.7000000e+00 6.5000000e+00 8.0000000e+00 8.3000000e+00 9.3000000e+00 1.0200000e+01 8.1000000e+00 6.8000000e+00 7.2000000e+00 9.8000000e+00 7.8000000e+00 7.3000000e+00 6.3000000e+00 8.0000000e+00 8.3000000e+00 7.9000000e+00 6.8000000e+00 8.5000000e+00 8.3000000e+00 7.9000000e+00 7.4000000e+00 7.4000000e+00 7.4000000e+00 6.5000000e+00 9.0000000e-01 5.0000000e-01 3.2000000e+00 1.1000000e+00 2.0000000e+00 1.0000000e+00 4.7000000e+00 9.0000000e-01 3.1000000e+00 4.8000000e+00 1.9000000e+00 3.1000000e+00 1.2000000e+00 2.9000000e+00 7.0000000e-01 1.9000000e+00 2.7000000e+00 2.1000000e+00 3.2000000e+00 1.6000000e+00 2.1000000e+00 1.7000000e+00 1.5000000e+00 1.4000000e+00 9.0000000e-01 7.0000000e-01 1.1000000e+00 1.6000000e+00 3.5000000e+00 3.5000000e+00 3.7000000e+00 2.7000000e+00 2.1000000e+00 2.1000000e+00 1.6000000e+00 5.0000000e-01 2.0000000e+00 2.3000000e+00 3.0000000e+00 2.6000000e+00 1.2000000e+00 2.7000000e+00 4.7000000e+00 2.5000000e+00 2.2000000e+00 2.2000000e+00 1.6000000e+00 4.6000000e+00 2.4000000e+00 3.2000000e+00 2.6000000e+00 2.2000000e+00 2.3000000e+00 2.6000000e+00 3.4000000e+00 3.3000000e+00 2.6000000e+00 2.5000000e+00 3.1000000e+00 1.5000000e+00 2.2000000e+00 1.9000000e+00 2.9000000e+00 3.0000000e+00 2.1000000e+00 1.9000000e+00 4.1000000e+00 4.4000000e+00 2.4000000e+00 2.0000000e+00 2.6000000e+00 3.7000000e+00 1.8000000e+00 2.1000000e+00 1.9000000e+00 1.7000000e+00 1.7000000e+00 2.6000000e+00 1.7000000e+00 2.7000000e+00 3.8000000e+00 2.7000000e+00 1.6000000e+00 2.4000000e+00 3.2000000e+00 2.8000000e+00 1.9000000e+00 1.7000000e+00 1.6000000e+00 2.3000000e+00 1.5000000e+00 2.6000000e+00 2.3000000e+00 2.5000000e+00 1.9000000e+00 2.2000000e+00 1.8000000e+00 2.6000000e+00 2.1000000e+00 1.0000000e+00 2.5000000e+00 6.0000000e-01 1.3000000e+00 5.0000000e-01 4.0000000e+00 8.0000000e-01 2.4000000e+00 
4.1000000e+00 1.0000000e+00 2.4000000e+00 9.0000000e-01 2.2000000e+00 6.0000000e-01 1.0000000e+00 2.0000000e+00 1.2000000e+00 2.5000000e+00 1.1000000e+00 1.4000000e+00 1.2000000e+00 1.2000000e+00 7.0000000e-01 6.0000000e-01 1.2000000e+00 1.2000000e+00 7.0000000e-01 2.8000000e+00 2.8000000e+00 3.0000000e+00 2.0000000e+00 1.6000000e+00 1.2000000e+00 7.0000000e-01 6.0000000e-01 1.3000000e+00 1.6000000e+00 2.3000000e+00 1.9000000e+00 7.0000000e-01 2.0000000e+00 4.0000000e+00 1.8000000e+00 1.5000000e+00 1.5000000e+00 9.0000000e-01 3.9000000e+00 1.7000000e+00 2.7000000e+00 2.1000000e+00 2.9000000e+00 1.8000000e+00 2.3000000e+00 4.1000000e+00 2.4000000e+00 3.3000000e+00 2.6000000e+00 3.8000000e+00 1.2000000e+00 1.7000000e+00 2.2000000e+00 2.4000000e+00 2.5000000e+00 1.6000000e+00 1.6000000e+00 4.8000000e+00 5.1000000e+00 1.9000000e+00 2.5000000e+00 2.1000000e+00 4.4000000e+00 1.3000000e+00 2.2000000e+00 2.6000000e+00 1.2000000e+00 1.2000000e+00 2.1000000e+00 2.4000000e+00 3.4000000e+00 4.5000000e+00 2.2000000e+00 1.1000000e+00 2.1000000e+00 3.9000000e+00 2.3000000e+00 1.4000000e+00 1.2000000e+00 2.1000000e+00 2.4000000e+00 2.0000000e+00 2.1000000e+00 2.6000000e+00 2.6000000e+00 2.0000000e+00 1.7000000e+00 1.5000000e+00 2.1000000e+00 1.6000000e+00 3.3000000e+00 1.0000000e+00 2.1000000e+00 1.1000000e+00 4.8000000e+00 1.0000000e+00 3.2000000e+00 4.9000000e+00 1.8000000e+00 3.2000000e+00 1.3000000e+00 3.0000000e+00 8.0000000e-01 1.8000000e+00 2.8000000e+00 2.0000000e+00 3.3000000e+00 1.5000000e+00 2.2000000e+00 1.2000000e+00 1.6000000e+00 1.5000000e+00 1.0000000e+00 6.0000000e-01 6.0000000e-01 1.5000000e+00 3.6000000e+00 3.6000000e+00 3.8000000e+00 2.8000000e+00 1.6000000e+00 2.0000000e+00 1.7000000e+00 4.0000000e-01 2.1000000e+00 2.4000000e+00 3.1000000e+00 2.7000000e+00 1.3000000e+00 2.8000000e+00 4.8000000e+00 2.6000000e+00 2.3000000e+00 2.3000000e+00 1.7000000e+00 4.7000000e+00 2.5000000e+00 2.9000000e+00 2.1000000e+00 1.9000000e+00 1.8000000e+00 2.1000000e+00 3.1000000e+00 3.2000000e+00 2.3000000e+00 2.0000000e+00 3.0000000e+00 1.2000000e+00 1.7000000e+00 1.4000000e+00 2.4000000e+00 2.5000000e+00 1.8000000e+00 1.4000000e+00 4.0000000e+00 4.1000000e+00 1.9000000e+00 1.7000000e+00 2.1000000e+00 3.4000000e+00 1.3000000e+00 1.8000000e+00 1.8000000e+00 1.4000000e+00 1.2000000e+00 2.1000000e+00 1.4000000e+00 2.4000000e+00 3.7000000e+00 2.2000000e+00 1.1000000e+00 2.1000000e+00 2.9000000e+00 2.5000000e+00 1.4000000e+00 1.4000000e+00 1.1000000e+00 1.8000000e+00 1.0000000e+00 2.1000000e+00 2.0000000e+00 2.2000000e+00 1.4000000e+00 1.7000000e+00 1.3000000e+00 2.3000000e+00 1.6000000e+00 2.3000000e+00 1.2000000e+00 2.8000000e+00 1.7000000e+00 2.3000000e+00 9.0000000e-01 1.6000000e+00 1.5000000e+00 9.0000000e-01 2.0000000e+00 1.1000000e+00 2.5000000e+00 1.5000000e+00 1.1000000e+00 1.5000000e+00 6.0000000e-01 2.6000000e+00 1.1000000e+00 2.1000000e+00 1.9000000e+00 1.8000000e+00 2.3000000e+00 2.7000000e+00 3.3000000e+00 1.8000000e+00 1.3000000e+00 5.0000000e-01 7.0000000e-01 9.0000000e-01 2.3000000e+00 1.5000000e+00 2.4000000e+00 2.9000000e+00 1.2000000e+00 9.0000000e-01 2.0000000e-01 8.0000000e-01 2.0000000e+00 7.0000000e-01 1.5000000e+00 7.0000000e-01 1.2000000e+00 1.0000000e+00 1.6000000e+00 1.8000000e+00 8.0000000e-01 5.0000000e+00 2.4000000e+00 5.0000000e+00 3.5000000e+00 4.4000000e+00 6.2000000e+00 1.7000000e+00 5.2000000e+00 3.7000000e+00 6.3000000e+00 3.7000000e+00 3.2000000e+00 4.3000000e+00 2.1000000e+00 3.0000000e+00 4.1000000e+00 3.7000000e+00 7.3000000e+00 6.4000000e+00 1.8000000e+00 
5.0000000e+00 2.2000000e+00 6.1000000e+00 2.6000000e+00 4.7000000e+00 5.1000000e+00 2.5000000e+00 2.7000000e+00 3.8000000e+00 4.5000000e+00 5.1000000e+00 7.0000000e+00 3.9000000e+00 2.6000000e+00 2.6000000e+00 6.0000000e+00 4.6000000e+00 3.7000000e+00 2.5000000e+00 4.4000000e+00 4.7000000e+00 4.3000000e+00 2.4000000e+00 5.1000000e+00 5.1000000e+00 4.1000000e+00 2.6000000e+00 3.6000000e+00 4.2000000e+00 2.7000000e+00 1.1000000e+00 9.0000000e-01 3.8000000e+00 4.0000000e-01 2.2000000e+00 3.9000000e+00 1.2000000e+00 2.2000000e+00 7.0000000e-01 2.2000000e+00 8.0000000e-01 1.2000000e+00 1.8000000e+00 1.0000000e+00 2.3000000e+00 1.5000000e+00 1.2000000e+00 8.0000000e-01 8.0000000e-01 7.0000000e-01 6.0000000e-01 6.0000000e-01 1.0000000e+00 7.0000000e-01 2.6000000e+00 2.6000000e+00 2.8000000e+00 1.8000000e+00 1.2000000e+00 1.4000000e+00 1.3000000e+00 6.0000000e-01 1.1000000e+00 1.8000000e+00 2.1000000e+00 1.7000000e+00 7.0000000e-01 1.8000000e+00 3.8000000e+00 1.6000000e+00 1.7000000e+00 1.5000000e+00 9.0000000e-01 3.7000000e+00 1.5000000e+00 3.1000000e+00 1.7000000e+00 2.7000000e+00 1.6000000e+00 2.1000000e+00 3.9000000e+00 2.2000000e+00 2.9000000e+00 2.0000000e+00 4.0000000e+00 1.4000000e+00 1.3000000e+00 2.0000000e+00 2.0000000e+00 2.1000000e+00 2.0000000e+00 1.4000000e+00 5.0000000e+00 4.5000000e+00 1.5000000e+00 2.7000000e+00 1.7000000e+00 3.8000000e+00 9.0000000e-01 2.4000000e+00 2.8000000e+00 8.0000000e-01 1.2000000e+00 1.7000000e+00 2.2000000e+00 2.8000000e+00 4.7000000e+00 1.8000000e+00 7.0000000e-01 1.7000000e+00 3.7000000e+00 2.7000000e+00 1.6000000e+00 1.2000000e+00 2.1000000e+00 2.4000000e+00 2.0000000e+00 1.7000000e+00 2.8000000e+00 2.8000000e+00 1.8000000e+00 1.3000000e+00 1.3000000e+00 2.5000000e+00 1.6000000e+00 1.6000000e+00 2.7000000e+00 1.1000000e+00 1.3000000e+00 2.8000000e+00 9.0000000e-01 1.7000000e+00 8.0000000e-01 1.1000000e+00 1.5000000e+00 5.0000000e-01 9.0000000e-01 1.3000000e+00 1.2000000e+00 1.4000000e+00 9.0000000e-01 1.5000000e+00 7.0000000e-01 1.0000000e+00 1.3000000e+00 1.5000000e+00 2.1000000e+00 6.0000000e-01 1.5000000e+00 1.5000000e+00 1.7000000e+00 9.0000000e-01 1.3000000e+00 7.0000000e-01 1.2000000e+00 1.7000000e+00 1.2000000e+00 7.0000000e-01 1.0000000e+00 6.0000000e-01 8.0000000e-01 9.0000000e-01 2.7000000e+00 5.0000000e-01 6.0000000e-01 4.0000000e-01 8.0000000e-01 2.6000000e+00 4.0000000e-01 3.8000000e+00 1.4000000e+00 3.8000000e+00 2.3000000e+00 3.2000000e+00 5.0000000e+00 1.5000000e+00 4.0000000e+00 3.1000000e+00 5.1000000e+00 2.5000000e+00 2.2000000e+00 3.1000000e+00 1.5000000e+00 1.8000000e+00 2.9000000e+00 2.5000000e+00 6.1000000e+00 5.6000000e+00 1.6000000e+00 3.8000000e+00 1.2000000e+00 4.9000000e+00 1.6000000e+00 3.5000000e+00 3.9000000e+00 1.3000000e+00 1.5000000e+00 2.6000000e+00 3.3000000e+00 3.9000000e+00 5.8000000e+00 2.7000000e+00 1.4000000e+00 1.8000000e+00 4.8000000e+00 3.4000000e+00 2.5000000e+00 1.3000000e+00 3.2000000e+00 3.5000000e+00 3.1000000e+00 1.4000000e+00 3.9000000e+00 3.9000000e+00 2.9000000e+00 2.0000000e+00 2.4000000e+00 3.0000000e+00 1.5000000e+00 4.3000000e+00 1.1000000e+00 2.7000000e+00 4.4000000e+00 1.3000000e+00 2.7000000e+00 8.0000000e-01 2.5000000e+00 1.1000000e+00 1.3000000e+00 2.3000000e+00 1.5000000e+00 2.8000000e+00 8.0000000e-01 1.7000000e+00 1.1000000e+00 1.1000000e+00 1.2000000e+00 1.1000000e+00 1.3000000e+00 1.1000000e+00 1.0000000e+00 3.1000000e+00 3.1000000e+00 3.3000000e+00 2.3000000e+00 1.3000000e+00 1.5000000e+00 6.0000000e-01 7.0000000e-01 1.6000000e+00 1.9000000e+00 2.6000000e+00 2.2000000e+00 
8.0000000e-01 2.3000000e+00 4.3000000e+00 2.1000000e+00 1.8000000e+00 1.8000000e+00 1.2000000e+00 4.2000000e+00 2.0000000e+00 2.2000000e+00 1.8000000e+00 2.8000000e+00 1.5000000e+00 2.2000000e+00 4.0000000e+00 2.5000000e+00 3.2000000e+00 2.5000000e+00 3.5000000e+00 1.1000000e+00 1.6000000e+00 2.1000000e+00 2.1000000e+00 2.2000000e+00 1.5000000e+00 1.5000000e+00 4.5000000e+00 5.0000000e+00 1.8000000e+00 2.4000000e+00 1.8000000e+00 4.3000000e+00 1.0000000e+00 1.9000000e+00 2.5000000e+00 9.0000000e-01 9.0000000e-01 2.0000000e+00 2.3000000e+00 3.3000000e+00 4.2000000e+00 2.1000000e+00 1.0000000e+00 2.0000000e+00 3.8000000e+00 1.8000000e+00 1.3000000e+00 9.0000000e-01 2.0000000e+00 2.3000000e+00 1.9000000e+00 1.8000000e+00 2.5000000e+00 2.3000000e+00 1.9000000e+00 1.4000000e+00 1.4000000e+00 1.6000000e+00 1.3000000e+00 3.8000000e+00 1.6000000e+00 7.0000000e-01 3.0000000e+00 2.0000000e+00 3.5000000e+00 1.8000000e+00 4.0000000e+00 3.0000000e+00 2.0000000e+00 3.2000000e+00 1.5000000e+00 4.1000000e+00 2.6000000e+00 3.6000000e+00 3.2000000e+00 3.3000000e+00 3.8000000e+00 4.2000000e+00 4.8000000e+00 3.3000000e+00 1.2000000e+00 1.2000000e+00 1.0000000e+00 2.0000000e+00 3.8000000e+00 2.8000000e+00 3.9000000e+00 4.4000000e+00 2.9000000e+00 2.4000000e+00 1.7000000e+00 2.1000000e+00 3.5000000e+00 2.0000000e+00 2.0000000e-01 2.2000000e+00 2.5000000e+00 2.5000000e+00 3.1000000e+00 7.0000000e-01 2.3000000e+00 6.5000000e+00 3.9000000e+00 6.5000000e+00 5.0000000e+00 5.9000000e+00 7.7000000e+00 2.0000000e+00 6.7000000e+00 5.2000000e+00 7.8000000e+00 5.2000000e+00 4.7000000e+00 5.8000000e+00 3.6000000e+00 4.5000000e+00 5.6000000e+00 5.2000000e+00 8.8000000e+00 7.9000000e+00 3.5000000e+00 6.5000000e+00 3.7000000e+00 7.6000000e+00 4.1000000e+00 6.2000000e+00 6.6000000e+00 4.0000000e+00 4.2000000e+00 5.3000000e+00 6.0000000e+00 6.6000000e+00 8.5000000e+00 5.4000000e+00 4.1000000e+00 4.1000000e+00 7.5000000e+00 6.1000000e+00 5.2000000e+00 4.0000000e+00 5.9000000e+00 6.2000000e+00 5.8000000e+00 3.9000000e+00 6.6000000e+00 6.6000000e+00 5.6000000e+00 4.1000000e+00 5.1000000e+00 5.7000000e+00 4.2000000e+00 2.4000000e+00 3.9000000e+00 1.4000000e+00 2.2000000e+00 7.0000000e-01 2.0000000e+00 6.0000000e-01 1.4000000e+00 1.8000000e+00 1.4000000e+00 2.3000000e+00 1.7000000e+00 1.2000000e+00 1.2000000e+00 8.0000000e-01 5.0000000e-01 4.0000000e-01 6.0000000e-01 1.0000000e+00 9.0000000e-01 2.6000000e+00 2.6000000e+00 2.8000000e+00 1.8000000e+00 1.6000000e+00 1.6000000e+00 1.5000000e+00 6.0000000e-01 1.1000000e+00 1.6000000e+00 2.1000000e+00 1.7000000e+00 7.0000000e-01 1.8000000e+00 3.8000000e+00 1.6000000e+00 1.5000000e+00 1.3000000e+00 7.0000000e-01 3.7000000e+00 1.5000000e+00 3.3000000e+00 2.1000000e+00 2.7000000e+00 1.8000000e+00 2.3000000e+00 3.9000000e+00 2.6000000e+00 2.9000000e+00 2.2000000e+00 4.0000000e+00 1.6000000e+00 1.7000000e+00 2.0000000e+00 2.4000000e+00 2.5000000e+00 2.2000000e+00 1.6000000e+00 5.0000000e+00 4.7000000e+00 1.9000000e+00 2.7000000e+00 2.1000000e+00 4.0000000e+00 1.3000000e+00 2.4000000e+00 2.8000000e+00 1.2000000e+00 1.4000000e+00 2.1000000e+00 2.2000000e+00 3.0000000e+00 4.7000000e+00 2.2000000e+00 1.1000000e+00 1.9000000e+00 3.7000000e+00 2.9000000e+00 1.8000000e+00 1.4000000e+00 2.1000000e+00 2.4000000e+00 2.0000000e+00 2.1000000e+00 2.8000000e+00 2.8000000e+00 1.8000000e+00 1.7000000e+00 1.5000000e+00 2.7000000e+00 1.8000000e+00 1.7000000e+00 1.4000000e+00 1.8000000e+00 1.9000000e+00 1.0000000e+00 2.4000000e+00 1.4000000e+00 1.2000000e+00 2.2000000e+00 9.0000000e-01 2.5000000e+00 
1.2000000e+00 2.4000000e+00 2.0000000e+00 1.9000000e+00 2.2000000e+00 2.6000000e+00 3.2000000e+00 1.7000000e+00 1.4000000e+00 1.0000000e+00 1.2000000e+00 8.0000000e-01 2.2000000e+00 1.2000000e+00 2.3000000e+00 2.8000000e+00 2.1000000e+00 1.0000000e+00 7.0000000e-01 1.1000000e+00 1.9000000e+00 1.0000000e+00 1.6000000e+00 8.0000000e-01 1.3000000e+00 1.1000000e+00 1.7000000e+00 1.5000000e+00 9.0000000e-01 4.9000000e+00 2.3000000e+00 4.9000000e+00 3.4000000e+00 4.3000000e+00 6.1000000e+00 1.4000000e+00 5.1000000e+00 4.0000000e+00 6.2000000e+00 3.6000000e+00 3.1000000e+00 4.2000000e+00 2.4000000e+00 2.9000000e+00 4.0000000e+00 3.6000000e+00 7.2000000e+00 6.5000000e+00 2.5000000e+00 4.9000000e+00 2.1000000e+00 6.0000000e+00 2.5000000e+00 4.6000000e+00 5.0000000e+00 2.4000000e+00 2.6000000e+00 3.7000000e+00 4.4000000e+00 5.0000000e+00 6.9000000e+00 3.8000000e+00 2.5000000e+00 2.7000000e+00 5.9000000e+00 4.5000000e+00 3.6000000e+00 2.4000000e+00 4.3000000e+00 4.6000000e+00 4.2000000e+00 2.3000000e+00 5.0000000e+00 5.0000000e+00 4.0000000e+00 2.9000000e+00 3.5000000e+00 4.1000000e+00 2.6000000e+00 3.1000000e+00 1.7000000e+00 3.6000000e+00 1.9000000e+00 4.1000000e+00 3.1000000e+00 2.1000000e+00 2.9000000e+00 1.6000000e+00 4.2000000e+00 2.7000000e+00 3.7000000e+00 3.3000000e+00 3.4000000e+00 3.9000000e+00 4.3000000e+00 4.9000000e+00 3.4000000e+00 1.3000000e+00 1.3000000e+00 1.1000000e+00 2.1000000e+00 3.9000000e+00 2.9000000e+00 4.0000000e+00 4.5000000e+00 2.8000000e+00 2.5000000e+00 1.8000000e+00 2.2000000e+00 3.6000000e+00 2.1000000e+00 5.0000000e-01 2.3000000e+00 2.6000000e+00 2.6000000e+00 3.2000000e+00 1.2000000e+00 2.4000000e+00 6.6000000e+00 4.0000000e+00 6.6000000e+00 5.1000000e+00 6.0000000e+00 7.8000000e+00 2.3000000e+00 6.8000000e+00 5.3000000e+00 7.9000000e+00 5.3000000e+00 4.8000000e+00 5.9000000e+00 3.7000000e+00 4.6000000e+00 5.7000000e+00 5.3000000e+00 8.9000000e+00 8.0000000e+00 3.2000000e+00 6.6000000e+00 3.8000000e+00 7.7000000e+00 4.2000000e+00 6.3000000e+00 6.7000000e+00 4.1000000e+00 4.3000000e+00 5.4000000e+00 6.1000000e+00 6.7000000e+00 8.6000000e+00 5.5000000e+00 4.2000000e+00 4.2000000e+00 7.6000000e+00 6.2000000e+00 5.3000000e+00 4.1000000e+00 6.0000000e+00 6.3000000e+00 5.9000000e+00 4.0000000e+00 6.7000000e+00 6.7000000e+00 5.7000000e+00 4.2000000e+00 5.2000000e+00 5.8000000e+00 4.3000000e+00 1.6000000e+00 9.0000000e-01 1.2000000e+00 1.2000000e+00 6.0000000e-01 1.0000000e+00 1.4000000e+00 1.5000000e+00 1.1000000e+00 8.0000000e-01 1.6000000e+00 1.2000000e+00 9.0000000e-01 1.0000000e+00 1.8000000e+00 1.8000000e+00 5.0000000e-01 1.8000000e+00 1.8000000e+00 2.0000000e+00 1.0000000e+00 1.4000000e+00 8.0000000e-01 9.0000000e-01 1.4000000e+00 1.5000000e+00 6.0000000e-01 1.3000000e+00 1.3000000e+00 7.0000000e-01 1.0000000e+00 3.0000000e+00 8.0000000e-01 5.0000000e-01 5.0000000e-01 7.0000000e-01 2.9000000e+00 7.0000000e-01 3.5000000e+00 1.7000000e+00 3.5000000e+00 2.2000000e+00 2.9000000e+00 4.7000000e+00 2.0000000e+00 3.9000000e+00 3.2000000e+00 4.8000000e+00 2.2000000e+00 2.3000000e+00 2.8000000e+00 2.0000000e+00 2.1000000e+00 2.6000000e+00 2.2000000e+00 5.8000000e+00 5.7000000e+00 1.7000000e+00 3.5000000e+00 1.7000000e+00 5.0000000e+00 1.7000000e+00 3.2000000e+00 3.6000000e+00 1.4000000e+00 1.2000000e+00 2.7000000e+00 3.0000000e+00 4.0000000e+00 5.5000000e+00 2.8000000e+00 1.5000000e+00 2.1000000e+00 4.5000000e+00 3.1000000e+00 2.2000000e+00 1.0000000e+00 2.9000000e+00 3.2000000e+00 2.8000000e+00 1.7000000e+00 3.6000000e+00 3.6000000e+00 2.6000000e+00 2.1000000e+00 
2.1000000e+00 2.7000000e+00 1.2000000e+00 1.9000000e+00 1.8000000e+00 2.4000000e+00 2.2000000e+00 8.0000000e-01 1.2000000e+00 9.0000000e-01 2.7000000e+00 1.0000000e+00 2.0000000e+00 1.6000000e+00 1.7000000e+00 2.2000000e+00 2.6000000e+00 3.2000000e+00 1.7000000e+00 1.2000000e+00 1.0000000e+00 1.0000000e+00 1.0000000e+00 2.2000000e+00 2.4000000e+00 2.3000000e+00 2.8000000e+00 1.1000000e+00 1.6000000e+00 1.1000000e+00 1.5000000e+00 1.9000000e+00 8.0000000e-01 1.8000000e+00 1.4000000e+00 1.5000000e+00 1.5000000e+00 1.5000000e+00 2.3000000e+00 1.3000000e+00 4.9000000e+00 2.7000000e+00 4.9000000e+00 3.4000000e+00 4.3000000e+00 6.1000000e+00 2.6000000e+00 5.1000000e+00 3.6000000e+00 6.2000000e+00 3.6000000e+00 3.1000000e+00 4.2000000e+00 2.6000000e+00 3.3000000e+00 4.0000000e+00 3.6000000e+00 7.2000000e+00 6.3000000e+00 1.5000000e+00 4.9000000e+00 2.9000000e+00 6.0000000e+00 2.5000000e+00 4.6000000e+00 5.0000000e+00 2.4000000e+00 2.6000000e+00 3.7000000e+00 4.4000000e+00 5.0000000e+00 6.9000000e+00 3.8000000e+00 2.5000000e+00 2.5000000e+00 5.9000000e+00 4.5000000e+00 3.6000000e+00 2.4000000e+00 4.3000000e+00 4.6000000e+00 4.2000000e+00 2.7000000e+00 5.0000000e+00 5.0000000e+00 4.0000000e+00 2.5000000e+00 3.5000000e+00 4.1000000e+00 2.8000000e+00 1.7000000e+00 1.1000000e+00 9.0000000e-01 1.5000000e+00 1.1000000e+00 2.0000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 3.0000000e-01 8.0000000e-01 9.0000000e-01 9.0000000e-01 1.3000000e+00 4.0000000e-01 2.3000000e+00 2.3000000e+00 2.5000000e+00 1.5000000e+00 9.0000000e-01 1.1000000e+00 1.0000000e+00 9.0000000e-01 1.2000000e+00 1.3000000e+00 1.8000000e+00 1.4000000e+00 2.0000000e-01 1.5000000e+00 3.5000000e+00 1.3000000e+00 1.2000000e+00 1.0000000e+00 6.0000000e-01 3.4000000e+00 1.2000000e+00 3.0000000e+00 1.4000000e+00 3.0000000e+00 1.5000000e+00 2.4000000e+00 4.2000000e+00 2.1000000e+00 3.2000000e+00 2.5000000e+00 4.3000000e+00 1.7000000e+00 1.6000000e+00 2.3000000e+00 1.7000000e+00 1.8000000e+00 2.1000000e+00 1.7000000e+00 5.3000000e+00 5.0000000e+00 1.2000000e+00 3.0000000e+00 1.4000000e+00 4.3000000e+00 1.0000000e+00 2.7000000e+00 3.1000000e+00 7.0000000e-01 7.0000000e-01 2.0000000e+00 2.5000000e+00 3.3000000e+00 5.0000000e+00 2.1000000e+00 8.0000000e-01 1.2000000e+00 4.0000000e+00 2.6000000e+00 1.7000000e+00 7.0000000e-01 2.4000000e+00 2.7000000e+00 2.3000000e+00 1.4000000e+00 3.1000000e+00 3.1000000e+00 2.1000000e+00 1.4000000e+00 1.6000000e+00 2.2000000e+00 1.1000000e+00 2.2000000e+00 1.2000000e+00 1.2000000e+00 2.4000000e+00 9.0000000e-01 2.3000000e+00 1.0000000e+00 2.6000000e+00 1.8000000e+00 1.5000000e+00 2.0000000e+00 2.6000000e+00 3.0000000e+00 1.5000000e+00 8.0000000e-01 1.0000000e+00 1.0000000e+00 8.0000000e-01 2.4000000e+00 1.4000000e+00 2.1000000e+00 2.6000000e+00 2.1000000e+00 6.0000000e-01 9.0000000e-01 1.3000000e+00 1.7000000e+00 1.0000000e+00 1.8000000e+00 8.0000000e-01 9.0000000e-01 7.0000000e-01 1.3000000e+00 1.7000000e+00 7.0000000e-01 4.7000000e+00 2.5000000e+00 4.7000000e+00 3.2000000e+00 4.1000000e+00 5.9000000e+00 2.4000000e+00 4.9000000e+00 4.2000000e+00 6.0000000e+00 3.4000000e+00 3.3000000e+00 4.0000000e+00 2.6000000e+00 2.9000000e+00 3.8000000e+00 3.4000000e+00 7.0000000e+00 6.7000000e+00 2.7000000e+00 4.7000000e+00 2.1000000e+00 6.0000000e+00 2.7000000e+00 4.4000000e+00 4.8000000e+00 2.4000000e+00 2.4000000e+00 3.7000000e+00 4.2000000e+00 5.0000000e+00 6.7000000e+00 3.8000000e+00 2.5000000e+00 2.9000000e+00 5.7000000e+00 4.3000000e+00 3.4000000e+00 2.2000000e+00 4.1000000e+00 4.4000000e+00 4.0000000e+00 
2.5000000e+00 4.8000000e+00 4.8000000e+00 3.8000000e+00 3.1000000e+00 3.3000000e+00 3.9000000e+00 2.4000000e+00 1.4000000e+00 2.0000000e+00 1.6000000e+00 2.5000000e+00 1.7000000e+00 1.4000000e+00 1.6000000e+00 1.4000000e+00 7.0000000e-01 2.0000000e-01 8.0000000e-01 1.0000000e+00 1.1000000e+00 2.8000000e+00 2.8000000e+00 3.0000000e+00 2.0000000e+00 2.0000000e+00 1.6000000e+00 1.3000000e+00 4.0000000e-01 1.3000000e+00 1.6000000e+00 2.3000000e+00 1.9000000e+00 9.0000000e-01 2.0000000e+00 4.0000000e+00 1.8000000e+00 1.5000000e+00 1.5000000e+00 9.0000000e-01 3.9000000e+00 1.7000000e+00 3.3000000e+00 2.5000000e+00 2.7000000e+00 2.2000000e+00 2.5000000e+00 3.9000000e+00 2.8000000e+00 3.1000000e+00 2.4000000e+00 3.8000000e+00 1.6000000e+00 2.1000000e+00 2.0000000e+00 2.8000000e+00 2.9000000e+00 2.2000000e+00 1.8000000e+00 4.8000000e+00 4.9000000e+00 2.3000000e+00 2.5000000e+00 2.5000000e+00 4.2000000e+00 1.7000000e+00 2.2000000e+00 2.6000000e+00 1.6000000e+00 1.6000000e+00 2.5000000e+00 2.2000000e+00 3.2000000e+00 4.5000000e+00 2.6000000e+00 1.5000000e+00 2.3000000e+00 3.7000000e+00 2.9000000e+00 1.8000000e+00 1.6000000e+00 1.9000000e+00 2.2000000e+00 1.8000000e+00 2.5000000e+00 2.6000000e+00 2.6000000e+00 1.8000000e+00 2.1000000e+00 1.7000000e+00 2.7000000e+00 2.0000000e+00 1.4000000e+00 1.4000000e+00 1.5000000e+00 1.1000000e+00 1.4000000e+00 1.6000000e+00 1.2000000e+00 1.3000000e+00 1.2000000e+00 1.8000000e+00 1.8000000e+00 5.0000000e-01 2.0000000e+00 1.8000000e+00 2.0000000e+00 1.4000000e+00 1.4000000e+00 2.0000000e-01 9.0000000e-01 1.4000000e+00 1.7000000e+00 6.0000000e-01 1.3000000e+00 9.0000000e-01 7.0000000e-01 1.4000000e+00 3.0000000e+00 8.0000000e-01 7.0000000e-01 7.0000000e-01 1.1000000e+00 2.9000000e+00 9.0000000e-01 3.5000000e+00 1.5000000e+00 3.5000000e+00 2.2000000e+00 2.9000000e+00 4.7000000e+00 1.4000000e+00 3.9000000e+00 3.2000000e+00 4.8000000e+00 2.2000000e+00 2.3000000e+00 2.8000000e+00 1.6000000e+00 1.9000000e+00 2.6000000e+00 2.2000000e+00 5.8000000e+00 5.7000000e+00 1.7000000e+00 3.5000000e+00 1.1000000e+00 5.0000000e+00 1.7000000e+00 3.2000000e+00 3.6000000e+00 1.4000000e+00 1.2000000e+00 2.7000000e+00 3.0000000e+00 4.0000000e+00 5.5000000e+00 2.8000000e+00 1.5000000e+00 2.1000000e+00 4.5000000e+00 3.1000000e+00 2.2000000e+00 1.0000000e+00 2.9000000e+00 3.2000000e+00 2.8000000e+00 1.5000000e+00 3.6000000e+00 3.6000000e+00 2.6000000e+00 2.1000000e+00 2.1000000e+00 2.7000000e+00 1.2000000e+00 1.8000000e+00 7.0000000e-01 2.1000000e+00 8.0000000e-01 2.0000000e+00 1.2000000e+00 1.3000000e+00 1.8000000e+00 2.2000000e+00 2.8000000e+00 1.3000000e+00 8.0000000e-01 1.0000000e+00 1.0000000e+00 4.0000000e-01 1.8000000e+00 1.6000000e+00 1.9000000e+00 2.4000000e+00 1.5000000e+00 8.0000000e-01 9.0000000e-01 9.0000000e-01 1.5000000e+00 4.0000000e-01 2.0000000e+00 6.0000000e-01 7.0000000e-01 7.0000000e-01 1.1000000e+00 2.1000000e+00 5.0000000e-01 4.5000000e+00 1.9000000e+00 4.5000000e+00 3.0000000e+00 3.9000000e+00 5.7000000e+00 2.2000000e+00 4.7000000e+00 3.6000000e+00 5.8000000e+00 3.2000000e+00 2.7000000e+00 3.8000000e+00 2.2000000e+00 2.5000000e+00 3.6000000e+00 3.2000000e+00 6.8000000e+00 6.1000000e+00 2.1000000e+00 4.5000000e+00 2.1000000e+00 5.6000000e+00 2.1000000e+00 4.2000000e+00 4.6000000e+00 2.0000000e+00 2.2000000e+00 3.3000000e+00 4.0000000e+00 4.6000000e+00 6.5000000e+00 3.4000000e+00 2.1000000e+00 2.3000000e+00 5.5000000e+00 4.1000000e+00 3.2000000e+00 2.0000000e+00 3.9000000e+00 4.2000000e+00 3.8000000e+00 1.9000000e+00 4.6000000e+00 4.6000000e+00 3.6000000e+00 
2.5000000e+00 3.1000000e+00 3.7000000e+00 2.2000000e+00 1.9000000e+00 1.9000000e+00 1.4000000e+00 8.0000000e-01 1.2000000e+00 1.3000000e+00 1.4000000e+00 1.6000000e+00 2.0000000e+00 9.0000000e-01 2.4000000e+00 2.0000000e+00 2.2000000e+00 1.8000000e+00 1.4000000e+00 1.6000000e+00 1.5000000e+00 1.6000000e+00 5.0000000e-01 2.0000000e+00 1.7000000e+00 1.5000000e+00 1.1000000e+00 1.6000000e+00 3.0000000e+00 1.6000000e+00 1.9000000e+00 1.7000000e+00 1.1000000e+00 3.3000000e+00 1.7000000e+00 3.7000000e+00 1.9000000e+00 3.7000000e+00 2.2000000e+00 3.1000000e+00 4.9000000e+00 1.8000000e+00 3.9000000e+00 2.4000000e+00 5.0000000e+00 2.4000000e+00 1.9000000e+00 3.0000000e+00 1.8000000e+00 2.5000000e+00 2.8000000e+00 2.4000000e+00 6.0000000e+00 5.1000000e+00 7.0000000e-01 3.7000000e+00 2.1000000e+00 4.8000000e+00 1.3000000e+00 3.4000000e+00 3.8000000e+00 1.2000000e+00 1.6000000e+00 2.5000000e+00 3.2000000e+00 3.8000000e+00 5.7000000e+00 2.6000000e+00 1.3000000e+00 1.7000000e+00 4.7000000e+00 3.3000000e+00 2.4000000e+00 1.6000000e+00 3.1000000e+00 3.4000000e+00 3.0000000e+00 1.9000000e+00 3.8000000e+00 3.8000000e+00 2.8000000e+00 1.3000000e+00 2.3000000e+00 2.9000000e+00 2.0000000e+00 2.6000000e+00 1.1000000e+00 2.1000000e+00 1.7000000e+00 1.8000000e+00 2.3000000e+00 2.7000000e+00 3.3000000e+00 1.8000000e+00 7.0000000e-01 3.0000000e-01 5.0000000e-01 5.0000000e-01 2.3000000e+00 1.7000000e+00 2.4000000e+00 2.9000000e+00 1.6000000e+00 9.0000000e-01 4.0000000e-01 8.0000000e-01 2.0000000e+00 5.0000000e-01 1.5000000e+00 7.0000000e-01 1.0000000e+00 1.0000000e+00 1.6000000e+00 1.4000000e+00 8.0000000e-01 5.0000000e+00 2.4000000e+00 5.0000000e+00 3.5000000e+00 4.4000000e+00 6.2000000e+00 1.9000000e+00 5.2000000e+00 3.7000000e+00 6.3000000e+00 3.7000000e+00 3.2000000e+00 4.3000000e+00 2.1000000e+00 3.0000000e+00 4.1000000e+00 3.7000000e+00 7.3000000e+00 6.4000000e+00 2.2000000e+00 5.0000000e+00 2.2000000e+00 6.1000000e+00 2.6000000e+00 4.7000000e+00 5.1000000e+00 2.5000000e+00 2.7000000e+00 3.8000000e+00 4.5000000e+00 5.1000000e+00 7.0000000e+00 3.9000000e+00 2.6000000e+00 2.6000000e+00 6.0000000e+00 4.6000000e+00 3.7000000e+00 2.5000000e+00 4.4000000e+00 4.7000000e+00 4.3000000e+00 2.4000000e+00 5.1000000e+00 5.1000000e+00 4.1000000e+00 2.6000000e+00 3.6000000e+00 4.2000000e+00 2.7000000e+00 1.9000000e+00 1.5000000e+00 1.3000000e+00 1.8000000e+00 1.7000000e+00 1.7000000e+00 1.3000000e+00 1.0000000e+00 2.9000000e+00 2.9000000e+00 3.1000000e+00 2.1000000e+00 1.1000000e+00 1.3000000e+00 8.0000000e-01 1.3000000e+00 2.2000000e+00 1.7000000e+00 2.4000000e+00 2.0000000e+00 1.0000000e+00 2.1000000e+00 4.1000000e+00 1.9000000e+00 1.6000000e+00 1.6000000e+00 1.6000000e+00 4.0000000e+00 1.8000000e+00 2.4000000e+00 1.0000000e+00 2.8000000e+00 1.5000000e+00 2.2000000e+00 4.0000000e+00 2.1000000e+00 3.2000000e+00 2.5000000e+00 3.7000000e+00 1.1000000e+00 1.6000000e+00 2.1000000e+00 1.3000000e+00 1.4000000e+00 1.5000000e+00 1.5000000e+00 4.7000000e+00 5.0000000e+00 1.6000000e+00 2.4000000e+00 1.0000000e+00 4.3000000e+00 1.0000000e+00 2.1000000e+00 2.5000000e+00 7.0000000e-01 5.0000000e-01 2.0000000e+00 2.7000000e+00 3.3000000e+00 4.4000000e+00 2.1000000e+00 1.4000000e+00 2.0000000e+00 3.8000000e+00 2.0000000e+00 1.3000000e+00 3.0000000e-01 2.0000000e+00 2.3000000e+00 1.9000000e+00 1.0000000e+00 2.5000000e+00 2.5000000e+00 1.9000000e+00 1.4000000e+00 1.4000000e+00 1.6000000e+00 5.0000000e-01 1.6000000e+00 8.0000000e-01 7.0000000e-01 1.2000000e+00 1.6000000e+00 2.2000000e+00 9.0000000e-01 1.4000000e+00 1.4000000e+00 
1.6000000e+00 6.0000000e-01 1.6000000e+00 1.6000000e+00 1.5000000e+00 1.8000000e+00 1.1000000e+00 8.0000000e-01 9.0000000e-01 1.3000000e+00 9.0000000e-01 6.0000000e-01 2.6000000e+00 8.0000000e-01 9.0000000e-01 7.0000000e-01 5.0000000e-01 2.5000000e+00 5.0000000e-01 3.9000000e+00 2.1000000e+00 3.9000000e+00 2.4000000e+00 3.3000000e+00 5.1000000e+00 2.4000000e+00 4.1000000e+00 3.2000000e+00 5.2000000e+00 2.6000000e+00 2.3000000e+00 3.2000000e+00 2.4000000e+00 2.5000000e+00 3.0000000e+00 2.6000000e+00 6.2000000e+00 5.7000000e+00 1.9000000e+00 3.9000000e+00 2.1000000e+00 5.0000000e+00 1.7000000e+00 3.6000000e+00 4.0000000e+00 1.4000000e+00 1.6000000e+00 2.7000000e+00 3.4000000e+00 4.0000000e+00 5.9000000e+00 2.8000000e+00 1.5000000e+00 1.9000000e+00 4.9000000e+00 3.5000000e+00 2.6000000e+00 1.6000000e+00 3.3000000e+00 3.6000000e+00 3.2000000e+00 2.1000000e+00 4.0000000e+00 4.0000000e+00 3.0000000e+00 2.1000000e+00 2.5000000e+00 3.1000000e+00 2.0000000e+00 1.0000000e+00 1.3000000e+00 1.4000000e+00 1.0000000e+00 1.2000000e+00 1.1000000e+00 2.6000000e+00 2.4000000e+00 2.6000000e+00 2.0000000e+00 8.0000000e-01 1.8000000e+00 1.7000000e+00 1.2000000e+00 9.0000000e-01 2.2000000e+00 1.9000000e+00 1.7000000e+00 1.1000000e+00 1.8000000e+00 3.6000000e+00 1.8000000e+00 2.1000000e+00 1.9000000e+00 1.3000000e+00 3.5000000e+00 1.9000000e+00 2.9000000e+00 1.3000000e+00 2.9000000e+00 1.4000000e+00 2.3000000e+00 4.1000000e+00 2.0000000e+00 3.1000000e+00 1.6000000e+00 4.2000000e+00 1.6000000e+00 1.1000000e+00 2.2000000e+00 1.2000000e+00 1.9000000e+00 2.0000000e+00 1.6000000e+00 5.2000000e+00 4.3000000e+00 7.0000000e-01 2.9000000e+00 1.5000000e+00 4.0000000e+00 5.0000000e-01 2.6000000e+00 3.0000000e+00 8.0000000e-01 1.0000000e+00 1.7000000e+00 2.4000000e+00 3.0000000e+00 4.9000000e+00 1.8000000e+00 5.0000000e-01 1.1000000e+00 3.9000000e+00 2.5000000e+00 1.6000000e+00 1.2000000e+00 2.3000000e+00 2.6000000e+00 2.2000000e+00 1.3000000e+00 3.0000000e+00 3.0000000e+00 2.0000000e+00 5.0000000e-01 1.5000000e+00 2.3000000e+00 1.4000000e+00 9.0000000e-01 1.2000000e+00 1.0000000e+00 1.6000000e+00 7.0000000e-01 2.0000000e+00 2.0000000e+00 2.2000000e+00 1.2000000e+00 1.0000000e+00 1.4000000e+00 1.3000000e+00 1.2000000e+00 1.1000000e+00 1.4000000e+00 1.7000000e+00 1.1000000e+00 5.0000000e-01 1.2000000e+00 3.2000000e+00 1.2000000e+00 1.1000000e+00 1.1000000e+00 7.0000000e-01 3.1000000e+00 1.1000000e+00 3.3000000e+00 1.5000000e+00 3.3000000e+00 1.8000000e+00 2.7000000e+00 4.5000000e+00 2.2000000e+00 3.5000000e+00 2.6000000e+00 4.6000000e+00 2.0000000e+00 1.7000000e+00 2.6000000e+00 1.8000000e+00 1.9000000e+00 2.4000000e+00 2.0000000e+00 5.6000000e+00 5.1000000e+00 1.3000000e+00 3.3000000e+00 1.5000000e+00 4.4000000e+00 1.1000000e+00 3.0000000e+00 3.4000000e+00 8.0000000e-01 1.0000000e+00 2.1000000e+00 2.8000000e+00 3.4000000e+00 5.3000000e+00 2.2000000e+00 9.0000000e-01 1.3000000e+00 4.3000000e+00 2.9000000e+00 2.0000000e+00 1.0000000e+00 2.7000000e+00 3.0000000e+00 2.6000000e+00 1.5000000e+00 3.4000000e+00 3.4000000e+00 2.4000000e+00 1.5000000e+00 1.9000000e+00 2.5000000e+00 1.4000000e+00 5.0000000e-01 1.1000000e+00 1.5000000e+00 8.0000000e-01 2.1000000e+00 2.1000000e+00 2.3000000e+00 1.3000000e+00 1.7000000e+00 1.5000000e+00 1.4000000e+00 1.1000000e+00 8.0000000e-01 1.1000000e+00 1.6000000e+00 1.4000000e+00 8.0000000e-01 1.3000000e+00 3.3000000e+00 1.1000000e+00 1.0000000e+00 8.0000000e-01 2.0000000e-01 3.2000000e+00 1.0000000e+00 3.4000000e+00 2.2000000e+00 3.2000000e+00 1.9000000e+00 2.6000000e+00 4.4000000e+00 
2.5000000e+00 3.4000000e+00 2.7000000e+00 4.5000000e+00 1.9000000e+00 1.8000000e+00 2.5000000e+00 2.5000000e+00 2.6000000e+00 2.3000000e+00 1.9000000e+00 5.5000000e+00 5.2000000e+00 2.0000000e+00 3.2000000e+00 2.2000000e+00 4.5000000e+00 1.4000000e+00 2.9000000e+00 3.3000000e+00 1.3000000e+00 1.5000000e+00 2.2000000e+00 2.7000000e+00 3.5000000e+00 5.2000000e+00 2.3000000e+00 1.2000000e+00 2.0000000e+00 4.2000000e+00 3.0000000e+00 1.9000000e+00 1.5000000e+00 2.6000000e+00 2.9000000e+00 2.5000000e+00 2.2000000e+00 3.3000000e+00 3.3000000e+00 2.3000000e+00 1.8000000e+00 1.8000000e+00 2.8000000e+00 1.9000000e+00 8.0000000e-01 1.0000000e+00 9.0000000e-01 2.6000000e+00 2.6000000e+00 2.8000000e+00 1.8000000e+00 1.8000000e+00 1.4000000e+00 1.3000000e+00 6.0000000e-01 1.1000000e+00 1.4000000e+00 2.1000000e+00 1.7000000e+00 7.0000000e-01 1.8000000e+00 3.8000000e+00 1.6000000e+00 1.3000000e+00 1.3000000e+00 7.0000000e-01 3.7000000e+00 1.5000000e+00 3.3000000e+00 2.3000000e+00 2.7000000e+00 2.0000000e+00 2.3000000e+00 3.9000000e+00 2.6000000e+00 3.1000000e+00 2.4000000e+00 4.0000000e+00 1.6000000e+00 1.9000000e+00 2.0000000e+00 2.6000000e+00 2.7000000e+00 2.2000000e+00 1.6000000e+00 5.0000000e+00 4.9000000e+00 2.1000000e+00 2.7000000e+00 2.3000000e+00 4.2000000e+00 1.5000000e+00 2.4000000e+00 2.8000000e+00 1.4000000e+00 1.4000000e+00 2.3000000e+00 2.2000000e+00 3.2000000e+00 4.7000000e+00 2.4000000e+00 1.3000000e+00 2.1000000e+00 3.7000000e+00 2.9000000e+00 1.8000000e+00 1.4000000e+00 2.1000000e+00 2.4000000e+00 2.0000000e+00 2.3000000e+00 2.8000000e+00 2.8000000e+00 1.8000000e+00 1.9000000e+00 1.5000000e+00 2.7000000e+00 1.8000000e+00 8.0000000e-01 1.3000000e+00 3.0000000e+00 3.0000000e+00 3.2000000e+00 2.2000000e+00 1.4000000e+00 2.0000000e+00 1.9000000e+00 6.0000000e-01 1.5000000e+00 2.2000000e+00 2.5000000e+00 2.1000000e+00 1.1000000e+00 2.2000000e+00 4.2000000e+00 2.0000000e+00 2.1000000e+00 1.9000000e+00 1.3000000e+00 4.1000000e+00 1.9000000e+00 3.3000000e+00 1.9000000e+00 2.3000000e+00 1.8000000e+00 2.3000000e+00 3.5000000e+00 2.8000000e+00 2.5000000e+00 1.8000000e+00 3.6000000e+00 1.6000000e+00 1.5000000e+00 1.6000000e+00 2.2000000e+00 2.3000000e+00 2.2000000e+00 1.6000000e+00 4.6000000e+00 4.1000000e+00 1.7000000e+00 2.3000000e+00 1.9000000e+00 3.4000000e+00 1.1000000e+00 2.2000000e+00 2.4000000e+00 1.0000000e+00 1.4000000e+00 1.9000000e+00 1.8000000e+00 2.4000000e+00 4.3000000e+00 2.0000000e+00 9.0000000e-01 1.7000000e+00 3.3000000e+00 2.9000000e+00 1.8000000e+00 1.4000000e+00 1.7000000e+00 2.2000000e+00 1.6000000e+00 1.9000000e+00 2.4000000e+00 2.6000000e+00 1.6000000e+00 1.5000000e+00 1.5000000e+00 2.7000000e+00 1.8000000e+00 1.5000000e+00 3.6000000e+00 3.6000000e+00 3.8000000e+00 2.8000000e+00 1.2000000e+00 2.0000000e+00 1.7000000e+00 6.0000000e-01 2.1000000e+00 2.4000000e+00 3.1000000e+00 2.7000000e+00 1.3000000e+00 2.8000000e+00 4.8000000e+00 2.6000000e+00 2.3000000e+00 2.3000000e+00 1.7000000e+00 4.7000000e+00 2.5000000e+00 2.5000000e+00 1.5000000e+00 1.7000000e+00 1.2000000e+00 1.5000000e+00 2.9000000e+00 2.8000000e+00 2.1000000e+00 1.4000000e+00 3.0000000e+00 8.0000000e-01 1.1000000e+00 1.0000000e+00 1.8000000e+00 1.9000000e+00 1.4000000e+00 8.0000000e-01 4.0000000e+00 3.9000000e+00 1.7000000e+00 1.7000000e+00 1.7000000e+00 3.2000000e+00 9.0000000e-01 1.4000000e+00 1.8000000e+00 1.0000000e+00 8.0000000e-01 1.5000000e+00 1.4000000e+00 2.2000000e+00 3.7000000e+00 1.6000000e+00 9.0000000e-01 1.9000000e+00 2.7000000e+00 2.1000000e+00 1.0000000e+00 1.0000000e+00 1.1000000e+00 
1.4000000e+00 1.0000000e+00 1.5000000e+00 1.8000000e+00 1.8000000e+00 8.0000000e-01 1.1000000e+00 7.0000000e-01 1.9000000e+00 1.0000000e+00 2.1000000e+00 2.1000000e+00 2.3000000e+00 1.3000000e+00 9.0000000e-01 7.0000000e-01 6.0000000e-01 1.1000000e+00 1.2000000e+00 1.1000000e+00 1.6000000e+00 1.2000000e+00 4.0000000e-01 1.3000000e+00 3.3000000e+00 1.1000000e+00 1.0000000e+00 8.0000000e-01 6.0000000e-01 3.2000000e+00 1.0000000e+00 3.2000000e+00 1.4000000e+00 3.2000000e+00 1.7000000e+00 2.6000000e+00 4.4000000e+00 1.7000000e+00 3.4000000e+00 2.7000000e+00 4.5000000e+00 1.9000000e+00 1.8000000e+00 2.5000000e+00 1.7000000e+00 1.8000000e+00 2.3000000e+00 1.9000000e+00 5.5000000e+00 5.2000000e+00 1.2000000e+00 3.2000000e+00 1.4000000e+00 4.5000000e+00 1.2000000e+00 2.9000000e+00 3.3000000e+00 9.0000000e-01 9.0000000e-01 2.2000000e+00 2.7000000e+00 3.5000000e+00 5.2000000e+00 2.3000000e+00 1.0000000e+00 1.6000000e+00 4.2000000e+00 2.8000000e+00 1.9000000e+00 7.0000000e-01 2.6000000e+00 2.9000000e+00 2.5000000e+00 1.4000000e+00 3.3000000e+00 3.3000000e+00 2.3000000e+00 1.6000000e+00 1.8000000e+00 2.4000000e+00 1.1000000e+00 8.0000000e-01 6.0000000e-01 8.0000000e-01 2.6000000e+00 2.2000000e+00 2.7000000e+00 3.2000000e+00 2.1000000e+00 1.4000000e+00 1.1000000e+00 1.3000000e+00 2.3000000e+00 8.0000000e-01 1.2000000e+00 1.2000000e+00 1.3000000e+00 1.3000000e+00 1.9000000e+00 1.3000000e+00 1.1000000e+00 5.3000000e+00 2.7000000e+00 5.3000000e+00 3.8000000e+00 4.7000000e+00 6.5000000e+00 2.6000000e+00 5.5000000e+00 4.2000000e+00 6.6000000e+00 4.0000000e+00 3.5000000e+00 4.6000000e+00 2.6000000e+00 3.3000000e+00 4.4000000e+00 4.0000000e+00 7.6000000e+00 6.7000000e+00 2.7000000e+00 5.3000000e+00 2.7000000e+00 6.4000000e+00 2.9000000e+00 5.0000000e+00 5.4000000e+00 2.8000000e+00 3.0000000e+00 4.1000000e+00 4.8000000e+00 5.4000000e+00 7.3000000e+00 4.2000000e+00 2.9000000e+00 2.9000000e+00 6.3000000e+00 4.9000000e+00 4.0000000e+00 2.8000000e+00 4.7000000e+00 5.0000000e+00 4.6000000e+00 2.7000000e+00 5.4000000e+00 5.4000000e+00 4.4000000e+00 3.1000000e+00 3.9000000e+00 4.5000000e+00 3.0000000e+00 2.0000000e-01 8.0000000e-01 2.6000000e+00 1.8000000e+00 2.7000000e+00 3.2000000e+00 1.7000000e+00 1.2000000e+00 5.0000000e-01 9.0000000e-01 2.3000000e+00 8.0000000e-01 1.2000000e+00 1.0000000e+00 1.3000000e+00 1.3000000e+00 1.9000000e+00 1.3000000e+00 1.1000000e+00 5.3000000e+00 2.7000000e+00 5.3000000e+00 3.8000000e+00 4.7000000e+00 6.5000000e+00 2.0000000e+00 5.5000000e+00 4.0000000e+00 6.6000000e+00 4.0000000e+00 3.5000000e+00 4.6000000e+00 2.4000000e+00 3.3000000e+00 4.4000000e+00 4.0000000e+00 7.6000000e+00 6.7000000e+00 2.3000000e+00 5.3000000e+00 2.5000000e+00 6.4000000e+00 2.9000000e+00 5.0000000e+00 5.4000000e+00 2.8000000e+00 3.0000000e+00 4.1000000e+00 4.8000000e+00 5.4000000e+00 7.3000000e+00 4.2000000e+00 2.9000000e+00 2.9000000e+00 6.3000000e+00 4.9000000e+00 4.0000000e+00 2.8000000e+00 4.7000000e+00 5.0000000e+00 4.6000000e+00 2.7000000e+00 5.4000000e+00 5.4000000e+00 4.4000000e+00 2.9000000e+00 3.9000000e+00 4.5000000e+00 3.0000000e+00 1.0000000e+00 2.8000000e+00 2.0000000e+00 2.9000000e+00 3.4000000e+00 1.9000000e+00 1.4000000e+00 7.0000000e-01 1.1000000e+00 2.5000000e+00 1.0000000e+00 1.0000000e+00 1.2000000e+00 1.5000000e+00 1.5000000e+00 2.1000000e+00 1.3000000e+00 1.3000000e+00 5.5000000e+00 2.9000000e+00 5.5000000e+00 4.0000000e+00 4.9000000e+00 6.7000000e+00 2.2000000e+00 5.7000000e+00 4.2000000e+00 6.8000000e+00 4.2000000e+00 3.7000000e+00 4.8000000e+00 2.6000000e+00 3.5000000e+00 
4.6000000e+00 4.2000000e+00 7.8000000e+00 6.9000000e+00 2.5000000e+00 5.5000000e+00 2.7000000e+00 6.6000000e+00 3.1000000e+00 5.2000000e+00 5.6000000e+00 3.0000000e+00 3.2000000e+00 4.3000000e+00 5.0000000e+00 5.6000000e+00 7.5000000e+00 4.4000000e+00 3.1000000e+00 3.1000000e+00 6.5000000e+00 5.1000000e+00 4.2000000e+00 3.0000000e+00 4.9000000e+00 5.2000000e+00 4.8000000e+00 2.9000000e+00 5.6000000e+00 5.6000000e+00 4.6000000e+00 3.1000000e+00 4.1000000e+00 4.7000000e+00 3.2000000e+00 1.8000000e+00 1.6000000e+00 1.9000000e+00 2.4000000e+00 1.5000000e+00 8.0000000e-01 7.0000000e-01 9.0000000e-01 1.5000000e+00 2.0000000e-01 2.0000000e+00 6.0000000e-01 7.0000000e-01 7.0000000e-01 1.1000000e+00 1.9000000e+00 5.0000000e-01 4.5000000e+00 1.9000000e+00 4.5000000e+00 3.0000000e+00 3.9000000e+00 5.7000000e+00 2.2000000e+00 4.7000000e+00 3.6000000e+00 5.8000000e+00 3.2000000e+00 2.7000000e+00 3.8000000e+00 2.2000000e+00 2.5000000e+00 3.6000000e+00 3.2000000e+00 6.8000000e+00 6.1000000e+00 2.1000000e+00 4.5000000e+00 2.1000000e+00 5.6000000e+00 2.1000000e+00 4.2000000e+00 4.6000000e+00 2.0000000e+00 2.2000000e+00 3.3000000e+00 4.0000000e+00 4.6000000e+00 6.5000000e+00 3.4000000e+00 2.1000000e+00 2.3000000e+00 5.5000000e+00 4.1000000e+00 3.2000000e+00 2.0000000e+00 3.9000000e+00 4.2000000e+00 3.8000000e+00 1.9000000e+00 4.6000000e+00 4.6000000e+00 3.6000000e+00 2.5000000e+00 3.1000000e+00 3.7000000e+00 2.2000000e+00 1.6000000e+00 1.3000000e+00 1.6000000e+00 1.7000000e+00 2.0000000e+00 2.1000000e+00 1.7000000e+00 1.1000000e+00 1.8000000e+00 3.8000000e+00 1.6000000e+00 1.9000000e+00 1.7000000e+00 1.5000000e+00 3.7000000e+00 1.7000000e+00 2.7000000e+00 5.0000000e-01 2.7000000e+00 1.2000000e+00 2.1000000e+00 3.9000000e+00 2.0000000e+00 2.9000000e+00 1.8000000e+00 4.0000000e+00 1.4000000e+00 9.0000000e-01 2.0000000e+00 1.0000000e+00 1.1000000e+00 1.8000000e+00 1.4000000e+00 5.0000000e+00 4.3000000e+00 7.0000000e-01 2.7000000e+00 1.1000000e+00 3.8000000e+00 7.0000000e-01 2.4000000e+00 2.8000000e+00 8.0000000e-01 8.0000000e-01 1.5000000e+00 2.2000000e+00 2.8000000e+00 4.7000000e+00 1.6000000e+00 5.0000000e-01 9.0000000e-01 3.7000000e+00 2.3000000e+00 1.4000000e+00 8.0000000e-01 2.1000000e+00 2.4000000e+00 2.0000000e+00 5.0000000e-01 2.8000000e+00 2.8000000e+00 1.8000000e+00 9.0000000e-01 1.3000000e+00 1.9000000e+00 6.0000000e-01 1.1000000e+00 1.6000000e+00 1.9000000e+00 8.0000000e-01 1.3000000e+00 9.0000000e-01 9.0000000e-01 1.6000000e+00 2.8000000e+00 1.0000000e+00 9.0000000e-01 9.0000000e-01 1.3000000e+00 2.7000000e+00 1.1000000e+00 3.7000000e+00 1.7000000e+00 3.7000000e+00 2.4000000e+00 3.1000000e+00 4.9000000e+00 1.2000000e+00 4.1000000e+00 3.4000000e+00 5.0000000e+00 2.4000000e+00 2.5000000e+00 3.0000000e+00 1.8000000e+00 2.1000000e+00 2.8000000e+00 2.4000000e+00 6.0000000e+00 5.9000000e+00 1.9000000e+00 3.7000000e+00 1.3000000e+00 5.2000000e+00 1.9000000e+00 3.4000000e+00 3.8000000e+00 1.6000000e+00 1.4000000e+00 2.9000000e+00 3.2000000e+00 4.2000000e+00 5.7000000e+00 3.0000000e+00 1.7000000e+00 2.3000000e+00 4.7000000e+00 3.3000000e+00 2.4000000e+00 1.2000000e+00 3.1000000e+00 3.4000000e+00 3.0000000e+00 1.7000000e+00 3.8000000e+00 3.8000000e+00 2.8000000e+00 2.3000000e+00 2.3000000e+00 2.9000000e+00 1.4000000e+00 1.3000000e+00 1.8000000e+00 1.5000000e+00 2.2000000e+00 1.8000000e+00 8.0000000e-01 1.9000000e+00 3.9000000e+00 1.7000000e+00 1.4000000e+00 1.4000000e+00 1.2000000e+00 3.8000000e+00 1.6000000e+00 2.8000000e+00 1.8000000e+00 3.4000000e+00 2.1000000e+00 2.8000000e+00 4.6000000e+00 
2.1000000e+00 3.8000000e+00 3.1000000e+00 3.9000000e+00 1.7000000e+00 2.2000000e+00 2.7000000e+00 2.1000000e+00 2.2000000e+00 2.1000000e+00 2.1000000e+00 4.9000000e+00 5.6000000e+00 1.8000000e+00 3.0000000e+00 1.8000000e+00 4.9000000e+00 1.6000000e+00 2.5000000e+00 3.1000000e+00 1.3000000e+00 1.1000000e+00 2.6000000e+00 2.9000000e+00 3.9000000e+00 4.6000000e+00 2.7000000e+00 1.6000000e+00 2.2000000e+00 4.4000000e+00 2.2000000e+00 1.9000000e+00 9.0000000e-01 2.6000000e+00 2.9000000e+00 2.5000000e+00 1.8000000e+00 3.1000000e+00 2.9000000e+00 2.5000000e+00 2.0000000e+00 2.0000000e+00 1.8000000e+00 1.3000000e+00 1.7000000e+00 2.0000000e+00 2.7000000e+00 2.3000000e+00 9.0000000e-01 2.4000000e+00 4.4000000e+00 2.2000000e+00 1.9000000e+00 1.9000000e+00 1.3000000e+00 4.3000000e+00 2.1000000e+00 2.9000000e+00 2.1000000e+00 2.3000000e+00 1.8000000e+00 2.1000000e+00 3.5000000e+00 2.8000000e+00 2.7000000e+00 2.0000000e+00 3.4000000e+00 1.2000000e+00 1.7000000e+00 1.6000000e+00 2.4000000e+00 2.5000000e+00 1.8000000e+00 1.4000000e+00 4.4000000e+00 4.5000000e+00 1.9000000e+00 2.1000000e+00 2.1000000e+00 3.8000000e+00 1.3000000e+00 1.8000000e+00 2.2000000e+00 1.2000000e+00 1.2000000e+00 2.1000000e+00 1.8000000e+00 2.8000000e+00 4.1000000e+00 2.2000000e+00 1.1000000e+00 2.1000000e+00 3.3000000e+00 2.5000000e+00 1.4000000e+00 1.2000000e+00 1.5000000e+00 1.8000000e+00 1.4000000e+00 2.1000000e+00 2.2000000e+00 2.2000000e+00 1.4000000e+00 1.7000000e+00 1.3000000e+00 2.3000000e+00 1.6000000e+00 1.7000000e+00 1.4000000e+00 1.2000000e+00 1.2000000e+00 1.3000000e+00 2.7000000e+00 1.3000000e+00 1.6000000e+00 1.4000000e+00 8.0000000e-01 3.0000000e+00 1.4000000e+00 3.8000000e+00 2.2000000e+00 3.8000000e+00 2.3000000e+00 3.2000000e+00 5.0000000e+00 2.1000000e+00 4.0000000e+00 2.5000000e+00 5.1000000e+00 2.5000000e+00 2.0000000e+00 3.1000000e+00 2.1000000e+00 2.8000000e+00 2.9000000e+00 2.5000000e+00 6.1000000e+00 5.2000000e+00 1.2000000e+00 3.8000000e+00 2.4000000e+00 4.9000000e+00 1.4000000e+00 3.5000000e+00 3.9000000e+00 1.5000000e+00 1.9000000e+00 2.6000000e+00 3.3000000e+00 3.9000000e+00 5.8000000e+00 2.7000000e+00 1.4000000e+00 1.8000000e+00 4.8000000e+00 3.4000000e+00 2.5000000e+00 1.9000000e+00 3.2000000e+00 3.5000000e+00 3.1000000e+00 2.2000000e+00 3.9000000e+00 3.9000000e+00 2.9000000e+00 1.4000000e+00 2.4000000e+00 3.2000000e+00 2.3000000e+00 7.0000000e-01 9.0000000e-01 1.1000000e+00 8.0000000e-01 2.4000000e+00 4.0000000e-01 3.0000000e-01 3.0000000e-01 9.0000000e-01 2.3000000e+00 3.0000000e-01 4.1000000e+00 2.1000000e+00 4.1000000e+00 2.8000000e+00 3.5000000e+00 5.3000000e+00 2.0000000e+00 4.5000000e+00 3.8000000e+00 5.4000000e+00 2.8000000e+00 2.9000000e+00 3.4000000e+00 2.2000000e+00 2.5000000e+00 3.2000000e+00 2.8000000e+00 6.4000000e+00 6.3000000e+00 2.3000000e+00 4.1000000e+00 1.7000000e+00 5.6000000e+00 2.3000000e+00 3.8000000e+00 4.2000000e+00 2.0000000e+00 1.8000000e+00 3.3000000e+00 3.6000000e+00 4.6000000e+00 6.1000000e+00 3.4000000e+00 2.1000000e+00 2.5000000e+00 5.1000000e+00 3.7000000e+00 2.8000000e+00 1.6000000e+00 3.5000000e+00 3.8000000e+00 3.4000000e+00 2.1000000e+00 4.2000000e+00 4.2000000e+00 3.2000000e+00 2.7000000e+00 2.7000000e+00 3.3000000e+00 1.8000000e+00 6.0000000e-01 1.8000000e+00 5.0000000e-01 1.7000000e+00 5.0000000e-01 1.0000000e+00 8.0000000e-01 1.4000000e+00 1.6000000e+00 6.0000000e-01 4.8000000e+00 2.2000000e+00 4.8000000e+00 3.3000000e+00 4.2000000e+00 6.0000000e+00 1.5000000e+00 5.0000000e+00 3.5000000e+00 6.1000000e+00 3.5000000e+00 3.0000000e+00 4.1000000e+00 
1.9000000e+00 2.8000000e+00 3.9000000e+00 3.5000000e+00 7.1000000e+00 6.2000000e+00 2.0000000e+00 4.8000000e+00 2.0000000e+00 5.9000000e+00 2.4000000e+00 4.5000000e+00 4.9000000e+00 2.3000000e+00 2.5000000e+00 3.6000000e+00 4.3000000e+00 4.9000000e+00 6.8000000e+00 3.7000000e+00 2.4000000e+00 2.4000000e+00 5.8000000e+00 4.4000000e+00 3.5000000e+00 2.3000000e+00 4.2000000e+00 4.5000000e+00 4.1000000e+00 2.2000000e+00 4.9000000e+00 4.9000000e+00 3.9000000e+00 2.4000000e+00 3.4000000e+00 4.0000000e+00 2.5000000e+00 1.4000000e+00 7.0000000e-01 2.1000000e+00 5.0000000e-01 8.0000000e-01 8.0000000e-01 1.2000000e+00 2.0000000e+00 8.0000000e-01 4.4000000e+00 1.8000000e+00 4.4000000e+00 2.9000000e+00 3.8000000e+00 5.6000000e+00 1.3000000e+00 4.6000000e+00 3.3000000e+00 5.7000000e+00 3.1000000e+00 2.6000000e+00 3.7000000e+00 1.7000000e+00 2.4000000e+00 3.5000000e+00 3.1000000e+00 6.7000000e+00 5.8000000e+00 1.8000000e+00 4.4000000e+00 1.6000000e+00 5.5000000e+00 2.0000000e+00 4.1000000e+00 4.5000000e+00 1.9000000e+00 2.1000000e+00 3.2000000e+00 3.9000000e+00 4.5000000e+00 6.4000000e+00 3.3000000e+00 2.0000000e+00 2.0000000e+00 5.4000000e+00 4.0000000e+00 3.1000000e+00 1.9000000e+00 3.8000000e+00 4.1000000e+00 3.7000000e+00 1.8000000e+00 4.5000000e+00 4.5000000e+00 3.5000000e+00 2.2000000e+00 3.0000000e+00 3.6000000e+00 2.1000000e+00 1.5000000e+00 3.5000000e+00 1.3000000e+00 1.0000000e+00 1.0000000e+00 6.0000000e-01 3.4000000e+00 1.2000000e+00 3.0000000e+00 1.6000000e+00 3.0000000e+00 1.7000000e+00 2.4000000e+00 4.2000000e+00 2.1000000e+00 3.4000000e+00 2.7000000e+00 4.3000000e+00 1.7000000e+00 1.8000000e+00 2.3000000e+00 1.9000000e+00 2.0000000e+00 2.1000000e+00 1.7000000e+00 5.3000000e+00 5.2000000e+00 1.4000000e+00 3.0000000e+00 1.6000000e+00 4.5000000e+00 1.2000000e+00 2.7000000e+00 3.1000000e+00 9.0000000e-01 7.0000000e-01 2.2000000e+00 2.5000000e+00 3.5000000e+00 5.0000000e+00 2.3000000e+00 1.0000000e+00 1.4000000e+00 4.0000000e+00 2.6000000e+00 1.7000000e+00 7.0000000e-01 2.4000000e+00 2.7000000e+00 2.3000000e+00 1.6000000e+00 3.1000000e+00 3.1000000e+00 2.1000000e+00 1.6000000e+00 1.6000000e+00 2.2000000e+00 1.1000000e+00 2.0000000e+00 6.0000000e-01 7.0000000e-01 7.0000000e-01 1.1000000e+00 1.9000000e+00 5.0000000e-01 4.5000000e+00 1.9000000e+00 4.5000000e+00 3.0000000e+00 3.9000000e+00 5.7000000e+00 2.0000000e+00 4.7000000e+00 3.4000000e+00 5.8000000e+00 3.2000000e+00 2.7000000e+00 3.8000000e+00 2.0000000e+00 2.5000000e+00 3.6000000e+00 3.2000000e+00 6.8000000e+00 5.9000000e+00 1.9000000e+00 4.5000000e+00 2.1000000e+00 5.6000000e+00 2.1000000e+00 4.2000000e+00 4.6000000e+00 2.0000000e+00 2.2000000e+00 3.3000000e+00 4.0000000e+00 4.6000000e+00 6.5000000e+00 3.4000000e+00 2.1000000e+00 2.1000000e+00 5.5000000e+00 4.1000000e+00 3.2000000e+00 2.0000000e+00 3.9000000e+00 4.2000000e+00 3.8000000e+00 1.9000000e+00 4.6000000e+00 4.6000000e+00 3.6000000e+00 2.3000000e+00 3.1000000e+00 3.7000000e+00 2.2000000e+00 2.2000000e+00 2.5000000e+00 2.5000000e+00 3.1000000e+00 7.0000000e-01 2.3000000e+00 6.5000000e+00 3.9000000e+00 6.5000000e+00 5.0000000e+00 5.9000000e+00 7.7000000e+00 2.2000000e+00 6.7000000e+00 5.2000000e+00 7.8000000e+00 5.2000000e+00 4.7000000e+00 5.8000000e+00 3.6000000e+00 4.5000000e+00 5.6000000e+00 5.2000000e+00 8.8000000e+00 7.9000000e+00 3.3000000e+00 6.5000000e+00 3.7000000e+00 7.6000000e+00 4.1000000e+00 6.2000000e+00 6.6000000e+00 4.0000000e+00 4.2000000e+00 5.3000000e+00 6.0000000e+00 6.6000000e+00 8.5000000e+00 5.4000000e+00 4.1000000e+00 4.1000000e+00 7.5000000e+00 
6.1000000e+00 5.2000000e+00 4.0000000e+00 5.9000000e+00 6.2000000e+00 5.8000000e+00 3.9000000e+00 6.6000000e+00 6.6000000e+00 5.6000000e+00 4.1000000e+00 5.1000000e+00 5.7000000e+00 4.2000000e+00 5.0000000e-01 3.0000000e-01 9.0000000e-01 2.1000000e+00 3.0000000e-01 4.3000000e+00 1.7000000e+00 4.3000000e+00 2.8000000e+00 3.7000000e+00 5.5000000e+00 1.6000000e+00 4.5000000e+00 3.4000000e+00 5.6000000e+00 3.0000000e+00 2.5000000e+00 3.6000000e+00 1.8000000e+00 2.3000000e+00 3.4000000e+00 3.0000000e+00 6.6000000e+00 5.9000000e+00 1.9000000e+00 4.3000000e+00 1.5000000e+00 5.4000000e+00 1.9000000e+00 4.0000000e+00 4.4000000e+00 1.8000000e+00 2.0000000e+00 3.1000000e+00 3.8000000e+00 4.4000000e+00 6.3000000e+00 3.2000000e+00 1.9000000e+00 2.1000000e+00 5.3000000e+00 3.9000000e+00 3.0000000e+00 1.8000000e+00 3.7000000e+00 4.0000000e+00 3.6000000e+00 1.7000000e+00 4.4000000e+00 4.4000000e+00 3.4000000e+00 2.3000000e+00 2.9000000e+00 3.5000000e+00 2.0000000e+00 2.0000000e-01 8.0000000e-01 2.4000000e+00 4.0000000e-01 4.0000000e+00 2.0000000e+00 4.0000000e+00 2.7000000e+00 3.4000000e+00 5.2000000e+00 2.1000000e+00 4.4000000e+00 3.7000000e+00 5.3000000e+00 2.7000000e+00 2.8000000e+00 3.3000000e+00 2.1000000e+00 2.4000000e+00 3.1000000e+00 2.7000000e+00 6.3000000e+00 6.2000000e+00 2.2000000e+00 4.0000000e+00 1.8000000e+00 5.5000000e+00 2.2000000e+00 3.7000000e+00 4.1000000e+00 1.9000000e+00 1.7000000e+00 3.2000000e+00 3.5000000e+00 4.5000000e+00 6.0000000e+00 3.3000000e+00 2.0000000e+00 2.4000000e+00 5.0000000e+00 3.6000000e+00 2.7000000e+00 1.5000000e+00 3.4000000e+00 3.7000000e+00 3.3000000e+00 2.0000000e+00 4.1000000e+00 4.1000000e+00 3.1000000e+00 2.6000000e+00 2.6000000e+00 3.2000000e+00 1.7000000e+00 6.0000000e-01 2.4000000e+00 2.0000000e-01 4.0000000e+00 1.8000000e+00 4.0000000e+00 2.5000000e+00 3.4000000e+00 5.2000000e+00 1.9000000e+00 4.2000000e+00 3.5000000e+00 5.3000000e+00 2.7000000e+00 2.6000000e+00 3.3000000e+00 1.9000000e+00 2.2000000e+00 3.1000000e+00 2.7000000e+00 6.3000000e+00 6.0000000e+00 2.0000000e+00 4.0000000e+00 1.6000000e+00 5.3000000e+00 2.0000000e+00 3.7000000e+00 4.1000000e+00 1.7000000e+00 1.7000000e+00 3.0000000e+00 3.5000000e+00 4.3000000e+00 6.0000000e+00 3.1000000e+00 1.8000000e+00 2.2000000e+00 5.0000000e+00 3.6000000e+00 2.7000000e+00 1.5000000e+00 3.4000000e+00 3.7000000e+00 3.3000000e+00 1.8000000e+00 4.1000000e+00 4.1000000e+00 3.1000000e+00 2.4000000e+00 2.6000000e+00 3.2000000e+00 1.7000000e+00 3.0000000e+00 8.0000000e-01 3.4000000e+00 2.0000000e+00 3.4000000e+00 1.9000000e+00 2.8000000e+00 4.6000000e+00 2.3000000e+00 3.6000000e+00 2.9000000e+00 4.7000000e+00 2.1000000e+00 2.0000000e+00 2.7000000e+00 2.3000000e+00 2.4000000e+00 2.5000000e+00 2.1000000e+00 5.7000000e+00 5.4000000e+00 1.8000000e+00 3.4000000e+00 2.0000000e+00 4.7000000e+00 1.4000000e+00 3.1000000e+00 3.5000000e+00 1.1000000e+00 1.3000000e+00 2.4000000e+00 2.9000000e+00 3.7000000e+00 5.4000000e+00 2.5000000e+00 1.2000000e+00 1.8000000e+00 4.4000000e+00 3.0000000e+00 2.1000000e+00 1.3000000e+00 2.8000000e+00 3.1000000e+00 2.7000000e+00 2.0000000e+00 3.5000000e+00 3.5000000e+00 2.5000000e+00 1.8000000e+00 2.0000000e+00 2.6000000e+00 1.7000000e+00 2.2000000e+00 6.4000000e+00 3.8000000e+00 6.4000000e+00 4.9000000e+00 5.8000000e+00 7.6000000e+00 2.3000000e+00 6.6000000e+00 5.1000000e+00 7.7000000e+00 5.1000000e+00 4.6000000e+00 5.7000000e+00 3.5000000e+00 4.4000000e+00 5.5000000e+00 5.1000000e+00 8.7000000e+00 7.8000000e+00 3.6000000e+00 6.4000000e+00 3.6000000e+00 7.5000000e+00 4.0000000e+00 
6.1000000e+00 6.5000000e+00 3.9000000e+00 4.1000000e+00 5.2000000e+00 5.9000000e+00 6.5000000e+00 8.4000000e+00 5.3000000e+00 4.0000000e+00 4.0000000e+00 7.4000000e+00 6.0000000e+00 5.1000000e+00 3.9000000e+00 5.8000000e+00 6.1000000e+00 5.7000000e+00 3.8000000e+00 6.5000000e+00 6.5000000e+00 5.5000000e+00 4.0000000e+00 5.0000000e+00 5.6000000e+00 4.1000000e+00 4.2000000e+00 1.8000000e+00 4.2000000e+00 2.7000000e+00 3.6000000e+00 5.4000000e+00 1.9000000e+00 4.4000000e+00 3.5000000e+00 5.5000000e+00 2.9000000e+00 2.6000000e+00 3.5000000e+00 1.9000000e+00 2.2000000e+00 3.3000000e+00 2.9000000e+00 6.5000000e+00 6.0000000e+00 2.0000000e+00 4.2000000e+00 1.6000000e+00 5.3000000e+00 2.0000000e+00 3.9000000e+00 4.3000000e+00 1.7000000e+00 1.9000000e+00 3.0000000e+00 3.7000000e+00 4.3000000e+00 6.2000000e+00 3.1000000e+00 1.8000000e+00 2.2000000e+00 5.2000000e+00 3.8000000e+00 2.9000000e+00 1.7000000e+00 3.6000000e+00 3.9000000e+00 3.5000000e+00 1.8000000e+00 4.3000000e+00 4.3000000e+00 3.3000000e+00 2.4000000e+00 2.8000000e+00 3.4000000e+00 1.9000000e+00 2.6000000e+00 1.6000000e+00 1.5000000e+00 1.0000000e+00 2.6000000e+00 4.5000000e+00 2.4000000e+00 2.1000000e+00 1.3000000e+00 1.7000000e+00 2.0000000e+00 1.7000000e+00 2.9000000e+00 2.0000000e+00 1.1000000e+00 1.7000000e+00 2.9000000e+00 3.2000000e+00 3.4000000e+00 1.2000000e+00 2.8000000e+00 3.1000000e+00 2.4000000e+00 1.1000000e+00 1.7000000e+00 2.5000000e+00 2.3000000e+00 1.4000000e+00 2.3000000e+00 2.3000000e+00 3.0000000e+00 1.3000000e+00 2.4000000e+00 2.4000000e+00 2.0000000e+00 6.0000000e-01 1.5000000e+00 2.5000000e+00 1.8000000e+00 1.1000000e+00 1.9000000e+00 2.6000000e+00 9.0000000e-01 7.0000000e-01 1.7000000e+00 2.4000000e+00 1.8000000e+00 1.0000000e+00 2.3000000e+00 2.6000000e+00 1.3000000e+00 2.0000000e+00 3.8000000e+00 1.9000000e+00 3.0000000e+00 1.9000000e+00 3.9000000e+00 1.3000000e+00 8.0000000e-01 1.9000000e+00 5.0000000e-01 6.0000000e-01 1.7000000e+00 1.5000000e+00 4.9000000e+00 4.2000000e+00 1.2000000e+00 2.6000000e+00 6.0000000e-01 3.7000000e+00 8.0000000e-01 2.3000000e+00 2.9000000e+00 9.0000000e-01 9.0000000e-01 1.4000000e+00 2.7000000e+00 2.7000000e+00 4.6000000e+00 1.5000000e+00 1.0000000e+00 1.4000000e+00 3.6000000e+00 2.2000000e+00 1.5000000e+00 9.0000000e-01 2.0000000e+00 2.3000000e+00 1.9000000e+00 0.0000000e+00 2.7000000e+00 2.7000000e+00 1.7000000e+00 8.0000000e-01 1.2000000e+00 1.8000000e+00 5.0000000e-01 1.5000000e+00 8.0000000e-01 1.2000000e+00 4.5000000e+00 1.0000000e+00 1.3000000e+00 1.3000000e+00 1.7000000e+00 1.8000000e+00 7.0000000e-01 2.9000000e+00 2.6000000e+00 1.7000000e+00 1.3000000e+00 2.3000000e+00 2.2000000e+00 3.4000000e+00 8.0000000e-01 2.8000000e+00 1.7000000e+00 2.4000000e+00 9.0000000e-01 7.0000000e-01 2.5000000e+00 2.3000000e+00 1.2000000e+00 7.0000000e-01 9.0000000e-01 2.2000000e+00 1.3000000e+00 2.4000000e+00 2.4000000e+00 1.0000000e+00 1.8000000e+00 1.5000000e+00 2.5000000e+00 8.0000000e-01 1.1000000e+00 1.3000000e+00 2.6000000e+00 7.0000000e-01 1.3000000e+00 1.3000000e+00 2.4000000e+00 1.4000000e+00 2.0000000e+00 2.3000000e+00 9.0000000e-01 2.7000000e+00 3.0000000e+00 1.7000000e+00 1.0000000e+00 2.8000000e+00 1.2000000e+00 7.0000000e-01 1.0000000e+00 1.8000000e+00 1.7000000e+00 1.2000000e+00 4.0000000e-01 3.8000000e+00 3.5000000e+00 1.9000000e+00 1.5000000e+00 1.7000000e+00 2.8000000e+00 9.0000000e-01 1.2000000e+00 1.6000000e+00 1.0000000e+00 1.0000000e+00 5.0000000e-01 1.4000000e+00 1.8000000e+00 3.5000000e+00 6.0000000e-01 9.0000000e-01 9.0000000e-01 2.5000000e+00 1.1000000e+00 
4.0000000e-01 1.2000000e+00 1.3000000e+00 1.2000000e+00 1.8000000e+00 1.3000000e+00 1.6000000e+00 1.6000000e+00 1.4000000e+00 1.1000000e+00 9.0000000e-01 1.3000000e+00 1.0000000e+00 2.0000000e+00 3.9000000e+00 1.8000000e+00 1.1000000e+00 1.9000000e+00 1.1000000e+00 1.2000000e+00 7.0000000e-01 2.3000000e+00 1.8000000e+00 9.0000000e-01 7.0000000e-01 2.9000000e+00 2.8000000e+00 2.8000000e+00 8.0000000e-01 2.2000000e+00 2.5000000e+00 1.8000000e+00 7.0000000e-01 1.5000000e+00 1.9000000e+00 1.7000000e+00 6.0000000e-01 1.3000000e+00 1.7000000e+00 3.0000000e+00 5.0000000e-01 1.8000000e+00 1.8000000e+00 1.6000000e+00 1.0000000e+00 9.0000000e-01 1.9000000e+00 1.0000000e+00 7.0000000e-01 1.3000000e+00 2.0000000e+00 7.0000000e-01 9.0000000e-01 9.0000000e-01 1.8000000e+00 8.0000000e-01 1.2000000e+00 1.7000000e+00 5.7000000e+00 1.0000000e+00 2.5000000e+00 1.9000000e+00 2.9000000e+00 3.0000000e+00 1.9000000e+00 4.1000000e+00 3.8000000e+00 2.9000000e+00 2.5000000e+00 1.1000000e+00 1.0000000e+00 4.6000000e+00 2.0000000e+00 4.0000000e+00 5.0000000e-01 3.6000000e+00 2.1000000e+00 1.5000000e+00 3.7000000e+00 3.5000000e+00 2.4000000e+00 1.7000000e+00 1.1000000e+00 1.4000000e+00 2.5000000e+00 3.6000000e+00 3.6000000e+00 8.0000000e-01 3.0000000e+00 2.7000000e+00 3.7000000e+00 2.0000000e+00 2.3000000e+00 2.5000000e+00 3.8000000e+00 1.9000000e+00 2.5000000e+00 2.5000000e+00 3.6000000e+00 2.6000000e+00 3.2000000e+00 3.5000000e+00 4.7000000e+00 3.2000000e+00 5.8000000e+00 3.2000000e+00 2.7000000e+00 3.8000000e+00 1.6000000e+00 2.5000000e+00 3.6000000e+00 3.2000000e+00 6.8000000e+00 5.9000000e+00 2.1000000e+00 4.5000000e+00 1.7000000e+00 5.6000000e+00 2.1000000e+00 4.2000000e+00 4.6000000e+00 2.0000000e+00 2.2000000e+00 3.3000000e+00 4.2000000e+00 4.6000000e+00 6.5000000e+00 3.4000000e+00 2.5000000e+00 2.7000000e+00 5.5000000e+00 4.1000000e+00 3.2000000e+00 2.0000000e+00 3.9000000e+00 4.2000000e+00 3.8000000e+00 1.9000000e+00 4.6000000e+00 4.6000000e+00 3.6000000e+00 2.1000000e+00 3.1000000e+00 3.7000000e+00 2.2000000e+00 1.5000000e+00 1.7000000e+00 2.5000000e+00 2.2000000e+00 1.7000000e+00 3.5000000e+00 3.4000000e+00 2.7000000e+00 1.7000000e+00 2.1000000e+00 1.8000000e+00 3.6000000e+00 1.8000000e+00 3.4000000e+00 1.1000000e+00 2.6000000e+00 1.9000000e+00 7.0000000e-01 2.7000000e+00 2.7000000e+00 2.0000000e+00 9.0000000e-01 5.0000000e-01 1.8000000e+00 2.1000000e+00 2.6000000e+00 2.6000000e+00 1.2000000e+00 2.8000000e+00 1.9000000e+00 2.9000000e+00 1.8000000e+00 2.1000000e+00 2.3000000e+00 3.0000000e+00 1.7000000e+00 2.3000000e+00 2.3000000e+00 2.8000000e+00 2.2000000e+00 3.0000000e+00 2.7000000e+00 2.6000000e+00 1.8000000e+00 1.1000000e+00 1.2000000e+00 2.0000000e+00 2.5000000e+00 2.0000000e+00 1.0000000e+00 3.6000000e+00 2.7000000e+00 2.1000000e+00 1.5000000e+00 2.5000000e+00 2.4000000e+00 1.5000000e+00 1.2000000e+00 1.4000000e+00 1.8000000e+00 2.0000000e+00 1.1000000e+00 1.2000000e+00 1.4000000e+00 3.3000000e+00 1.2000000e+00 1.7000000e+00 1.3000000e+00 2.3000000e+00 2.1000000e+00 1.2000000e+00 2.2000000e+00 1.5000000e+00 1.4000000e+00 2.0000000e+00 1.9000000e+00 1.4000000e+00 1.6000000e+00 1.6000000e+00 1.3000000e+00 1.5000000e+00 2.3000000e+00 2.0000000e+00 2.6000000e+00 3.1000000e+00 2.0000000e+00 4.2000000e+00 3.3000000e+00 2.2000000e+00 2.6000000e+00 1.6000000e+00 2.5000000e+00 4.7000000e+00 1.3000000e+00 4.1000000e+00 2.4000000e+00 3.7000000e+00 1.6000000e+00 1.2000000e+00 3.8000000e+00 3.6000000e+00 2.5000000e+00 1.8000000e+00 1.6000000e+00 1.7000000e+00 2.4000000e+00 3.7000000e+00 3.7000000e+00 
1.3000000e+00 1.7000000e+00 2.6000000e+00 3.8000000e+00 1.9000000e+00 1.6000000e+00 2.0000000e+00 3.9000000e+00 1.2000000e+00 1.2000000e+00 2.2000000e+00 3.7000000e+00 2.7000000e+00 2.1000000e+00 3.6000000e+00 9.0000000e-01 1.0000000e+00 1.6000000e+00 1.5000000e+00 6.0000000e-01 8.0000000e-01 3.6000000e+00 3.9000000e+00 2.1000000e+00 1.3000000e+00 1.5000000e+00 3.2000000e+00 1.1000000e+00 1.0000000e+00 1.8000000e+00 1.2000000e+00 1.0000000e+00 1.1000000e+00 2.0000000e+00 2.4000000e+00 3.3000000e+00 1.2000000e+00 1.1000000e+00 2.1000000e+00 2.7000000e+00 1.3000000e+00 8.0000000e-01 1.2000000e+00 9.0000000e-01 1.2000000e+00 8.0000000e-01 1.3000000e+00 1.4000000e+00 1.4000000e+00 8.0000000e-01 1.1000000e+00 3.0000000e-01 1.1000000e+00 1.0000000e+00 1.1000000e+00 1.3000000e+00 1.4000000e+00 9.0000000e-01 7.0000000e-01 4.1000000e+00 3.4000000e+00 1.6000000e+00 1.8000000e+00 1.4000000e+00 2.9000000e+00 6.0000000e-01 1.5000000e+00 2.1000000e+00 9.0000000e-01 1.1000000e+00 6.0000000e-01 1.9000000e+00 1.9000000e+00 3.8000000e+00 7.0000000e-01 8.0000000e-01 1.2000000e+00 2.8000000e+00 1.6000000e+00 7.0000000e-01 1.3000000e+00 1.2000000e+00 1.5000000e+00 1.5000000e+00 8.0000000e-01 1.9000000e+00 1.9000000e+00 1.1000000e+00 6.0000000e-01 6.0000000e-01 1.4000000e+00 1.1000000e+00 2.2000000e+00 1.9000000e+00 1.0000000e+00 6.0000000e-01 3.0000000e+00 2.9000000e+00 2.7000000e+00 7.0000000e-01 2.1000000e+00 2.4000000e+00 1.7000000e+00 6.0000000e-01 1.4000000e+00 1.8000000e+00 1.6000000e+00 7.0000000e-01 1.2000000e+00 1.6000000e+00 2.9000000e+00 8.0000000e-01 1.7000000e+00 1.9000000e+00 1.7000000e+00 1.3000000e+00 8.0000000e-01 1.8000000e+00 3.0000000e-01 6.0000000e-01 8.0000000e-01 1.9000000e+00 8.0000000e-01 1.0000000e+00 6.0000000e-01 1.7000000e+00 7.0000000e-01 1.3000000e+00 1.6000000e+00 9.0000000e-01 2.0000000e+00 2.0000000e+00 5.2000000e+00 4.3000000e+00 1.1000000e+00 2.9000000e+00 5.0000000e-01 4.0000000e+00 1.1000000e+00 2.6000000e+00 3.4000000e+00 1.2000000e+00 1.2000000e+00 1.7000000e+00 3.2000000e+00 3.2000000e+00 4.9000000e+00 1.8000000e+00 1.5000000e+00 1.7000000e+00 3.9000000e+00 2.5000000e+00 2.0000000e+00 1.2000000e+00 2.3000000e+00 2.6000000e+00 2.2000000e+00 5.0000000e-01 3.0000000e+00 3.0000000e+00 2.0000000e+00 7.0000000e-01 1.5000000e+00 2.1000000e+00 1.0000000e+00 1.3000000e+00 1.9000000e+00 4.7000000e+00 4.0000000e+00 1.8000000e+00 2.2000000e+00 8.0000000e-01 3.9000000e+00 1.4000000e+00 2.3000000e+00 3.3000000e+00 1.3000000e+00 1.3000000e+00 1.4000000e+00 3.1000000e+00 3.1000000e+00 4.8000000e+00 1.3000000e+00 1.4000000e+00 2.0000000e+00 3.2000000e+00 1.6000000e+00 1.9000000e+00 1.3000000e+00 2.0000000e+00 1.7000000e+00 1.5000000e+00 6.0000000e-01 2.3000000e+00 2.1000000e+00 1.3000000e+00 1.4000000e+00 1.4000000e+00 1.4000000e+00 9.0000000e-01 1.0000000e+00 3.4000000e+00 3.5000000e+00 2.5000000e+00 9.0000000e-01 1.9000000e+00 3.4000000e+00 1.5000000e+00 1.0000000e+00 2.0000000e+00 1.6000000e+00 1.4000000e+00 9.0000000e-01 2.2000000e+00 2.6000000e+00 3.5000000e+00 8.0000000e-01 1.5000000e+00 2.1000000e+00 2.3000000e+00 7.0000000e-01 8.0000000e-01 1.6000000e+00 9.0000000e-01 8.0000000e-01 8.0000000e-01 1.7000000e+00 1.0000000e+00 1.0000000e+00 6.0000000e-01 1.5000000e+00 7.0000000e-01 5.0000000e-01 1.4000000e+00 3.6000000e+00 3.5000000e+00 2.1000000e+00 1.3000000e+00 1.9000000e+00 2.8000000e+00 1.1000000e+00 1.0000000e+00 1.4000000e+00 1.2000000e+00 1.0000000e+00 7.0000000e-01 1.2000000e+00 1.8000000e+00 3.3000000e+00 8.0000000e-01 1.1000000e+00 1.3000000e+00 2.3000000e+00 
1.3000000e+00 2.0000000e-01 1.2000000e+00 9.0000000e-01 1.0000000e+00 1.4000000e+00 1.5000000e+00 1.4000000e+00 1.4000000e+00 1.0000000e+00 1.3000000e+00 5.0000000e-01 1.3000000e+00 1.0000000e+00 1.5000000e+00 5.7000000e+00 2.5000000e+00 5.1000000e+00 1.2000000e+00 4.7000000e+00 2.6000000e+00 2.2000000e+00 4.8000000e+00 4.6000000e+00 3.5000000e+00 2.8000000e+00 2.2000000e+00 7.0000000e-01 3.4000000e+00 4.7000000e+00 4.7000000e+00 1.5000000e+00 3.1000000e+00 3.6000000e+00 4.8000000e+00 2.9000000e+00 3.0000000e+00 3.2000000e+00 4.9000000e+00 2.4000000e+00 2.8000000e+00 3.4000000e+00 4.7000000e+00 3.7000000e+00 3.3000000e+00 4.6000000e+00 4.8000000e+00 2.6000000e+00 4.6000000e+00 7.0000000e-01 4.0000000e+00 3.1000000e+00 2.5000000e+00 4.3000000e+00 4.5000000e+00 3.0000000e+00 2.7000000e+00 1.7000000e+00 2.2000000e+00 2.9000000e+00 4.2000000e+00 3.8000000e+00 1.2000000e+00 3.6000000e+00 3.7000000e+00 4.7000000e+00 3.0000000e+00 2.9000000e+00 3.1000000e+00 4.2000000e+00 2.5000000e+00 3.1000000e+00 3.1000000e+00 3.8000000e+00 3.6000000e+00 3.8000000e+00 4.5000000e+00 3.4000000e+00 1.6000000e+00 4.5000000e+00 1.2000000e+00 3.1000000e+00 3.5000000e+00 1.3000000e+00 1.3000000e+00 2.2000000e+00 2.9000000e+00 3.5000000e+00 5.4000000e+00 2.3000000e+00 1.0000000e+00 1.2000000e+00 4.4000000e+00 3.0000000e+00 2.1000000e+00 1.3000000e+00 2.8000000e+00 3.1000000e+00 2.7000000e+00 1.2000000e+00 3.5000000e+00 3.5000000e+00 2.5000000e+00 1.0000000e+00 2.0000000e+00 2.6000000e+00 1.3000000e+00 2.8000000e+00 2.5000000e+00 2.4000000e+00 5.0000000e-01 1.1000000e+00 2.5000000e+00 2.3000000e+00 1.2000000e+00 1.3000000e+00 1.7000000e+00 2.6000000e+00 1.1000000e+00 2.4000000e+00 2.4000000e+00 1.4000000e+00 1.0000000e+00 1.3000000e+00 2.5000000e+00 6.0000000e-01 5.0000000e-01 7.0000000e-01 2.6000000e+00 3.0000000e-01 5.0000000e-01 9.0000000e-01 2.4000000e+00 1.4000000e+00 1.2000000e+00 2.3000000e+00 3.9000000e+00 1.0000000e+00 2.5000000e+00 3.3000000e+00 9.0000000e-01 9.0000000e-01 1.6000000e+00 3.1000000e+00 3.1000000e+00 4.8000000e+00 1.7000000e+00 1.4000000e+00 2.0000000e+00 3.8000000e+00 2.4000000e+00 1.9000000e+00 9.0000000e-01 2.2000000e+00 2.5000000e+00 2.1000000e+00 6.0000000e-01 2.9000000e+00 2.9000000e+00 1.9000000e+00 1.2000000e+00 1.4000000e+00 2.0000000e+00 9.0000000e-01 3.5000000e+00 2.6000000e+00 1.8000000e+00 3.6000000e+00 3.8000000e+00 2.5000000e+00 2.0000000e+00 1.0000000e+00 1.5000000e+00 2.6000000e+00 3.5000000e+00 3.5000000e+00 1.1000000e+00 3.5000000e+00 3.0000000e+00 4.0000000e+00 2.5000000e+00 2.8000000e+00 3.0000000e+00 3.7000000e+00 2.4000000e+00 3.0000000e+00 3.0000000e+00 3.5000000e+00 2.9000000e+00 3.7000000e+00 3.8000000e+00 2.1000000e+00 2.5000000e+00 3.0000000e-01 5.0000000e-01 1.2000000e+00 2.3000000e+00 2.5000000e+00 4.4000000e+00 1.3000000e+00 6.0000000e-01 1.4000000e+00 3.4000000e+00 2.0000000e+00 1.1000000e+00 7.0000000e-01 1.8000000e+00 2.1000000e+00 1.7000000e+00 8.0000000e-01 2.5000000e+00 2.5000000e+00 1.5000000e+00 4.0000000e-01 1.0000000e+00 1.8000000e+00 9.0000000e-01 1.2000000e+00 2.2000000e+00 2.0000000e+00 9.0000000e-01 1.4000000e+00 1.8000000e+00 2.5000000e+00 1.0000000e+00 2.1000000e+00 2.1000000e+00 1.9000000e+00 9.0000000e-01 1.0000000e+00 2.2000000e+00 7.0000000e-01 6.0000000e-01 1.2000000e+00 2.3000000e+00 6.0000000e-01 4.0000000e-01 1.0000000e+00 2.1000000e+00 1.1000000e+00 1.1000000e+00 2.0000000e+00 2.6000000e+00 2.4000000e+00 1.9000000e+00 6.0000000e-01 8.0000000e-01 1.9000000e+00 2.0000000e+00 2.5000000e+00 2.5000000e+00 1.3000000e+00 2.1000000e+00 
1.4000000e+00 2.6000000e+00 1.3000000e+00 1.6000000e+00 1.8000000e+00 2.9000000e+00 1.0000000e+00 1.6000000e+00 2.0000000e+00 2.7000000e+00 1.9000000e+00 2.3000000e+00 2.4000000e+00 4.0000000e-01 1.3000000e+00 2.4000000e+00 2.6000000e+00 4.5000000e+00 1.4000000e+00 7.0000000e-01 1.5000000e+00 3.5000000e+00 2.1000000e+00 1.2000000e+00 4.0000000e-01 1.9000000e+00 2.2000000e+00 1.8000000e+00 9.0000000e-01 2.6000000e+00 2.6000000e+00 1.6000000e+00 7.0000000e-01 1.1000000e+00 1.7000000e+00 8.0000000e-01 1.5000000e+00 2.2000000e+00 2.8000000e+00 4.3000000e+00 1.6000000e+00 9.0000000e-01 1.5000000e+00 3.3000000e+00 1.9000000e+00 1.0000000e+00 2.0000000e-01 1.7000000e+00 2.0000000e+00 1.6000000e+00 9.0000000e-01 2.4000000e+00 2.4000000e+00 1.4000000e+00 9.0000000e-01 9.0000000e-01 1.5000000e+00 4.0000000e-01 1.7000000e+00 1.7000000e+00 3.4000000e+00 1.0000000e-01 1.2000000e+00 1.2000000e+00 2.2000000e+00 1.0000000e+00 7.0000000e-01 1.7000000e+00 1.0000000e+00 9.0000000e-01 1.5000000e+00 1.4000000e+00 1.3000000e+00 1.3000000e+00 1.1000000e+00 1.2000000e+00 8.0000000e-01 1.2000000e+00 1.5000000e+00 1.0000000e+00 2.5000000e+00 1.8000000e+00 1.9000000e+00 1.9000000e+00 1.5000000e+00 2.3000000e+00 1.4000000e+00 2.4000000e+00 1.3000000e+00 1.6000000e+00 1.8000000e+00 2.7000000e+00 1.4000000e+00 1.8000000e+00 1.8000000e+00 2.5000000e+00 1.7000000e+00 2.5000000e+00 2.2000000e+00 1.9000000e+00 1.8000000e+00 2.5000000e+00 2.5000000e+00 9.0000000e-01 2.7000000e+00 2.0000000e+00 3.0000000e+00 1.7000000e+00 2.0000000e+00 2.2000000e+00 2.7000000e+00 1.6000000e+00 2.2000000e+00 2.2000000e+00 2.5000000e+00 2.1000000e+00 2.9000000e+00 2.8000000e+00 3.5000000e+00 4.4000000e+00 4.4000000e+00 1.6000000e+00 3.2000000e+00 3.3000000e+00 4.5000000e+00 2.8000000e+00 3.1000000e+00 3.3000000e+00 4.6000000e+00 2.5000000e+00 2.9000000e+00 3.5000000e+00 4.4000000e+00 3.4000000e+00 3.4000000e+00 4.3000000e+00 1.3000000e+00 1.3000000e+00 2.1000000e+00 9.0000000e-01 8.0000000e-01 1.8000000e+00 1.1000000e+00 8.0000000e-01 1.4000000e+00 1.5000000e+00 1.2000000e+00 1.2000000e+00 1.0000000e+00 1.3000000e+00 9.0000000e-01 1.1000000e+00 1.6000000e+00 1.0000000e+00 3.4000000e+00 2.0000000e+00 1.1000000e+00 1.1000000e+00 1.8000000e+00 2.1000000e+00 1.7000000e+00 1.0000000e+00 2.5000000e+00 2.5000000e+00 1.5000000e+00 8.0000000e-01 1.0000000e+00 1.8000000e+00 9.0000000e-01 3.4000000e+00 2.0000000e+00 1.3000000e+00 1.7000000e+00 2.2000000e+00 2.1000000e+00 2.7000000e+00 1.4000000e+00 2.5000000e+00 2.5000000e+00 2.3000000e+00 1.4000000e+00 1.8000000e+00 2.0000000e+00 1.5000000e+00 2.4000000e+00 2.5000000e+00 3.5000000e+00 1.8000000e+00 1.7000000e+00 1.9000000e+00 3.6000000e+00 1.3000000e+00 1.9000000e+00 1.9000000e+00 3.4000000e+00 2.4000000e+00 2.6000000e+00 3.3000000e+00 1.1000000e+00 2.1000000e+00 1.4000000e+00 7.0000000e-01 1.5000000e+00 2.2000000e+00 1.1000000e+00 7.0000000e-01 1.3000000e+00 2.0000000e+00 1.4000000e+00 4.0000000e-01 1.9000000e+00 1.2000000e+00 9.0000000e-01 1.0000000e+00 1.4000000e+00 1.5000000e+00 1.4000000e+00 1.4000000e+00 1.2000000e+00 1.3000000e+00 7.0000000e-01 1.1000000e+00 1.0000000e+00 1.9000000e+00 2.2000000e+00 1.8000000e+00 9.0000000e-01 2.6000000e+00 2.6000000e+00 1.6000000e+00 1.1000000e+00 1.1000000e+00 1.7000000e+00 4.0000000e-01 7.0000000e-01 5.0000000e-01 2.0000000e+00 9.0000000e-01 1.1000000e+00 7.0000000e-01 1.8000000e+00 8.0000000e-01 1.2000000e+00 1.7000000e+00 8.0000000e-01 2.3000000e+00 6.0000000e-01 4.0000000e-01 6.0000000e-01 2.1000000e+00 1.1000000e+00 1.1000000e+00 2.0000000e+00 
1.9000000e+00 1.0000000e+00 1.2000000e+00 4.0000000e-01 1.7000000e+00 9.0000000e-01 1.3000000e+00 1.6000000e+00 2.7000000e+00 2.7000000e+00 1.7000000e+00 8.0000000e-01 1.2000000e+00 1.8000000e+00 5.0000000e-01 6.0000000e-01 1.0000000e+00 2.5000000e+00 1.5000000e+00 1.3000000e+00 2.4000000e+00 1.0000000e+00 2.5000000e+00 1.5000000e+00 1.1000000e+00 2.4000000e+00 1.5000000e+00 5.0000000e-01 1.1000000e+00 1.4000000e+00 1.0000000e+00 1.8000000e+00 1.1000000e+00 1.2000000e+00 9.0000000e-01 1.5000000e+00 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-cityblock-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-cityblock-ml.txt new file mode 100644 index 0000000000000000000000000000000000000000..8fb22e62200894f9693010ebf45fb23751f4e3a6 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-cityblock-ml.txt @@ -0,0 +1 @@ + 3.2420590e+01 3.3246607e+01 3.0526910e+01 3.5166573e+01 3.1868301e+01 3.6025002e+01 3.2513623e+01 3.6557796e+01 3.3752212e+01 3.4422130e+01 3.2526018e+01 3.2581161e+01 3.3743555e+01 3.6960777e+01 3.4225270e+01 3.2965308e+01 3.4591031e+01 3.4204203e+01 3.4678123e+01 3.5728720e+01 3.0830047e+01 3.1550681e+01 3.3304790e+01 3.2676753e+01 3.2742330e+01 3.1684556e+01 3.2830915e+01 3.2956614e+01 2.7365639e+01 3.3207307e+01 3.3420925e+01 3.4357941e+01 2.8280126e+01 3.4523458e+01 3.2705274e+01 3.2455891e+01 3.1636060e+01 3.1594957e+01 3.1805202e+01 3.3886574e+01 3.3438829e+01 3.3330030e+01 3.4168514e+01 3.0637353e+01 4.2149167e+01 3.6340559e+01 2.9315308e+01 3.5778314e+01 3.7693050e+01 3.2598714e+01 3.2990836e+01 3.4967659e+01 3.9748920e+01 3.6745043e+01 2.7117550e+01 3.6014760e+01 2.9367558e+01 3.3845350e+01 3.5477339e+01 3.1513372e+01 3.2517953e+01 2.4755097e+01 3.0229897e+01 3.4799343e+01 3.3371710e+01 2.9600910e+01 3.3275088e+01 3.3567110e+01 3.4527016e+01 3.4942320e+01 3.2359383e+01 3.2607100e+01 3.1467914e+01 2.9032039e+01 3.3122878e+01 2.8496709e+01 2.9908448e+01 2.9962886e+01 3.0345299e+01 3.1737613e+01 2.8551485e+01 3.2610551e+01 3.3082660e+01 3.3719298e+01 3.6434018e+01 3.6589278e+01 3.3889586e+01 3.8036774e+01 3.1483497e+01 3.4196794e+01 3.5154035e+01 3.5488608e+01 3.6143183e+01 3.3473491e+01 3.4686446e+01 2.8687495e+01 3.5725742e+01 3.0188298e+01 3.3084534e+01 3.3538519e+01 3.6226849e+01 2.9052099e+01 3.6032733e+01 3.0811503e+01 3.2616190e+01 3.3888566e+01 3.3074570e+01 2.9683515e+01 3.0600771e+01 3.4345247e+01 3.6983843e+01 3.3692824e+01 3.3762461e+01 3.4024582e+01 3.3698854e+01 3.1238613e+01 3.4978833e+01 3.4991078e+01 3.4577741e+01 3.3749227e+01 3.4982272e+01 3.0487868e+01 3.2317632e+01 3.1125588e+01 3.4413791e+01 3.1881871e+01 3.1373821e+01 3.0416864e+01 3.2066187e+01 3.1128313e+01 3.0240249e+01 3.0125198e+01 3.1343454e+01 3.5479092e+01 3.4450767e+01 3.2953507e+01 3.4456795e+01 3.0136375e+01 3.3462150e+01 2.9894274e+01 3.1367432e+01 3.2839320e+01 3.1440398e+01 2.9400374e+01 3.1106338e+01 3.1242624e+01 3.5537892e+01 3.3056459e+01 2.8610281e+01 3.4296217e+01 3.5819772e+01 3.2503922e+01 3.0963029e+01 3.4762112e+01 3.4796284e+01 2.9645345e+01 3.4468088e+01 2.6975590e+01 3.3738555e+01 2.8825009e+01 3.2663999e+01 3.2547878e+01 3.2308091e+01 3.2489966e+01 3.0868597e+01 3.2974220e+01 3.0866111e+01 3.8197342e+01 3.0609568e+01 3.5478978e+01 2.9249184e+01 3.6185622e+01 3.1948258e+01 3.2649719e+01 3.3305650e+01 3.4643955e+01 3.6566241e+01 3.4968484e+01 3.2632218e+01 3.6741383e+01 3.5700008e+01 3.1962468e+01 3.1410623e+01 3.0412061e+01 3.3749077e+01 3.5649661e+01 3.7649263e+01 3.2832574e+01 3.1783914e+01 2.8264292e+01 diff --git 
a/voice_bridge/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt new file mode 100644 index 0000000000000000000000000000000000000000..f297500381fe33428e6725ea3b10ab6191a46fa3 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-correlation-ml-iris.txt @@ -0,0 +1 @@ + 4.0013388e-03 2.6088954e-05 1.8315482e-03 6.5266850e-04 4.1394685e-04 1.1888069e-03 4.6185289e-04 1.9233577e-03 3.4480388e-03 1.5150632e-05 1.9126718e-03 3.0974734e-03 2.2295833e-04 2.4043394e-03 5.0134320e-03 3.0165570e-03 1.3145239e-04 6.0759419e-04 1.6672981e-03 4.0036132e-03 6.1375191e-04 8.5916540e-03 3.0212269e-03 8.6923503e-03 7.7875235e-03 5.1612907e-04 2.9662451e-04 6.2402983e-04 2.7278440e-03 4.0510347e-03 3.0027154e-03 6.2616145e-03 4.1342211e-03 3.4480388e-03 1.5822510e-03 1.7143312e-03 3.4480388e-03 2.2462074e-04 6.1048465e-04 6.5190641e-04 2.4247873e-02 9.0785596e-04 2.1652052e-04 3.4845573e-03 3.2507646e-03 2.3346511e-03 4.0773355e-04 1.1278223e-04 5.0819669e-04 2.1340893e-01 2.1253858e-01 2.5193073e-01 2.9479565e-01 2.6774348e-01 2.8869785e-01 2.3348217e-01 1.9273490e-01 2.4443270e-01 2.4320510e-01 2.7679421e-01 2.1672263e-01 2.6813840e-01 2.8435705e-01 1.5561363e-01 2.0057173e-01 2.7812139e-01 2.2900256e-01 3.4724680e-01 2.3882260e-01 2.9132931e-01 2.0333645e-01 3.5307051e-01 2.8812452e-01 2.1722530e-01 2.1423111e-01 2.7396952e-01 2.9207940e-01 2.6626182e-01 1.7106032e-01 2.4279706e-01 2.2559055e-01 2.0940857e-01 3.8432412e-01 2.9354670e-01 2.0829958e-01 2.3669414e-01 3.0463326e-01 2.1035851e-01 2.6623117e-01 3.0835417e-01 2.5871089e-01 2.3465249e-01 2.0319416e-01 2.6292582e-01 2.1771735e-01 2.3212816e-01 2.2399387e-01 1.3799316e-01 2.3049526e-01 4.8512087e-01 4.2535066e-01 4.0184471e-01 4.1903049e-01 4.4627199e-01 4.4692268e-01 4.3569888e-01 4.2673251e-01 4.5731950e-01 3.7438176e-01 3.0619251e-01 4.0039114e-01 3.7245195e-01 4.5829878e-01 4.5814844e-01 3.6107062e-01 3.7600936e-01 3.7662883e-01 5.2492832e-01 4.2684428e-01 3.7975064e-01 4.0636707e-01 4.6364339e-01 3.4607190e-01 3.6988036e-01 3.6764668e-01 3.2524634e-01 3.1943549e-01 4.4481193e-01 3.5496498e-01 4.1356534e-01 3.2082320e-01 4.5322964e-01 3.4300770e-01 4.4485158e-01 3.9755578e-01 3.9702418e-01 3.7202285e-01 3.1131344e-01 3.4018064e-01 4.0217537e-01 3.1441868e-01 4.2535066e-01 4.1533176e-01 3.9695242e-01 3.5313531e-01 3.9400199e-01 3.4652657e-01 3.6608320e-01 3.6684161e-01 3.3929143e-03 2.6033698e-03 7.7673212e-03 6.4081099e-03 9.2794464e-03 2.8819447e-03 1.4536586e-03 9.6714455e-04 3.7992387e-03 5.9342609e-03 3.9974031e-04 6.0694735e-03 9.1304628e-03 1.7655983e-02 1.1643899e-02 4.0363794e-03 1.6463709e-03 1.0706739e-02 6.7984475e-04 7.6845878e-03 2.3516587e-02 9.9502337e-05 1.0315881e-02 1.0821735e-03 1.8887942e-03 2.4624674e-03 1.5760536e-03 3.6638868e-03 1.6253664e-03 7.8762517e-04 1.9487010e-02 1.6211862e-02 9.6714455e-04 2.2382105e-03 2.1712385e-03 9.6714455e-04 2.9674185e-03 1.9068589e-03 6.4555509e-03 8.8254342e-03 8.1777355e-03 3.4663084e-03 9.6481454e-03 9.7747764e-05 1.0706793e-02 4.2246850e-03 4.9836128e-03 1.6613867e-03 1.6856078e-01 1.6930583e-01 2.0381801e-01 2.4171317e-01 2.1689289e-01 2.4212069e-01 1.9027913e-01 1.5127382e-01 1.9696970e-01 1.9830901e-01 2.2503195e-01 1.7290786e-01 2.1618942e-01 2.3648275e-01 1.1720113e-01 1.5636322e-01 2.3357633e-01 1.8548772e-01 2.8791738e-01 1.9215793e-01 2.4470933e-01 1.5850128e-01 2.9662484e-01 2.4061109e-01 1.7172858e-01 1.6853658e-01 2.2283143e-01 2.4032537e-01 2.1849821e-01 1.2975740e-01 1.9502432e-01 
1.7953012e-01 1.6504306e-01 3.2966120e-01 2.4985160e-01 1.6946778e-01 1.8991741e-01 2.4919698e-01 1.7046257e-01 2.1691882e-01 2.6018338e-01 2.1310883e-01 1.8776864e-01 1.5909082e-01 2.1620321e-01 1.7782256e-01 1.8911127e-01 1.7894094e-01 9.9649433e-02 1.8605378e-01 4.2702260e-01 3.6742642e-01 3.4284735e-01 3.6351462e-01 3.8703088e-01 3.8643219e-01 3.8033312e-01 3.6849688e-01 3.9561383e-01 3.1894438e-01 2.5437467e-01 3.4129756e-01 3.1464030e-01 3.9617829e-01 3.9513061e-01 3.0506617e-01 3.2181902e-01 3.2465822e-01 4.5849285e-01 3.6615509e-01 3.2201598e-01 3.4969244e-01 4.0175045e-01 2.8934891e-01 3.1614551e-01 3.1385643e-01 2.7059114e-01 2.6798818e-01 3.8429703e-01 3.0087709e-01 3.5349775e-01 2.7097983e-01 3.9156246e-01 2.9045741e-01 3.8972428e-01 3.3603762e-01 3.4254663e-01 3.1960575e-01 2.6050327e-01 2.8406500e-01 3.4225008e-01 2.5735736e-01 3.6742642e-01 3.5724408e-01 3.3861338e-01 2.9412113e-01 3.3288556e-01 2.9101723e-01 3.1374321e-01 3.1516519e-01 1.6665208e-03 9.3886805e-04 6.2270349e-04 1.5623211e-03 3.9549141e-04 1.6439076e-03 3.0144044e-03 3.0583810e-05 2.0234943e-03 2.6246966e-03 3.8983492e-04 2.5645160e-03 5.6944285e-03 3.3339055e-03 1.2831328e-04 4.1302346e-04 2.1101667e-03 3.4972086e-03 8.7482704e-04 9.4271115e-03 2.5080125e-03 8.7042936e-03 7.0125369e-03 3.5088415e-04 1.9451019e-04 3.9574419e-04 2.5986219e-03 3.6402032e-03 2.4900748e-03 7.0784673e-03 4.7935149e-03 3.0144044e-03 1.2869381e-03 1.4017897e-03 3.0144044e-03 1.7197161e-04 4.4525534e-04 7.8138074e-04 2.2693053e-02 1.2229281e-03 1.5754704e-04 3.7899670e-03 2.6957902e-03 2.7721274e-03 4.5733371e-04 2.2324575e-04 3.1003560e-04 2.1002397e-01 2.0931874e-01 2.4834042e-01 2.9081912e-01 2.6391644e-01 2.8536997e-01 2.3033428e-01 1.8962461e-01 2.4088505e-01 2.3991724e-01 2.7290008e-01 2.1345771e-01 2.6419304e-01 2.8088883e-01 1.5266518e-01 1.9720348e-01 2.7496549e-01 2.2580939e-01 3.4275207e-01 2.3533918e-01 2.8800391e-01 1.9991219e-01 3.4890690e-01 2.8470246e-01 2.1378625e-01 2.1075909e-01 2.7013290e-01 2.8823552e-01 2.6275308e-01 1.6787442e-01 2.3921107e-01 2.2212337e-01 2.0605918e-01 3.8041788e-01 2.9051019e-01 2.0550537e-01 2.3319159e-01 3.0043218e-01 2.0746652e-01 2.6256228e-01 3.0491843e-01 2.5539836e-01 2.3113139e-01 1.9984797e-01 2.5951290e-01 2.1484809e-01 2.2899468e-01 2.2062653e-01 1.3495710e-01 2.2721304e-01 4.8106467e-01 4.2120214e-01 3.9753504e-01 4.1511036e-01 4.4203186e-01 4.4255637e-01 4.3182495e-01 4.2255527e-01 4.5284888e-01 3.7037516e-01 3.0238339e-01 3.9606802e-01 3.6819494e-01 4.5378706e-01 4.5354234e-01 3.5697379e-01 3.7213234e-01 3.7297305e-01 5.2009409e-01 4.2241474e-01 3.7552000e-01 4.0230555e-01 4.5916611e-01 3.4185974e-01 3.6603540e-01 3.6379117e-01 3.2119479e-01 3.1570003e-01 4.4043880e-01 3.5104995e-01 4.0917086e-01 3.1725229e-01 4.4875464e-01 3.3921954e-01 4.4101743e-01 3.9296628e-01 3.9316311e-01 3.6831351e-01 3.0762140e-01 3.3601664e-01 3.9776902e-01 3.1006892e-01 4.2120214e-01 4.1114584e-01 3.9269988e-01 3.4869458e-01 3.8944692e-01 3.4244384e-01 3.6236872e-01 3.6319420e-01 3.2811792e-03 2.1674206e-03 3.8606330e-03 4.5444049e-04 1.6669051e-04 6.9315236e-04 1.5191179e-03 7.4896915e-04 1.0486334e-03 3.0115188e-03 8.3553530e-03 1.1528814e-02 9.5172421e-03 2.7099731e-03 7.1677618e-04 4.9455548e-03 1.1260396e-03 4.0113810e-03 1.7041109e-02 1.7048436e-03 3.2998306e-03 3.5839458e-03 6.5708756e-04 7.5073414e-04 1.6739794e-03 1.4404874e-04 6.5489426e-04 3.9918560e-03 9.7678136e-03 9.5698494e-03 6.9315236e-04 4.1051921e-03 4.2098821e-03 6.9315236e-04 7.7852178e-04 5.0066998e-04 4.6641147e-03 
1.9877450e-02 2.9999880e-03 2.6696154e-03 2.3124511e-03 2.6940762e-03 3.6188953e-03 7.8131154e-04 1.7395433e-03 1.1236329e-03 1.8068407e-01 1.7911784e-01 2.1632614e-01 2.5732132e-01 2.3189913e-01 2.4898678e-01 1.9775994e-01 1.6095697e-01 2.0933838e-01 2.0710048e-01 2.4048237e-01 1.8306865e-01 2.3298507e-01 2.4543409e-01 1.2760114e-01 1.6921167e-01 2.3873595e-01 1.9385405e-01 3.0859675e-01 2.0396213e-01 2.5141439e-01 1.7197884e-01 3.1193260e-01 2.4875015e-01 1.8437187e-01 1.8189435e-01 2.3759181e-01 2.5412614e-01 2.2898904e-01 1.4232827e-01 2.0806035e-01 1.9199138e-01 1.7693745e-01 3.4002004e-01 2.5277786e-01 1.7384453e-01 2.0213849e-01 2.6767804e-01 1.7597754e-01 2.2968366e-01 2.6752309e-01 2.2134041e-01 2.0039734e-01 1.7140536e-01 2.2556075e-01 1.8258974e-01 1.9648053e-01 1.9006393e-01 1.1321036e-01 1.9554999e-01 4.3600761e-01 3.7954719e-01 3.5812457e-01 3.7278397e-01 3.9968201e-01 4.0081518e-01 3.8843945e-01 3.8096257e-01 4.1111272e-01 3.3107441e-01 2.6691137e-01 3.5682832e-01 3.3041468e-01 4.1223120e-01 4.1255773e-01 3.1903974e-01 3.3211211e-01 3.3199965e-01 4.7707633e-01 3.8217339e-01 3.3708398e-01 3.6131644e-01 4.1712805e-01 3.0571403e-01 3.2625308e-01 3.2419755e-01 2.8563873e-01 2.7883266e-01 3.9884997e-01 3.1256889e-01 3.6952768e-01 2.7953122e-01 4.0726631e-01 3.0093515e-01 3.9702583e-01 3.5566883e-01 3.5181973e-01 3.2782384e-01 2.7114516e-01 3.0000941e-01 3.5891976e-01 2.7762255e-01 3.7954719e-01 3.7024637e-01 3.5327118e-01 3.1362016e-01 3.5214869e-01 3.0546169e-01 3.2226324e-01 3.2277503e-01 1.1668032e-04 8.6044327e-05 1.4968429e-03 3.9691382e-03 6.2388252e-03 7.0588028e-04 1.9691519e-03 6.1279520e-03 1.9660913e-04 2.5761274e-03 2.5168387e-03 2.4029967e-03 9.7429318e-04 2.3122381e-03 2.3682626e-04 7.1643085e-03 1.3128642e-04 5.3939078e-03 6.2992904e-03 9.0353935e-03 1.2266741e-02 2.0706893e-03 1.5408774e-03 2.5522607e-03 3.9522692e-03 6.6899152e-03 6.3980861e-03 2.9039306e-03 1.6942568e-03 6.2388252e-03 3.9336207e-03 4.1533642e-03 6.2388252e-03 1.2180733e-03 2.1518176e-03 8.8270219e-04 3.2743785e-02 7.7572782e-05 1.3417853e-03 2.5322339e-03 6.7844678e-03 8.2761566e-04 8.6236157e-04 3.1792685e-04 2.2579028e-03 2.2976852e-01 2.2800773e-01 2.6917639e-01 3.1391346e-01 2.8619117e-01 3.0434544e-01 2.4844431e-01 2.0773635e-01 2.6148944e-01 2.5886513e-01 2.9555853e-01 2.3241008e-01 2.8723433e-01 3.0077518e-01 1.6999944e-01 2.1692490e-01 2.9290302e-01 2.4423212e-01 3.6894940e-01 2.5556289e-01 3.0695160e-01 2.1997775e-01 3.7292526e-01 3.0427758e-01 2.3385552e-01 2.3106102e-01 2.9243468e-01 3.1048744e-01 2.8298832e-01 1.8662712e-01 2.6007204e-01 2.4232174e-01 2.2560003e-01 4.0265980e-01 3.0762748e-01 2.2151447e-01 2.5355085e-01 3.2493368e-01 2.2408199e-01 2.8382455e-01 3.2448802e-01 2.7442187e-01 2.5162228e-01 2.1940897e-01 2.7915380e-01 2.3127910e-01 2.4702051e-01 2.4019206e-01 1.5301315e-01 2.4619533e-01 5.0391216e-01 4.4483392e-01 4.2228707e-01 4.3731273e-01 4.6617394e-01 4.6750253e-01 4.5367757e-01 4.4636521e-01 4.7842690e-01 3.9329580e-01 3.2434154e-01 4.2091234e-01 3.9272980e-01 4.7962551e-01 4.7998875e-01 3.8052757e-01 3.9422077e-01 3.9365636e-01 5.4778535e-01 4.4784024e-01 3.9985415e-01 4.2545240e-01 4.8476479e-01 3.6622351e-01 3.8794414e-01 3.8577592e-01 3.4462011e-01 3.3712507e-01 4.6543611e-01 3.7346604e-01 4.3442200e-01 3.3762412e-01 4.7437523e-01 3.6087712e-01 4.6258851e-01 4.1954525e-01 4.1507031e-01 3.8935328e-01 3.2880662e-01 3.6009673e-01 4.2314218e-01 3.3549029e-01 4.4483392e-01 4.3505302e-01 4.1710447e-01 3.7450914e-01 4.1581746e-01 3.6597096e-01 3.8346411e-01 
3.8386195e-01 2.7739415e-04 8.2117467e-04 2.7843462e-03 4.7394226e-03 3.9365385e-04 1.1964598e-03 4.7400628e-03 2.5527396e-04 3.2634446e-03 3.7103657e-03 3.3188195e-03 8.6302611e-04 1.5635411e-03 6.0189508e-04 5.5859876e-03 3.8282951e-04 7.0925635e-03 5.0273924e-03 7.3470160e-03 1.0223636e-02 1.3503463e-03 9.3049535e-04 1.9663208e-03 2.7155903e-03 5.0798223e-03 5.5875952e-03 3.5987384e-03 2.6550151e-03 4.7394226e-03 3.5923878e-03 3.7937786e-03 4.7394226e-03 6.6480476e-04 1.3814035e-03 1.1581699e-03 3.0091048e-02 1.0888067e-04 1.1634967e-03 1.9052023e-03 5.6332034e-03 7.9034466e-04 3.5005887e-04 1.0179107e-04 1.6076683e-03 2.2018795e-01 2.1841167e-01 2.5888963e-01 3.0298911e-01 2.7568827e-01 2.9345793e-01 2.3845526e-01 1.9853879e-01 2.5133209e-01 2.4870042e-01 2.8491736e-01 2.2273608e-01 2.7678086e-01 2.8993699e-01 1.6165333e-01 2.0762176e-01 2.8220337e-01 2.3432093e-01 3.5743296e-01 2.4549712e-01 2.9602684e-01 2.1063470e-01 3.6116406e-01 2.9338620e-01 2.2421094e-01 2.2149308e-01 2.8182182e-01 2.9956540e-01 2.7243830e-01 1.7796817e-01 2.4995634e-01 2.3251210e-01 2.1609445e-01 3.9047864e-01 2.9675065e-01 2.1202908e-01 2.4353030e-01 3.1395037e-01 2.1453916e-01 2.7329907e-01 3.1330772e-01 2.6399518e-01 2.4164659e-01 2.1003834e-01 2.6865531e-01 2.2160867e-01 2.3705638e-01 2.3039039e-01 1.4523591e-01 2.3625874e-01 4.9071957e-01 4.3219633e-01 4.0993016e-01 4.2475440e-01 4.5332327e-01 4.5465382e-01 4.4096188e-01 4.3371359e-01 4.6548638e-01 3.8123224e-01 3.1318896e-01 4.0857587e-01 3.8073064e-01 4.6668253e-01 4.6707002e-01 3.6864305e-01 3.8213773e-01 3.8159377e-01 5.3428461e-01 4.3521869e-01 3.8775353e-01 4.1301893e-01 4.7176152e-01 3.5457739e-01 3.7593537e-01 3.7379344e-01 3.3323181e-01 3.2577190e-01 4.5261005e-01 3.6164142e-01 4.2194514e-01 3.2625778e-01 4.6147754e-01 3.4920516e-01 4.4979521e-01 4.0734174e-01 4.0275102e-01 3.7733353e-01 3.1756791e-01 3.4852048e-01 4.1080565e-01 3.2443308e-01 4.3219633e-01 4.2252463e-01 4.0479526e-01 3.6286403e-01 4.0364435e-01 3.5428112e-01 3.7151258e-01 3.7191203e-01 2.0478784e-03 4.7860280e-03 7.2727783e-03 1.2329906e-03 2.0550268e-03 7.3066158e-03 5.3810576e-04 3.2245377e-03 2.1674888e-03 2.7856851e-03 1.6387773e-03 3.1323423e-03 6.7307381e-05 8.3332066e-03 3.3954200e-04 4.9119026e-03 7.6276175e-03 8.9240504e-03 1.3851706e-02 2.8347122e-03 2.2069601e-03 3.5278340e-03 4.3892066e-03 7.6179982e-03 7.9398397e-03 2.0003115e-03 1.2885024e-03 7.2727783e-03 5.1809110e-03 5.4327573e-03 7.2727783e-03 1.7938257e-03 2.8924302e-03 1.4132511e-03 3.5877316e-02 5.5425651e-05 2.1070391e-03 2.2246722e-03 8.2675293e-03 4.8309816e-04 1.2152730e-03 6.6498554e-04 3.1323983e-03 2.3387083e-01 2.3171300e-01 2.7340823e-01 3.1873143e-01 2.9087099e-01 3.0765106e-01 2.5178166e-01 2.1138210e-01 2.6568600e-01 2.6244481e-01 3.0032621e-01 2.3618270e-01 2.9221666e-01 3.0443867e-01 1.7368514e-01 2.2112593e-01 2.9589691e-01 2.4771596e-01 3.7467992e-01 2.5965416e-01 3.1023215e-01 2.2429067e-01 3.7775475e-01 3.0780450e-01 2.3805322e-01 2.3537447e-01 2.9708156e-01 3.1499475e-01 2.8689460e-01 1.9071775e-01 2.6437980e-01 2.4650321e-01 2.2965596e-01 4.0666307e-01 3.1024566e-01 2.2426781e-01 2.5770992e-01 3.3024819e-01 2.2703835e-01 2.8812097e-01 3.2789845e-01 2.7792695e-01 2.5584753e-01 2.2352457e-01 2.8285984e-01 2.3411709e-01 2.5033634e-01 2.4414228e-01 1.5718330e-01 2.4987685e-01 5.0772777e-01 4.4916856e-01 4.2715006e-01 4.4114868e-01 4.7061118e-01 4.7223859e-01 4.5731921e-01 4.5076024e-01 4.8335950e-01 3.9759816e-01 3.2864932e-01 4.2581769e-01 3.9765561e-01 4.8465345e-01 4.8525127e-01 
3.8513607e-01 3.9820652e-01 3.9712803e-01 5.5326906e-01 4.5284577e-01 4.0466523e-01 4.2968979e-01 4.8967945e-01 3.7122633e-01 3.9189300e-01 3.8976354e-01 3.4937792e-01 3.4115590e-01 4.7020033e-01 3.7767688e-01 4.3942138e-01 3.4125876e-01 4.7934102e-01 3.6486747e-01 4.6609349e-01 4.2514437e-01 4.1889348e-01 3.9297481e-01 3.3279384e-01 3.6502235e-01 4.2824304e-01 3.4111505e-01 4.4916856e-01 4.3953404e-01 4.2185806e-01 3.8004789e-01 4.2135185e-01 3.7064704e-01 3.8713375e-01 3.8737322e-01 5.9378937e-04 1.6263483e-03 3.1194349e-04 8.5089275e-04 1.6365846e-03 1.1579874e-03 4.9430863e-03 7.7957878e-03 5.8209267e-03 9.7423596e-04 2.1559031e-04 2.8280232e-03 2.1261057e-03 1.8496545e-03 1.2342594e-02 1.9347552e-03 5.4995961e-03 5.2624400e-03 1.3773080e-04 7.1496401e-05 7.1145768e-04 9.6706058e-04 1.9028496e-03 3.0842001e-03 7.5087003e-03 6.3709632e-03 1.6263483e-03 2.4219636e-03 2.5416684e-03 1.6263483e-03 4.5881830e-05 1.0508341e-04 2.2101780e-03 2.1711060e-02 1.4779987e-03 1.0004664e-03 2.4029906e-03 2.5527616e-03 2.4859397e-03 1.2918144e-04 4.6388898e-04 3.7292268e-04 1.9674800e-01 1.9551090e-01 2.3384591e-01 2.7581575e-01 2.4956432e-01 2.6852776e-01 2.1529223e-01 1.7652116e-01 2.2659936e-01 2.2483764e-01 2.5838801e-01 1.9958250e-01 2.5031938e-01 2.6459593e-01 1.4127677e-01 1.8459488e-01 2.5809699e-01 2.1110498e-01 3.2773890e-01 2.2109976e-01 2.7105695e-01 1.8736680e-01 3.3227725e-01 2.6813261e-01 2.0050552e-01 1.9777445e-01 2.5552383e-01 2.7284453e-01 2.4733170e-01 1.5638593e-01 2.2514733e-01 2.0849921e-01 1.9287136e-01 3.6191982e-01 2.7281863e-01 1.9071038e-01 2.1912633e-01 2.8593968e-01 1.9281596e-01 2.4768186e-01 2.8763479e-01 2.3971138e-01 2.1723753e-01 1.8699948e-01 2.4393940e-01 1.9979806e-01 2.1397418e-01 2.0672587e-01 1.2529166e-01 2.1270878e-01 4.6033946e-01 4.0222602e-01 3.7977808e-01 3.9565939e-01 4.2276626e-01 4.2367223e-01 4.1181849e-01 4.0362790e-01 4.3403229e-01 3.5248619e-01 2.8628855e-01 3.7840466e-01 3.5121985e-01 4.3508597e-01 4.3518504e-01 3.3982310e-01 3.5380487e-01 3.5403534e-01 5.0086936e-01 4.0431760e-01 3.5820087e-01 3.8360776e-01 4.4020334e-01 3.2567463e-01 3.4780712e-01 3.4566354e-01 3.0520609e-01 2.9886193e-01 4.2163480e-01 3.3351442e-01 3.9135037e-01 2.9988740e-01 4.3006423e-01 3.2170490e-01 4.2068520e-01 3.7643926e-01 3.7416586e-01 3.4964963e-01 2.9095264e-01 3.1987117e-01 3.8035231e-01 2.9582266e-01 4.0222602e-01 3.9256940e-01 3.7489846e-01 3.3318735e-01 3.7289799e-01 3.2576051e-01 3.4389968e-01 3.4452790e-01 2.6022528e-04 1.6357171e-03 1.5776794e-03 3.8821876e-04 3.3383101e-03 8.1348232e-03 1.2673676e-02 9.6454978e-03 2.5951087e-03 4.7710550e-04 5.9640558e-03 5.0935416e-04 4.5005321e-03 1.8295131e-02 8.0881241e-04 4.5548809e-03 2.4342161e-03 4.9437559e-04 7.1285200e-04 1.1827453e-03 5.3640347e-04 3.9274104e-04 2.7126384e-03 1.1795471e-02 1.0834355e-02 2.6022528e-04 3.1798475e-03 3.2407554e-03 2.6022528e-04 8.6271302e-04 3.7982619e-04 4.6816553e-03 1.6563494e-02 3.8583498e-03 2.4190920e-03 3.6876972e-03 1.5540748e-03 4.9548680e-03 1.1822578e-03 2.1020911e-03 7.8943086e-04 1.7690932e-01 1.7592254e-01 2.1244374e-01 2.5265403e-01 2.2738778e-01 2.4651370e-01 1.9514374e-01 1.5780110e-01 2.0549282e-01 2.0415245e-01 2.3585798e-01 1.7978451e-01 2.2802648e-01 2.4243736e-01 1.2428265e-01 1.6526016e-01 2.3669421e-01 1.9101658e-01 3.0265561e-01 2.0025949e-01 2.4898144e-01 1.6786898e-01 3.0733331e-01 2.4595698e-01 1.8046568e-01 1.7781045e-01 2.3314059e-01 2.4991164e-01 2.2560913e-01 1.3845901e-01 2.0404813e-01 1.8812805e-01 1.7322140e-01 3.3666075e-01 2.5129679e-01 
1.7201674e-01 1.9833201e-01 2.6229128e-01 1.7386253e-01 2.2573363e-01 2.6492814e-01 2.1852944e-01 1.9648937e-01 1.6758641e-01 2.2246603e-01 1.8066071e-01 1.9389274e-01 1.8653629e-01 1.0911288e-01 1.9242842e-01 4.3299781e-01 3.7574545e-01 3.5353088e-01 3.6969868e-01 3.9574789e-01 3.9644649e-01 3.8564737e-01 3.7707427e-01 4.0646475e-01 3.2727058e-01 2.6301141e-01 3.5217112e-01 3.2569733e-01 4.0744443e-01 4.0742674e-01 3.1477415e-01 3.2876954e-01 3.2939944e-01 4.7166141e-01 3.7739415e-01 3.3254323e-01 3.5763917e-01 4.1251084e-01 3.0085067e-01 3.2295735e-01 3.2084322e-01 2.8110727e-01 2.7535557e-01 3.9443856e-01 3.0887601e-01 3.6474538e-01 2.7663010e-01 4.0256676e-01 2.9754793e-01 3.9443683e-01 3.4998590e-01 3.4873324e-01 3.2500314e-01 2.6771990e-01 2.9525187e-01 3.5397733e-01 2.7178914e-01 3.7574545e-01 3.6622316e-01 3.4883278e-01 3.0797294e-01 3.4655783e-01 3.0107914e-01 3.1936679e-01 3.2010756e-01 3.0868881e-03 2.8691382e-03 1.3643967e-04 5.3429196e-03 1.0581034e-02 1.6459895e-02 1.2551534e-02 4.1576220e-03 1.2160579e-03 8.7064401e-03 5.4418849e-05 6.8214146e-03 2.2732626e-02 5.6586090e-04 4.9831593e-03 1.1314391e-03 1.3090644e-03 1.7262411e-03 1.9770194e-03 1.0389613e-03 9.3641634e-05 2.8067647e-03 1.5471006e-02 1.4411876e-02 0.0000000e+00 4.0251529e-03 4.0402030e-03 0.0000000e+00 2.0104773e-03 1.1579294e-03 6.7733512e-03 1.3328703e-02 6.1195429e-03 3.8280621e-03 5.4471697e-03 1.3203495e-03 7.3930866e-03 2.5511055e-03 3.8011014e-03 1.5935161e-03 1.6488937e-01 1.6420208e-01 1.9946134e-01 2.3841307e-01 2.1377841e-01 2.3352484e-01 1.8323739e-01 1.4660873e-01 1.9269692e-01 1.9183992e-01 2.2200730e-01 1.6791595e-01 2.1423078e-01 2.2922867e-01 1.1407468e-01 1.5349471e-01 2.2418179e-01 1.7908948e-01 2.8692621e-01 1.8765985e-01 2.3596576e-01 1.5596498e-01 2.9203641e-01 2.3278981e-01 1.6829150e-01 1.6563525e-01 2.1942293e-01 2.3592536e-01 2.1256421e-01 1.2755291e-01 1.9121342e-01 1.7576718e-01 1.6132929e-01 3.2148865e-01 2.3885357e-01 1.6118125e-01 1.8573297e-01 2.4757051e-01 1.6279862e-01 2.1240660e-01 2.5149161e-01 2.0595435e-01 1.8389163e-01 1.5580881e-01 2.0964365e-01 1.6953436e-01 1.8203376e-01 1.7437090e-01 9.9184065e-02 1.8031354e-01 4.1664204e-01 3.5971944e-01 3.3744612e-01 3.5416868e-01 3.7936058e-01 3.7982315e-01 3.7006184e-01 3.6098196e-01 3.8956200e-01 3.1201251e-01 2.4889912e-01 3.3607844e-01 3.1002022e-01 3.9046127e-01 3.9028467e-01 2.9949936e-01 3.1373736e-01 3.1479561e-01 4.5355800e-01 3.6085089e-01 3.1682959e-01 3.4195608e-01 3.9553949e-01 2.8555816e-01 3.0804941e-01 3.0593834e-01 2.6633772e-01 2.6121338e-01 3.7782244e-01 2.9399549e-01 3.4839508e-01 2.6278429e-01 3.8569374e-01 2.8303600e-01 3.7885421e-01 3.3349568e-01 3.3352424e-01 3.1033775e-01 2.5375578e-01 2.8011018e-01 3.3772574e-01 2.5671985e-01 3.5971944e-01 3.5022311e-01 3.3289784e-01 2.9224130e-01 3.3016010e-01 2.8599652e-01 3.0475125e-01 3.0561759e-01 1.6232219e-03 2.8110674e-03 3.0992704e-04 2.8009412e-03 5.3859157e-03 3.4428307e-03 2.2389965e-04 4.8865483e-04 1.7600776e-03 3.6417064e-03 7.4576509e-04 9.1296419e-03 2.8123816e-03 8.0068765e-03 7.3379880e-03 3.9579356e-04 1.9713653e-04 6.0194434e-04 2.3367622e-03 3.6239908e-03 3.0185118e-03 6.3202257e-03 4.4046298e-03 3.0868881e-03 1.7141363e-03 1.8461990e-03 3.0868881e-03 1.2729358e-04 4.6571753e-04 8.6393185e-04 2.3903803e-02 9.0441478e-04 3.0528309e-04 3.1648190e-03 3.1223857e-03 2.2339929e-03 2.7724170e-04 9.5102356e-05 4.3913729e-04 2.1061066e-01 2.0964712e-01 2.4888926e-01 2.9163776e-01 2.6471732e-01 2.8522189e-01 2.3035873e-01 1.8998369e-01 2.4143327e-01 
2.4006731e-01 2.7373417e-01 2.1381600e-01 2.6519773e-01 2.8097864e-01 1.5319882e-01 1.9790190e-01 2.7464921e-01 2.2594124e-01 3.4406181e-01 2.3583526e-01 2.8783351e-01 2.0067586e-01 3.4959337e-01 2.8469567e-01 2.1442337e-01 2.1148410e-01 2.7089383e-01 2.8885413e-01 2.6304757e-01 1.6861411e-01 2.3983808e-01 2.2272157e-01 2.0662897e-01 3.8050402e-01 2.8992390e-01 2.0524163e-01 2.3373909e-01 3.0156183e-01 2.0732304e-01 2.6311208e-01 3.0478863e-01 2.5545595e-01 2.3172924e-01 2.0047959e-01 2.5968741e-01 2.1460680e-01 2.2900957e-01 2.2107714e-01 1.3590375e-01 2.2746759e-01 4.8087096e-01 4.2142796e-01 3.9814594e-01 4.1502871e-01 4.4228995e-01 4.4300705e-01 4.3159396e-01 4.2281768e-01 4.5341178e-01 3.7067256e-01 3.0283474e-01 3.9670954e-01 3.6890449e-01 4.5441105e-01 4.5432159e-01 3.5749751e-01 3.7222314e-01 3.7273757e-01 5.2092654e-01 4.2307513e-01 3.7613897e-01 4.0250144e-01 4.5970761e-01 3.4267709e-01 3.6611458e-01 3.6389952e-01 3.2189682e-01 3.1593975e-01 4.4091119e-01 3.5132753e-01 4.0985044e-01 3.1723537e-01 4.4934566e-01 3.3938050e-01 4.4068685e-01 3.9407776e-01 3.9311145e-01 3.6818097e-01 3.0785214e-01 3.3679573e-01 3.9853670e-01 3.1138703e-01 4.2142796e-01 4.1148317e-01 3.9324794e-01 3.4985868e-01 3.9052142e-01 3.4304315e-01 3.6227810e-01 3.6300235e-01 3.5297445e-03 2.3993465e-03 8.1469449e-03 8.4116551e-03 8.4748907e-03 3.0443320e-03 1.8587915e-03 2.8140158e-03 3.7033592e-03 2.9317197e-03 1.3265454e-02 4.5015236e-03 2.6265400e-03 7.5415562e-03 1.6353241e-03 1.4046892e-03 3.1172532e-03 6.0159720e-04 2.6265400e-03 7.0517547e-03 5.5270478e-03 6.4518235e-03 2.8691382e-03 6.1014438e-03 6.3013427e-03 2.8691382e-03 1.1615614e-03 1.4498289e-03 4.4069463e-03 2.8268404e-02 1.4610903e-03 3.3082950e-03 4.5136771e-04 5.8170191e-03 1.2876475e-03 5.5964987e-04 1.3152056e-03 2.3446691e-03 1.9624436e-01 1.9365486e-01 2.3272539e-01 2.7570910e-01 2.4962218e-01 2.6341222e-01 2.1161968e-01 1.7505868e-01 2.2555325e-01 2.2171510e-01 2.5853430e-01 1.9783603e-01 2.5146360e-01 2.6075203e-01 1.4121466e-01 1.8483453e-01 2.5221719e-01 2.0803875e-01 3.2980663e-01 2.1983733e-01 2.6580206e-01 1.8792174e-01 3.3096405e-01 2.6375229e-01 2.0022607e-01 1.9799659e-01 2.5530781e-01 2.7170116e-01 2.4472945e-01 1.5721426e-01 2.2453551e-01 2.0791989e-01 1.9232403e-01 3.5721533e-01 2.6541719e-01 1.8583170e-01 2.1815822e-01 2.8740780e-01 1.8853156e-01 2.4642239e-01 2.8243457e-01 2.3594931e-01 2.1655773e-01 1.8685267e-01 2.4074340e-01 1.9492750e-01 2.1026662e-01 2.0538411e-01 1.2769778e-01 2.1026662e-01 4.5350142e-01 3.9795187e-01 3.7769065e-01 3.8983341e-01 4.1851849e-01 4.2042793e-01 4.0510768e-01 3.9953508e-01 4.3130939e-01 3.4895812e-01 2.8413412e-01 3.7648942e-01 3.4987508e-01 4.3267862e-01 4.3359924e-01 3.3758650e-01 3.4918773e-01 3.4772733e-01 4.9915788e-01 4.0231358e-01 3.5632097e-01 3.7931174e-01 4.3732697e-01 3.2511301e-01 3.4317696e-01 3.4120231e-01 3.0420772e-01 2.9548175e-01 4.1851849e-01 3.3003548e-01 3.8954108e-01 2.9516106e-01 4.2751620e-01 3.1771485e-01 4.1340131e-01 3.7704473e-01 3.6865281e-01 3.4390717e-01 2.8759676e-01 3.1915503e-01 3.7909158e-01 2.9821954e-01 3.9795187e-01 3.8894782e-01 3.7251565e-01 3.3442155e-01 3.7333196e-01 3.2403985e-01 3.3841983e-01 3.3852014e-01 4.9713816e-03 9.2903476e-03 1.5944722e-02 1.1386125e-02 3.5402300e-03 9.6201776e-04 8.6911918e-03 9.4953207e-05 6.4447745e-03 2.1940505e-02 1.4660203e-04 6.6774582e-03 1.0681613e-03 1.0987220e-03 1.5397922e-03 1.3925202e-03 1.6744918e-03 4.5493239e-04 1.7274513e-03 1.6063124e-02 1.4167352e-02 1.3643967e-04 2.8974888e-03 2.8893579e-03 
1.3643967e-04 1.8844794e-03 1.0195101e-03 5.9835494e-03 1.1907169e-02 6.2144210e-03 3.1461009e-03 6.4603159e-03 6.0804116e-04 7.9398115e-03 2.6608779e-03 3.6611489e-03 1.1878605e-03 1.6827914e-01 1.6808878e-01 2.0330224e-01 2.4208927e-01 2.1725982e-01 2.3901101e-01 1.8791090e-01 1.5022413e-01 1.9646923e-01 1.9636982e-01 2.2550082e-01 1.7178784e-01 2.1730098e-01 2.3423675e-01 1.1690649e-01 1.5652486e-01 2.2988556e-01 1.8351752e-01 2.8999988e-01 1.9148148e-01 2.4151451e-01 1.5889324e-01 2.9641885e-01 2.3800995e-01 1.7162027e-01 1.6875690e-01 2.2303923e-01 2.3997805e-01 2.1702847e-01 1.3016155e-01 1.9481438e-01 1.7925806e-01 1.6471086e-01 3.2723835e-01 2.4516700e-01 1.6613136e-01 1.8943302e-01 2.5069233e-01 1.6755119e-01 2.1637456e-01 2.5710224e-01 2.1080083e-01 1.8747239e-01 1.5900127e-01 2.1430731e-01 1.7454101e-01 1.8671199e-01 1.7813660e-01 1.0093430e-01 1.8452279e-01 4.2348788e-01 3.6545764e-01 3.4229982e-01 3.6044592e-01 3.8515670e-01 3.8525367e-01 3.7671058e-01 3.6665866e-01 3.9483186e-01 3.1729700e-01 2.5339335e-01 3.4086300e-01 3.1448994e-01 3.9561710e-01 3.9513385e-01 3.0425707e-01 3.1942407e-01 3.2109044e-01 4.5863634e-01 3.6575820e-01 3.2152631e-01 3.4763745e-01 4.0088503e-01 2.8963041e-01 3.1371721e-01 3.1153634e-01 2.7048666e-01 2.6621754e-01 3.8319922e-01 2.9918600e-01 3.5318564e-01 2.6828212e-01 3.9088644e-01 2.8836369e-01 3.8573578e-01 3.3732047e-01 3.3961180e-01 3.1641339e-01 2.5871452e-01 2.8421639e-01 3.4227214e-01 2.5952711e-01 3.6545764e-01 3.5568934e-01 3.3784386e-01 2.9565951e-01 3.3403769e-01 2.9050448e-01 3.1070970e-01 3.1176741e-01 1.7225353e-03 3.1252650e-03 1.9141697e-03 3.0572974e-04 1.5621195e-03 7.7657730e-04 6.0730841e-03 9.6969946e-05 6.0804497e-03 4.8728559e-03 1.0019276e-02 1.0630759e-02 1.4028596e-03 1.0008907e-03 1.5106647e-03 3.9425625e-03 5.9922357e-03 4.5089760e-03 4.5296071e-03 2.4544132e-03 5.3429196e-03 2.3927105e-03 2.5658780e-03 5.3429196e-03 8.0759380e-04 1.5345623e-03 3.2653860e-04 2.8784931e-02 4.6959359e-04 5.2961514e-04 3.5529133e-03 5.0787251e-03 1.7528094e-03 7.9750642e-04 1.8469304e-04 1.3949343e-03 2.2601194e-01 2.2489065e-01 2.6538416e-01 3.0933559e-01 2.8173337e-01 3.0217927e-01 2.4601138e-01 2.0461695e-01 2.5772236e-01 2.5608721e-01 2.9099324e-01 2.2920421e-01 2.8227951e-01 2.9802519e-01 1.6660589e-01 2.1294215e-01 2.9118999e-01 2.4154604e-01 3.6304829e-01 2.5194222e-01 3.0483437e-01 2.1582549e-01 3.6852765e-01 3.0175865e-01 2.2996178e-01 2.2696306e-01 2.8805620e-01 3.0640689e-01 2.7978083e-01 1.8266242e-01 2.5611673e-01 2.3849427e-01 2.2189938e-01 3.9968280e-01 3.0655638e-01 2.1989254e-01 2.4981061e-01 3.1957344e-01 2.2214967e-01 2.7998655e-01 3.2222399e-01 2.7182647e-01 2.4776514e-01 2.1557967e-01 2.7625427e-01 2.2956697e-01 2.4461597e-01 2.3673300e-01 1.4870158e-01 2.4319918e-01 5.0146439e-01 4.4143183e-01 4.1797463e-01 4.3468983e-01 4.6265612e-01 4.6350594e-01 4.5140079e-01 4.4286970e-01 4.7413628e-01 3.8981309e-01 3.2063561e-01 4.1652733e-01 3.8823357e-01 4.7518267e-01 4.7516433e-01 3.7651294e-01 3.9124904e-01 3.9150135e-01 5.4273573e-01 4.4336023e-01 3.9556546e-01 4.2215915e-01 4.8051706e-01 3.6152094e-01 3.8501414e-01 3.8277781e-01 3.4024930e-01 3.3391004e-01 4.6138930e-01 3.7007400e-01 4.2991865e-01 3.3504580e-01 4.7002175e-01 3.5780202e-01 4.6054739e-01 4.1401655e-01 4.1241306e-01 3.8694896e-01 3.2563422e-01 3.5550143e-01 4.1844379e-01 3.2964964e-01 4.4143183e-01 4.3139176e-01 4.1295631e-01 3.6894646e-01 4.1038562e-01 3.6180237e-01 3.8096707e-01 3.8161756e-01 2.9059655e-03 2.3706161e-04 1.5622923e-03 4.6985391e-03 
2.1349209e-02 2.6308756e-02 2.8721666e-02 8.1439460e-02 4.1842449e-02 2.6703197e-02 3.5208876e-02 5.5212555e-02 1.8433774e-02 2.4622301e-02 2.3863121e-02 1.3181398e-02 1.1675955e-02 4.8074316e-02 1.9890418e-02 3.7380285e-02 1.3082557e-02 5.1276398e-02 1.7233265e-02 5.2150081e-02 3.4634218e-02 3.3454504e-02 2.6550201e-02 1.0070918e-02 1.6842626e-02 3.3968952e-02 1.6166125e-02 4.1505551e-02 3.7811539e-02 3.1780349e-02 2.2766214e-02 3.3244526e-02 1.7747224e-02 2.4583452e-02 2.5577750e-02 7.4058632e-03 1.7632325e-02 5.8672157e-03 1.4554963e-02 7.5139440e-03 6.3063306e-03 2.8227254e-03 1.2216172e-02 5.2171506e-03 8.3411571e-03 9.1198919e-03 3.7613430e-03 3.7527482e-03 5.6979631e-04 2.0412760e-03 4.1677011e-03 1.2238157e-02 1.7043766e-03 3.0984992e-03 5.1374687e-03 2.0730806e-02 2.1628161e-02 1.6669142e-02 2.5627983e-03 1.3966031e-03 1.3985502e-02 1.7150229e-03 1.2743810e-02 7.3773848e-03 2.3644296e-03 5.7186323e-03 5.6669191e-03 1.5046711e-02 9.0550035e-03 4.3221940e-03 2.4177800e-02 5.7926331e-03 5.2977640e-02 2.8412846e-02 1.8464622e-02 3.1548301e-02 3.3726407e-02 3.1637531e-02 3.8729422e-02 2.8292534e-02 3.3574657e-02 1.5809867e-02 4.4178873e-03 1.7890807e-02 1.1580028e-02 3.3334020e-02 3.2156672e-02 1.0943899e-02 1.8967943e-02 2.4791881e-02 5.4103864e-02 2.4225163e-02 1.3643641e-02 2.4071431e-02 3.5681100e-02 6.5373199e-03 1.7959936e-02 1.7099713e-02 4.3929724e-03 8.0887873e-03 3.0809355e-02 1.2543658e-02 2.0721585e-02 1.2037199e-02 3.2114506e-02 1.2207760e-02 4.3300211e-02 1.5218252e-02 2.5733556e-02 2.1932604e-02 7.3680112e-03 5.8739609e-03 1.7443010e-02 2.6299180e-03 2.8412846e-02 2.4437667e-02 1.7887656e-02 6.9159049e-03 1.4494165e-02 7.9434468e-03 2.0060731e-02 2.2068579e-02 2.3875094e-02 1.2000415e-02 1.3948342e-03 3.4546351e-03 1.4939730e-02 3.7950422e-03 6.0175173e-04 1.2211196e-02 7.0052335e-03 9.1256063e-05 8.0336825e-03 9.5484059e-03 3.8945506e-03 2.3206010e-03 7.0732153e-04 2.0727990e-02 4.4889370e-03 6.3258821e-03 8.7927798e-03 9.0069682e-03 3.7952179e-03 7.8092546e-03 4.4335690e-03 9.0550736e-03 6.7946167e-03 2.1531805e-03 9.6426638e-04 7.0770423e-04 5.1150602e-03 1.0670446e-02 5.5187206e-04 6.0114634e-03 3.0771240e-03 5.3655604e-03 3.8039285e-02 3.5100982e-03 3.3392676e-02 1.7228333e-02 1.4023464e-02 1.5984642e-02 2.2267037e-02 2.3075907e-02 2.0330300e-02 1.7609739e-02 2.6354220e-02 7.6657981e-03 1.4938436e-03 1.3975046e-02 9.8087979e-03 2.7013992e-02 2.8227679e-02 6.7815824e-03 7.6426994e-03 8.5739542e-03 4.8734857e-02 1.9601188e-02 1.0188446e-02 1.3147209e-02 2.7911001e-02 7.5145131e-03 6.7053137e-03 6.3732135e-03 4.4764490e-03 1.3642012e-03 2.2654181e-02 4.8866188e-03 1.6888522e-02 1.3930577e-03 2.5469441e-02 3.2860894e-03 2.3095694e-02 1.8694362e-02 1.1451986e-02 7.4272413e-03 8.6465452e-04 6.5073035e-03 1.5400191e-02 1.2824348e-02 1.7228333e-02 1.5313622e-02 1.2672619e-02 1.3006211e-02 1.7530867e-02 5.5400420e-03 6.4187909e-03 6.8900377e-03 3.1968437e-03 2.6175875e-02 9.5689725e-03 4.4766639e-02 9.9845360e-03 2.8418681e-02 3.6870693e-03 4.5253718e-02 2.5815239e-02 5.6468536e-03 5.2271680e-03 1.8578768e-02 2.3906603e-02 1.7414675e-02 6.6309054e-04 1.0701959e-02 7.1563943e-03 4.3943049e-03 6.0754711e-02 3.4699507e-02 9.6990814e-03 9.4374842e-03 2.8881695e-02 8.6238198e-03 1.6549411e-02 3.3492594e-02 1.6963829e-02 8.9575119e-03 3.4839312e-03 1.7214511e-02 1.0985852e-02 1.1115876e-02 7.0774378e-03 2.3223923e-03 9.1191389e-03 1.1169736e-01 7.8250518e-02 6.5808362e-02 7.7763348e-02 8.8114611e-02 8.7449743e-02 8.7215232e-02 7.8675005e-02 9.2203874e-02 5.5132383e-02 
2.8794481e-02 6.5100824e-02 5.3025891e-02 9.2531519e-02 9.2220151e-02 4.8731252e-02 5.7186075e-02 6.0580024e-02 1.2745253e-01 7.7174441e-02 5.6198920e-02 6.9678402e-02 9.5449745e-02 4.2538093e-02 5.4722713e-02 5.3576276e-02 3.4860888e-02 3.4291475e-02 8.6329226e-02 4.7343142e-02 7.0978985e-02 3.6695928e-02 9.0088005e-02 4.3430772e-02 9.2828505e-02 6.4239353e-02 6.7481588e-02 5.7536867e-02 3.1542812e-02 4.0297150e-02 6.5753742e-02 3.3241661e-02 7.8250518e-02 7.2933055e-02 6.3794057e-02 4.6178781e-02 6.2571051e-02 4.2843100e-02 5.4666081e-02 5.6027673e-02 1.6139749e-02 3.9412782e-03 2.4265145e-02 2.3891707e-03 1.6498373e-02 4.1418612e-05 2.4988702e-02 1.3783039e-02 4.3146851e-04 2.5122323e-04 6.4815288e-03 1.0052630e-02 7.0587637e-03 1.3669961e-03 2.4020339e-03 9.7331181e-04 2.7171014e-04 3.8783427e-02 2.3881244e-02 7.7285447e-03 2.0010737e-03 1.2962382e-02 6.0316172e-03 5.6775409e-03 1.9642810e-02 8.0347010e-03 1.6772041e-03 5.1201506e-05 7.5139419e-03 7.7685816e-03 5.4104161e-03 1.3577183e-03 7.6646931e-03 3.0214900e-03 8.1621829e-02 5.2182884e-02 4.1040324e-02 5.3055858e-02 6.0133087e-02 5.8980137e-02 6.1457708e-02 5.2397706e-02 6.2608146e-02 3.3589093e-02 1.3750660e-02 4.0409320e-02 3.0837807e-02 6.2732754e-02 6.2152008e-02 2.7986481e-02 3.5988306e-02 4.0227814e-02 9.1799153e-02 5.0051858e-02 3.3478326e-02 4.5393111e-02 6.5357720e-02 2.2750138e-02 3.4139497e-02 3.3132950e-02 1.7367166e-02 1.8328224e-02 5.7997026e-02 2.7720666e-02 4.5013892e-02 2.1236242e-02 6.0787869e-02 2.5266646e-02 6.6573172e-02 3.9086224e-02 4.4693577e-02 3.7322864e-02 1.6444309e-02 2.1159286e-02 4.0674593e-02 1.6113197e-02 5.2182884e-02 4.7503868e-02 3.9598661e-02 2.5231075e-02 3.7795470e-02 2.3382629e-02 3.4903444e-02 3.6481922e-02 4.3266011e-03 2.5120283e-02 7.2104610e-03 3.6362605e-04 1.6818393e-02 1.3224351e-02 8.9751703e-04 1.2039448e-02 1.4078561e-02 9.5477546e-03 7.2983976e-03 3.1999143e-03 2.4639885e-02 8.8187561e-03 1.0207680e-02 1.2236717e-02 1.1661588e-02 8.0001293e-04 5.7297070e-03 8.1532898e-03 1.7517743e-02 5.5505771e-03 6.3202290e-03 1.2261175e-03 1.5275601e-03 9.1326114e-03 1.4417917e-02 2.2056571e-03 4.2297788e-03 3.2218738e-03 8.2583968e-03 4.2679879e-02 5.2144338e-03 3.5325690e-02 2.1286357e-02 2.0340751e-02 1.7932964e-02 2.6773752e-02 2.8858059e-02 2.1473619e-02 2.1925037e-02 3.2987864e-02 1.1596147e-02 5.5679058e-03 2.0475433e-02 1.6418368e-02 3.4058657e-02 3.6285268e-02 1.2036020e-02 1.0227865e-02 8.9974606e-03 5.7847652e-02 2.6531721e-02 1.6295053e-02 1.6788576e-02 3.4473710e-02 1.4498141e-02 9.1389529e-03 8.9727022e-03 1.0441422e-02 4.2212624e-03 2.8555402e-02 8.4473824e-03 2.3791713e-02 2.5698702e-03 3.2242281e-02 5.9266749e-03 2.3676462e-02 2.8216759e-02 1.3343464e-02 8.4809495e-03 3.5554866e-03 1.3166972e-02 2.2745344e-02 2.2629651e-02 2.1286357e-02 1.9992085e-02 1.8520730e-02 2.2324446e-02 2.6771848e-02 1.1107369e-02 7.6788410e-03 7.4852977e-03 2.3512869e-02 1.1702196e-03 5.0486445e-03 4.4067902e-03 1.7493736e-02 4.0658633e-03 2.3529372e-03 3.3047066e-03 5.2094902e-03 6.0430183e-03 1.7805903e-03 8.3604250e-03 2.1645857e-03 1.8214590e-03 2.1737913e-03 2.3530298e-02 8.5714286e-03 1.8444368e-03 1.4565864e-03 1.3243081e-02 1.0630397e-03 2.8808992e-03 7.3652636e-03 1.0835317e-03 1.7933322e-03 3.0961279e-03 1.3436220e-03 1.3043704e-03 1.2482375e-04 8.8034137e-04 2.0043242e-02 1.2482375e-04 5.7954445e-02 3.5775683e-02 2.9742194e-02 3.4149795e-02 4.2824229e-02 4.3477315e-02 4.0214177e-02 3.6256651e-02 4.7605297e-02 2.0950874e-02 7.1042975e-03 2.9499916e-02 2.2329817e-02 4.8271691e-02 
4.9230028e-02 1.8375611e-02 2.1296157e-02 2.2492082e-02 7.5802989e-02 3.7751582e-02 2.3617894e-02 2.9827991e-02 4.9792717e-02 1.7017775e-02 1.9724142e-02 1.9131907e-02 1.1873908e-02 8.7349825e-03 4.2824229e-02 1.6130090e-02 3.3687060e-02 9.1410950e-03 4.6289887e-02 1.3350026e-02 4.3904542e-02 3.3071376e-02 2.7381594e-02 2.0813943e-02 7.3233676e-03 1.5432666e-02 3.0904955e-02 1.7682128e-02 3.5775683e-02 3.2747980e-02 2.7982984e-02 2.2571206e-02 3.1620085e-02 1.5454693e-02 1.9154732e-02 1.9759156e-02 1.5625666e-02 2.0014241e-02 2.2906319e-02 3.3689213e-03 1.6561417e-02 1.9555072e-02 1.9724522e-02 6.8229205e-03 6.0996359e-03 1.3507575e-02 3.5744409e-02 1.3348521e-02 1.7651623e-02 2.2506296e-02 1.4148535e-02 3.1413076e-02 3.8119216e-02 1.5501824e-02 1.7783197e-03 3.4445966e-02 9.9745771e-03 1.7822684e-02 1.8993068e-02 1.5436010e-02 2.3911299e-02 1.6070505e-02 3.4793519e-02 2.5109394e-02 1.9730191e-02 5.3384757e-02 2.0981999e-02 3.5016323e-02 1.5808551e-02 7.2216559e-03 2.1250521e-02 1.8323057e-02 1.5289849e-02 2.7077232e-02 1.5342317e-02 1.5450153e-02 9.5719689e-03 7.0769801e-03 6.7142336e-03 3.8618160e-03 1.4833874e-02 1.3052654e-02 5.5299114e-03 1.3477582e-02 2.0871841e-02 2.6745939e-02 9.5407500e-03 5.3018257e-03 1.4058965e-02 1.6863810e-02 2.0380823e-03 1.3353665e-02 1.2655783e-02 3.2775616e-03 9.7625272e-03 1.4632542e-02 9.0437643e-03 7.6711708e-03 1.4740530e-02 1.4381411e-02 1.0907741e-02 3.1026769e-02 2.6758954e-03 1.8148668e-02 1.8095332e-02 1.0256426e-02 2.3444102e-03 5.5327937e-03 1.0086149e-03 1.5808551e-02 1.2612237e-02 7.5482105e-03 2.3261323e-04 2.5482034e-03 4.2287654e-03 1.6814254e-02 1.9242327e-02 6.8098804e-03 2.4185222e-03 1.3095995e-02 4.9100577e-03 8.0319599e-04 1.3085229e-03 1.8113887e-03 3.1306373e-03 1.2541646e-03 7.2250359e-03 1.5182032e-04 3.2471009e-04 1.1793141e-03 2.2134336e-02 1.2798604e-02 5.8036245e-03 2.9068060e-05 7.1481800e-03 4.2574873e-03 8.5847073e-04 8.5715637e-03 2.1039195e-03 1.1389023e-04 1.8746094e-03 1.5895426e-03 4.9316123e-03 1.9257560e-03 2.7708506e-04 1.8515651e-02 5.3986259e-04 5.6637702e-02 3.2886175e-02 2.4967826e-02 3.3187187e-02 3.9414817e-02 3.8923256e-02 3.9880016e-02 3.3128964e-02 4.2231952e-02 1.8470852e-02 4.9097912e-03 2.4570551e-02 1.7467535e-02 4.2525770e-02 4.2580968e-02 1.4790908e-02 2.0011146e-02 2.3142926e-02 6.7845285e-02 3.2285576e-02 1.9161137e-02 2.7420216e-02 4.4447416e-02 1.1980855e-02 1.8611023e-02 1.7883360e-02 7.8729229e-03 7.5466400e-03 3.8175619e-02 1.4096206e-02 2.8321778e-02 9.4496873e-03 4.0815141e-02 1.2211823e-02 4.4024579e-02 2.5465922e-02 2.6596134e-02 2.0925904e-02 6.3340227e-03 1.0720439e-02 2.5211490e-02 1.0333158e-02 3.2886175e-02 2.9360405e-02 2.3656085e-02 1.5413665e-02 2.4285913e-02 1.1678199e-02 1.9116491e-02 2.0313205e-02 1.6950975e-02 9.2576768e-03 2.2830227e-04 1.1996475e-02 1.3943618e-02 7.5471408e-03 5.0950912e-03 2.4447813e-03 2.5903324e-02 8.0139448e-03 9.9879335e-03 1.2585436e-02 8.2754975e-03 1.4174352e-03 8.0478199e-03 7.7042954e-03 1.3901912e-02 7.5078674e-03 4.9957099e-03 2.9997761e-04 1.5395926e-03 8.6362248e-03 1.4833108e-02 1.8222710e-03 6.1957159e-03 4.1238914e-03 8.4102568e-03 4.4734160e-02 5.6076899e-03 3.0288189e-02 1.6597244e-02 1.5336169e-02 1.4117002e-02 2.1529085e-02 2.3159309e-02 1.7633168e-02 1.7119251e-02 2.6802924e-02 7.9632454e-03 3.3472740e-03 1.5438450e-02 1.1914773e-02 2.7716384e-02 2.9607261e-02 8.2241873e-03 7.0346123e-03 6.5727745e-03 4.9604200e-02 2.0828511e-02 1.1808642e-02 1.2615158e-02 2.8194719e-02 1.0429441e-02 6.1245969e-03 5.9452412e-03 7.1618546e-03 
2.1261487e-03 2.2855826e-02 5.3452154e-03 1.8357871e-02 1.0397070e-03 2.6087088e-02 3.3990115e-03 1.9871685e-02 2.2251351e-02 9.9773797e-03 5.9192173e-03 1.6976055e-03 9.3317377e-03 1.7391404e-02 1.8375579e-02 1.6597244e-02 1.5280892e-02 1.3772451e-02 1.7356806e-02 2.0968489e-02 7.4978418e-03 5.1634523e-03 5.1963190e-03 2.4233623e-02 1.4077025e-02 4.3787020e-04 1.8512542e-04 6.0477386e-03 9.6703484e-03 7.1488601e-03 1.5477068e-03 2.2742281e-03 9.7171923e-04 3.9271863e-04 3.8509554e-02 2.4774335e-02 8.7360281e-03 1.9891111e-03 1.1995846e-02 6.8973897e-03 5.5019459e-03 1.9965157e-02 8.4152844e-03 1.6247783e-03 1.4531219e-04 7.7480972e-03 8.6972032e-03 5.9824611e-03 1.5176827e-03 7.8567259e-03 3.3493021e-03 8.1204385e-02 5.1575806e-02 4.0135980e-02 5.2803481e-02 5.9400299e-02 5.8042463e-02 6.1291964e-02 5.1745941e-02 6.1508632e-02 3.3138226e-02 1.3469654e-02 3.9479896e-02 2.9967140e-02 6.1565597e-02 6.0825097e-02 2.7361698e-02 3.5748038e-02 4.0333571e-02 9.0138895e-02 4.8984196e-02 3.2667108e-02 4.4902460e-02 6.4253355e-02 2.1894327e-02 3.3940189e-02 3.2913172e-02 1.6731486e-02 1.8202458e-02 5.7045856e-02 2.7382191e-02 4.3985602e-02 2.1375556e-02 5.9676750e-02 2.5108522e-02 6.6474949e-02 3.7689168e-02 4.4508235e-02 3.7339453e-02 1.6368548e-02 2.0371230e-02 3.9606943e-02 1.4908799e-02 5.1575806e-02 4.6824025e-02 3.8781771e-02 2.3989539e-02 3.6452117e-02 2.2748990e-02 3.4902979e-02 3.6586492e-02 7.5459794e-03 1.9125850e-02 2.0310725e-02 6.1222293e-03 3.4447226e-03 8.1418452e-03 3.7867035e-02 1.2077489e-02 1.6567789e-02 2.1605044e-02 3.9038067e-03 1.6430417e-02 2.8689503e-02 1.3569150e-02 4.2181405e-03 2.6167927e-02 7.2610935e-03 7.0630535e-03 1.1392972e-02 1.4088158e-02 2.3849569e-02 9.5566395e-03 2.5308356e-02 1.7804542e-02 1.7150362e-02 5.8921861e-02 1.6182419e-02 2.0027139e-02 6.1819655e-03 2.0410396e-03 8.6030014e-03 8.6357480e-03 7.5787415e-03 1.2598904e-02 6.0656768e-03 8.7139108e-03 1.7031846e-03 2.3331644e-03 1.8821103e-03 3.1682605e-04 8.7160052e-03 8.5600328e-03 2.8826583e-04 3.4529372e-03 7.5176091e-03 2.1668627e-02 4.3875084e-03 6.5654007e-04 4.4940457e-03 9.7914071e-03 1.9831433e-04 3.3391345e-03 2.9870521e-03 6.9985633e-04 2.5162956e-03 7.1853248e-03 1.3860021e-03 2.9754449e-03 4.8339512e-03 8.0121089e-03 2.3142577e-03 1.5373296e-02 2.8512223e-03 6.2399212e-03 5.8796015e-03 3.1092372e-03 2.3415322e-04 1.9852251e-03 5.2321529e-03 6.1819655e-03 4.3274341e-03 1.8113019e-03 1.9110595e-03 2.4033410e-03 9.3130265e-05 5.1498910e-03 6.5356041e-03 9.5659910e-03 1.1252405e-02 5.1556774e-03 3.2048021e-03 1.2695843e-03 2.2856756e-02 5.7848184e-03 7.7211451e-03 1.0283621e-02 8.3602499e-03 2.7615733e-03 7.9786726e-03 5.6505970e-03 1.0691105e-02 7.1332727e-03 3.1295602e-03 5.2401241e-04 9.5266815e-04 6.4319329e-03 1.2318850e-02 9.4872145e-04 6.1326072e-03 3.4758657e-03 6.5333653e-03 4.0853883e-02 4.3049208e-03 3.1649609e-02 1.6528693e-02 1.4101758e-02 1.4838879e-02 2.1483291e-02 2.2597676e-02 1.8841242e-02 1.6959579e-02 2.5991410e-02 7.4250762e-03 1.9970150e-03 1.4113092e-02 1.0250250e-02 2.6744340e-02 2.8212793e-02 6.9997737e-03 7.0607843e-03 7.4739664e-03 4.8397396e-02 1.9593840e-02 1.0430940e-02 1.2525634e-02 2.7473469e-02 8.3209543e-03 6.1476473e-03 5.8775226e-03 5.2455782e-03 1.4265861e-03 2.2224591e-02 4.7507930e-03 1.7000338e-02 1.0369313e-03 2.5178358e-02 3.0495461e-03 2.1392322e-02 1.9628131e-02 1.0500631e-02 6.5204272e-03 9.7357638e-04 7.2933833e-03 1.5732817e-02 1.4704532e-02 1.6528693e-02 1.4858839e-02 1.2681737e-02 1.4339393e-02 1.8427144e-02 5.9852538e-03 5.6194230e-03 
5.9265324e-03 8.2461319e-05 3.7688699e-03 6.4113525e-03 4.0576095e-03 3.3120738e-03 8.1782115e-04 1.0911416e-04 1.0750826e-04 3.1097496e-02 1.8955233e-02 6.6951403e-03 5.7580225e-04 9.5785875e-03 5.0095389e-03 2.9949836e-03 1.4512717e-02 5.0859668e-03 4.0940931e-04 2.6323499e-04 4.5146422e-03 6.3221754e-03 3.5606197e-03 3.7002765e-04 1.1722539e-02 1.5048339e-03 7.0395338e-02 4.3268177e-02 3.3370831e-02 4.4028795e-02 5.0578307e-02 4.9629423e-02 5.1746432e-02 4.3478758e-02 5.3066435e-02 2.6481479e-02 9.3392135e-03 3.2828923e-02 2.4313691e-02 5.3237993e-02 5.2867243e-02 2.1623243e-02 2.8596153e-02 3.2496284e-02 8.0563616e-02 4.1614554e-02 2.6578058e-02 3.7074597e-02 5.5590704e-02 1.7323162e-02 2.6950223e-02 2.6053254e-02 1.2554151e-02 1.3157434e-02 4.8740123e-02 2.1279987e-02 3.7040705e-02 1.5739340e-02 5.1410982e-02 1.9129885e-02 5.6480014e-02 3.2271228e-02 3.6432542e-02 2.9847396e-02 1.1573499e-02 1.5895178e-02 3.3202746e-02 1.2659655e-02 4.3268177e-02 3.9046279e-02 3.2014714e-02 2.0067384e-02 3.1045819e-02 1.7642117e-02 2.7677552e-02 2.9133996e-02 4.1910853e-03 7.2076003e-03 5.1147587e-03 2.7507325e-03 1.1620134e-03 3.3310681e-04 2.1458858e-04 3.3405406e-02 2.1499526e-02 8.0088547e-03 9.8332257e-04 9.6892205e-03 6.1744631e-03 3.6704596e-03 1.6544301e-02 6.4217725e-03 7.2111274e-04 2.1270559e-04 5.7183304e-03 7.6981750e-03 4.7117366e-03 8.0177817e-04 1.0442534e-02 2.2910923e-03 7.3784227e-02 4.5661689e-02 3.5029236e-02 4.6823533e-02 5.3065363e-02 5.1836541e-02 5.4866908e-02 4.5828107e-02 5.5174565e-02 2.8408213e-02 1.0505539e-02 3.4430852e-02 2.5609623e-02 5.5260802e-02 5.4654205e-02 2.3114484e-02 3.0836583e-02 3.5207839e-02 8.2686924e-02 4.3372344e-02 2.8064998e-02 3.9380344e-02 5.7772002e-02 1.8254235e-02 2.9162191e-02 2.8206341e-02 1.3497345e-02 1.4745279e-02 5.0900941e-02 2.3090436e-02 3.8677830e-02 1.7710943e-02 5.3450122e-02 2.1019262e-02 5.9801906e-02 3.3130692e-02 3.9030212e-02 3.2379101e-02 1.3108535e-02 1.6839016e-02 3.4625964e-02 1.2551681e-02 4.5661689e-02 4.1207031e-02 3.3732757e-02 2.0513590e-02 3.1937443e-02 1.8908607e-02 3.0105345e-02 3.1711397e-02 5.5341249e-04 1.8100865e-03 1.3685556e-02 1.1595059e-03 2.7733190e-03 5.0259130e-03 1.5582273e-02 1.5303863e-02 1.3216313e-02 1.8061219e-03 1.8483286e-03 1.0961959e-02 4.0632184e-04 7.9419645e-03 4.2030049e-03 1.8572156e-03 5.9964081e-03 2.8843938e-03 1.1487297e-02 6.2195738e-03 3.4171878e-03 2.7328194e-02 3.9549773e-03 4.5323537e-02 2.3233798e-02 1.5194289e-02 2.5226902e-02 2.8389486e-02 2.7075250e-02 3.1585337e-02 2.3242139e-02 2.9332670e-02 1.1663012e-02 1.9950589e-03 1.4768251e-02 9.1566367e-03 2.9330780e-02 2.8799195e-02 7.9264454e-03 1.3962831e-02 1.8481959e-02 5.0115855e-02 2.0802698e-02 1.0733745e-02 1.9041166e-02 3.1266982e-02 4.9760517e-03 1.3001310e-02 1.2292231e-02 2.6877167e-03 4.5798906e-03 2.6364594e-02 8.6153969e-03 1.7566530e-02 7.4744318e-03 2.8043210e-02 7.9969256e-03 3.5640074e-02 1.4109863e-02 1.9847472e-02 1.6098385e-02 3.9587699e-03 4.2468630e-03 1.4818700e-02 3.7268555e-03 2.3233798e-02 1.9851942e-02 1.4429507e-02 6.7117241e-03 1.3281147e-02 5.4664867e-03 1.4483311e-02 1.6079322e-02 1.4644608e-03 1.8793440e-02 2.6529941e-03 4.9386280e-03 7.8423223e-03 1.0293845e-02 1.1871124e-02 1.4064748e-02 3.3517168e-03 2.2145112e-03 1.1983758e-02 7.1285200e-04 4.9198989e-03 3.5944894e-03 3.6244119e-03 9.2640212e-03 2.3814700e-03 1.1959906e-02 6.6391160e-03 5.2659669e-03 3.4768199e-02 5.0225808e-03 3.6096256e-02 1.6893661e-02 1.0608435e-02 1.8369401e-02 2.1445998e-02 2.0588370e-02 2.3840862e-02 1.6945577e-02 
2.2798129e-02 7.2320779e-03 4.5716975e-04 1.0311574e-02 5.8475513e-03 2.2922553e-02 2.2802937e-02 4.5367353e-03 8.9750660e-03 1.2721749e-02 4.2209187e-02 1.5508457e-02 6.9302761e-03 1.3261817e-02 2.4477020e-02 2.9037640e-03 8.2014592e-03 7.6405998e-03 1.0779902e-03 2.0077167e-03 2.0000571e-02 4.8259503e-03 1.2764273e-02 4.2375783e-03 2.1711543e-02 4.3575270e-03 2.7378197e-02 1.1026559e-02 1.3799894e-02 1.0731586e-02 1.6733633e-03 2.2875292e-03 1.0634216e-02 4.2659077e-03 1.6893661e-02 1.4117780e-02 9.8480141e-03 5.3491991e-03 1.0216132e-02 2.7977678e-03 9.4220767e-03 1.0761419e-02 1.4255275e-02 1.6372127e-03 2.8542706e-03 4.7214122e-03 1.3332443e-02 7.0444510e-03 6.5532524e-03 1.6174955e-03 6.7559932e-03 5.2389698e-03 5.3640347e-04 3.3171183e-03 5.1283913e-04 2.0296126e-03 6.0950963e-03 1.2744293e-04 5.0948120e-03 1.9318373e-03 2.3608728e-03 2.9089432e-02 1.4755776e-03 4.1865124e-02 2.2429622e-02 1.7100637e-02 2.2021240e-02 2.8022948e-02 2.8179896e-02 2.7393595e-02 2.2729266e-02 3.1385076e-02 1.0928177e-02 1.7748140e-03 1.6893502e-02 1.1535806e-02 3.1856326e-02 3.2502251e-02 8.7863970e-03 1.1670124e-02 1.3706425e-02 5.4822004e-02 2.3316700e-02 1.2494202e-02 1.7833364e-02 3.3214276e-02 7.9216177e-03 1.0562159e-02 1.0054198e-02 4.5572702e-03 2.8793276e-03 2.7614983e-02 7.5454514e-03 2.0103411e-02 3.8323098e-03 3.0272091e-02 5.9246169e-03 3.0771609e-02 1.9772265e-02 1.6666117e-02 1.2065056e-02 2.1106407e-03 6.8455106e-03 1.7918371e-02 1.0155544e-02 2.2429622e-02 1.9803941e-02 1.5812785e-02 1.2351590e-02 1.8630292e-02 6.7616623e-03 1.0713593e-02 1.1544341e-02 7.3852619e-03 4.6030790e-03 2.5842708e-03 5.4300453e-02 3.3576641e-02 1.0668428e-02 6.6015056e-03 2.1814225e-02 9.0913366e-03 1.2584515e-02 3.0243821e-02 1.4861127e-02 6.0467884e-03 1.7442631e-03 1.4568415e-02 1.1492675e-02 1.0173171e-02 5.0439803e-03 2.6472861e-03 7.4513739e-03 1.0326079e-01 7.0098941e-02 5.7127711e-02 7.0823391e-02 7.9250920e-02 7.7946311e-02 8.0298570e-02 7.0363352e-02 8.2049228e-02 4.8294128e-02 2.3708406e-02 5.6369153e-02 4.4949110e-02 8.2157728e-02 8.1370723e-02 4.1592461e-02 5.0974970e-02 5.5456655e-02 1.1462128e-01 6.7593968e-02 4.8172633e-02 6.2189094e-02 8.5184220e-02 3.4981615e-02 4.8742735e-02 4.7569879e-02 2.8345798e-02 2.9430788e-02 7.6817222e-02 4.1186102e-02 6.1732592e-02 3.2638549e-02 7.9961084e-02 3.8043139e-02 8.5993831e-02 5.4182844e-02 6.1100206e-02 5.2203816e-02 2.6988218e-02 3.3049806e-02 5.6579852e-02 2.5305632e-02 7.0098941e-02 6.4715083e-02 5.5458622e-02 3.7374533e-02 5.2731313e-02 3.5938214e-02 4.9377578e-02 5.1061578e-02 3.5870177e-04 1.3937749e-03 2.2192133e-02 1.4887667e-02 7.8001469e-03 8.8171759e-05 5.5216302e-03 5.9793748e-03 7.0276376e-04 9.5746848e-03 3.0437686e-03 8.2181914e-05 2.0068727e-03 2.2506604e-03 6.8090409e-03 3.1469229e-03 6.3301284e-04 1.8528100e-02 1.2625125e-03 5.6704255e-02 3.2399710e-02 2.3806821e-02 3.3394775e-02 3.8737297e-02 3.7833623e-02 4.0310184e-02 3.2556548e-02 4.0845247e-02 1.8140183e-02 4.6897646e-03 2.3353931e-02 1.6279158e-02 4.1004664e-02 4.0734447e-02 1.4064354e-02 2.0110135e-02 2.3937572e-02 6.5512797e-02 3.0871191e-02 1.8117892e-02 2.7110445e-02 4.3069544e-02 1.0732705e-02 1.8772743e-02 1.7996705e-02 7.0003565e-03 7.6876035e-03 3.7052349e-02 1.3928781e-02 2.6945955e-02 1.0128877e-02 3.9392715e-02 1.2368176e-02 4.4616010e-02 2.3284644e-02 2.6871325e-02 2.1527670e-02 6.5473767e-03 9.5899926e-03 2.3719212e-02 8.2529644e-03 3.2399710e-02 2.8696517e-02 2.2657048e-02 1.3399527e-02 2.2203050e-02 1.0884330e-02 1.9665180e-02 2.1074599e-02 3.3845415e-04 
2.7555720e-02 1.6677843e-02 6.3795274e-03 1.8362843e-04 8.3039101e-03 4.7168779e-03 1.9845708e-03 1.2200549e-02 3.8746792e-03 1.0179352e-04 6.8781671e-04 3.2951534e-03 5.8068152e-03 2.8838183e-03 1.7929502e-04 1.4079350e-02 1.0261263e-03 6.5064614e-02 3.9139337e-02 2.9910766e-02 3.9801101e-02 4.6137717e-02 4.5316574e-02 4.7159535e-02 3.9352955e-02 4.8674767e-02 2.3254889e-02 7.4636086e-03 2.9417103e-02 2.1432655e-02 4.8877998e-02 4.8632496e-02 1.8795647e-02 2.5200016e-02 2.8886320e-02 7.5371106e-02 3.7782169e-02 2.3498806e-02 3.3236011e-02 5.1084684e-02 1.4998262e-02 2.3652548e-02 2.2813769e-02 1.0515841e-02 1.0877794e-02 4.4476659e-02 1.8379623e-02 3.3440220e-02 1.3262540e-02 4.7104689e-02 1.6362923e-02 5.1690239e-02 2.9302173e-02 3.2587374e-02 2.6378456e-02 9.4404077e-03 1.3645256e-02 2.9862026e-02 1.1376615e-02 3.9139337e-02 3.5158865e-02 2.8588131e-02 1.7920264e-02 2.8103943e-02 1.5132216e-02 2.4337714e-02 2.5719532e-02 3.3433714e-02 1.9092201e-02 5.6820829e-03 9.6311300e-04 1.1687721e-02 4.1741711e-03 3.9152052e-03 1.5429746e-02 5.3660274e-03 8.0766059e-04 9.2937667e-05 4.9943675e-03 5.5161799e-03 3.3131521e-03 4.2533013e-04 1.0435178e-02 1.4826896e-03 7.3827823e-02 4.6346021e-02 3.6506307e-02 4.6683572e-02 5.3981470e-02 5.3241058e-02 5.4467293e-02 4.6614647e-02 5.6933591e-02 2.8894083e-02 1.0839802e-02 3.5973376e-02 2.7110780e-02 5.7179243e-02 5.6957917e-02 2.4065468e-02 3.0813759e-02 3.4360438e-02 8.5582303e-02 4.5156230e-02 2.9396418e-02 3.9850269e-02 5.9522160e-02 1.9818286e-02 2.9063291e-02 2.8164144e-02 1.4610346e-02 1.4656069e-02 5.2344759e-02 2.3377306e-02 4.0412481e-02 1.6984268e-02 5.5250411e-02 2.0918953e-02 5.9213325e-02 3.5814189e-02 3.8804285e-02 3.1759139e-02 1.2931018e-02 1.8261938e-02 3.6495356e-02 1.5089913e-02 4.6346021e-02 4.2098942e-02 3.5016667e-02 2.2986158e-02 3.4503485e-02 1.9934983e-02 2.9546649e-02 3.0900707e-02 1.1335824e-02 3.2301226e-02 2.3277392e-02 1.5629192e-02 3.0795664e-02 1.5041716e-02 5.4315810e-03 1.4713782e-02 2.4451968e-02 3.6761202e-02 1.3762814e-02 2.8473446e-02 2.2472023e-02 2.6736705e-02 8.0230549e-02 2.3341323e-02 8.1067950e-03 1.4485807e-03 2.4486060e-03 1.1703837e-03 3.1310445e-03 3.9732842e-03 2.8350922e-03 1.6242765e-03 5.7101906e-03 4.5289508e-04 6.6077306e-03 2.6813364e-03 3.3571693e-03 6.2840633e-03 7.7260784e-03 2.0715363e-03 5.6385417e-05 8.9610646e-04 1.8073537e-02 3.9621048e-03 2.3604444e-03 4.7560860e-04 6.2480089e-03 5.8573026e-03 1.7069942e-04 2.3254450e-04 6.3902073e-03 3.8624324e-03 3.9083302e-03 1.0347438e-03 3.4531305e-03 3.5564063e-03 5.5040957e-03 1.4887365e-03 4.1720167e-03 8.6822743e-03 2.8448886e-04 5.1458480e-04 4.8536300e-03 5.8397245e-03 3.9827712e-03 1.8058791e-02 1.4485807e-03 1.2408841e-03 1.8448387e-03 1.0796845e-02 8.0405443e-03 3.4808504e-03 5.2754145e-04 8.8520211e-04 8.8923137e-03 1.4043268e-02 2.4126814e-02 9.1698320e-03 1.1327807e-02 1.9771649e-03 4.5360079e-03 1.5317882e-02 2.1762150e-02 5.6097757e-03 7.1877025e-03 6.8496039e-03 1.4100359e-02 5.3810596e-02 9.9629280e-03 3.1307769e-02 2.0553715e-02 2.1937453e-02 1.5991855e-02 2.5676129e-02 2.8613140e-02 1.8469501e-02 2.1322428e-02 3.3003652e-02 1.2471005e-02 8.9752587e-03 2.2249180e-02 1.9221338e-02 3.4330234e-02 3.7274155e-02 1.4283960e-02 1.0136722e-02 7.4549090e-03 5.7532344e-02 2.7732649e-02 1.8492242e-02 1.6393364e-02 3.4241314e-02 1.8497141e-02 9.1551303e-03 9.1767161e-03 1.4470512e-02 6.3039218e-03 2.8459911e-02 9.7413243e-03 2.5409718e-02 3.4719577e-03 3.2484401e-02 7.0164005e-03 2.0017467e-02 3.2249701e-02 1.2084843e-02 7.5073944e-03 
5.8227226e-03 1.7146634e-02 2.5055287e-02 2.9865216e-02 2.0553715e-02 2.0008085e-02 1.9953689e-02 2.7764157e-02 3.0721044e-02 1.4086953e-02 7.0439235e-03 6.3871700e-03 6.2932024e-03 2.4881477e-02 1.2570656e-04 9.2318283e-03 1.1425023e-02 3.9781027e-03 6.8287051e-03 6.6316807e-03 5.1943433e-03 1.2253382e-04 1.3842125e-03 4.4535183e-03 2.1436401e-02 2.8241776e-03 6.8828353e-02 4.7091014e-02 4.2520568e-02 4.3241334e-02 5.5155458e-02 5.7042053e-02 4.8981701e-02 4.7857882e-02 6.2261278e-02 3.0649818e-02 1.4761608e-02 4.2406580e-02 3.4493935e-02 6.3340671e-02 6.5239814e-02 2.8920518e-02 2.9750953e-02 2.8851937e-02 9.4568410e-02 5.1830706e-02 3.5536784e-02 4.0198717e-02 6.4555077e-02 2.8697450e-02 2.7846993e-02 2.7347316e-02 2.1934395e-02 1.5675611e-02 5.6443004e-02 2.4878970e-02 4.7356921e-02 1.4503825e-02 6.0963940e-02 2.0840976e-02 5.2367097e-02 4.8672170e-02 3.5784762e-02 2.7656172e-02 1.3836981e-02 2.6623769e-02 4.4650658e-02 3.0852069e-02 4.7091014e-02 4.4370840e-02 4.0176310e-02 3.6600204e-02 4.6858384e-02 2.5835018e-02 2.6019603e-02 2.5994222e-02 6.9728196e-03 4.6608676e-03 9.9835054e-04 9.5223148e-03 2.6272609e-03 2.7884021e-05 1.5635547e-03 2.0372846e-03 5.4615556e-03 2.3300808e-03 2.5698617e-04 1.7457304e-02 7.2996870e-04 5.8453388e-02 3.4095678e-02 2.5737672e-02 3.4628989e-02 4.0687367e-02 4.0032201e-02 4.1519056e-02 3.4313166e-02 4.3287054e-02 1.9386832e-02 5.3532424e-03 2.5306754e-02 1.8013458e-02 4.3531307e-02 4.3448361e-02 1.5446633e-02 2.1111383e-02 2.4519102e-02 6.8935827e-02 3.3123525e-02 1.9821803e-02 2.8570801e-02 4.5547846e-02 1.2302601e-02 1.9692146e-02 1.8929129e-02 8.1934144e-03 8.2421848e-03 3.9256624e-02 1.4936345e-02 2.9083818e-02 1.0370393e-02 4.1828239e-02 1.3093356e-02 4.5785106e-02 2.5766235e-02 2.7913838e-02 2.2194996e-02 6.9953757e-03 1.1047300e-02 2.5843766e-02 1.0033677e-02 3.4095678e-02 3.0429687e-02 2.4458901e-02 1.5454278e-02 2.4603652e-02 1.2196519e-02 2.0322394e-02 2.1606954e-02 2.1763592e-02 3.8778874e-03 1.3140898e-02 1.0973585e-02 6.8570288e-03 1.2689294e-02 8.7227464e-03 2.2446768e-02 1.4768924e-02 9.8837253e-03 3.6594036e-02 1.1176832e-02 4.2371132e-02 2.0489598e-02 1.1322791e-02 2.4620431e-02 2.4482623e-02 2.2010230e-02 3.1108475e-02 2.0217050e-02 2.3141445e-02 1.0944826e-02 3.8599335e-03 1.0791452e-02 6.1542383e-03 2.2743482e-02 2.1333545e-02 6.5515291e-03 1.4388718e-02 2.0852825e-02 3.9383251e-02 1.5461941e-02 7.8878514e-03 1.7372104e-02 2.4911352e-02 2.6880230e-03 1.3798611e-02 1.3025794e-02 2.1193973e-03 6.9838956e-03 2.1270749e-02 8.9787629e-03 1.2743052e-02 1.1335890e-02 2.1877049e-02 9.6533919e-03 3.5351983e-02 7.6384883e-03 2.0107777e-02 1.8067074e-02 6.8413076e-03 2.4741061e-03 1.0039063e-02 3.4670280e-04 2.0489598e-02 1.6923143e-02 1.1154236e-02 2.1521967e-03 7.1850256e-03 4.3898059e-03 1.6491142e-02 1.8665860e-02 7.4245740e-03 1.0715228e-02 3.1685550e-03 5.1120383e-03 5.0451240e-03 4.1323270e-03 1.4489604e-04 8.1318290e-04 3.0839979e-03 1.9714350e-02 1.7947024e-03 6.7440625e-02 4.5128820e-02 3.9916292e-02 4.1909560e-02 5.3040678e-02 5.4550431e-02 4.7872699e-02 4.5818357e-02 5.9509133e-02 2.8778013e-02 1.2927960e-02 3.9749997e-02 3.1835574e-02 6.0466685e-02 6.2068316e-02 2.6673332e-02 2.8274822e-02 2.8015780e-02 9.1028220e-02 4.9020613e-02 3.3018114e-02 3.8378554e-02 6.1817956e-02 2.5959804e-02 2.6422336e-02 2.5876047e-02 1.9520884e-02 1.4184191e-02 5.3918305e-02 2.3137863e-02 4.4568283e-02 1.3507580e-02 5.8174666e-02 1.9385371e-02 5.1416653e-02 4.5132833e-02 3.4491897e-02 2.6637939e-02 1.2404699e-02 2.3988175e-02 4.1744067e-02 
2.7329966e-02 4.5128820e-02 4.2235431e-02 3.7715182e-02 3.3171215e-02 4.3405180e-02 2.3512609e-02 2.4946638e-02 2.5117607e-02 5.7451096e-03 2.0181202e-03 1.1916495e-03 5.0056293e-03 1.1565469e-03 7.7131020e-03 3.5313465e-03 2.1072743e-03 2.6393551e-02 2.0670282e-03 4.4935473e-02 2.3639481e-02 1.6646699e-02 2.4503230e-02 2.9137719e-02 2.8488385e-02 3.0530237e-02 2.3788614e-02 3.1241308e-02 1.1731452e-02 1.7654569e-03 1.6307697e-02 1.0602623e-02 3.1457386e-02 3.1449229e-02 8.6094127e-03 1.3341698e-02 1.6740022e-02 5.3656632e-02 2.2705161e-02 1.1958802e-02 1.9130319e-02 3.3171817e-02 6.4720106e-03 1.2268562e-02 1.1633623e-02 3.5425115e-03 3.7925712e-03 2.7828397e-02 8.3873180e-03 1.9384549e-02 5.8220699e-03 3.0002594e-02 7.2263237e-03 3.4339797e-02 1.7195852e-02 1.8977924e-02 1.4659749e-02 3.0403500e-03 5.5430802e-03 1.6804744e-02 6.5028482e-03 2.3639481e-02 2.0526700e-02 1.5612876e-02 9.4514606e-03 1.6202837e-02 6.2323724e-03 1.3120120e-02 1.4374340e-02 2.8180986e-03 1.0509318e-02 1.7897176e-02 2.8798358e-03 9.1928702e-03 6.3970688e-03 1.0746267e-02 5.0496414e-02 7.7981540e-03 2.4751970e-02 1.2458649e-02 1.1686269e-02 1.0356316e-02 1.6774912e-02 1.8282513e-02 1.3498932e-02 1.2919728e-02 2.1579044e-02 5.2962835e-03 2.7725666e-03 1.1818152e-02 9.1091800e-03 2.2436834e-02 2.4265521e-02 5.8512682e-03 4.4398598e-03 4.1822455e-03 4.2435194e-02 1.6425553e-02 8.8184300e-03 9.0473617e-03 2.2804959e-02 8.4055012e-03 3.7245140e-03 3.5955501e-03 5.8501155e-03 1.2261597e-03 1.8027694e-02 3.2889908e-03 1.4332649e-02 2.5252408e-04 2.0964539e-02 1.7865385e-03 1.5552416e-02 1.8549588e-02 6.8429145e-03 3.5948443e-03 1.0694978e-03 7.5092190e-03 1.3665818e-02 1.7252830e-02 1.2458649e-02 1.1370103e-02 1.0296741e-02 1.5007456e-02 1.7383467e-02 5.5463863e-03 2.9922345e-03 3.0734531e-03 3.1960387e-03 6.8662451e-03 1.2966654e-04 2.7649067e-03 8.3427366e-04 2.7984284e-03 2.9822521e-02 1.2731780e-03 4.3416537e-02 2.4875338e-02 2.0714079e-02 2.3200351e-02 3.0852531e-02 3.1753284e-02 2.8200300e-02 2.5332686e-02 3.5512031e-02 1.2989773e-02 3.4785945e-03 2.0603808e-02 1.5135483e-02 3.6226615e-02 3.7456354e-02 1.1531484e-02 1.2959687e-02 1.3737739e-02 6.0763799e-02 2.7435536e-02 1.5849420e-02 1.9923216e-02 3.7336849e-02 1.1567919e-02 1.1725372e-02 1.1300051e-02 7.4803077e-03 3.9656480e-03 3.1244978e-02 9.2703065e-03 2.4126524e-02 3.9496417e-03 3.4455719e-02 7.0383738e-03 3.1302098e-02 2.5203523e-02 1.7680030e-02 1.2436482e-02 3.0466203e-03 1.0266971e-02 2.2126952e-02 1.5206492e-02 2.4875338e-02 2.2550174e-02 1.9125072e-02 1.7383181e-02 2.3878119e-02 9.5944653e-03 1.1168635e-02 1.1615557e-02 1.3151667e-03 2.5320521e-03 6.0366399e-03 2.7823080e-03 2.9364031e-04 1.6476106e-02 9.7247557e-04 6.0284770e-02 3.5334723e-02 2.6546579e-02 3.6095524e-02 4.1987790e-02 4.1172424e-02 4.3178382e-02 3.5527420e-02 4.4374359e-02 2.0339225e-02 5.8436205e-03 2.6082735e-02 1.8603485e-02 4.4570054e-02 4.4351810e-02 1.6144347e-02 2.2244142e-02 2.5921932e-02 7.0056396e-02 3.3998717e-02 2.0524111e-02 2.9752453e-02 4.6679420e-02 1.2673386e-02 2.0806267e-02 2.0008665e-02 8.5632030e-03 8.9793060e-03 4.0369526e-02 1.5814708e-02 2.9884891e-02 1.1327921e-02 4.2874486e-02 1.4012365e-02 4.7563178e-02 2.6115270e-02 2.9259206e-02 2.3493076e-02 7.6990791e-03 1.1423166e-02 2.6517929e-02 9.7961479e-03 3.5334723e-02 3.1531604e-02 2.5300172e-02 1.5549538e-02 2.4969819e-02 1.2759840e-02 2.1558690e-02 2.2929217e-02 6.4418336e-03 6.6136731e-03 4.4102106e-03 9.1481615e-04 8.6358065e-03 2.3009102e-03 7.8700864e-02 5.0063075e-02 3.9499858e-02 5.0635713e-02 
5.7926279e-02 5.6986449e-02 5.8777354e-02 5.0310505e-02 6.0682610e-02 3.1865387e-02 1.2666075e-02 3.8913195e-02 2.9597893e-02 6.0871779e-02 6.0471723e-02 2.6606279e-02 3.4024273e-02 3.7905442e-02 8.9816272e-02 4.8416482e-02 3.2086335e-02 4.3354864e-02 6.3371751e-02 2.1811352e-02 3.2202223e-02 3.1242353e-02 1.6436605e-02 1.6909958e-02 5.6040246e-02 2.6101775e-02 4.3480802e-02 1.9524603e-02 5.8918015e-02 2.3597831e-02 6.3733101e-02 3.8161980e-02 4.2440703e-02 3.5140814e-02 1.5073633e-02 2.0215594e-02 3.9313992e-02 1.6000311e-02 5.0063075e-02 4.5569203e-02 3.8016354e-02 2.4659247e-02 3.6849168e-02 2.2186423e-02 3.2805330e-02 3.4269626e-02 3.8467105e-03 1.2875815e-03 2.5072619e-03 2.9525973e-02 1.2875815e-03 4.2282453e-02 2.3357390e-02 1.8646096e-02 2.2313442e-02 2.9124686e-02 2.9658732e-02 2.7480923e-02 2.3735668e-02 3.3135090e-02 1.1714292e-02 2.4574323e-03 1.8489812e-02 1.3106245e-02 3.3729170e-02 3.4672500e-02 9.9345371e-03 1.2062833e-02 1.3461486e-02 5.7431716e-02 2.5093254e-02 1.3932087e-02 1.8599958e-02 3.4955869e-02 9.5459229e-03 1.0897419e-02 1.0433767e-02 5.8375168e-03 3.2324242e-03 2.9124686e-02 8.1819777e-03 2.1845036e-02 3.6925062e-03 3.2054971e-02 6.2644754e-03 3.0710474e-02 2.2249402e-02 1.6897068e-02 1.1997033e-02 2.3970471e-03 8.3620725e-03 1.9766550e-02 1.2536577e-02 2.3357390e-02 2.0894448e-02 1.7210222e-02 1.4675128e-02 2.1017625e-02 7.9709601e-03 1.0694253e-02 1.1329979e-02 8.0862109e-04 3.9488260e-03 2.3216413e-02 2.2109950e-03 6.3313725e-02 4.2461361e-02 3.8219621e-02 3.8832114e-02 5.0144792e-02 5.1966249e-02 4.4333772e-02 4.3192281e-02 5.6977170e-02 2.6944997e-02 1.2389873e-02 3.8126276e-02 3.0729621e-02 5.8026007e-02 5.9897200e-02 2.5415278e-02 2.6079828e-02 2.5293603e-02 8.8103550e-02 4.7063061e-02 3.1649890e-02 3.5923563e-02 5.9166899e-02 2.5434205e-02 2.4297609e-02 2.3831192e-02 1.9112973e-02 1.3083158e-02 5.1398726e-02 2.1557413e-02 4.2826183e-02 1.1968109e-02 5.5744441e-02 1.7792619e-02 4.7600638e-02 4.4377594e-02 3.1768113e-02 2.4143930e-02 1.1416637e-02 2.3484839e-02 4.0313562e-02 2.8205265e-02 4.2461361e-02 3.9889484e-02 3.5980112e-02 3.3161057e-02 4.2633793e-02 2.2600989e-02 2.2605649e-02 2.2606045e-02 1.6651142e-03 2.2593118e-02 4.7494657e-04 5.5766903e-02 3.4735344e-02 2.9594132e-02 3.2557240e-02 4.1723024e-02 4.2746173e-02 3.8238608e-02 3.5278801e-02 4.7033204e-02 2.0363061e-02 7.2717611e-03 2.9420951e-02 2.2560431e-02 4.7813714e-02 4.9078515e-02 1.8314288e-02 2.0299319e-02 2.0869007e-02 7.5386883e-02 3.7549246e-02 2.3623644e-02 2.8844458e-02 4.9142563e-02 1.7633849e-02 1.8744071e-02 1.8222205e-02 1.2402012e-02 8.3988430e-03 4.2147698e-02 1.5629140e-02 3.3602383e-02 8.3018288e-03 4.5796447e-02 1.2702505e-02 4.1687026e-02 3.3943329e-02 2.5980350e-02 1.9415550e-02 7.0260502e-03 1.6012157e-02 3.1063717e-02 1.9520820e-02 3.4735344e-02 3.1983672e-02 2.7741891e-02 2.3833980e-02 3.2440091e-02 1.5637054e-02 1.7874227e-02 1.8281297e-02 1.4972117e-02 3.8328039e-04 6.3723448e-02 3.8719440e-02 3.0384566e-02 3.8629777e-02 4.5827446e-02 4.5472032e-02 4.5653244e-02 3.9023449e-02 4.9116846e-02 2.2930601e-02 7.4083466e-03 2.9962579e-02 2.2100137e-02 4.9469036e-02 4.9598054e-02 1.8997460e-02 2.4379661e-02 2.7279745e-02 7.6566902e-02 3.8413283e-02 2.3958684e-02 3.2715136e-02 5.1484761e-02 1.5886249e-02 2.2794916e-02 2.2023153e-02 1.1094162e-02 1.0331639e-02 4.4681292e-02 1.7978253e-02 3.4097516e-02 1.2099271e-02 4.7608429e-02 1.5662138e-02 4.9957613e-02 3.0977854e-02 3.1455087e-02 2.5013254e-02 8.8604244e-03 1.4432007e-02 3.0714741e-02 1.3426163e-02 3.8719440e-02 
3.4992868e-02 2.8902863e-02 1.9656006e-02 2.9684992e-02 1.5494562e-02 2.3064694e-02 2.4203450e-02 1.8836956e-02 1.3755831e-01 9.8685385e-02 8.2425470e-02 1.0000651e-01 1.0925759e-01 1.0726669e-01 1.1120208e-01 9.8918033e-02 1.1167083e-01 7.2661493e-02 4.1665349e-02 8.1426911e-02 6.7501542e-02 1.1159988e-01 1.1014878e-01 6.3989575e-02 7.6224987e-02 8.1835804e-02 1.4773346e-01 9.4587815e-02 7.1696153e-02 8.9447560e-02 1.1534202e-01 5.4876111e-02 7.3538690e-02 7.2081812e-02 4.6952332e-02 4.9385723e-02 1.0589901e-01 6.4038977e-02 8.7644093e-02 5.3625558e-02 1.0917520e-01 6.0322002e-02 1.1787190e-01 7.6918821e-02 8.8499122e-02 7.7885622e-02 4.6255911e-02 5.2612368e-02 8.1239445e-02 4.0019052e-02 9.8685385e-02 9.2131722e-02 8.0633770e-02 5.6243805e-02 7.5363077e-02 5.6811044e-02 7.4440381e-02 7.6512320e-02 5.8097767e-02 3.5211195e-02 2.8463781e-02 3.4215950e-02 4.2147698e-02 4.2414656e-02 4.0542228e-02 3.5615822e-02 4.6295613e-02 2.0363061e-02 6.3798220e-03 2.8163995e-02 2.0925233e-02 4.6838114e-02 4.7487902e-02 1.7368541e-02 2.1117782e-02 2.2967145e-02 7.3810390e-02 3.6316652e-02 2.2375879e-02 2.9367287e-02 4.8511603e-02 1.5463833e-02 1.9583828e-02 1.8938570e-02 1.0601468e-02 8.3988430e-03 4.1723024e-02 1.5629140e-02 3.2244606e-02 9.3153170e-03 4.4933305e-02 1.3111918e-02 4.4412271e-02 3.0809608e-02 2.7438827e-02 2.1093197e-02 7.0260502e-03 1.3969417e-02 2.9313944e-02 1.5218585e-02 3.5211195e-02 3.1983672e-02 2.6841235e-02 2.0313092e-02 2.9439905e-02 1.4332974e-02 1.9364178e-02 2.0170126e-02 3.9924060e-03 1.0538198e-02 3.2148883e-03 2.7366019e-03 4.4621109e-03 1.7397347e-03 4.1425525e-03 5.2723146e-03 1.1136640e-02 2.9112354e-02 1.1160954e-02 1.6541883e-02 6.0336652e-03 8.3590571e-03 1.6026960e-02 9.4770245e-03 8.7523451e-03 9.5282841e-03 8.5487892e-03 1.3900105e-02 5.7309995e-03 4.7773544e-03 2.3853571e-02 1.0495078e-02 1.1045936e-02 2.7111420e-02 2.3076623e-02 4.8207470e-03 1.4681091e-02 1.0141190e-02 2.1316125e-02 5.8141126e-03 1.6474359e-02 1.3064397e-03 2.0347529e-02 5.7992324e-03 9.5368199e-03 2.5369333e-02 2.4565418e-02 1.2896533e-02 4.4389699e-02 3.9924060e-03 5.8442541e-03 1.0341765e-02 2.9771308e-02 2.0024906e-02 2.0168043e-02 1.0668382e-02 1.0461554e-02 1.6977050e-03 8.0038314e-04 3.2736649e-04 7.3247771e-04 1.7861084e-03 1.2199790e-05 1.6168506e-03 2.0903986e-03 1.2529506e-02 1.9650001e-03 4.3150624e-03 2.0139182e-03 3.1759995e-03 4.1258242e-03 2.0323215e-03 3.5878910e-03 9.4977361e-03 1.5488203e-03 3.0112961e-03 2.7909675e-04 1.8152664e-03 8.3701176e-03 2.5714996e-03 2.7148441e-03 1.0520423e-02 9.2964166e-03 7.5284963e-04 3.9888853e-03 1.8331853e-03 9.4586856e-03 1.6039412e-03 5.4889906e-03 2.8052973e-03 7.3341015e-03 1.2316222e-03 3.2601592e-03 1.0868316e-02 8.8162037e-03 2.9210959e-03 2.1963225e-02 0.0000000e+00 1.8230646e-04 1.5254865e-03 1.2273284e-02 6.9788794e-03 6.3577637e-03 3.5631537e-03 4.1101326e-03 4.2047124e-03 2.6033919e-03 1.7916004e-03 6.7524244e-03 1.5279872e-03 2.3303877e-03 1.3468412e-03 7.9573793e-03 9.8329441e-06 8.0754860e-04 2.3639655e-03 2.4986002e-03 1.3799583e-03 2.6729899e-03 6.2996248e-03 1.0853281e-02 4.8574547e-04 3.9201723e-04 1.4418237e-03 2.9002781e-03 3.0725233e-03 3.0407572e-03 2.9093497e-03 5.1175009e-03 6.6599859e-03 1.5926877e-03 2.5889139e-03 1.3713626e-04 8.5117654e-03 1.9829707e-03 4.3442759e-03 8.7298845e-03 2.1325966e-03 3.5994846e-03 5.1472858e-03 7.9423211e-03 3.5289513e-03 2.0294631e-04 1.1925423e-02 1.6977050e-03 7.8274456e-04 4.8500499e-05 4.9289073e-03 1.8879551e-03 2.5218165e-03 4.9495642e-03 6.1589498e-03 1.4775011e-03 
2.8508338e-03 3.8437964e-04 1.0082748e-03 4.3870644e-03 2.7766403e-03 1.3317076e-02 4.6107743e-03 7.0365754e-03 5.0970174e-03 7.0294656e-03 5.8145812e-03 1.6991258e-03 1.6190058e-03 1.4192459e-02 4.5311136e-03 5.3518685e-03 7.5607221e-04 4.5347742e-03 1.1372343e-02 2.1219078e-03 2.3903563e-03 1.2745790e-02 9.1602079e-03 2.9606388e-03 4.3905218e-03 4.7897027e-03 8.0493172e-03 4.4779450e-03 5.1908739e-03 9.8295198e-04 1.2281083e-02 3.7969538e-04 1.7665186e-03 1.0605818e-02 1.1571194e-02 6.2101277e-03 2.7119271e-02 8.0038314e-04 1.4492761e-03 3.6743113e-03 1.7056692e-02 1.1710246e-02 8.2821554e-03 2.2240766e-03 2.2684005e-03 3.0380682e-04 2.0343287e-03 2.8037036e-04 8.3123377e-04 3.9939935e-03 1.6647447e-02 2.9061444e-03 6.1278767e-03 1.1769582e-03 2.2926522e-03 6.3717175e-03 3.9793414e-03 5.6921961e-03 6.6386310e-03 1.6577942e-03 4.6178179e-03 1.2108195e-03 8.4773320e-04 1.1017765e-02 4.7223063e-03 4.9236953e-03 1.3900771e-02 1.3046714e-02 3.7972280e-04 6.5335900e-03 2.3480655e-03 1.3286542e-02 9.3142474e-04 8.4820747e-03 2.8343361e-03 8.2403259e-03 2.5815546e-03 5.4537681e-03 1.4900562e-02 1.1670902e-02 3.7659184e-03 2.5593433e-02 3.2736649e-04 7.7091107e-04 2.6079554e-03 1.4611634e-02 8.0055625e-03 9.0564615e-03 5.9260895e-03 6.5055071e-03 3.8779943e-03 5.7015696e-04 1.7303569e-04 4.1690936e-03 1.6322670e-02 1.9974930e-03 5.0009825e-03 3.2570724e-04 9.5057293e-04 5.8304604e-03 4.8059418e-03 7.5457291e-03 5.1241997e-03 6.9111123e-04 3.7782470e-03 1.7092344e-03 2.5698914e-04 9.5453465e-03 5.5846827e-03 5.6938512e-03 1.2788699e-02 1.3393372e-02 7.3809633e-06 6.7682476e-03 1.3200598e-03 1.4442150e-02 1.8441195e-04 9.0822620e-03 4.9911804e-03 5.8437761e-03 3.8589879e-03 6.9829557e-03 1.5273413e-02 1.0318402e-02 2.4584659e-03 2.2525564e-02 7.3247771e-04 8.2385517e-04 1.9942911e-03 1.2041849e-02 5.7258386e-03 8.2738022e-03 7.3225251e-03 8.2204997e-03 2.0515682e-03 5.4510656e-03 5.2217407e-03 1.7964531e-02 7.2742632e-03 1.0588415e-02 6.2949332e-03 8.6345788e-03 9.1795241e-03 3.5876856e-03 2.6991302e-03 1.4376485e-02 6.5862079e-03 8.4804528e-03 2.1372463e-03 5.3927480e-03 1.5896874e-02 4.1394106e-03 4.5375167e-03 1.7523452e-02 1.2909742e-02 4.0941134e-03 7.2891356e-03 7.2227860e-03 1.1126344e-02 5.7124492e-03 8.1126160e-03 1.3907276e-04 1.6260954e-02 1.3854449e-03 3.2122771e-03 1.4553993e-02 1.6151019e-02 9.1578470e-03 3.3857588e-02 1.7861084e-03 2.9619943e-03 6.1829720e-03 2.2326762e-02 1.5680260e-02 1.2223630e-02 3.9245535e-03 3.7050303e-03 1.3686449e-03 2.1603250e-03 1.2649428e-02 1.7801502e-03 4.1458992e-03 1.7278919e-03 2.7991042e-03 4.0888611e-03 2.2218452e-03 3.9716339e-03 8.9665631e-03 1.3013529e-03 2.8806398e-03 3.4594790e-04 1.5677728e-03 8.1916354e-03 2.7802616e-03 2.9107124e-03 1.0452698e-02 9.5134829e-03 5.8171764e-04 4.1098444e-03 1.6001879e-03 9.8257537e-03 1.3468918e-03 5.7037585e-03 3.1132824e-03 6.8805675e-03 1.4645264e-03 3.5912866e-03 1.1107885e-02 8.6722521e-03 2.6587395e-03 2.1561220e-02 1.2199790e-05 1.4772036e-04 1.4023821e-03 1.1878859e-02 6.5538274e-03 6.3060622e-03 3.8811059e-03 4.4873121e-03 5.6719205e-03 1.8601099e-02 2.5037679e-03 5.8026549e-03 3.0990295e-05 3.8568498e-04 7.1301713e-03 6.6581271e-03 9.9891912e-03 3.5467136e-03 7.7986655e-04 4.6294369e-03 2.9226786e-03 3.3006174e-05 1.0560037e-02 7.5497742e-03 7.6393818e-03 1.4326258e-02 1.5841028e-02 1.7171643e-04 8.5994033e-03 1.5671556e-03 1.7334506e-02 1.9316046e-05 1.1306536e-02 6.6195881e-03 5.5794173e-03 5.6606012e-03 9.3044548e-03 1.7865075e-02 1.1490070e-02 2.7178516e-03 2.3185889e-02 1.6168506e-03 
1.6521970e-03 2.7157547e-03 1.2355858e-02 5.5768992e-03 9.6617637e-03 9.6558503e-03 1.0729574e-02 4.4050618e-03 1.4462579e-03 1.4084514e-03 6.0437228e-03 6.9280724e-03 5.9092958e-04 3.5267150e-04 2.3264461e-03 1.8099026e-02 3.0921484e-03 8.6005594e-04 9.1287231e-04 6.3843974e-03 3.0627943e-03 4.0297343e-04 3.2526954e-04 3.6399798e-03 2.6478661e-03 3.9939935e-03 3.1762278e-04 2.2988741e-03 3.2596799e-03 5.3016678e-03 9.7044163e-04 7.0435695e-03 5.7009823e-03 1.4263218e-03 1.5142545e-03 3.5226184e-03 3.0792135e-03 2.3251410e-03 1.2857609e-02 2.0903986e-03 1.3292907e-03 8.8570158e-04 6.8622586e-03 5.1349839e-03 1.4775225e-03 1.2815928e-03 1.9995407e-03 7.7887386e-03 4.2718762e-03 1.8880208e-02 1.9223691e-02 2.7240761e-03 5.5247078e-03 8.3688264e-03 3.7206770e-02 1.2358468e-02 4.8889738e-03 9.3012979e-03 2.0063076e-02 2.3808756e-03 4.8847228e-03 4.4654367e-03 8.0211221e-04 5.5572260e-04 1.5849918e-02 2.4737465e-03 1.0011166e-02 2.0320189e-03 1.7697241e-02 2.0106232e-03 2.1009310e-02 1.0002251e-02 9.3994441e-03 6.7786672e-03 4.3329436e-04 1.8154672e-03 8.4395042e-03 6.2214471e-03 1.2529506e-02 1.0325253e-02 7.1412079e-03 5.6981926e-03 9.1712120e-03 1.6108637e-03 5.7438691e-03 6.7900644e-03 6.8888062e-04 2.5080626e-03 2.5613963e-03 1.3274318e-03 2.8814590e-03 6.6724246e-03 1.1044919e-02 5.3338455e-04 3.3999007e-04 1.6762239e-03 3.1000026e-03 2.8256325e-03 3.2387801e-03 3.0867064e-03 4.8730365e-03 6.6378483e-03 1.7822362e-03 2.6462661e-03 1.4123935e-04 8.6230682e-03 2.1318487e-03 4.4281251e-03 9.3233552e-03 1.8673327e-03 3.9343847e-03 5.4568897e-03 7.9015195e-03 3.2859077e-03 1.3180217e-04 1.1320265e-02 1.9650001e-03 9.6799653e-04 7.8282421e-05 4.5166083e-03 1.6331972e-03 2.3904078e-03 5.2224500e-03 6.4841213e-03 5.7642096e-03 5.5885978e-03 3.7048759e-04 3.1695738e-03 7.3434699e-03 1.6768125e-02 2.3743894e-03 1.1932462e-04 3.2200650e-03 6.6976868e-03 7.3134819e-04 3.2564607e-03 2.9664521e-03 1.9437509e-03 4.0525881e-03 4.6657625e-03 1.7256268e-03 1.3667365e-03 6.4082141e-03 5.2167922e-03 3.0823107e-03 1.3133486e-02 1.7068449e-03 5.3434665e-03 5.8085657e-03 4.9261084e-03 9.6684380e-04 7.1816432e-04 6.8418503e-03 4.3150624e-03 2.7358698e-03 7.4246714e-04 2.1999970e-03 1.3715192e-03 6.3897855e-04 5.2531215e-03 6.6567087e-03 1.9959041e-04 7.2877827e-03 7.2348769e-03 1.0891220e-02 3.2459725e-03 7.4122702e-04 4.6749634e-03 3.3669230e-03 7.9583955e-05 1.0437521e-02 8.1436642e-03 8.2061472e-03 1.4337740e-02 1.6319841e-02 3.0462366e-04 9.0085125e-03 1.5214225e-03 1.8068443e-02 2.5934558e-05 1.1842923e-02 7.5545176e-03 5.1162105e-03 6.3606028e-03 1.0107865e-02 1.8360102e-02 1.1411116e-02 2.5943653e-03 2.2590562e-02 2.0139182e-03 1.9469619e-03 2.8165944e-03 1.1880076e-02 5.1539049e-03 9.7568917e-03 1.0422756e-02 1.1596876e-02 7.5663186e-03 8.6488322e-03 1.3133604e-02 3.0107568e-03 7.9406152e-04 4.7466095e-03 4.5711602e-03 4.9936224e-04 9.9296431e-03 9.5749791e-03 9.5587917e-03 1.4081306e-02 1.7234890e-02 8.7040364e-04 9.9040316e-03 1.5012448e-03 1.9639569e-02 3.0179394e-04 1.3010570e-02 1.0161281e-02 3.9519870e-03 8.1767554e-03 1.2080688e-02 1.9279439e-02 1.0989202e-02 2.3266303e-03 2.0713423e-02 3.1759995e-03 2.8067172e-03 3.1032715e-03 1.0490119e-02 4.0786420e-03 9.8110939e-03 1.2277454e-02 1.3709414e-02 1.7582839e-03 4.9489607e-03 1.9934064e-02 3.5016865e-03 3.4697267e-04 2.6310541e-03 8.0617166e-03 9.6411709e-04 1.7064375e-03 1.4666650e-03 1.4745772e-03 2.1108592e-03 5.5260764e-03 5.1802495e-04 2.3341578e-03 3.7167048e-03 6.5626516e-03 1.3180061e-03 1.1565092e-02 3.6658759e-03 3.8514458e-03 
3.6514318e-03 2.8125792e-03 9.8545506e-04 1.7565517e-03 7.9513446e-03 4.1258242e-03 2.7177731e-03 1.0483123e-03 3.5558284e-03 3.1649242e-03 2.4110016e-04 3.1231514e-03 4.2303212e-03 8.7046320e-04 1.9803680e-02 4.5057787e-03 2.2875453e-03 8.1473256e-04 7.2784764e-03 5.2841523e-03 3.1936914e-05 6.0579771e-05 5.5427829e-03 2.9996038e-03 4.7111715e-03 6.7018226e-04 3.8206460e-03 2.7379425e-03 6.3961850e-03 9.7000606e-04 5.0455840e-03 8.7517746e-03 5.2181192e-04 4.2086901e-04 3.8740476e-03 5.1909001e-03 4.1846614e-03 1.6912712e-02 2.0323215e-03 1.6814934e-03 2.0140995e-03 1.0246238e-02 8.0709165e-03 2.9533168e-03 3.5001524e-04 7.2402168e-04 2.4525121e-02 8.3786268e-03 5.9730003e-03 2.2241688e-03 1.0490949e-02 1.0052150e-02 8.4481649e-04 1.0306755e-03 9.7104895e-03 4.7051149e-03 7.5682486e-03 2.5521754e-03 7.8053009e-03 3.0734717e-03 9.8801938e-03 2.2384251e-03 3.6192976e-03 1.5052125e-02 6.1736712e-04 1.0280118e-04 5.5375454e-03 9.7589612e-03 8.6474768e-03 2.4375316e-02 3.5878910e-03 3.8390770e-03 5.3089764e-03 1.6859170e-02 1.4176552e-02 6.5576789e-03 2.5984028e-04 9.3320862e-05 6.7547274e-03 1.5238284e-02 1.2713534e-02 3.0890016e-03 2.3712664e-02 2.1340343e-02 2.1521507e-02 3.0005294e-02 3.3960619e-02 5.2290080e-03 2.3006673e-02 8.7125215e-03 3.6416136e-02 3.8281789e-03 2.7389250e-02 1.5157530e-02 1.2639929e-02 1.7411617e-02 2.3820503e-02 3.6850697e-02 2.5373970e-02 1.0602298e-02 3.7784525e-02 9.4977361e-03 1.0004799e-02 1.1952848e-02 2.3559319e-02 1.3109247e-02 2.3603108e-02 2.4562463e-02 2.6017612e-02 1.7241949e-03 2.0286689e-03 1.1335916e-03 5.6322483e-03 5.1221157e-03 5.0559971e-03 8.5835151e-03 1.0650923e-02 5.5566853e-04 5.1134826e-03 1.3892193e-04 1.2640034e-02 5.6438567e-04 7.4313522e-03 8.3161300e-03 2.5369950e-03 4.7342848e-03 7.3166190e-03 1.2276381e-02 6.3456614e-03 5.8916640e-04 1.5594819e-02 1.5488203e-03 9.3082136e-04 7.5849611e-04 7.0579696e-03 2.4440449e-03 5.1791902e-03 7.3166180e-03 8.5826500e-03 2.1075364e-03 5.4157920e-03 1.3748154e-03 2.4371345e-03 2.2201300e-03 2.6881175e-03 4.1574302e-03 3.5021271e-03 1.4032793e-03 9.1188034e-04 6.1085103e-03 4.1365224e-03 2.7438304e-03 1.0770631e-02 2.1519140e-03 3.9794011e-03 4.6480198e-03 5.1312578e-03 1.6288841e-03 5.4679156e-04 8.7532006e-03 3.0112961e-03 1.7262374e-03 2.9055719e-04 3.3158230e-03 1.8013008e-03 9.2887256e-04 4.2320076e-03 5.4776329e-03 3.2889511e-03 6.5019207e-03 1.1654311e-03 1.2550970e-03 7.9744383e-03 6.4095512e-03 1.6783080e-03 2.2182877e-03 1.9380758e-03 6.5001122e-03 2.8053456e-03 3.3035106e-03 3.3582234e-03 7.0744470e-03 5.6581852e-04 1.8141689e-03 7.7202001e-03 6.7565678e-03 2.7207024e-03 1.9179880e-02 2.7909675e-04 2.2008347e-04 1.1064738e-03 1.0688407e-02 6.6082062e-03 4.4435997e-03 1.9606397e-03 2.4773484e-03 1.1763001e-02 8.2259536e-03 8.3480149e-03 1.5692475e-02 1.7083720e-02 2.8355914e-04 9.4960515e-03 2.0536401e-03 1.8469924e-02 1.0092056e-04 1.2279622e-02 6.4441301e-03 6.4046448e-03 6.0274458e-03 9.8857207e-03 1.9188640e-02 1.2736270e-02 3.3492132e-03 2.4941935e-02 1.8152664e-03 1.9865894e-03 3.3071796e-03 1.3641872e-02 6.4197769e-03 1.0769048e-02 1.0309787e-02 1.1341762e-02 5.1119032e-03 4.6662587e-03 4.6505160e-04 3.2269116e-03 9.0774431e-03 2.5215213e-03 4.0057628e-03 6.1199030e-03 9.7447895e-03 3.5360157e-03 1.8995080e-02 2.6805580e-03 8.6582433e-03 8.1342278e-03 3.7198152e-03 3.9443681e-05 2.6670326e-03 3.4016785e-03 8.3701176e-03 6.1426721e-03 2.9189037e-03 1.0083175e-03 2.2713681e-03 3.9595770e-04 7.2341855e-03 8.8447009e-03 1.1230173e-05 5.1389929e-03 2.4707449e-03 5.4731760e-03 
5.1980845e-04 4.3112681e-03 2.1787917e-03 7.2534046e-03 6.7672528e-04 5.6623211e-03 9.1277214e-03 7.3619628e-04 3.7016392e-04 3.2564607e-03 4.9531882e-03 4.5742792e-03 1.6438776e-02 2.5714996e-03 2.1421258e-03 2.3256067e-03 1.0175775e-02 8.4088183e-03 2.7613050e-03 2.4755840e-04 6.0873636e-04 4.6703189e-03 2.2089026e-03 5.5642336e-03 3.7824549e-04 4.1878969e-03 2.0462491e-03 7.3148116e-03 5.4583143e-04 6.1390745e-03 8.7247882e-03 9.1073145e-04 4.9034572e-04 2.9664521e-03 4.5041455e-03 4.3650355e-03 1.5605807e-02 2.7148441e-03 2.1951828e-03 2.2077213e-03 9.5777719e-03 8.0120192e-03 2.4278861e-03 3.2552080e-04 7.3734328e-04 1.7636357e-03 1.2290813e-02 2.3919392e-03 6.5534766e-03 4.2355076e-03 1.3428854e-02 2.7760250e-03 2.0719142e-02 5.3736931e-03 9.3604766e-03 7.8350168e-03 1.9437509e-03 2.3451546e-04 4.9805974e-03 3.4615227e-03 1.0520423e-02 8.1642247e-03 4.6947450e-03 2.2490758e-03 4.7893620e-03 5.2359029e-04 6.8110246e-03 8.2524171e-03 1.3046714e-02 1.1318623e-03 8.6855001e-03 5.4130623e-04 1.5131426e-02 5.6179632e-04 1.5407719e-02 1.0669118e-02 5.8683852e-03 3.5797271e-03 6.2414181e-05 2.6714308e-03 7.7088950e-03 9.8081718e-03 9.2964166e-03 7.7221233e-03 5.7330158e-03 7.6711675e-03 9.7842038e-03 1.6419007e-03 2.8364539e-03 3.5173281e-03 6.5335900e-03 1.1340078e-03 1.4192754e-02 1.5964523e-04 8.8481511e-03 5.2711173e-03 5.4383051e-03 3.8856537e-03 6.9538378e-03 1.4900562e-02 9.8443835e-03 2.2013577e-03 2.1746585e-02 7.5284963e-04 7.7091107e-04 1.8026328e-03 1.1463429e-02 5.3229428e-03 7.8904169e-03 7.2583306e-03 8.1944055e-03 3.9135288e-03 1.7196090e-03 8.1113282e-03 2.2942459e-04 9.3463740e-03 6.8261058e-03 2.3665724e-03 1.6309596e-03 1.7256268e-03 2.3397606e-03 3.5922701e-03 1.1208568e-02 3.9888853e-03 2.9455145e-03 1.9660659e-03 6.5467152e-03 6.1492489e-03 9.3538874e-04 1.2212079e-03 1.9267388e-03 1.0793365e-02 1.2592720e-03 6.0147390e-03 9.1609131e-03 1.8537817e-03 4.5335508e-03 6.6172179e-03 1.0131803e-02 4.6089934e-03 1.7595473e-04 1.2987364e-02 1.8331853e-03 9.7087402e-04 3.3609260e-04 5.4018686e-03 1.7068395e-03 3.6646912e-03 6.4787894e-03 7.7901480e-03 1.6754655e-02 6.9641544e-04 1.3182224e-02 1.4651385e-02 4.9466239e-03 2.3931163e-03 5.9263439e-04 5.4211666e-03 1.0221921e-02 1.4928620e-02 9.4586856e-03 8.3819492e-03 7.3339215e-03 1.2011676e-02 1.3620169e-02 3.6119007e-03 1.8435886e-03 2.0979826e-03 1.0794235e-02 6.9811950e-03 4.9473584e-03 5.5993500e-03 9.1192858e-03 1.7103671e-02 1.0654852e-02 2.2936354e-03 2.1887320e-02 1.6039412e-03 1.5236521e-03 2.3684718e-03 1.1405606e-02 4.9416620e-03 8.9601309e-03 9.4128890e-03 1.0537337e-02 1.0144176e-02 9.3650956e-03 2.8049928e-03 1.4261619e-03 9.6795720e-04 3.1673467e-03 5.6308712e-03 1.2399657e-02 5.4889906e-03 4.4629844e-03 3.5148873e-03 8.2921709e-03 8.5570702e-03 1.5767832e-03 9.7941910e-04 1.5044401e-03 1.9162648e-02 2.3258011e-03 4.3611697e-03 1.7154674e-02 1.9266334e-02 1.1399040e-02 3.8294804e-02 2.8052973e-03 4.2987486e-03 8.1194560e-03 2.5933333e-02 1.8564970e-02 1.4940871e-02 5.2230072e-03 4.8296564e-03 1.1136786e-02 1.2996602e-02 1.1896568e-02 3.3692870e-03 1.0295085e-03 6.7928621e-03 7.3341015e-03 5.3674586e-03 2.6380919e-03 1.7126363e-03 1.9252416e-05 3.9628066e-03 1.2371798e-02 1.4417093e-02 5.5916770e-04 7.0172680e-03 8.6736288e-03 5.5025710e-03 2.2796596e-02 1.2316222e-03 1.4793666e-03 2.9451077e-03 1.4303806e-02 1.0475492e-02 5.7470026e-03 7.8882742e-04 9.0526661e-04 4.3645694e-03 7.8632956e-03 7.2260963e-03 2.1331409e-02 3.2601592e-03 3.2514734e-03 4.2300937e-03 1.4385386e-02 1.2160577e-02 5.0215597e-03 
4.3279739e-05 5.1818418e-05 3.0767343e-03 9.0015857e-03 9.8042907e-03 1.0868316e-02 9.1698320e-03 6.9442206e-03 8.2610449e-03 1.0963075e-02 2.1355881e-03 3.5431999e-03 4.2180590e-03 3.2143555e-03 3.3819870e-03 8.8162037e-03 6.5643841e-03 3.2940615e-03 1.2995092e-03 2.9075684e-03 3.1897532e-04 6.9331287e-03 8.4863836e-03 1.0164058e-02 2.9210959e-03 1.7078622e-03 4.0814516e-04 3.6297378e-03 8.7693352e-04 2.6826046e-03 6.9132822e-03 8.3825591e-03 2.1963225e-02 1.8183756e-02 1.2010413e-02 1.7351817e-03 6.4958425e-03 5.7545512e-03 1.9704198e-02 2.2181841e-02 1.8230646e-04 1.5254865e-03 1.2273284e-02 6.9788794e-03 6.3577637e-03 3.5631537e-03 4.1101326e-03 6.5324999e-04 9.4729463e-03 5.0293123e-03 4.5622977e-03 3.3814637e-03 4.1233865e-03 5.1615257e-03 2.3326030e-03 2.1577255e-03 4.0325048e-03 5.1426622e-03 1.5351757e-03 2.6422594e-03 1.3282600e-02 1.5461999e-02 3.4307479e-03 1.1531055e-02 1.3518551e-02 4.2918744e-03 5.5398788e-03 8.4122900e-05
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-correlation-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-correlation-ml.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2a17a2a8fb002493fff38c7ed059668867768a7e
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-correlation-ml.txt
@@ -0,0 +1 @@
+ 9.2507465e-01 9.6528566e-01 8.7255441e-01 1.1287379e+00 8.7318727e-01 1.0767102e+00 9.1419676e-01 1.1503304e+00 9.8074509e-01 1.0135025e+00 1.0495025e+00 9.4794536e-01 9.6829273e-01 1.1345767e+00 1.1048008e+00 9.2407796e-01 1.0228634e+00 9.3853195e-01 9.9377619e-01 1.0407662e+00 9.5048989e-01 9.0465688e-01 9.8056930e-01 8.9777156e-01 9.6357127e-01 9.3864452e-01 9.9754613e-01 9.7271356e-01 8.4383151e-01 9.6981983e-01 9.7510267e-01 1.0112663e+00 7.8730400e-01 1.0299498e+00 9.9307979e-01 9.0239520e-01 8.5428231e-01 8.8972742e-01 8.5933162e-01 9.6625934e-01 9.4175449e-01 9.9120729e-01 1.0503963e+00 8.8223053e-01 1.3261434e+00 1.1063209e+00 8.4058398e-01 1.0844267e+00 1.1153093e+00 1.0092643e+00 8.9585237e-01 1.0599818e+00 1.2321707e+00 1.1359624e+00 8.3503556e-01 1.1792243e+00 7.9159781e-01 1.0830419e+00 1.2181870e+00 9.9888500e-01 1.0227144e+00 6.8557277e-01 9.6836193e-01 1.1061227e+00 1.0883453e+00 9.5681974e-01 9.9436299e-01 1.0304323e+00 1.1273949e+00 1.0735563e+00 1.0582583e+00 9.6040272e-01 1.0032137e+00 8.4900547e-01 1.1035351e+00 8.7867480e-01 9.6433176e-01 9.1850122e-01 8.9337435e-01 1.0449390e+00 8.9639384e-01 9.6704971e-01 1.0084258e+00 1.0528587e+00 1.1764481e+00 1.0913280e+00 1.0136672e+00 1.2737156e+00 9.5130359e-01 1.0367909e+00 1.1983402e+00 1.1319901e+00 1.1117462e+00 1.0343695e+00 1.0838628e+00 7.5266057e-01 1.0763316e+00 8.8067924e-01 9.6734383e-01 9.8800551e-01 1.2265742e+00 7.8833055e-01 1.0338670e+00 8.6666625e-01 9.9039950e-01 9.7142684e-01 9.3138616e-01 8.5849977e-01 8.5486301e-01 1.0516028e+00 1.1105313e+00 9.5943505e-01 9.8845171e-01 1.0566288e+00 9.9712198e-01 9.5545756e-01 1.1817974e+00 9.9128482e-01 1.0117892e+00 1.0979115e+00 1.0493943e+00 9.1318848e-01 9.3157311e-01 8.7073304e-01 1.2459441e+00 9.3412689e-01 1.0482297e+00 9.4224032e-01 9.5134153e-01 9.0857493e-01 9.7264161e-01 8.2900820e-01 9.3140549e-01 1.1330242e+00 1.0333002e+00 1.0117861e+00 1.2053255e+00 8.5291396e-01 1.0148928e+00 8.6641379e-01 9.7080819e-01 9.5457159e-01 9.5207457e-01 9.3539674e-01 9.0769069e-01 9.5322590e-01 1.1181803e+00 9.9765614e-01 7.5370610e-01 1.0807114e+00 1.0804601e+00 9.0214124e-01 8.7101998e-01 1.0167435e+00 1.2045936e+00 8.7300539e-01 1.1054300e+00 7.9145574e-01 1.0279340e+00 8.7623462e-01 1.0034756e+00 1.0386933e+00 9.3910970e-01 1.0028455e+00 9.9868824e-01 9.8752945e-01 9.8319327e-01 1.3110209e+00 8.6180633e-01 1.0993856e+00 8.5912563e-01 1.1303979e+00 9.8690459e-01 9.6910090e-01 9.1456819e-01 1.1525339e+00 1.1064552e+00 1.1062255e+00 9.7226683e-01 1.1091447e+00 1.1072238e+00 9.6544444e-01 9.6681036e-01 9.3247685e-01 9.6854634e-01 1.1035119e+00 1.1317148e+00 9.5557793e-01 9.8908485e-01 7.4873648e-01
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b705b348fc3de5041ad9e3d6a1686af61046b2a
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt
@@ -0,0 +1 @@
+ 1.4208365e-03 1.2652718e-05 8.9939315e-04 2.4232332e-04 9.9747033e-04 9.2045721e-04 2.2040648e-04 8.6480051e-04 1.2354911e-03 5.3650090e-06 1.0275886e-03 1.1695784e-03 2.3556571e-04 1.4590172e-03 1.8981327e-03 1.0939621e-03 1.2392314e-04 3.5850877e-04 8.6078038e-04 1.4490833e-03 8.4059347e-04 3.2873982e-03 2.7359832e-03 4.1316044e-03 2.7719149e-03 1.1814143e-03 1.1431285e-04 2.3850299e-04 1.3446247e-03 1.6406549e-03 1.2070654e-03 2.2241257e-03 1.4969348e-03 1.2354911e-03 7.6154552e-04 9.0853884e-04 1.2354911e-03 1.5825612e-04 2.3716586e-04 2.5806020e-04 8.5870759e-03 4.3447170e-04 2.6103416e-03 3.4026094e-03 1.2625429e-03 1.0000714e-03 2.7088099e-04 4.6161202e-05 1.7993015e-04 7.1619641e-02 7.4013940e-02 8.2336355e-02 9.3599031e-02 8.6542298e-02 9.2667602e-02 8.0934616e-02 6.7002415e-02 7.9695318e-02 8.3991107e-02 8.8330128e-02 7.6449243e-02 8.6123390e-02 9.1414445e-02 5.9767596e-02 6.8589764e-02 9.2363748e-02 7.5261304e-02 1.0768528e-01 7.8250149e-02 9.7383870e-02 6.9410330e-02 1.0895936e-01 9.1644587e-02 7.2677910e-02 7.2208930e-02 8.7635618e-02 9.3586395e-02 8.7700193e-02 5.8825053e-02 7.9271072e-02 7.4136423e-02 7.0977606e-02 1.1670751e-01 9.6691498e-02 7.7157266e-02 7.8793137e-02 9.6187418e-02 7.4355610e-02 8.6677009e-02 9.7286808e-02 8.5214421e-02 7.7419803e-02 6.8888638e-02 8.6192502e-02 7.4757686e-02 7.8851331e-02 7.5042247e-02 5.2484298e-02 7.8023694e-02 1.3991867e-01 1.2655756e-01 1.2099780e-01 1.2515784e-01 1.3134370e-01 1.3306336e-01 1.2911903e-01 1.2854613e-01 1.3655327e-01 1.1601604e-01 9.9632498e-02 1.2063863e-01 1.1404742e-01 1.3409335e-01 1.3451976e-01 1.1368563e-01 1.1469397e-01 1.1505768e-01 1.5479411e-01 1.2906390e-01 1.1634186e-01 1.2299625e-01 1.3892070e-01 1.0732534e-01 1.1401190e-01 1.1254699e-01 1.0266168e-01 1.0210743e-01 1.3111378e-01 1.0950615e-01 1.2501276e-01 1.0108759e-01 1.3297245e-01 1.0624129e-01 1.3360037e-01 1.2002867e-01 1.2233784e-01 1.1387071e-01 1.0061412e-01 1.0649150e-01 1.2174429e-01 1.0147290e-01 1.2655756e-01 1.2438709e-01 1.2138109e-01 1.1044406e-01 1.1910000e-01 1.0821359e-01 1.1609070e-01 1.1329724e-01 1.2085473e-03 1.2060695e-03 2.7592041e-03 3.0736184e-03 3.7201033e-03 1.0861043e-03 7.3910902e-04 3.4790667e-04 1.3491546e-03 2.4493052e-03 1.8482587e-04 2.3308566e-03 3.8997403e-03 6.3069928e-03 4.1362617e-03 1.5079538e-03 7.4890015e-04 4.0049414e-03 3.0763412e-04 3.2877725e-03 8.6909088e-03 1.8863199e-03 4.7592122e-03 4.5180751e-04 1.7148301e-03 8.8703626e-04 5.7128783e-04 1.7151033e-03 8.4814176e-04 4.7551630e-04 6.9313334e-03 5.8126778e-03 3.4790667e-04 9.7078221e-04 1.0390338e-03 3.4790667e-04 1.1371495e-03 7.0598263e-04 2.3100870e-03 3.1332241e-03 2.9870115e-03 3.7693564e-03 5.5008337e-03 2.0081767e-04 3.9261497e-03 1.6237803e-03 1.7731168e-03 5.9153033e-04 5.9997244e-02
6.3706418e-02 7.0131342e-02 8.0131815e-02 7.3670020e-02 8.1412444e-02 7.1132932e-02 5.6572408e-02 6.7223691e-02 7.3993918e-02 7.4363256e-02 6.6371013e-02 7.1106157e-02 7.9730716e-02 5.0610503e-02 5.7285563e-02 8.2536028e-02 6.3695818e-02 9.1877918e-02 6.6044079e-02 8.7700525e-02 5.7975072e-02 9.4407127e-02 7.9385033e-02 6.0900938e-02 6.0521931e-02 7.4070557e-02 8.1073873e-02 7.6438218e-02 4.7634460e-02 6.6728846e-02 6.1732271e-02 5.9656897e-02 1.0363139e-01 8.7312695e-02 6.8806126e-02 6.7142432e-02 8.0911573e-02 6.5091322e-02 7.4541034e-02 8.5313436e-02 7.4229332e-02 6.5328348e-02 5.7461491e-02 7.4891760e-02 6.5136264e-02 6.8598864e-02 6.3641018e-02 4.2790811e-02 6.7276779e-02 1.2872765e-01 1.1385917e-01 1.0708423e-01 1.1221780e-01 1.1844388e-01 1.1798239e-01 1.1767648e-01 1.1356773e-01 1.2073038e-01 1.0467824e-01 8.8441784e-02 1.0671832e-01 1.0091826e-01 1.2051300e-01 1.2244533e-01 1.0247664e-01 1.0203920e-01 1.0334656e-01 1.3764340e-01 1.1314999e-01 1.0390175e-01 1.1148602e-01 1.2274267e-01 9.3929112e-02 1.0239198e-01 9.9372667e-02 9.0109024e-02 9.0770318e-02 1.1749345e-01 9.5509620e-02 1.0956056e-01 8.9331297e-02 1.1936188e-01 9.3207628e-02 1.1935153e-01 1.0516553e-01 1.1204585e-01 1.0191688e-01 8.9582588e-02 9.3806716e-02 1.0922100e-01 8.9087100e-02 1.1385917e-01 1.1193127e-01 1.0978099e-01 9.7766696e-02 1.0448839e-01 9.5849546e-02 1.0619992e-01 1.0212555e-01 7.8301662e-04 3.3186074e-04 9.6097551e-04 9.6384587e-04 1.7160230e-04 7.1714495e-04 1.0915291e-03 1.4406904e-05 9.9431295e-04 1.0280837e-03 3.4520010e-04 1.6070142e-03 2.0814960e-03 1.1810349e-03 9.3270090e-05 2.4892291e-04 9.5000112e-04 1.2447556e-03 8.3736374e-04 3.6303226e-03 2.4141846e-03 3.9965261e-03 2.4688022e-03 1.0115165e-03 6.9871786e-05 1.7487334e-04 1.2251185e-03 1.4398826e-03 9.8199498e-04 2.5137187e-03 1.7466742e-03 1.0915291e-03 7.0690363e-04 8.5846505e-04 1.0915291e-03 1.0992291e-04 1.6427013e-04 2.8562896e-04 8.0123750e-03 5.0490687e-04 2.4076078e-03 3.3222239e-03 1.0270492e-03 1.0987887e-03 2.4862356e-04 7.8815959e-05 1.1120052e-04 7.0071463e-02 7.2494258e-02 8.0694698e-02 9.1816479e-02 8.4823937e-02 9.1055284e-02 7.9406161e-02 6.5540015e-02 7.8075821e-02 8.2418924e-02 8.6586217e-02 7.4908999e-02 8.4375857e-02 8.9771433e-02 5.8365951e-02 6.7055640e-02 9.0792516e-02 7.3755504e-02 1.0570869e-01 7.6652799e-02 9.5758989e-02 6.7858347e-02 1.0707149e-01 9.0015148e-02 7.1111432e-02 7.0634591e-02 8.5909852e-02 9.1841705e-02 8.6060650e-02 5.7382885e-02 7.7642663e-02 7.2560884e-02 6.9439824e-02 1.1486601e-01 9.5132094e-02 7.5722276e-02 7.7186494e-02 9.4329550e-02 7.2913445e-02 8.4999890e-02 9.5631654e-02 8.3632299e-02 7.5814411e-02 6.7360493e-02 8.4581854e-02 7.3324210e-02 7.7335911e-02 7.3484711e-02 5.1093482e-02 7.6474851e-02 1.3800148e-01 1.2463801e-01 1.1904450e-01 1.2328593e-01 1.2938789e-01 1.3104169e-01 1.2726294e-01 1.2658511e-01 1.3448678e-01 1.1418055e-01 9.7888383e-02 1.1868360e-01 1.1213978e-01 1.3206545e-01 1.3251384e-01 1.1184454e-01 1.1286955e-01 1.1328841e-01 1.5256500e-01 1.2703121e-01 1.1444439e-01 1.2112577e-01 1.3684054e-01 1.0544428e-01 1.1220824e-01 1.1073079e-01 1.0084086e-01 1.0036834e-01 1.2912019e-01 1.0768201e-01 1.2300696e-01 9.9385216e-02 1.3095409e-01 1.0446385e-01 1.3171213e-01 1.1800444e-01 1.2052688e-01 1.1209190e-01 9.8892088e-02 1.0463359e-01 1.1979721e-01 9.9600101e-02 1.2463801e-01 1.2247195e-01 1.1948197e-01 1.0852184e-01 1.1709036e-01 1.0637133e-01 1.1433097e-01 1.1154058e-01 1.2829581e-03 8.6520525e-04 1.3042912e-03 2.3052671e-04 6.0609671e-05 6.1408538e-04 7.9384016e-04 
2.5551469e-04 9.4346154e-04 1.8930050e-03 4.6203036e-03 3.8649853e-03 3.3273220e-03 9.7135787e-04 2.5836286e-04 1.6395377e-03 4.6720392e-04 1.3833444e-03 6.8585778e-03 1.1817616e-03 1.4184724e-03 1.2935682e-03 4.4534899e-04 4.3337262e-04 9.9734142e-04 6.2957380e-05 2.1802414e-04 1.3452346e-03 3.6759458e-03 3.7514511e-03 6.1408538e-04 2.3527566e-03 2.5967147e-03 6.1408538e-04 3.1896708e-04 3.0643540e-04 1.7034162e-03 7.0964884e-03 1.0371098e-03 1.9760564e-03 1.6993217e-03 9.2490489e-04 1.2129757e-03 2.8785057e-04 7.8777499e-04 6.4144968e-04 5.7636535e-02 5.9786679e-02 6.7275391e-02 7.7706661e-02 7.1288776e-02 7.6308806e-02 6.5987844e-02 5.3398709e-02 6.4839697e-02 6.8887148e-02 7.2874646e-02 6.2111692e-02 7.1088473e-02 7.5274214e-02 4.7295630e-02 5.5048251e-02 7.6266639e-02 6.0532100e-02 9.0997542e-02 6.3501941e-02 8.1155480e-02 5.5841790e-02 9.1620605e-02 7.5304976e-02 5.8627379e-02 5.8302297e-02 7.2188128e-02 7.7632065e-02 7.2128571e-02 4.6353347e-02 6.4522763e-02 5.9860052e-02 5.7075256e-02 9.8501473e-02 8.0208982e-02 6.2676929e-02 6.4117314e-02 8.0306154e-02 5.9903400e-02 7.1264506e-02 8.0454669e-02 6.9667510e-02 6.2855874e-02 5.5234852e-02 7.0611788e-02 6.0083969e-02 6.3933681e-02 6.0638614e-02 4.1119113e-02 6.3291748e-02 1.2072945e-01 1.0797760e-01 1.0284307e-01 1.0630032e-01 1.1246316e-01 1.1377579e-01 1.1035397e-01 1.0939330e-01 1.1704519e-01 9.8543065e-02 8.3389076e-02 1.0253622e-01 9.6610654e-02 1.1523295e-01 1.1624035e-01 9.6621030e-02 9.6718555e-02 9.7003685e-02 1.3426257e-01 1.1013293e-01 9.8838972e-02 1.0496266e-01 1.1920082e-01 9.0400878e-02 9.6352086e-02 9.4617133e-02 8.6118226e-02 8.5443225e-02 1.1226469e-01 9.1815383e-02 1.0642172e-01 8.4132371e-02 1.1413570e-01 8.8823115e-02 1.1373227e-01 1.0228600e-01 1.0454965e-01 9.5917796e-02 8.4129252e-02 8.9732713e-02 1.0404039e-01 8.5714179e-02 1.0797760e-01 1.0611357e-01 1.0375975e-01 9.3828435e-02 1.0141953e-01 9.1231247e-02 9.8764813e-02 9.5558448e-02 7.0033377e-04 3.9650610e-04 5.3529876e-04 1.4703029e-03 2.2471049e-03 2.6137215e-04 9.1585095e-04 2.3098853e-03 3.2779352e-04 1.7003275e-03 9.5035099e-04 8.4163249e-04 3.6423601e-04 8.6760304e-04 2.6110376e-04 2.4965606e-03 5.0990123e-04 2.2208392e-03 3.4995017e-03 3.9813106e-03 4.2652650e-03 1.4776191e-03 5.3856223e-04 9.6152184e-04 1.6178695e-03 2.4296336e-03 2.2824176e-03 1.0483334e-03 6.6735604e-04 2.2471049e-03 1.7166964e-03 1.9224889e-03 2.2471049e-03 4.4953685e-04 7.5090712e-04 3.1050470e-04 1.1530910e-02 8.0837373e-05 2.6173161e-03 2.7612054e-03 2.3974656e-03 3.9140870e-04 3.5730731e-04 1.1232648e-04 8.0278741e-04 7.4728046e-02 7.6441141e-02 8.5477412e-02 9.7141382e-02 8.9947057e-02 9.5081677e-02 8.2962705e-02 6.9633999e-02 8.3013931e-02 8.6069979e-02 9.2215558e-02 7.8736928e-02 9.0603515e-02 9.4074986e-02 6.2034704e-02 7.1640320e-02 9.4150759e-02 7.8195110e-02 1.1214391e-01 8.1468219e-02 9.9059263e-02 7.2514318e-02 1.1269547e-01 9.4545020e-02 7.5842542e-02 7.5358360e-02 9.1332869e-02 9.6662705e-02 9.0277244e-02 6.2066860e-02 8.2644288e-02 7.7554694e-02 7.3959493e-02 1.1955630e-01 9.8181734e-02 7.8602674e-02 8.1755435e-02 1.0058819e-01 7.6248524e-02 8.9701900e-02 9.9938282e-02 8.7676596e-02 8.0619290e-02 7.1976555e-02 8.8793557e-02 7.6779152e-02 8.1107438e-02 7.7952944e-02 5.5245517e-02 8.0550459e-02 1.4162183e-01 1.2912349e-01 1.2423521e-01 1.2779447e-01 1.3393410e-01 1.3660889e-01 1.3105158e-01 1.3208577e-01 1.4040000e-01 1.1817736e-01 1.0200650e-01 1.2388995e-01 1.1706801e-01 1.3699958e-01 1.3682207e-01 1.1586916e-01 1.1739162e-01 1.1729454e-01 1.5902469e-01 
1.3308573e-01 1.1901641e-01 1.2511327e-01 1.4289089e-01 1.1059070e-01 1.1627926e-01 1.1550831e-01 1.0561378e-01 1.0446495e-01 1.3405102e-01 1.1291439e-01 1.2888996e-01 1.0359625e-01 1.3590097e-01 1.0925250e-01 1.3665207e-01 1.2379539e-01 1.2392962e-01 1.1624448e-01 1.0286550e-01 1.0945264e-01 1.2440339e-01 1.0449561e-01 1.2912349e-01 1.2690130e-01 1.2362142e-01 1.1341467e-01 1.2276171e-01 1.1097585e-01 1.1759891e-01 1.1534218e-01 1.3143808e-04 7.3710840e-04 1.1313742e-03 2.6277162e-03 9.9332749e-04 4.8298989e-04 2.9659782e-03 1.8303797e-03 3.9657692e-03 1.4753738e-03 1.6266891e-03 7.0233916e-04 8.0313831e-04 3.4526160e-04 2.3291483e-03 1.3867759e-04 4.2228272e-03 1.6991343e-03 2.3223655e-03 3.8453210e-03 4.2904903e-04 9.9302567e-04 1.7706867e-03 9.4981017e-04 1.8259864e-03 2.0820613e-03 2.1473879e-03 2.0420431e-03 2.6277162e-03 3.0779094e-03 3.4332541e-03 2.6277162e-03 6.3280964e-04 1.0576914e-03 9.5198627e-04 1.0925795e-02 3.7286463e-04 7.9546610e-04 9.1841431e-04 2.1468126e-03 4.9129575e-04 4.3562197e-04 7.5083238e-04 1.3686608e-03 6.3901299e-02 6.4740623e-02 7.3708779e-02 8.4613714e-02 7.7866771e-02 8.2261058e-02 7.0449151e-02 5.8874682e-02 7.1767088e-02 7.3210535e-02 8.0660949e-02 6.6601983e-02 8.0033785e-02 8.1391959e-02 5.1369939e-02 6.0897790e-02 8.0716992e-02 6.7403323e-02 9.9203670e-02 7.0276809e-02 8.4922276e-02 6.1688045e-02 9.9339240e-02 8.2362360e-02 6.4928234e-02 6.4360101e-02 7.9641814e-02 8.3721620e-02 7.7549963e-02 5.2617898e-02 7.1414187e-02 6.6946935e-02 6.3031902e-02 1.0509118e-01 8.4332170e-02 6.6064468e-02 7.0064616e-02 8.8758294e-02 6.4379548e-02 7.7371173e-02 8.7052850e-02 7.5305342e-02 6.9340944e-02 6.1339869e-02 7.6377320e-02 6.5179636e-02 6.9093895e-02 6.6669498e-02 4.5609365e-02 6.8684945e-02 1.2445912e-01 1.1341836e-01 1.0935772e-01 1.1262566e-01 1.1789507e-01 1.2147174e-01 1.1488682e-01 1.1752559e-01 1.2531063e-01 1.0271865e-01 8.7888567e-02 1.0902443e-01 1.0234160e-01 1.2080033e-01 1.1990073e-01 1.0043696e-01 1.0286413e-01 1.0252340e-01 1.4292168e-01 1.1866325e-01 1.0381139e-01 1.0919240e-01 1.2785249e-01 9.6570465e-02 1.0127523e-01 1.0149554e-01 9.1688518e-02 9.0323099e-02 1.1822766e-01 9.9584713e-02 1.1452014e-01 9.0018133e-02 1.1983081e-01 9.5741335e-02 1.2190290e-01 1.0915996e-01 1.0773474e-01 1.0161859e-01 8.8729453e-02 9.5169428e-02 1.0868349e-01 9.0278091e-02 1.1341836e-01 1.1118524e-01 1.0767597e-01 9.8555096e-02 1.0809822e-01 9.6490550e-02 1.0179914e-01 1.0040847e-01 9.0953179e-04 1.6478123e-03 3.1324421e-03 9.3747882e-04 6.8074049e-04 3.4285457e-03 1.4256139e-03 3.3141786e-03 8.1135619e-04 1.2040955e-03 7.3894006e-04 1.1469835e-03 5.4914496e-05 3.0238895e-03 1.1512346e-04 2.9874978e-03 2.7356591e-03 2.9755481e-03 4.8570629e-03 9.8132331e-04 1.1267736e-03 1.9187302e-03 1.4320892e-03 2.5472569e-03 2.7129147e-03 1.2621760e-03 1.1868918e-03 3.1324421e-03 3.1260816e-03 3.4622842e-03 3.1324421e-03 7.8737454e-04 1.2923124e-03 7.7291736e-04 1.2676988e-02 1.5795155e-04 1.4073300e-03 1.3093851e-03 2.8558230e-03 2.3589004e-04 5.3160641e-04 6.3306680e-04 1.5563919e-03 6.9394652e-02 7.0160248e-02 7.9549278e-02 9.0909253e-02 8.3929778e-02 8.8133516e-02 7.5949213e-02 6.4094635e-02 7.7538115e-02 7.8838295e-02 8.6828513e-02 7.2078729e-02 8.6190925e-02 8.7328483e-02 5.6305232e-02 6.6307663e-02 8.6433769e-02 7.2861306e-02 1.0610432e-01 7.5977192e-02 9.0782134e-02 6.7147548e-02 1.0605640e-01 8.8295560e-02 7.0476640e-02 6.9912539e-02 8.5755505e-02 8.9909894e-02 8.3415192e-02 5.7694397e-02 7.7198547e-02 7.2551886e-02 6.8485682e-02 1.1174631e-01 9.0047290e-02 
7.1258462e-02 7.5770197e-02 9.5276007e-02 6.9606963e-02 8.3332111e-02 9.3091350e-02 8.1019819e-02 7.5041473e-02 6.6748030e-02 8.2172293e-02 7.0413691e-02 7.4567733e-02 7.2221920e-02 5.0422561e-02 7.4234075e-02 1.3135838e-01 1.2029572e-01 1.1630277e-01 1.1941581e-01 1.2489530e-01 1.2871814e-01 1.2159882e-01 1.2460620e-01 1.3270425e-01 1.0925415e-01 9.4076611e-02 1.1596894e-01 1.0908894e-01 1.2799150e-01 1.2695158e-01 1.0694484e-01 1.0944639e-01 1.0895711e-01 1.5084375e-01 1.2591962e-01 1.1052596e-01 1.1587184e-01 1.3530738e-01 1.0320818e-01 1.0775506e-01 1.0806337e-01 9.8123191e-02 9.6541726e-02 1.2533326e-01 1.0616585e-01 1.2166800e-01 9.6181548e-02 1.2699662e-01 1.0216112e-01 1.2885603e-01 1.1626103e-01 1.1421827e-01 1.0807124e-01 9.4882428e-02 1.0171954e-01 1.1554226e-01 9.6763759e-02 1.2029572e-01 1.1801757e-01 1.1438908e-01 1.0525128e-01 1.1515210e-01 1.0301668e-01 1.0810316e-01 1.0676998e-01 2.4407151e-04 6.8243680e-04 1.6882982e-04 4.2217018e-04 8.1245396e-04 8.1915702e-04 2.7980568e-03 2.6783721e-03 2.0076713e-03 3.3526400e-04 9.3506008e-05 1.0407900e-03 7.3148476e-04 9.1895790e-04 4.8425923e-03 1.7878106e-03 2.5638304e-03 1.8092053e-03 6.2482332e-04 4.5470127e-05 3.8680919e-04 4.8577398e-04 7.0932539e-04 1.0773286e-03 2.7081281e-03 2.3916675e-03 6.8243680e-04 1.3234869e-03 1.5152295e-03 6.8243680e-04 1.7279927e-05 4.4719936e-05 7.6774714e-04 7.6386402e-03 5.1509749e-04 2.1386706e-03 2.3673979e-03 8.8641907e-04 8.8317423e-04 5.7646989e-05 1.8767975e-04 1.8238427e-04 6.4591491e-02 6.6891146e-02 7.4787553e-02 8.5653640e-02 7.8909235e-02 8.4481757e-02 7.3468926e-02 6.0165176e-02 7.2232139e-02 7.6459237e-02 8.0572670e-02 6.9287036e-02 7.8547451e-02 8.3338681e-02 5.3514192e-02 6.1787978e-02 8.4336540e-02 6.7840538e-02 9.9351761e-02 7.0839680e-02 8.9318727e-02 6.2598635e-02 1.0029777e-01 8.3444651e-02 6.5618944e-02 6.5228710e-02 7.9886645e-02 8.5622882e-02 7.9922508e-02 5.2526388e-02 7.1863670e-02 6.6948234e-02 6.3994975e-02 1.0763490e-01 8.8479248e-02 6.9931400e-02 7.1440370e-02 8.8224815e-02 6.7118281e-02 7.8968665e-02 8.8858891e-02 7.7432758e-02 7.0109240e-02 6.2023845e-02 7.8396402e-02 6.7393801e-02 7.1380489e-02 6.7813026e-02 4.6767795e-02 7.0645561e-02 1.3044475e-01 1.1734304e-01 1.1197394e-01 1.1577499e-01 1.2198760e-01 1.2346289e-01 1.1982320e-01 1.1899008e-01 1.2683842e-01 1.0736476e-01 9.1564623e-02 1.1164167e-01 1.0538958e-01 1.2475783e-01 1.2551509e-01 1.0524662e-01 1.0574315e-01 1.0607279e-01 1.4459461e-01 1.1962188e-01 1.0766800e-01 1.1407280e-01 1.2909426e-01 9.8905414e-02 1.0524346e-01 1.0359925e-01 9.4433579e-02 9.3820759e-02 1.2176744e-01 1.0065671e-01 1.1574436e-01 9.2625059e-02 1.2364363e-01 9.7538593e-02 1.2367543e-01 1.1121391e-01 1.1355049e-01 1.0493323e-01 9.2419908e-02 9.8167154e-02 1.1298864e-01 9.3668541e-02 1.1734304e-01 1.1533257e-01 1.1267510e-01 1.0222063e-01 1.1031800e-01 9.9779829e-02 1.0752614e-01 1.0448251e-01 3.8330702e-04 7.6710204e-04 5.4934344e-04 6.1141025e-04 1.8880070e-03 4.3782366e-03 4.2558302e-03 3.3445116e-03 9.0730658e-04 1.6460272e-04 1.9935351e-03 2.2277110e-04 1.5935452e-03 7.2001884e-03 1.0201171e-03 1.9163397e-03 8.7300929e-04 4.6754224e-04 3.6671499e-04 7.4258415e-04 2.1567602e-04 1.3361003e-04 9.1168360e-04 4.3156597e-03 4.1158943e-03 3.8330702e-04 1.9019978e-03 2.1146706e-03 3.8330702e-04 3.1982857e-04 2.1854146e-04 1.6719903e-03 5.9155088e-03 1.3110961e-03 2.0595508e-03 2.2774590e-03 5.2912957e-04 1.6598142e-03 4.0619000e-04 8.5702191e-04 4.6128261e-04 5.7335316e-02 5.9791552e-02 6.7034247e-02 7.7315388e-02 7.0912461e-02 
7.6541197e-02 6.6231857e-02 5.3290914e-02 6.4524455e-02 6.9096848e-02 7.2330829e-02 6.2164647e-02 7.0266106e-02 7.5359485e-02 4.7211115e-02 5.4717217e-02 7.6724195e-02 6.0437574e-02 9.0217835e-02 6.3227153e-02 8.1624838e-02 5.5479636e-02 9.1272977e-02 7.5343817e-02 5.8299412e-02 5.7952393e-02 7.1727180e-02 7.7451339e-02 7.2165967e-02 4.5880845e-02 6.4164650e-02 5.9464600e-02 5.6817377e-02 9.8638775e-02 8.0838937e-02 6.3146715e-02 6.3916551e-02 7.9536591e-02 6.0201366e-02 7.1084400e-02 8.0631146e-02 6.9793274e-02 6.2554472e-02 5.4911579e-02 7.0667171e-02 6.0374722e-02 6.4102637e-02 6.0460719e-02 4.0707229e-02 6.3307106e-02 1.2135312e-01 1.0820155e-01 1.0273439e-01 1.0658023e-01 1.1268914e-01 1.1365695e-01 1.1088857e-01 1.0931070e-01 1.1680946e-01 9.8829208e-02 8.3503653e-02 1.0241354e-01 9.6518124e-02 1.1527856e-01 1.1644011e-01 9.6835948e-02 9.6892604e-02 9.7403729e-02 1.3389056e-01 1.0978147e-01 9.8889679e-02 1.0531384e-01 1.1893855e-01 9.0170315e-02 9.6654705e-02 9.4696682e-02 8.5999457e-02 8.5628213e-02 1.1232778e-01 9.1692367e-02 1.0609783e-01 8.4336563e-02 1.1417833e-01 8.8845800e-02 1.1399156e-01 1.0186496e-01 1.0510756e-01 9.6245658e-02 8.4341961e-02 8.9608741e-02 1.0408154e-01 8.5412047e-02 1.0820155e-01 1.0631639e-01 1.0398240e-01 9.3623884e-02 1.0104040e-01 9.1224725e-02 9.9332091e-02 9.5993921e-02 1.1067957e-03 1.4791390e-03 7.1256747e-05 2.0377231e-03 4.3755431e-03 5.9630791e-03 4.4970379e-03 1.5921641e-03 6.4984761e-04 3.3935862e-03 1.2039709e-04 3.0970780e-03 8.3950153e-03 2.1890332e-03 3.1326528e-03 5.0256002e-04 1.6389584e-03 6.4717383e-04 7.1019942e-04 8.9864077e-04 3.8255378e-04 1.2286350e-03 5.5229901e-03 5.1766813e-03 0.0000000e+00 1.5860612e-03 1.6773969e-03 0.0000000e+00 8.4337656e-04 4.6746407e-04 2.4549978e-03 4.7529836e-03 2.3235808e-03 4.0683267e-03 4.3260986e-03 6.7336618e-04 2.8454658e-03 1.0918918e-03 1.3756658e-03 5.7784546e-04 5.9573290e-02 6.3070670e-02 6.9597309e-02 7.9911457e-02 7.3480528e-02 7.9923883e-02 7.0144874e-02 5.5923876e-02 6.6635620e-02 7.3192589e-02 7.4096565e-02 6.5836734e-02 7.1022277e-02 7.8555696e-02 5.0423423e-02 5.7089619e-02 8.1093473e-02 6.2483167e-02 9.2251714e-02 6.5399221e-02 8.6573432e-02 5.7873871e-02 9.3861710e-02 7.7914479e-02 6.0545459e-02 6.0328179e-02 7.3725736e-02 8.0652769e-02 7.5662466e-02 4.7500835e-02 6.6267157e-02 6.1219907e-02 5.9246920e-02 1.0235525e-01 8.5584812e-02 6.7627185e-02 6.6676399e-02 8.1028522e-02 6.3874534e-02 7.4037098e-02 8.3735875e-02 7.3091049e-02 6.4875922e-02 5.7134332e-02 7.3898502e-02 6.3669341e-02 6.7483654e-02 6.3032151e-02 4.3195391e-02 6.6465430e-02 1.2757303e-01 1.1294171e-01 1.0654531e-01 1.1074736e-01 1.1756538e-01 1.1705185e-01 1.1633100e-01 1.1230305e-01 1.1988948e-01 1.0404710e-01 8.7981667e-02 1.0622547e-01 1.0061669e-01 1.2008497e-01 1.2242153e-01 1.0217012e-01 1.0084187e-01 1.0181343e-01 1.3714147e-01 1.1243585e-01 1.0356517e-01 1.1071692e-01 1.2181923e-01 9.3742899e-02 1.0137566e-01 9.8085445e-02 8.9840814e-02 8.9979544e-02 1.1682155e-01 9.4340727e-02 1.0893096e-01 8.8036209e-02 1.1887723e-01 9.1995894e-02 1.1718099e-01 1.0529554e-01 1.1115622e-01 1.0048991e-01 8.8823715e-02 9.3647258e-02 1.0909883e-01 8.9729548e-02 1.1294171e-01 1.1121254e-01 1.0947844e-01 9.8164553e-02 1.0458423e-01 9.5468337e-02 1.0529433e-01 1.0077315e-01 9.3075268e-04 1.0661545e-03 2.6597529e-04 1.6036733e-03 2.0280056e-03 1.2436972e-03 1.5688801e-04 3.1850165e-04 8.9411637e-04 1.3235015e-03 8.8731482e-04 3.4816593e-03 2.6719247e-03 3.9091992e-03 2.6159373e-03 1.1443019e-03 7.9601608e-05 2.3028989e-04 
1.2135551e-03 1.4956336e-03 1.2137749e-03 2.2449952e-03 1.5932074e-03 1.1067957e-03 8.0827056e-04 9.5525701e-04 1.1067957e-03 1.2520454e-04 1.8684214e-04 3.3283736e-04 8.4659292e-03 4.3428627e-04 2.6431662e-03 3.3043825e-03 1.2192723e-03 9.6672170e-04 2.2673224e-04 4.0165289e-05 1.5556464e-04 7.0885985e-02 7.3312749e-02 8.1555170e-02 9.2789394e-02 8.5768022e-02 9.1811327e-02 8.0210428e-02 6.6299983e-02 7.8898236e-02 8.3277528e-02 8.7497229e-02 7.5768419e-02 8.5268176e-02 9.0575828e-02 5.9182538e-02 6.7899308e-02 9.1577973e-02 7.4436351e-02 1.0683101e-01 7.7459472e-02 9.6638219e-02 6.8724317e-02 1.0805306e-01 9.0746123e-02 7.1944393e-02 7.1498544e-02 8.6811878e-02 9.2796163e-02 8.6929388e-02 5.8156140e-02 7.8485798e-02 7.3355880e-02 7.0259533e-02 1.1577796e-01 9.5890635e-02 7.6480264e-02 7.8047377e-02 9.5335392e-02 7.3634293e-02 8.5899063e-02 9.6384778e-02 8.4415996e-02 7.6657094e-02 6.8177250e-02 8.5395861e-02 7.3990972e-02 7.8093423e-02 7.4294151e-02 5.1950910e-02 7.7279181e-02 1.3907744e-01 1.2568112e-01 1.2011323e-01 1.2420710e-01 1.3046004e-01 1.3207399e-01 1.2824918e-01 1.2752552e-01 1.3553911e-01 1.1524115e-01 9.8893820e-02 1.1975932e-01 1.1323008e-01 1.3322963e-01 1.3377253e-01 1.1295596e-01 1.1379378e-01 1.1416138e-01 1.5374990e-01 1.2806482e-01 1.1555054e-01 1.2219358e-01 1.3787989e-01 1.0651347e-01 1.1318026e-01 1.1161431e-01 1.0188137e-01 1.0132199e-01 1.3022140e-01 1.0855236e-01 1.2404638e-01 1.0022528e-01 1.3210134e-01 1.0532767e-01 1.3250558e-01 1.1917979e-01 1.2157791e-01 1.1297631e-01 9.9847302e-02 1.0571550e-01 1.2097128e-01 1.0080768e-01 1.2568112e-01 1.2354605e-01 1.2062969e-01 1.0973133e-01 1.1825900e-01 1.0742526e-01 1.1535080e-01 1.1244756e-01 1.9470856e-03 1.8498175e-03 4.7714250e-03 2.8358661e-03 3.0255426e-03 1.1308587e-03 6.7035566e-04 9.3284570e-04 1.3935241e-03 9.8369983e-04 5.6854836e-03 1.9144361e-03 1.0961099e-03 2.6770659e-03 6.7637792e-04 7.3922961e-04 1.6168588e-03 1.9795771e-04 8.8027763e-04 2.3819907e-03 2.3199642e-03 2.7913184e-03 1.4791390e-03 3.2257382e-03 3.5250868e-03 1.4791390e-03 4.9791374e-04 7.0216560e-04 1.6800207e-03 1.0022835e-02 5.5855445e-04 1.9786373e-03 9.4684044e-04 1.9956071e-03 4.5593799e-04 2.5049818e-04 7.2992180e-04 1.1563910e-03 6.0779252e-02 6.2273308e-02 7.0462169e-02 8.1326510e-02 7.4767830e-02 7.8734546e-02 6.8077240e-02 5.6059538e-02 6.8181020e-02 7.1050105e-02 7.6799016e-02 6.4482663e-02 7.5580358e-02 7.7962233e-02 4.9642606e-02 5.8153068e-02 7.8105114e-02 6.3436311e-02 9.5564553e-02 6.6739850e-02 8.2930379e-02 5.9008492e-02 9.5418848e-02 7.8187143e-02 6.1832470e-02 6.1508833e-02 7.5927040e-02 8.0793818e-02 7.4771898e-02 4.9618073e-02 6.7927507e-02 6.3289271e-02 6.0099335e-02 1.0140473e-01 8.1745663e-02 6.4187383e-02 6.7135505e-02 8.4768567e-02 6.1827019e-02 7.4354928e-02 8.3103529e-02 7.2159743e-02 6.6094709e-02 5.8361788e-02 7.3251937e-02 6.2103535e-02 6.6220430e-02 6.3584868e-02 4.3971154e-02 6.5863068e-02 1.2260074e-01 1.1066837e-01 1.0619606e-01 1.0899833e-01 1.1518894e-01 1.1739848e-01 1.1240635e-01 1.1296742e-01 1.2096568e-01 1.0086214e-01 8.5896751e-02 1.0590639e-01 9.9771313e-02 1.1830710e-01 1.1878365e-01 9.8989344e-02 9.9484141e-02 9.9300506e-02 1.3860781e-01 1.1421784e-01 1.0167360e-01 1.0723785e-01 1.2323222e-01 9.3791376e-02 9.8729091e-02 9.7617417e-02 8.9196630e-02 8.7906597e-02 1.1533851e-01 9.5241683e-02 1.1037299e-01 8.6684313e-02 1.1722334e-01 9.1866320e-02 1.1676032e-01 1.0620321e-01 1.0631345e-01 9.8352717e-02 8.6492752e-02 9.2837846e-02 1.0689111e-01 8.8947496e-02 1.1066837e-01 1.0877202e-01 
1.0619549e-01 9.7005210e-02 1.0523294e-01 9.4129616e-02 1.0043708e-01 9.7689504e-02 1.8508011e-03 3.7513322e-03 6.0039368e-03 4.2304138e-03 1.5191600e-03 7.2789043e-04 3.6236504e-03 2.5132214e-04 3.2740913e-03 8.1034702e-03 2.4941139e-03 4.0964229e-03 6.0206143e-04 1.9190323e-03 6.6472571e-04 5.1664338e-04 1.3616103e-03 7.0613265e-04 1.0312088e-03 5.8211090e-03 5.1401914e-03 7.1256747e-05 1.1045080e-03 1.1556192e-03 7.1256747e-05 9.3818356e-04 5.1597856e-04 2.2957469e-03 4.3308939e-03 2.5276111e-03 4.3800580e-03 5.1684770e-03 5.8668191e-04 3.2395561e-03 1.2942225e-03 1.4104695e-03 4.9075437e-04 6.2060492e-02 6.5820549e-02 7.2324527e-02 8.2688153e-02 7.6151035e-02 8.3216525e-02 7.3194172e-02 5.8477367e-02 6.9270609e-02 7.6249005e-02 7.6681852e-02 6.8643243e-02 7.3331578e-02 8.1706941e-02 5.2792718e-02 5.9477668e-02 8.4501232e-02 6.5244243e-02 9.4903309e-02 6.8042766e-02 9.0018791e-02 6.0245936e-02 9.6930604e-02 8.1064051e-02 6.3026851e-02 6.2769846e-02 7.6368221e-02 8.3589775e-02 7.8680956e-02 4.9590571e-02 6.8853459e-02 6.3691512e-02 6.1750809e-02 1.0592213e-01 8.9183264e-02 7.0750492e-02 6.9363027e-02 8.3535040e-02 6.6868103e-02 7.6873580e-02 8.7073732e-02 7.6162997e-02 6.7469442e-02 5.9546534e-02 7.6928171e-02 6.6694450e-02 7.0471384e-02 6.5682900e-02 4.5133798e-02 6.9312951e-02 1.3167296e-01 1.1664541e-01 1.0993682e-01 1.1453374e-01 1.2132631e-01 1.2063781e-01 1.2028602e-01 1.1588513e-01 1.2343288e-01 1.0759677e-01 9.1184353e-02 1.0959799e-01 1.0389171e-01 1.2371908e-01 1.2606621e-01 1.0559841e-01 1.0439417e-01 1.0553794e-01 1.4077951e-01 1.1579171e-01 1.0695513e-01 1.1441431e-01 1.2538100e-01 9.6825962e-02 1.0496558e-01 1.0155971e-01 9.2933277e-02 9.3305204e-02 1.2046243e-01 9.7626604e-02 1.1224568e-01 9.1421248e-02 1.2250398e-01 9.5336276e-02 1.2112681e-01 1.0839632e-01 1.1495562e-01 1.0414545e-01 9.2136906e-02 9.6777242e-02 1.1252211e-01 9.2559606e-02 1.1664541e-01 1.1484781e-01 1.1301462e-01 1.0121871e-01 1.0770332e-01 9.8720694e-02 1.0901533e-01 1.0446815e-01 7.8277898e-04 1.7490530e-03 1.0345024e-03 5.6185312e-04 1.1591486e-03 1.1405764e-03 2.5549089e-03 1.4484284e-03 2.2580494e-03 4.5713265e-03 5.6870335e-03 4.1902203e-03 2.4320876e-03 6.0369458e-04 6.2286369e-04 2.4521502e-03 2.9038905e-03 2.2436415e-03 1.7675525e-03 9.5896000e-04 2.0377231e-03 8.9090360e-04 9.7827632e-04 2.0377231e-03 7.4516940e-04 8.4824201e-04 4.3724648e-04 1.0582513e-02 7.1366344e-04 4.1221085e-03 4.7945036e-03 2.3833891e-03 1.3170043e-03 8.5049004e-04 2.9093352e-04 6.7142903e-04 7.9558936e-02 8.2158081e-02 9.0820201e-02 1.0264403e-01 9.5277746e-02 1.0150997e-01 8.9387659e-02 7.4718776e-02 8.7974881e-02 9.2637642e-02 9.6993873e-02 8.4761019e-02 9.4485044e-02 1.0025701e-01 6.7224195e-02 7.6433848e-02 1.0126400e-01 8.3185627e-02 1.1729605e-01 8.6461029e-02 1.0658954e-01 7.7314215e-02 1.1857784e-01 1.0034666e-01 8.0683042e-02 8.0237049e-02 9.6300382e-02 1.0267579e-01 9.6489769e-02 6.6032956e-02 8.7552344e-02 8.2099586e-02 7.8915667e-02 1.2662250e-01 1.0571935e-01 8.5387558e-02 8.7148938e-02 1.0518820e-01 8.2417845e-02 9.5416475e-02 1.0627769e-01 9.3794810e-02 8.5650752e-02 7.6704239e-02 9.4843643e-02 8.2753514e-02 8.7142105e-02 8.3165683e-02 5.9528971e-02 8.6320416e-02 1.5080041e-01 1.3699340e-01 1.3123407e-01 1.3538631e-01 1.4196949e-01 1.4361623e-01 1.3957348e-01 1.3881335e-01 1.4720194e-01 1.2611020e-01 1.0906565e-01 1.3086970e-01 1.2408176e-01 1.4489915e-01 1.4541581e-01 1.2374159e-01 1.2456922e-01 1.2489938e-01 1.6613145e-01 1.3940980e-01 1.2649129e-01 1.3333884e-01 1.4960338e-01 1.1706680e-01 
1.2394226e-01 1.2226369e-01 1.1222236e-01 1.1158045e-01 1.4175176e-01 1.1903032e-01 1.3525827e-01 1.1036833e-01 1.4372294e-01 1.1569638e-01 1.4384978e-01 1.3029466e-01 1.3261278e-01 1.2368173e-01 1.1003418e-01 1.1624174e-01 1.3214763e-01 1.1113124e-01 1.3699340e-01 1.3478604e-01 1.3174332e-01 1.2045775e-01 1.2933910e-01 1.1801062e-01 1.2611372e-01 1.2312584e-01 2.4038804e-03 9.9356679e-04 1.6379146e-03 2.9968879e-03 2.7777132e-03 5.0292552e-03 2.9485139e-03 1.7810659e-03 7.1187929e-03 1.0385224e-02 6.8192179e-03 4.7007047e-03 2.2536066e-03 1.6978265e-03 5.5995030e-03 5.8830752e-03 3.3086218e-03 3.5101479e-03 1.6089784e-03 4.3755431e-03 1.0574938e-03 1.0592785e-03 4.3755431e-03 2.5472554e-03 2.6641717e-03 1.0704333e-03 1.2070572e-02 2.4953874e-03 6.0785955e-03 8.5181088e-03 3.9627930e-03 3.6619699e-03 2.9233174e-03 1.7757672e-03 2.0595965e-03 9.0904959e-02 9.3846404e-02 1.0296173e-01 1.1510418e-01 1.0729700e-01 1.1521099e-01 1.0181822e-01 8.5988795e-02 1.0000742e-01 1.0504161e-01 1.0917671e-01 9.6459196e-02 1.0622808e-01 1.1358449e-01 7.7434068e-02 8.7329703e-02 1.1478646e-01 9.5582611e-02 1.2982843e-01 9.8465917e-02 1.1998512e-01 8.8160180e-02 1.3221652e-01 1.1399918e-01 9.2024225e-02 9.1371105e-02 1.0854159e-01 1.1533865e-01 1.0916600e-01 7.6158175e-02 9.9423258e-02 9.3687045e-02 9.0204262e-02 1.4138542e-01 1.1970815e-01 9.7663366e-02 9.8994798e-02 1.1736337e-01 9.4681982e-02 1.0777767e-01 1.2034441e-01 1.0671033e-01 9.7400706e-02 8.7763928e-02 1.0767516e-01 9.5332473e-02 9.9630913e-02 9.4930517e-02 6.8563663e-02 9.8462875e-02 1.6617515e-01 1.5176730e-01 1.4543395e-01 1.5072522e-01 1.5691405e-01 1.5882414e-01 1.5479314e-01 1.5416358e-01 1.6247914e-01 1.3996762e-01 1.2197918e-01 1.4500225e-01 1.3765005e-01 1.5950314e-01 1.5937612e-01 1.3710080e-01 1.3913130e-01 1.3976637e-01 1.8183098e-01 1.5420823e-01 1.4014252e-01 1.4766552e-01 1.6507182e-01 1.3020370e-01 1.3818766e-01 1.3684571e-01 1.2517707e-01 1.2500429e-01 1.5651711e-01 1.3335007e-01 1.4978001e-01 1.2432862e-01 1.5834992e-01 1.2988375e-01 1.6032290e-01 1.4373178e-01 1.4688056e-01 1.3840222e-01 1.2332107e-01 1.2926358e-01 1.4578293e-01 1.2292671e-01 1.5176730e-01 1.4922075e-01 1.4546636e-01 1.3299550e-01 1.4276363e-01 1.3134387e-01 1.4008961e-01 1.3766630e-01 5.7728358e-04 1.6620505e-03 3.0662753e-03 5.1693417e-04 6.0968463e-03 8.4744633e-04 8.7364721e-04 5.8106642e-03 6.7399476e-03 8.6083103e-03 3.1702748e-03 2.7104978e-03 3.3164143e-03 4.2509190e-03 5.8084215e-03 4.6709776e-03 9.8526568e-04 3.6786909e-04 5.9630791e-03 3.9566796e-03 4.2657824e-03 5.9630791e-03 2.3876668e-03 3.1383576e-03 1.0693622e-03 1.7187016e-02 9.8571152e-04 3.1367899e-03 3.7448988e-03 5.3108404e-03 1.2637202e-03 2.1359423e-03 1.6418787e-03 3.1352541e-03 8.3834616e-02 8.4243676e-02 9.4832859e-02 1.0703873e-01 9.9466934e-02 1.0403376e-01 9.0316788e-02 7.7914871e-02 9.2850027e-02 9.3299365e-02 1.0296391e-01 8.6082003e-02 1.0248600e-01 1.0318273e-01 6.8824902e-02 8.0299100e-02 1.0157055e-01 8.7955506e-02 1.2341155e-01 9.1142100e-02 1.0580332e-01 8.1173197e-02 1.2350931e-01 1.0460831e-01 8.4991511e-02 8.4254332e-02 1.0174542e-01 1.0573929e-01 9.8648070e-02 7.1061471e-02 9.2438015e-02 8.7508516e-02 8.2750270e-02 1.2928539e-01 1.0529438e-01 8.4835976e-02 9.0594121e-02 1.1204138e-01 8.3577196e-02 9.8754999e-02 1.0957368e-01 9.6258171e-02 8.9990776e-02 8.0898906e-02 9.7508458e-02 8.4744426e-02 8.9161769e-02 8.6853262e-02 6.2377571e-02 8.8830405e-02 1.4853162e-01 1.3772455e-01 1.3389458e-01 1.3729951e-01 1.4254710e-01 1.4753124e-01 1.3874272e-01 1.4343376e-01 
1.5191156e-01 1.2542851e-01 1.0950060e-01 1.3351960e-01 1.2589032e-01 1.4575308e-01 1.4361388e-01 1.2273259e-01 1.2665814e-01 1.2593062e-01 1.7100462e-01 1.4482556e-01 1.2707776e-01 1.3245591e-01 1.5480441e-01 1.1982038e-01 1.2429326e-01 1.2550864e-01 1.1421155e-01 1.1236837e-01 1.4320023e-01 1.2379599e-01 1.4017288e-01 1.1252737e-01 1.4478118e-01 1.1925773e-01 1.4807486e-01 1.3379535e-01 1.3019772e-01 1.2505772e-01 1.1047444e-01 1.1793234e-01 1.3214851e-01 1.1203286e-01 1.3772455e-01 1.3511133e-01 1.3062456e-01 1.2117415e-01 1.3256036e-01 1.1928977e-01 1.2368263e-01 1.2328316e-01 7.8697050e-04 2.0732289e-03 9.4315564e-04 4.6001401e-03 8.4240364e-04 1.3015708e-03 4.6297460e-03 7.5292997e-03 6.5572401e-03 2.5566943e-03 1.7941741e-03 1.8226799e-03 3.9920133e-03 4.8070278e-03 2.6490734e-03 2.2737423e-03 8.3198965e-04 4.4970379e-03 1.8707122e-03 2.0801746e-03 4.4970379e-03 1.6864362e-03 2.1518496e-03 3.0908897e-04 1.2802000e-02 1.1444166e-03 2.7605116e-03 4.8428825e-03 3.3840997e-03 1.9469936e-03 1.7958172e-03 1.1565833e-03 1.8697862e-03 8.2127567e-02 8.3518119e-02 9.3314949e-02 1.0506408e-01 9.7541782e-02 1.0397584e-01 9.0341739e-02 7.6817337e-02 9.1083710e-02 9.3231274e-02 1.0048418e-01 8.5524503e-02 9.9112893e-02 1.0267345e-01 6.7846626e-02 7.8515797e-02 1.0225403e-01 8.6849217e-02 1.2022955e-01 8.9502113e-02 1.0655817e-01 7.9297559e-02 1.2165144e-01 1.0391861e-01 8.3203942e-02 8.2410584e-02 9.9528824e-02 1.0443382e-01 9.8019721e-02 6.8818975e-02 9.0543610e-02 8.5485551e-02 8.1186134e-02 1.2894588e-01 1.0651601e-01 8.5580315e-02 8.9213768e-02 1.0886353e-01 8.3754339e-02 9.7441118e-02 1.0932583e-01 9.5882843e-02 8.8282134e-02 7.9128322e-02 9.6917266e-02 8.4873719e-02 8.8927957e-02 8.5532654e-02 6.0384203e-02 8.8123284e-02 1.4980593e-01 1.3770759e-01 1.3282368e-01 1.3741111e-01 1.4254251e-01 1.4639384e-01 1.3970199e-01 1.4238069e-01 1.5040197e-01 1.2563911e-01 1.0916002e-01 1.3240724e-01 1.2489275e-01 1.4520334e-01 1.4360899e-01 1.2274102e-01 1.2644577e-01 1.2642535e-01 1.6908994e-01 1.4294672e-01 1.2654652e-01 1.3286867e-01 1.5320101e-01 1.1837980e-01 1.2451917e-01 1.2497748e-01 1.1312705e-01 1.1222677e-01 1.4268257e-01 1.2261254e-01 1.3839073e-01 1.1239899e-01 1.4421566e-01 1.1854547e-01 1.4805458e-01 1.3176737e-01 1.3127552e-01 1.2532607e-01 1.1042618e-01 1.1684289e-01 1.3160924e-01 1.1043724e-01 1.3770759e-01 1.3504379e-01 1.3066174e-01 1.1987723e-01 1.3066578e-01 1.1856367e-01 1.2478710e-01 1.2391087e-01 3.0983409e-04 7.5828890e-04 1.5917755e-03 4.8958816e-04 3.3744944e-03 2.1101097e-03 4.2400870e-03 2.8698866e-03 7.9583168e-04 2.4610899e-04 3.6436132e-04 1.4311801e-03 1.7294514e-03 8.6738167e-04 2.6111809e-03 1.6704106e-03 1.5921641e-03 8.6161593e-04 1.0547029e-03 1.5921641e-03 2.0427868e-04 3.5845705e-04 1.2194863e-04 8.2981219e-03 4.9180195e-04 1.7380522e-03 3.0734607e-03 1.0728608e-03 1.1397310e-03 3.4603128e-04 1.9200118e-04 2.8845040e-04 6.9779438e-02 7.1836392e-02 8.0319868e-02 9.1354890e-02 8.4353236e-02 9.0635736e-02 7.8599311e-02 6.5140986e-02 7.7899100e-02 8.1486701e-02 8.6472565e-02 7.4062042e-02 8.4619349e-02 8.9336326e-02 5.7575298e-02 6.6635212e-02 8.9946961e-02 7.3779929e-02 1.0532587e-01 7.6464940e-02 9.4587006e-02 6.7402630e-02 1.0673169e-01 8.9925800e-02 7.0797999e-02 7.0219100e-02 8.5721997e-02 9.1187854e-02 8.5377890e-02 5.7235173e-02 7.7431768e-02 7.2498793e-02 6.9063158e-02 1.1428266e-01 9.4218471e-02 7.4725647e-02 7.6703845e-02 9.4228329e-02 7.2261001e-02 8.4462132e-02 9.5360997e-02 8.3133678e-02 7.5506963e-02 6.7041127e-02 8.4070372e-02 7.2903149e-02 
7.6784460e-02 7.3115143e-02 5.0413571e-02 7.5926129e-02 1.3636539e-01 1.2353657e-01 1.1821263e-01 1.2258303e-01 1.2822457e-01 1.3051322e-01 1.2599517e-01 1.2631512e-01 1.3406707e-01 1.1278003e-01 9.6722933e-02 1.1783731e-01 1.1110860e-01 1.3080317e-01 1.3063996e-01 1.1029647e-01 1.1216341e-01 1.1248813e-01 1.5199693e-01 1.2674108e-01 1.1318561e-01 1.1969790e-01 1.3653036e-01 1.0458853e-01 1.1112582e-01 1.1028100e-01 9.9890110e-02 9.9356661e-02 1.2805612e-01 1.0750038e-01 1.2261289e-01 9.8791075e-02 1.2975191e-01 1.0408196e-01 1.3162969e-01 1.1713290e-01 1.1886116e-01 1.1132823e-01 9.7814075e-02 1.0357451e-01 1.1833994e-01 9.8179977e-02 1.2353657e-01 1.2124412e-01 1.1787602e-01 1.0709455e-01 1.1618054e-01 1.0529428e-01 1.1269702e-01 1.1053049e-01 1.3650135e-03 5.3926014e-04 9.6875216e-04 5.5085642e-03 1.1951469e-03 2.8096772e-03 1.4033998e-03 3.8702395e-04 1.0970323e-04 3.3218009e-04 5.6326785e-04 5.8024795e-04 5.6723773e-04 3.5935032e-03 3.0059920e-03 6.4984761e-04 1.1677062e-03 1.3673782e-03 6.4984761e-04 7.6378345e-05 6.3488092e-05 8.1586688e-04 6.3954323e-03 8.4458294e-04 1.6959745e-03 2.5316364e-03 4.4648839e-04 1.3649198e-03 2.1646092e-04 4.0910219e-04 1.5323026e-04 6.2114649e-02 6.4461203e-02 7.2168083e-02 8.2712554e-02 7.6067484e-02 8.2127836e-02 7.1085856e-02 5.7869953e-02 6.9689694e-02 7.3941980e-02 7.7755077e-02 6.6772898e-02 7.5730146e-02 8.0858032e-02 5.1159438e-02 5.9263906e-02 8.1982206e-02 6.5662191e-02 9.5957099e-02 6.8346124e-02 8.6755825e-02 6.0017643e-02 9.7272303e-02 8.1103209e-02 6.3092478e-02 6.2637310e-02 7.7107544e-02 8.2765719e-02 7.7320565e-02 5.0179546e-02 6.9272103e-02 6.4477670e-02 6.1520691e-02 1.0482403e-01 8.6201836e-02 6.7723532e-02 6.8849001e-02 8.5128110e-02 6.4954612e-02 7.6260237e-02 8.6474261e-02 7.5037042e-02 6.7540885e-02 5.9554295e-02 7.5917230e-02 6.5334858e-02 6.9084098e-02 6.5352935e-02 4.4308417e-02 6.8222624e-02 1.2733314e-01 1.1424612e-01 1.0876932e-01 1.1294180e-01 1.1881213e-01 1.2027633e-01 1.1690957e-01 1.1601884e-01 1.2357066e-01 1.0430113e-01 8.8647048e-02 1.0842142e-01 1.0217893e-01 1.2134270e-01 1.2195833e-01 1.0207761e-01 1.0292571e-01 1.0341320e-01 1.4095409e-01 1.1639921e-01 1.0445121e-01 1.1097871e-01 1.2583780e-01 9.5736690e-02 1.0236688e-01 1.0085185e-01 9.1372067e-02 9.1009848e-02 1.1849408e-01 9.7905297e-02 1.1253291e-01 9.0050809e-02 1.2026601e-01 9.4848068e-02 1.2106318e-01 1.0772883e-01 1.1055160e-01 1.0223795e-01 8.9621067e-02 9.5003079e-02 1.0961118e-01 9.0242843e-02 1.1424612e-01 1.1217935e-01 1.0939970e-01 9.8767943e-02 1.0686010e-01 9.6691216e-02 1.0462140e-01 1.0177309e-01 3.4212913e-03 2.0350185e-04 2.2645835e-03 3.4346676e-03 3.6178579e-03 5.4115677e-03 1.4013939e-03 1.2010586e-03 1.9342083e-03 1.8303919e-03 3.0241355e-03 3.0182648e-03 8.7783530e-04 7.3200159e-04 3.3935862e-03 3.0016113e-03 3.3110105e-03 3.3935862e-03 9.0468402e-04 1.4291912e-03 6.5529771e-04 1.3482371e-02 1.1883350e-04 1.9129351e-03 1.8189821e-03 3.2224845e-03 2.2840556e-04 6.5009173e-04 5.8224397e-04 1.6275063e-03 7.3184911e-02 7.4026716e-02 8.3609924e-02 9.5240271e-02 8.8102740e-02 9.2390529e-02 7.9966102e-02 6.7767341e-02 8.1513618e-02 8.2938634e-02 9.0997666e-02 7.6011769e-02 9.0234918e-02 9.1578032e-02 5.9804069e-02 7.0034281e-02 9.0682677e-02 7.6686753e-02 1.1071727e-01 7.9918092e-02 9.5151478e-02 7.0899518e-02 1.1069472e-01 9.2509702e-02 7.4297007e-02 7.3732579e-02 8.9920224e-02 9.4250688e-02 8.7608739e-02 6.1121885e-02 8.1169501e-02 7.6375843e-02 7.2267735e-02 1.1652820e-01 9.4360734e-02 7.5150369e-02 7.9755441e-02 9.9609504e-02 
7.3443983e-02 8.7507436e-02 9.7438140e-02 8.5130511e-02 7.8978434e-02 7.0471516e-02 8.6314807e-02 7.4241923e-02 7.8526940e-02 7.6102189e-02 5.3711374e-02 7.8190521e-02 1.3653802e-01 1.2528995e-01 1.2121146e-01 1.2435019e-01 1.2997976e-01 1.3382130e-01 1.2659694e-01 1.2959353e-01 1.3786369e-01 1.1404399e-01 9.8548323e-02 1.2087284e-01 1.1387348e-01 1.3314978e-01 1.3209550e-01 1.1169631e-01 1.1419600e-01 1.1368838e-01 1.5633317e-01 1.3093427e-01 1.1535002e-01 1.2078800e-01 1.4049523e-01 1.0785716e-01 1.1249675e-01 1.1275652e-01 1.0267406e-01 1.0105332e-01 1.3042827e-01 1.1078233e-01 1.2662105e-01 1.0064162e-01 1.3213313e-01 1.0672611e-01 1.3386868e-01 1.2116827e-01 1.1908244e-01 1.1278797e-01 9.9360911e-02 1.0635497e-01 1.2047378e-01 1.0130606e-01 1.2528995e-01 1.2297832e-01 1.1929072e-01 1.0997770e-01 1.2004306e-01 1.0767864e-01 1.1284273e-01 1.1147304e-01 2.8709378e-03 9.0791333e-03 1.3407674e-03 2.7138923e-03 2.4677711e-04 1.1444972e-03 7.6121265e-04 8.8810957e-04 7.0009018e-04 1.4757411e-04 9.0393445e-04 6.0883361e-03 5.6961877e-03 1.2039709e-04 1.8816976e-03 2.0265121e-03 1.2039709e-04 8.6239061e-04 5.2686780e-04 2.5538294e-03 4.1367540e-03 2.4461852e-03 3.1968429e-03 3.7345867e-03 3.8813913e-04 2.9749715e-03 1.1153685e-03 1.5845430e-03 6.9221070e-04 5.5493017e-02 5.8687008e-02 6.5182775e-02 7.5153394e-02 6.8882837e-02 7.5289957e-02 6.5516107e-02 5.1904720e-02 6.2434251e-02 6.8395322e-02 6.9725101e-02 6.1264247e-02 6.7000659e-02 7.3915045e-02 4.6337454e-02 5.2993228e-02 7.6203506e-02 5.8574285e-02 8.7235787e-02 6.1228310e-02 8.1349394e-02 5.3727422e-02 8.8860415e-02 7.3529860e-02 5.6419133e-02 5.6136434e-02 6.9314277e-02 7.5768587e-02 7.0921131e-02 4.3887190e-02 6.2047408e-02 5.7247874e-02 5.5122668e-02 9.7063471e-02 8.0589157e-02 6.3015465e-02 6.2273448e-02 7.6485434e-02 5.9534434e-02 6.9400424e-02 7.9100961e-02 6.8556756e-02 6.0632063e-02 5.3105900e-02 6.9320431e-02 5.9485747e-02 6.3075829e-02 5.8812039e-02 3.9389619e-02 6.2057390e-02 1.2120786e-01 1.0709958e-01 1.0095433e-01 1.0522389e-01 1.1158919e-01 1.1144992e-01 1.1038844e-01 1.0698853e-01 1.1429410e-01 9.8230586e-02 8.2643326e-02 1.0062987e-01 9.5029365e-02 1.1396256e-01 1.1592889e-01 9.6297509e-02 9.5506831e-02 9.6444702e-02 1.3110088e-01 1.0707131e-01 9.7795782e-02 1.0475251e-01 1.1626300e-01 8.8405248e-02 9.5813058e-02 9.2970070e-02 8.4549021e-02 8.4699662e-02 1.1089247e-01 8.9470273e-02 1.0356569e-01 8.3077190e-02 1.1281589e-01 8.7056416e-02 1.1197016e-01 9.9670877e-02 1.0510107e-01 9.5157512e-02 8.3537128e-02 8.8197538e-02 1.0308982e-01 8.4141549e-02 1.0709958e-01 1.0532428e-01 1.0341164e-01 9.2382015e-02 9.8953568e-02 8.9983057e-02 9.9390435e-02 9.5301345e-02 3.0771633e-03 2.2394953e-03 3.5785600e-03 4.5455517e-03 7.4728021e-04 1.0553655e-03 1.6471751e-03 1.6255623e-03 2.5337657e-03 2.0402330e-03 1.9207121e-03 1.4230439e-03 3.0970780e-03 2.6131629e-03 2.9423829e-03 3.0970780e-03 7.3620931e-04 1.2096052e-03 5.1210113e-04 1.1445573e-02 3.3743120e-04 9.3762253e-04 1.6709589e-03 2.3328362e-03 6.4367426e-04 6.1292579e-04 6.6980522e-04 1.3596425e-03 6.8771058e-02 6.9631829e-02 7.8939123e-02 9.0076585e-02 8.3106206e-02 8.8021023e-02 7.5587788e-02 6.3613086e-02 7.6977857e-02 7.8355410e-02 8.6058485e-02 7.1477483e-02 8.5350309e-02 8.7042060e-02 5.5585563e-02 6.5564323e-02 8.6285657e-02 7.2679829e-02 1.0489589e-01 7.5454083e-02 9.0429367e-02 6.6346194e-02 1.0535674e-01 8.8189227e-02 6.9809235e-02 6.9152322e-02 8.5027647e-02 8.9182145e-02 8.2908995e-02 5.6974544e-02 7.6567724e-02 7.1978204e-02 6.7853368e-02 1.1142320e-01 
9.0045691e-02 7.1019587e-02 7.5131918e-02 9.4265212e-02 6.9407495e-02 8.2681448e-02 9.3017444e-02 8.0734547e-02 7.4408554e-02 6.6081064e-02 8.1800498e-02 7.0361312e-02 7.4293705e-02 7.1683996e-02 4.9412056e-02 7.3791248e-02 1.3087381e-01 1.1972349e-01 1.1554097e-01 1.1917243e-01 1.2428472e-01 1.2815053e-01 1.2126029e-01 1.2425112e-01 1.3208038e-01 1.0854852e-01 9.3334993e-02 1.1518188e-01 1.0821021e-01 1.2711496e-01 1.2583624e-01 1.0606084e-01 1.0908141e-01 1.0877992e-01 1.4997249e-01 1.2525272e-01 1.0965413e-01 1.1522136e-01 1.3472784e-01 1.0229252e-01 1.0728052e-01 1.0776484e-01 9.7267064e-02 9.5981774e-02 1.2460839e-01 1.0582154e-01 1.2096134e-01 9.5922459e-02 1.2615683e-01 1.0184377e-01 1.2900688e-01 1.1512576e-01 1.1363761e-01 1.0783825e-01 9.4308496e-02 1.0078579e-01 1.1452781e-01 9.5389301e-02 1.1972349e-01 1.1733679e-01 1.1347632e-01 1.0398208e-01 1.1403752e-01 1.0220157e-01 1.0755302e-01 1.0649118e-01 1.0147620e-02 1.1247381e-02 1.2064168e-02 6.5374061e-03 4.5897505e-03 4.8228518e-03 7.5563629e-03 9.2308338e-03 7.2321889e-03 1.6011806e-03 5.3396772e-04 8.3950153e-03 4.7739373e-03 4.9472694e-03 8.3950153e-03 4.4998786e-03 5.2439437e-03 2.2809748e-03 2.1071898e-02 2.7056396e-03 6.9610435e-03 7.8780829e-03 8.1126375e-03 3.2257880e-03 4.3393216e-03 3.1425928e-03 4.9174647e-03 1.0007131e-01 1.0106593e-01 1.1217080e-01 1.2532148e-01 1.1715442e-01 1.2248393e-01 1.0792073e-01 9.3860583e-02 1.0976919e-01 1.1122163e-01 1.2044721e-01 1.0322968e-01 1.1921347e-01 1.2149913e-01 8.4186074e-02 9.6291408e-02 1.2023049e-01 1.0442285e-01 1.4247402e-01 1.0795998e-01 1.2499239e-01 9.7246530e-02 1.4298267e-01 1.2269113e-01 1.0132833e-01 1.0059203e-01 1.1928692e-01 1.2425009e-01 1.1675840e-01 8.5744895e-02 1.0931412e-01 1.0376332e-01 9.9002718e-02 1.4971116e-01 1.2434000e-01 1.0215677e-01 1.0769100e-01 1.2998610e-01 1.0050022e-01 1.1660891e-01 1.2830083e-01 1.1408155e-01 1.0679553e-01 9.6866698e-02 1.1540492e-01 1.0158690e-01 1.0644304e-01 1.0354221e-01 7.6665035e-02 1.0598765e-01 1.7102185e-01 1.5912057e-01 1.5467066e-01 1.5843570e-01 1.6430112e-01 1.6898961e-01 1.6040927e-01 1.6442765e-01 1.7347908e-01 1.4613872e-01 1.2882261e-01 1.5426961e-01 1.4623773e-01 1.6767510e-01 1.6569930e-01 1.4326884e-01 1.4700928e-01 1.4639344e-01 1.9375995e-01 1.6572948e-01 1.4772209e-01 1.5370280e-01 1.7644069e-01 1.3951457e-01 1.4477511e-01 1.4552738e-01 1.3362833e-01 1.3186831e-01 1.6485780e-01 1.4332228e-01 1.6088049e-01 1.3176893e-01 1.6660706e-01 1.3873049e-01 1.6938110e-01 1.5434218e-01 1.5143663e-01 1.4540553e-01 1.2987912e-01 1.3768917e-01 1.5323248e-01 1.3136310e-01 1.5912057e-01 1.5638588e-01 1.5175254e-01 1.4128505e-01 1.5308268e-01 1.3923812e-01 1.4444485e-01 1.4370095e-01 2.6770864e-03 1.6033718e-03 4.5840441e-04 2.0276877e-03 2.4561050e-03 1.2787767e-03 1.0310968e-03 1.1138996e-03 7.3996134e-03 6.8122401e-03 2.1890332e-03 3.7576667e-03 4.1081994e-03 2.1890332e-03 1.7366871e-03 1.7496571e-03 3.0804223e-03 5.2624414e-03 3.0566387e-03 8.7139687e-04 2.3320271e-03 9.5854356e-04 3.5582665e-03 1.9033529e-03 2.7781156e-03 2.0582238e-03 4.8466175e-02 5.0119607e-02 5.7333122e-02 6.6635612e-02 6.0625611e-02 6.6546613e-02 5.6002103e-02 4.4610205e-02 5.5428894e-02 5.8355341e-02 6.2731565e-02 5.1936578e-02 6.1589720e-02 6.5254921e-02 3.8121898e-02 4.5707178e-02 6.5954438e-02 5.2310619e-02 7.8737277e-02 5.4221486e-02 6.9833666e-02 4.6306978e-02 8.0104533e-02 6.6061691e-02 4.9289042e-02 4.8703396e-02 6.2029842e-02 6.6459215e-02 6.1626890e-02 3.8139914e-02 5.4977441e-02 5.0955269e-02 4.7810568e-02 8.6879401e-02 
6.9864176e-02 5.2960539e-02 5.4195866e-02 6.9397405e-02 5.0805294e-02 6.0771100e-02 7.0699543e-02 5.9928918e-02 5.3277849e-02 4.6137567e-02 6.0647902e-02 5.1507184e-02 5.4535520e-02 5.1271651e-02 3.2182295e-02 5.3653922e-02 1.0655841e-01 9.4809862e-02 8.9943103e-02 9.4257768e-02 9.8930460e-02 1.0102907e-01 9.7318001e-02 9.7573045e-02 1.0420091e-01 8.5339961e-02 7.1237738e-02 8.9593643e-02 8.3615246e-02 1.0099568e-01 1.0093948e-01 8.3072610e-02 8.4968711e-02 8.5464008e-02 1.2001752e-01 9.7755169e-02 8.5475001e-02 9.1482164e-02 1.0648093e-01 7.7912530e-02 8.4002788e-02 8.3421922e-02 7.3855173e-02 7.3645673e-02 9.8656580e-02 8.1073912e-02 9.4018658e-02 7.3420060e-02 1.0008227e-01 7.8012093e-02 1.0282145e-01 8.8787179e-02 9.1014600e-02 8.4377747e-02 7.2315249e-02 7.7005472e-02 8.9946277e-02 7.2132565e-02 9.4809862e-02 9.2717682e-02 8.9711332e-02 7.9921256e-02 8.7947099e-02 7.8589830e-02 8.5634568e-02 8.3685774e-02 3.6322977e-03 2.1046334e-03 3.2662345e-03 4.7576391e-03 8.8436254e-04 1.6169936e-03 5.0909036e-03 5.3937261e-03 6.9592372e-03 3.1326528e-03 7.3963134e-03 7.8155750e-03 3.1326528e-03 2.8162695e-03 2.9888005e-03 5.3841195e-03 1.1640832e-02 3.1058623e-03 3.5268449e-03 8.9518404e-04 4.1579453e-03 2.4418843e-03 2.3174283e-03 3.5804624e-03 3.9289766e-03 4.8779386e-02 4.9807262e-02 5.7295117e-02 6.7429274e-02 6.1528729e-02 6.3805672e-02 5.4672352e-02 4.4305273e-02 5.5263563e-02 5.7527840e-02 6.3439356e-02 5.1886311e-02 6.2844358e-02 6.3392021e-02 3.9105833e-02 4.6659036e-02 6.3315061e-02 5.0431450e-02 8.1151197e-02 5.3901750e-02 6.8036856e-02 4.7518043e-02 7.9948453e-02 6.3397617e-02 4.9788208e-02 4.9648688e-02 6.2516012e-02 6.6678586e-02 6.0874148e-02 3.9322135e-02 5.5164136e-02 5.1029588e-02 4.8159012e-02 8.4641068e-02 6.6410487e-02 5.1100232e-02 5.4352465e-02 7.1152366e-02 4.8870866e-02 6.0790663e-02 6.7705030e-02 5.8185400e-02 5.3489810e-02 4.6729049e-02 5.9304859e-02 4.8888831e-02 5.2875044e-02 5.1050585e-02 3.4854587e-02 5.2844524e-02 1.0451993e-01 9.3506986e-02 8.9732041e-02 9.1442484e-02 9.7717353e-02 9.9707944e-02 9.4802947e-02 9.5351404e-02 1.0312713e-01 8.4850971e-02 7.1294232e-02 8.9511382e-02 8.4075688e-02 1.0102482e-01 1.0205801e-01 8.3487981e-02 8.2948462e-02 8.2499658e-02 1.1982484e-01 9.7067919e-02 8.5815629e-02 9.0584285e-02 1.0517734e-01 7.8728615e-02 8.2465362e-02 8.1175179e-02 7.4451161e-02 7.2780961e-02 9.8027119e-02 7.9186352e-02 9.3572539e-02 7.1178913e-02 9.9960717e-02 7.6001190e-02 9.8069469e-02 9.0444802e-02 8.9768862e-02 8.1715688e-02 7.1540326e-02 7.7885383e-02 9.0855680e-02 7.5245252e-02 9.3506986e-02 9.1968167e-02 9.0109483e-02 8.2320804e-02 8.9509517e-02 7.8843854e-02 8.4366358e-02 8.1217849e-02 2.0130888e-03 1.8101212e-03 1.7679630e-03 1.5452657e-03 5.1945438e-04 1.2854163e-03 8.7808919e-03 8.2238558e-03 5.0256002e-04 2.7264330e-03 2.8467434e-03 5.0256002e-04 1.9812707e-03 1.4461308e-03 4.1136119e-03 2.5737383e-03 4.2121435e-03 4.2634118e-03 5.2197085e-03 6.4299253e-04 4.8925560e-03 2.3856210e-03 3.0302048e-03 1.5954763e-03 5.0938593e-02 5.4618269e-02 6.0341206e-02 6.9748949e-02 6.3744319e-02 7.0794949e-02 6.1613026e-02 4.7841585e-02 5.7516679e-02 6.4388046e-02 6.4176559e-02 5.7259869e-02 6.1078980e-02 6.9247074e-02 4.2780731e-02 4.8566082e-02 7.2247187e-02 5.4065395e-02 8.0844343e-02 5.6422768e-02 7.7401119e-02 4.9239970e-02 8.2976560e-02 6.8666014e-02 5.1795393e-02 5.1539865e-02 6.3922410e-02 7.0731007e-02 6.6410587e-02 3.9578438e-02 5.7098964e-02 5.2392525e-02 5.0683886e-02 9.1729236e-02 7.6796353e-02 5.9700691e-02 5.7658559e-02 7.0380963e-02 
[raw numeric payload of an added data file: thousands of space-separated floating-point values, omitted]
9.9154668e-03 8.8906649e-03 3.1510729e-03 6.5653256e-03 3.9767727e-03 3.0148559e-03 4.1087786e-03 8.8891438e-03 2.8692574e-03 5.5219515e-03 2.9822205e-03 9.7392578e-03 2.7559926e-03 9.1569917e-03 5.1978118e-03 1.2376315e-02 5.7353587e-03 4.3202502e-03 3.8711781e-03 8.5388416e-03 4.7844327e-03 8.6239000e-03 8.5671038e-03 9.9856257e-03 5.9819812e-03 5.1316831e-03 4.4550423e-03 1.1213210e-02 6.9205612e-03 3.8131057e-04 6.7888158e-03 7.3363003e-03 5.3694436e-03 6.8516623e-04 1.8924051e-03 2.8895525e-03 1.2584203e-03 3.1106561e-03 1.9851820e-03 2.2993360e-04 3.0705767e-04 1.8065799e-03 2.2681792e-03 1.9895114e-03 4.4561501e-04 3.2950230e-03 1.3536253e-03 1.9442033e-02 1.0986408e-02 7.5586116e-03 9.6139706e-03 1.2432885e-02 9.8077912e-03 1.4077548e-02 8.4254038e-03 1.0370813e-02 1.0478515e-02 5.7856689e-03 7.4894276e-03 6.8497491e-03 1.2864462e-02 1.7242156e-02 1.0603978e-02 6.8089917e-03 8.2980029e-03 1.5941696e-02 8.0862824e-03 8.7712331e-03 1.2406133e-02 1.0894930e-02 4.6791769e-03 8.5829625e-03 5.4740472e-03 4.5017704e-03 5.7029968e-03 1.1309070e-02 4.0232333e-03 7.1989285e-03 4.2761837e-03 1.2289743e-02 4.0060693e-03 1.1002553e-02 7.0316526e-03 1.5021687e-02 7.5484658e-03 5.9167328e-03 5.5390768e-03 1.0934157e-02 6.4525090e-03 1.0986408e-02 1.0943118e-02 1.2502015e-02 7.9589531e-03 6.9802991e-03 6.2208864e-03 1.3663387e-02 8.8999016e-03 6.6062542e-03 5.5827314e-03 3.2137911e-03 2.4648342e-04 3.0882054e-03 1.4855278e-03 8.8238305e-04 3.0312113e-03 1.3048774e-03 1.7074342e-04 4.2409993e-05 1.2055084e-03 1.2882692e-03 1.0204762e-03 8.9780466e-05 2.0467514e-03 5.4370125e-04 1.7241595e-02 9.9776023e-03 7.2970604e-03 9.2205394e-03 1.1400153e-02 1.0313298e-02 1.2454804e-02 9.2538420e-03 1.1254653e-02 8.6534668e-03 4.2479840e-03 7.2108683e-03 6.0807502e-03 1.1908599e-02 1.4998922e-02 8.5435101e-03 6.3304436e-03 7.3992448e-03 1.7031719e-02 9.1810782e-03 7.4786833e-03 1.0592315e-02 1.2037933e-02 4.2382770e-03 7.2750145e-03 5.5074473e-03 3.6741461e-03 4.4146887e-03 1.0634443e-02 4.6823366e-03 8.0104480e-03 3.6842552e-03 1.1424845e-02 4.0945647e-03 1.1857190e-02 7.0169427e-03 1.2513676e-02 6.7752703e-03 4.4459599e-03 4.6022279e-03 9.3965771e-03 4.9465269e-03 9.9776023e-03 9.6902181e-03 1.0478640e-02 6.5221232e-03 6.8355801e-03 5.1573625e-03 1.1079009e-02 7.5518079e-03 2.6192775e-03 5.9216859e-03 4.3727502e-03 5.1296754e-03 5.3199310e-03 2.7670517e-03 1.1371035e-03 2.6020928e-03 5.1188394e-03 7.5382072e-03 2.4476107e-03 5.0616317e-03 3.9293810e-03 5.2302586e-03 1.5095801e-02 4.1435464e-03 4.4658108e-03 8.7058429e-04 4.4660793e-04 2.5555941e-04 1.2381894e-03 8.7244097e-04 2.0362511e-03 8.1311609e-04 1.5164656e-03 1.9481959e-03 1.9017569e-03 4.9004150e-04 8.1438359e-04 1.7141435e-03 4.7093504e-03 2.7065864e-03 6.0393489e-05 4.2915211e-04 3.9373776e-03 1.5669301e-03 1.2950596e-03 2.1025536e-03 1.9522193e-03 1.0322026e-03 8.4218839e-04 9.2905490e-05 1.2346326e-03 1.1781032e-03 9.8002670e-04 7.9339756e-04 1.1328834e-03 6.2948320e-04 1.4629322e-03 3.9084846e-04 1.4164544e-03 1.5298834e-03 3.8721999e-03 2.7001180e-04 1.5287601e-03 1.2669024e-03 2.1037534e-03 3.7134859e-03 8.7058429e-04 1.1775044e-03 2.8495857e-03 2.7086756e-03 1.4023687e-03 1.0191451e-03 3.8647977e-03 9.3179883e-04 1.2988786e-03 3.9786961e-03 9.7659791e-03 1.8948352e-03 3.2059058e-03 1.9902291e-03 1.7004118e-03 5.2879179e-03 6.5830280e-03 1.9368261e-03 2.2268965e-03 1.8962421e-03 4.4774419e-03 1.1162650e-02 2.6447647e-03 4.9365528e-03 3.0323131e-03 3.9972109e-03 2.9343480e-03 3.7523678e-03 6.3980393e-03 2.7120874e-03 6.2404178e-03 
8.0807935e-03 1.9416596e-03 1.3024398e-03 4.0245181e-03 3.0697725e-03 5.0283287e-03 5.8270256e-03 2.3505477e-03 2.0375391e-03 1.2652208e-03 1.2527532e-02 8.0556250e-03 2.6750700e-03 2.5225228e-03 9.0764566e-03 3.4699713e-03 1.3308823e-03 2.7664696e-03 2.4678015e-03 9.5729618e-04 4.3858320e-03 4.4729680e-03 6.8828714e-03 1.1547480e-03 4.8183810e-03 2.7179325e-03 6.3087897e-03 6.0843336e-03 2.6715726e-03 1.3701564e-03 8.5085504e-04 2.7129190e-03 3.6187454e-03 4.3357364e-03 3.0323131e-03 2.8888833e-03 3.1752051e-03 4.0026050e-03 5.6110347e-03 2.1520192e-03 2.0021167e-03 9.3001678e-04 2.6139835e-03 9.7625461e-03 4.3820793e-04 2.7906691e-03 3.4740135e-03 1.5471352e-03 3.7324822e-03 3.8774805e-03 1.8049144e-03 1.0166909e-03 9.6586952e-04 2.7620874e-03 5.6379477e-03 1.4267399e-03 9.9420936e-03 6.9534902e-03 7.2046628e-03 7.1611325e-03 8.0864542e-03 1.1050679e-02 7.0236664e-03 1.0684883e-02 1.2982713e-02 4.4982901e-03 2.1523458e-03 7.1621069e-03 5.3786825e-03 9.3055499e-03 9.5487861e-03 4.3879387e-03 5.0693092e-03 4.4931267e-03 1.8805226e-02 1.2143973e-02 5.1616364e-03 5.8432704e-03 1.4239876e-02 5.0396297e-03 4.0643450e-03 5.6688069e-03 3.5911389e-03 2.3422208e-03 8.6067938e-03 6.9700590e-03 1.0540875e-02 2.8498735e-03 9.0488233e-03 4.9258801e-03 1.1781603e-02 8.6822745e-03 5.8089260e-03 4.4337196e-03 2.0267056e-03 4.1354243e-03 6.4148025e-03 4.4976834e-03 6.9534902e-03 6.4275292e-03 5.9584428e-03 5.3006657e-03 8.1437729e-03 3.9023880e-03 4.5299634e-03 3.7988456e-03 2.5123066e-03 1.1946309e-03 2.0097566e-04 1.7928786e-03 5.7143349e-04 1.2667176e-04 4.6644872e-04 4.4712161e-04 1.0683378e-03 5.7468992e-04 7.6554900e-05 3.3664265e-03 2.0582490e-04 1.3553574e-02 7.1051776e-03 4.8941009e-03 6.5257957e-03 8.3098959e-03 7.5373275e-03 9.3233389e-03 6.7621473e-03 8.4467156e-03 6.1291129e-03 2.5944515e-03 4.8248569e-03 3.9028342e-03 8.7515124e-03 1.1627531e-02 6.1178515e-03 4.1203288e-03 5.0572182e-03 1.3529172e-02 6.7984240e-03 5.0687125e-03 7.7357162e-03 9.2015610e-03 2.4873418e-03 4.9175701e-03 3.5355888e-03 2.0473249e-03 2.6605314e-03 7.6511613e-03 3.0628424e-03 5.7307078e-03 2.1169019e-03 8.3320524e-03 2.4574218e-03 9.1102030e-03 4.8098810e-03 9.5736130e-03 4.5225384e-03 2.7314130e-03 2.7690856e-03 6.7080946e-03 3.3506310e-03 7.1051776e-03 6.8798895e-03 7.7001955e-03 4.4569824e-03 4.6305164e-03 3.1848752e-03 8.4170139e-03 5.2045050e-03 6.9296246e-03 2.1360002e-03 3.7784686e-03 3.9888330e-03 1.9332806e-03 3.1165972e-03 3.4622240e-03 6.1843155e-03 4.9689115e-03 2.8276933e-03 8.4143362e-03 3.9488633e-03 1.7390613e-02 8.6823566e-03 4.7125346e-03 7.5297389e-03 9.6551734e-03 5.6073015e-03 1.2632980e-02 4.7967985e-03 5.4595549e-03 9.6779905e-03 6.1919843e-03 4.6437798e-03 4.8811137e-03 9.2634464e-03 1.4470481e-02 9.9239882e-03 5.5116951e-03 7.7393895e-03 8.8498403e-03 3.6104684e-03 7.0913900e-03 1.1068993e-02 5.6668826e-03 3.0002631e-03 7.9032607e-03 4.0760726e-03 3.7279027e-03 5.9729084e-03 7.9919733e-03 2.3383845e-03 3.2272638e-03 4.6096764e-03 8.7690968e-03 3.1364844e-03 7.9578373e-03 3.3962632e-03 1.4631125e-02 6.8663759e-03 6.4834920e-03 4.3461235e-03 8.7377082e-03 5.5255283e-03 8.6823566e-03 8.8561968e-03 1.1024987e-02 6.2645608e-03 3.5745084e-03 5.1960404e-03 1.3983177e-02 8.7265781e-03 1.5360474e-03 2.2738132e-03 6.3957013e-04 1.8470485e-03 1.9436948e-03 8.2753134e-04 1.4870044e-04 1.8575051e-04 1.1511457e-03 4.1570185e-03 4.2481246e-04 1.2022631e-02 7.4558885e-03 6.7319225e-03 7.0097246e-03 8.7236579e-03 1.0059866e-02 8.2686681e-03 9.2543911e-03 1.1644804e-02 5.5866311e-03 2.5062451e-03 
6.6946690e-03 5.2622870e-03 9.8836223e-03 1.1452219e-02 5.6355000e-03 4.7339621e-03 4.6436729e-03 1.7620106e-02 1.0378141e-02 5.6675320e-03 7.1003305e-03 1.2674455e-02 4.3836233e-03 4.5891805e-03 4.7259960e-03 3.2437023e-03 2.5249532e-03 8.8366518e-03 5.2434703e-03 8.9867759e-03 2.2946416e-03 9.5089828e-03 3.7537168e-03 1.0503141e-02 7.8427036e-03 7.7804598e-03 4.4127994e-03 2.3620086e-03 3.9941388e-03 7.2903223e-03 4.7424240e-03 7.4558885e-03 7.1215300e-03 7.3539364e-03 5.7328206e-03 7.4284923e-03 3.9643246e-03 6.4306545e-03 4.3729078e-03 1.0852360e-03 3.9664584e-04 4.5123208e-04 1.2414442e-03 2.3150826e-04 1.4183402e-03 7.0426023e-04 4.8163546e-04 5.0741189e-03 3.9595383e-04 1.0937070e-02 5.0339955e-03 3.1168119e-03 4.5426488e-03 6.0432452e-03 5.3182662e-03 7.1300333e-03 4.7641753e-03 6.1358685e-03 4.4885462e-03 1.6916118e-03 3.0636842e-03 2.3987749e-03 6.3937387e-03 9.2584560e-03 4.5899059e-03 2.5786937e-03 3.4750392e-03 1.0541835e-02 4.8350707e-03 3.4370915e-03 5.7977832e-03 6.8277961e-03 1.3144796e-03 3.3638906e-03 2.1338739e-03 1.0504939e-03 1.6413828e-03 5.4173620e-03 1.8780432e-03 3.9014661e-03 1.1794316e-03 6.0207953e-03 1.3473818e-03 6.9135169e-03 3.1440431e-03 7.6724602e-03 2.9974710e-03 1.7751706e-03 1.5977690e-03 4.8531144e-03 2.4494451e-03 5.0339955e-03 4.8947958e-03 5.8464391e-03 3.1299587e-03 2.9881785e-03 1.9161069e-03 6.7804975e-03 3.6799724e-03 6.0827379e-04 2.1616612e-03 3.6536587e-03 6.0346559e-04 1.7560211e-03 1.3150048e-03 2.0808595e-03 9.6848049e-03 1.5050304e-03 8.5509882e-03 3.4934050e-03 2.3873070e-03 2.2266374e-03 4.3437966e-03 3.4432000e-03 4.8822411e-03 2.6118729e-03 4.2515298e-03 3.9280244e-03 2.1763551e-03 2.4215318e-03 2.3726469e-03 5.2469603e-03 8.8156975e-03 4.6543689e-03 1.1653780e-03 1.5634616e-03 8.3770205e-03 3.5034084e-03 3.2683461e-03 4.7014835e-03 4.7252691e-03 1.7941232e-03 2.2843067e-03 6.9328893e-04 1.6436639e-03 1.4866860e-03 4.0234517e-03 7.8970448e-04 2.8726545e-03 3.0564882e-04 4.8340126e-03 3.3681970e-04 3.2031086e-03 3.4962764e-03 6.5051951e-03 1.2935806e-03 1.7549327e-03 2.1673002e-03 4.7736712e-03 4.5514956e-03 3.4934050e-03 3.7661841e-03 5.5497035e-03 4.3927525e-03 3.3294822e-03 2.0638533e-03 5.8922322e-03 2.1421877e-03 1.0254014e-03 1.7967204e-03 2.4056491e-05 4.9606070e-04 1.5493043e-04 7.3786851e-04 5.7634641e-03 2.2821239e-04 9.8983432e-03 4.8234579e-03 3.6558248e-03 4.1003359e-03 5.8726550e-03 5.9033047e-03 6.1776943e-03 5.1708698e-03 7.0154090e-03 4.1004756e-03 1.5716690e-03 3.6383691e-03 2.8864008e-03 6.7180252e-03 9.3042807e-03 4.3918407e-03 2.3314998e-03 2.6748196e-03 1.1913776e-02 5.9266374e-03 3.6227612e-03 5.2997514e-03 7.7658365e-03 2.0655115e-03 2.8598574e-03 2.0567764e-03 1.5031818e-03 1.3330059e-03 5.6389436e-03 2.2318728e-03 4.9186471e-03 7.3077456e-04 6.3335619e-03 1.3529510e-03 6.4463256e-03 4.4695359e-03 6.6841393e-03 2.3650796e-03 1.3974597e-03 2.1137539e-03 5.1383865e-03 3.4413190e-03 4.8234579e-03 4.7468419e-03 5.6953492e-03 3.9468482e-03 4.2095924e-03 2.1520165e-03 5.7168561e-03 2.8181362e-03 2.6785665e-04 8.6267880e-04 1.5080867e-03 1.0657562e-03 9.1259932e-05 3.2834346e-03 5.6400090e-04 1.5768182e-02 8.4481987e-03 5.6912110e-03 7.5460195e-03 9.7379656e-03 8.1209399e-03 1.1078194e-02 7.1125751e-03 8.8544675e-03 7.7247994e-03 3.7380313e-03 5.6213520e-03 4.8502413e-03 1.0137958e-02 1.3681745e-02 7.7719782e-03 4.9953780e-03 6.2009166e-03 1.4059422e-02 6.9488117e-03 6.3504999e-03 9.4504672e-03 9.5001122e-03 3.1213299e-03 6.2307927e-03 4.0875652e-03 2.8287725e-03 3.7400310e-03 8.8571631e-03 3.1845212e-03 
5.9757813e-03 2.8285304e-03 9.6559261e-03 2.8616391e-03 9.5937311e-03 5.3787128e-03 1.1658427e-02 5.5682032e-03 3.8827352e-03 3.6680728e-03 8.1913042e-03 4.3904219e-03 8.4481987e-03 8.3060404e-03 9.4592722e-03 5.6422254e-03 5.2631423e-03 4.2070121e-03 1.0442544e-02 6.5544595e-03 1.6777879e-03 1.6905742e-03 1.4559893e-03 2.3696578e-04 1.7661444e-03 8.8626613e-04 1.8929605e-02 1.1198434e-02 8.2126916e-03 1.0357192e-02 1.2691977e-02 1.1286676e-02 1.3897779e-02 1.0124890e-02 1.2174180e-02 9.8437553e-03 5.0898821e-03 8.1181082e-03 6.9678076e-03 1.3160374e-02 1.6460126e-02 9.6980805e-03 7.2881507e-03 8.4988687e-03 1.8112744e-02 9.9154333e-03 8.5214392e-03 1.1906445e-02 1.2939064e-02 4.9240987e-03 8.3797448e-03 6.3232280e-03 4.3836801e-03 5.2916266e-03 1.1816149e-02 5.2785357e-03 8.7418147e-03 4.4568647e-03 1.2648763e-02 4.7832566e-03 1.2911612e-02 7.7565330e-03 1.3968530e-02 7.8153448e-03 5.3323341e-03 5.3832666e-03 1.0542156e-02 5.6397042e-03 1.1198434e-02 1.0902962e-02 1.1744032e-02 7.3882415e-03 7.5970260e-03 6.0277743e-03 1.2454722e-02 8.6970669e-03 6.8936185e-04 2.4185836e-04 6.5755985e-04 5.6966351e-03 2.3035735e-04 9.8692354e-03 4.6426684e-03 3.3219514e-03 3.9792968e-03 5.6658817e-03 5.5182925e-03 6.1624599e-03 4.8454827e-03 6.5522795e-03 4.0009447e-03 1.4714328e-03 3.2975731e-03 2.5896752e-03 6.3872309e-03 9.0422907e-03 4.2582444e-03 2.2027730e-03 2.6736829e-03 1.1276803e-02 5.4428314e-03 3.3876877e-03 5.2005260e-03 7.2827649e-03 1.7342875e-03 2.7931367e-03 1.8984292e-03 1.2582586e-03 1.2661063e-03 5.3418398e-03 1.9890109e-03 4.4646783e-03 7.0136727e-04 6.0091268e-03 1.2000111e-03 6.3087140e-03 3.9600850e-03 6.7054154e-03 2.3290598e-03 1.3528267e-03 1.8394373e-03 4.8617299e-03 3.0806223e-03 4.6426684e-03 4.5595872e-03 5.5227532e-03 3.5904469e-03 3.7269772e-03 1.9354221e-03 5.7786823e-03 2.8379142e-03 1.6160265e-04 9.1316164e-04 4.5133200e-03 3.9636483e-04 1.2999342e-02 7.7990273e-03 6.7507696e-03 6.8565065e-03 9.1164934e-03 9.6187705e-03 8.8289255e-03 8.5171572e-03 1.1041936e-02 6.3870800e-03 3.1070828e-03 6.7336821e-03 5.5817760e-03 1.0377938e-02 1.2745315e-02 6.6393015e-03 4.6463353e-03 4.6865341e-03 1.7095703e-02 9.6546615e-03 6.2703784e-03 7.8942299e-03 1.1919237e-02 4.5195099e-03 4.9936591e-03 4.3235727e-03 3.5370732e-03 2.9138150e-03 9.0955590e-03 4.5162367e-03 8.4195838e-03 2.1592479e-03 9.9327314e-03 3.3056202e-03 9.5169563e-03 7.9097267e-03 8.9017922e-03 4.4124132e-03 2.8482486e-03 4.3995745e-03 8.1140558e-03 5.6448175e-03 7.7990273e-03 7.6396844e-03 8.3822247e-03 6.5839845e-03 7.5512097e-03 4.3854717e-03 7.5528676e-03 4.6977929e-03 6.1532842e-04 4.4755911e-03 9.8852555e-05 1.1287330e-02 6.2297232e-03 5.1189561e-03 5.6158860e-03 7.4118621e-03 7.9100135e-03 7.4458980e-03 7.1082862e-03 9.2159095e-03 4.9487763e-03 1.9996447e-03 5.0869737e-03 4.0020507e-03 8.3714289e-03 1.0564909e-02 5.1004357e-03 3.5205852e-03 3.7366566e-03 1.4679778e-02 7.9564069e-03 4.6553854e-03 6.3553187e-03 1.0091302e-02 3.0597107e-03 3.7953264e-03 3.3093658e-03 2.2411736e-03 1.9075393e-03 7.2834604e-03 3.5416682e-03 6.7651117e-03 1.4482690e-03 7.9807479e-03 2.4205104e-03 8.4840253e-03 5.9704010e-03 7.4790367e-03 3.4333236e-03 1.8691676e-03 2.9457380e-03 6.2606998e-03 3.9378134e-03 6.2297232e-03 6.0227040e-03 6.6475915e-03 4.7553767e-03 5.6493596e-03 2.9997214e-03 6.2979764e-03 3.7014773e-03 2.9064840e-03 2.5951953e-04 1.5239927e-02 8.3975944e-03 6.0152535e-03 7.5736149e-03 9.7205279e-03 8.7194769e-03 1.0665619e-02 7.7107157e-03 9.6513260e-03 7.3495285e-03 3.4129734e-03 5.9493098e-03 4.9891659e-03 
1.0300134e-02 1.3429483e-02 7.3762928e-03 4.9967161e-03 5.9349958e-03 1.5151045e-02 7.8143476e-03 6.2821596e-03 9.0873094e-03 1.0390318e-02 3.3768176e-03 5.9427925e-03 4.2522990e-03 2.8707879e-03 3.4399883e-03 9.0442431e-03 3.6087055e-03 6.7269934e-03 2.6557188e-03 9.8316941e-03 3.0241465e-03 9.9672648e-03 5.9844482e-03 1.0964814e-02 5.3765757e-03 3.5060112e-03 3.7251338e-03 8.1205003e-03 4.4033527e-03 8.3975944e-03 8.1986819e-03 9.1329776e-03 5.6810271e-03 5.8010063e-03 4.1618459e-03 9.6695634e-03 6.1457695e-03 3.7030792e-03 2.6425725e-02 1.8599298e-02 1.5467357e-02 1.8635118e-02 2.0442342e-02 2.0541696e-02 2.1259986e-02 1.9510831e-02 2.1931276e-02 1.5294793e-02 9.1682566e-03 1.5285848e-02 1.2953400e-02 2.0823480e-02 2.2513198e-02 1.4419732e-02 1.4350471e-02 1.5480442e-02 2.9209010e-02 1.9066274e-02 1.4266029e-02 1.8093335e-02 2.3152907e-02 1.0464507e-02 1.4480769e-02 1.3685178e-02 9.2298310e-03 1.0125817e-02 1.9700965e-02 1.2730523e-02 1.7262155e-02 1.0225203e-02 2.0352884e-02 1.1535854e-02 2.3443654e-02 1.4503051e-02 1.9462802e-02 1.4723300e-02 9.8246219e-03 1.0395301e-02 1.6330792e-02 8.8773452e-03 1.8599298e-02 1.7720786e-02 1.7175716e-02 1.1774527e-02 1.4190018e-02 1.1287889e-02 1.7334951e-02 1.4959707e-02 1.2176711e-02 6.5819636e-03 5.0346277e-03 6.0397395e-03 7.7827629e-03 7.8599708e-03 8.1942510e-03 7.0899174e-03 9.0304038e-03 5.3331159e-03 2.1084043e-03 4.9833227e-03 3.9086435e-03 8.5223444e-03 1.0895531e-02 5.3819973e-03 3.7720778e-03 4.2725796e-03 1.4381123e-02 7.5978371e-03 4.7474248e-03 6.8472012e-03 9.8808760e-03 2.7678758e-03 4.1999833e-03 3.4360629e-03 2.0712504e-03 2.1185414e-03 7.4497841e-03 3.4016631e-03 6.4264729e-03 1.6858980e-03 8.1251590e-03 2.4514173e-03 8.9161864e-03 5.4840504e-03 8.2209587e-03 3.8745808e-03 2.1076714e-03 2.7824071e-03 6.3561971e-03 3.5186798e-03 6.5819636e-03 6.3377454e-03 6.9667686e-03 4.4964760e-03 5.2124475e-03 2.9917795e-03 7.0285944e-03 4.2716151e-03 1.5675754e-03 4.2437140e-03 2.9528525e-03 1.2801911e-03 5.5300675e-03 5.2313183e-04 7.0373258e-03 7.0028853e-03 1.5817688e-03 4.6017698e-03 4.2905415e-03 3.9763653e-03 1.7507004e-03 9.8088526e-04 2.0205251e-03 3.8651167e-03 2.8498196e-03 8.0083780e-03 8.7219602e-03 2.5429883e-03 8.6090275e-04 7.9906965e-03 6.1518285e-03 2.2097973e-03 5.7963314e-03 5.7460009e-03 4.2262969e-03 2.1872477e-03 8.9731860e-03 7.8793225e-03 5.8991938e-03 1.8999235e-03 7.1341168e-03 7.0170212e-03 6.4409068e-03 6.9062404e-04 3.2402506e-03 4.2346770e-03 4.9489360e-03 1.9844891e-03 6.6121605e-03 1.5675754e-03 1.4409377e-03 1.2937675e-03 4.5154908e-03 6.0031850e-03 4.0189646e-03 1.2526895e-03 2.2326743e-03 6.7976748e-04 4.4251056e-04 5.1964468e-05 1.5314410e-03 4.7093005e-04 2.2872076e-03 2.4409680e-03 6.1996952e-04 1.8531872e-03 7.0675084e-04 7.4672595e-04 2.9977980e-04 1.6320684e-03 1.1279305e-03 6.4871929e-04 6.2074215e-04 4.0109945e-03 3.1610099e-03 4.6600326e-04 4.3207236e-04 3.1187047e-03 1.7495883e-03 3.8892953e-04 1.4972893e-03 1.7881991e-03 1.3932149e-03 1.6264024e-04 3.1616233e-03 2.5553184e-03 1.9652949e-03 2.4129313e-04 2.2301817e-03 3.0218268e-03 1.9168834e-03 1.5054952e-03 6.4024254e-04 1.6084351e-03 1.4082859e-03 5.9336508e-04 3.2376306e-03 1.1102230e-16 6.3653022e-05 8.5949029e-04 1.8263557e-03 1.6881339e-03 9.7411877e-04 1.7845049e-03 6.1330704e-04 7.1368649e-04 9.0151789e-04 6.1896245e-04 2.2461726e-03 1.0406196e-03 1.1172454e-03 1.6730261e-03 1.8311894e-03 1.8071232e-06 2.5364008e-04 8.3907159e-04 3.3874526e-03 2.1375798e-03 4.5487046e-04 1.1672957e-03 2.8115805e-03 1.2138761e-03 7.0446879e-04 
1.8201572e-03 1.6098088e-03 5.2152529e-04 1.0325997e-03 6.5974667e-04 9.0603694e-04 1.4060118e-03 4.3618665e-04 1.3469029e-03 7.5850938e-04 1.4500880e-03 6.7416609e-04 1.0299923e-03 2.5247687e-03 4.1453726e-04 3.7399848e-03 9.2294426e-04 1.7551096e-03 7.2749277e-04 1.1614195e-03 2.4439916e-03 6.7976748e-04 8.1575962e-04 2.1038526e-03 1.4799615e-03 3.3653084e-04 6.6936205e-04 3.8899674e-03 1.5033836e-03 6.1892938e-04 8.9565267e-04 1.1187414e-03 1.0854623e-03 1.6238473e-03 1.6997610e-03 2.5416419e-03 7.7939556e-04 1.2163589e-03 1.1645635e-03 3.7479516e-03 2.5673270e-03 2.8413719e-04 3.4882680e-04 3.5538869e-03 2.1358644e-03 1.3529179e-03 1.5262264e-03 2.0757398e-03 1.9224937e-03 7.7565401e-04 6.3114517e-04 2.1204969e-03 1.7089017e-03 5.9516371e-04 1.8253945e-03 1.7466546e-03 1.3725400e-03 9.7938279e-04 1.2586511e-03 1.2414457e-03 2.1946939e-03 3.0180258e-03 3.2655328e-04 2.0574339e-03 1.9692598e-03 1.9049224e-03 4.6812273e-03 4.4251056e-04 7.9383810e-04 2.4056659e-03 3.2171363e-03 2.0291670e-03 1.5121604e-03 3.2358269e-03 7.8092419e-04 1.6056499e-03 4.5150817e-04 2.5183573e-03 2.4854849e-03 8.1663583e-04 2.4301739e-03 9.3222022e-04 1.0572824e-03 1.7339381e-04 1.3855383e-03 1.3424761e-03 1.0242405e-03 9.7281350e-04 3.6981982e-03 3.3860618e-03 6.8052310e-04 4.8835842e-04 3.1532670e-03 2.2405021e-03 6.9163264e-04 1.9828394e-03 2.3495346e-03 1.9517373e-03 1.3548451e-04 3.7877283e-03 2.8247707e-03 2.6466102e-03 1.4866617e-04 2.8614588e-03 3.2185979e-03 2.1613637e-03 1.5430396e-03 1.0217871e-03 2.1845781e-03 1.8662401e-03 6.6257305e-04 3.7358116e-03 5.1964468e-05 1.2907411e-04 9.1062515e-04 2.1316335e-03 1.9388777e-03 1.3908595e-03 1.9526372e-03 9.5243046e-04 3.3534848e-03 2.1612689e-04 1.1409237e-04 3.7511533e-03 4.4135123e-03 6.6848080e-04 1.6638889e-03 1.5639090e-03 5.3797969e-03 4.6512834e-03 1.2304041e-03 2.1424312e-03 1.1209148e-03 3.9967200e-04 2.4397061e-03 3.5566606e-03 2.8773633e-04 1.9980195e-03 2.5921392e-03 1.0825130e-03 2.8538916e-03 3.5346603e-03 9.3770297e-04 1.4682668e-03 3.5169417e-04 2.8507687e-03 1.3414178e-03 1.7057263e-03 1.1395352e-03 1.1778138e-03 6.0686882e-03 1.8828714e-03 4.1140616e-03 2.6499598e-03 2.9837515e-03 5.4227271e-03 1.5314410e-03 2.0381041e-03 4.2816261e-03 3.9075302e-03 1.2227927e-03 2.5264748e-03 6.5268770e-03 2.9950818e-03 4.2523782e-03 4.6456758e-03 7.0284879e-04 2.6206169e-03 2.2973667e-03 2.1186617e-03 1.0662204e-03 1.4771683e-03 1.2714179e-03 1.6200996e-03 9.3400517e-04 6.3864823e-03 5.7928278e-03 1.2702417e-03 3.0680262e-04 5.4794582e-03 3.6431092e-03 7.0967201e-04 2.9368020e-03 3.3273895e-03 2.1071133e-03 1.0635659e-03 5.3575283e-03 5.0378701e-03 2.9935636e-03 1.0701915e-03 3.8904013e-03 4.2643219e-03 4.2480632e-03 6.5030200e-04 1.1722241e-03 2.1985353e-03 2.8568074e-03 1.2361632e-03 4.8426872e-03 4.7093005e-04 4.8520973e-04 9.1253531e-04 3.1519557e-03 3.8819044e-03 2.1030647e-03 9.4974174e-04 6.5992898e-04 2.7415573e-04 4.6934818e-03 4.8265016e-03 1.1075187e-03 2.2246224e-03 2.7094103e-03 7.2547386e-03 5.7651516e-03 1.2879775e-03 2.2498412e-03 1.7969640e-03 2.9988677e-04 3.2873557e-03 4.6281528e-03 3.4411066e-04 2.2557578e-03 3.0860670e-03 7.9349579e-04 3.1195585e-03 3.7700064e-03 1.7729958e-03 8.2927671e-04 3.1955044e-04 2.5127017e-03 2.3924363e-03 1.2355886e-03 6.3306711e-04 1.7271405e-03 7.3517242e-03 1.9482896e-03 4.3986367e-03 3.1257329e-03 4.1788193e-03 6.3227646e-03 2.2872076e-03 2.9291555e-03 5.5970414e-03 4.9483109e-03 1.7952206e-03 3.0186434e-03 7.6597710e-03 3.3340783e-03 5.0895780e-03 5.6951678e-03 1.1683986e-03 2.4173290e-03 
2.2930538e-03 6.6766405e-03 6.0598142e-03 2.0196993e-03 3.2103351e-03 7.1260012e-04 2.1040770e-04 3.4457175e-03 4.8676879e-03 4.7880714e-05 2.6160358e-03 3.7638559e-03 1.6534967e-03 3.7166920e-03 4.7397184e-03 1.5966719e-03 1.7314841e-03 3.1225254e-04 3.8216967e-03 2.0442457e-03 2.2822879e-03 1.3421268e-03 1.4265946e-03 7.7649269e-03 2.8740126e-03 5.4160448e-03 3.5155679e-03 4.0306705e-03 6.4534752e-03 2.4409680e-03 3.0445932e-03 5.6145787e-03 4.8826195e-03 1.5492752e-03 3.4872724e-03 8.3107496e-03 4.2635519e-03 8.0787158e-04 1.6588755e-03 9.5088926e-04 1.1215622e-03 1.1319394e-03 1.0749073e-04 1.3651148e-03 1.0419898e-03 7.3951172e-03 5.6721517e-03 2.8717807e-04 1.2167194e-04 6.0929710e-03 1.9467902e-03 2.7497575e-04 2.6021687e-03 1.4711384e-03 7.6653532e-04 1.1979861e-03 4.6497866e-03 4.6528613e-03 2.0491908e-03 1.1214382e-03 3.1087470e-03 5.8331337e-03 2.9008505e-03 5.1851355e-04 1.1077981e-03 7.4180273e-04 1.1189833e-03 3.3213225e-04 1.9786092e-03 6.1996952e-04 3.2630432e-04 1.6788710e-04 1.0717507e-03 2.5327575e-03 6.9916673e-04 4.8860962e-04 5.0860173e-04 1.7852918e-03 8.1925898e-04 2.7637031e-03 3.4415701e-03 7.6048751e-04 1.3454814e-03 1.4168273e-03 9.1102129e-03 5.4634531e-03 7.0608839e-04 1.5253323e-03 6.6930140e-03 9.6900708e-04 7.0882160e-04 2.0455668e-03 3.9468549e-04 1.1422246e-04 2.4972040e-03 3.2985277e-03 4.3265068e-03 9.6084950e-04 2.6351726e-03 1.9457490e-03 6.5435669e-03 2.5549327e-03 2.2787630e-03 1.2770395e-03 6.6117888e-05 4.1362061e-04 1.2709485e-03 9.0111668e-04 1.8531872e-03 1.4879007e-03 1.4236785e-03 8.2396190e-04 2.2228636e-03 3.0260473e-04 1.7887502e-03 9.8733924e-04 2.2593145e-04 8.4529966e-04 3.3545284e-03 2.0967474e-03 4.8795197e-04 1.2181976e-03 2.8660735e-03 1.2480152e-03 6.7735579e-04 1.8245752e-03 1.6711447e-03 4.7937341e-04 1.0455121e-03 6.9960603e-04 8.5898853e-04 1.3874556e-03 4.5677202e-04 1.3792343e-03 7.7973864e-04 1.4683104e-03 6.8342524e-04 1.0561802e-03 2.6555506e-03 3.7342452e-04 3.7432085e-03 9.6667604e-04 1.7304819e-03 6.7810292e-04 1.1254700e-03 2.3291425e-03 7.0675084e-04 8.2471895e-04 2.0738735e-03 1.3951480e-03 2.9534051e-04 6.3569311e-04 3.8846725e-03 1.5360581e-03 9.9102840e-04 2.6998600e-03 1.1403522e-03 5.9435819e-04 1.1584769e-03 4.5439398e-03 2.4115910e-03 2.2376231e-04 1.3051908e-03 3.1170665e-03 2.4639357e-04 6.5093729e-04 1.0408914e-03 3.2170646e-04 6.8086675e-04 7.3723950e-04 1.9464154e-03 1.6938542e-03 1.2010351e-03 8.6567601e-04 1.2605984e-03 4.0415296e-03 6.0024917e-04 2.8241029e-03 9.3145876e-04 8.7987159e-04 1.5390142e-04 6.1621791e-04 1.2031695e-03 7.4672595e-04 6.4561848e-04 1.3132954e-03 5.7671738e-04 4.3996999e-04 1.2646330e-04 2.7819647e-03 1.1571313e-03 1.1519638e-03 1.4945239e-03 1.5137092e-03 1.7542391e-03 3.0584682e-03 3.1624125e-03 6.9423087e-04 8.0741075e-04 2.9564986e-03 2.1264808e-03 1.1956222e-03 2.4834226e-03 2.4046990e-03 2.4389289e-03 1.1428932e-04 4.1572751e-03 2.6270682e-03 3.3826290e-03 1.0906814e-05 3.3779688e-03 3.9749243e-03 1.6265579e-03 2.0735613e-03 1.7238890e-03 2.6977636e-03 1.8191981e-03 5.1180139e-04 3.3068455e-03 2.9977980e-04 3.0495320e-04 9.6732841e-04 1.7461140e-03 1.4560452e-03 1.4920651e-03 2.5772801e-03 1.7019143e-03 1.0341988e-03 4.0085546e-03 3.6492138e-03 7.4340196e-03 7.9703529e-03 1.4966880e-03 7.8440192e-04 7.7789554e-03 4.4532033e-03 2.2360209e-03 5.9110335e-03 4.1723764e-03 3.6115483e-03 1.8645164e-03 8.6587941e-03 6.9790025e-03 5.9223579e-03 1.3508421e-03 6.9679643e-03 8.8100022e-03 4.3884451e-03 9.3956602e-04 3.8113149e-03 3.5744927e-03 3.3082295e-03 7.5316334e-04 
3.6491972e-03 1.6320684e-03 1.1884181e-03 4.5803930e-04 2.1497659e-03 4.0210128e-03 2.8148066e-03 1.4099016e-03 2.8014283e-03 2.0075866e-03 1.7351697e-03 8.4094224e-03 6.5677927e-03 3.9418601e-04 3.5055970e-04 7.1718250e-03 2.0973679e-03 6.4851765e-04 3.3744442e-03 1.5292791e-03 9.4071576e-04 1.7144529e-03 5.4753391e-03 5.4188405e-03 2.5828658e-03 1.5301373e-03 3.8027206e-03 7.3328244e-03 3.1192913e-03 6.2731613e-04 1.7741918e-03 8.4678687e-04 1.1412463e-03 3.3683903e-04 1.4744098e-03 1.1279305e-03 6.7560808e-04 1.3395103e-04 7.7360874e-04 2.7301747e-03 8.0481079e-04 5.3215427e-04 1.0082007e-03 2.3386408e-04 4.5650335e-03 2.1085336e-03 8.9398060e-04 1.5597528e-03 2.5626398e-03 8.9643929e-04 4.6401620e-04 2.1474828e-04 9.4200330e-04 7.4363388e-04 8.8544521e-04 1.1214375e-03 1.5513223e-03 4.7369920e-04 1.2949283e-03 5.0107186e-04 1.9686134e-03 1.5839083e-03 3.0734994e-03 1.1037013e-04 1.0184869e-03 9.4459481e-04 1.6215035e-03 3.1102930e-03 6.4871929e-04 8.4571319e-04 2.1956968e-03 2.1853516e-03 1.4074387e-03 6.7079422e-04 3.0128980e-03 5.5375407e-04 5.9745601e-03 3.5596915e-03 1.0825427e-03 1.1193941e-03 3.8619013e-03 1.7868782e-03 2.6699310e-04 7.4652731e-04 1.5644519e-03 7.6914730e-04 1.1868249e-03 2.1537215e-03 2.8904145e-03 6.7050048e-04 1.5688544e-03 1.1553603e-03 2.3985919e-03 2.8520067e-03 2.1226295e-03 2.6532317e-05 9.5443067e-04 1.5325109e-03 1.7403036e-03 3.8293675e-03 6.2074215e-04 7.8759073e-04 1.8993585e-03 2.7454611e-03 2.5765044e-03 1.0363780e-03 2.0443972e-03 1.4335859e-04 1.4016332e-03 5.4805806e-03 6.7405649e-03 6.4579738e-04 5.1748633e-03 6.3487765e-03 4.3874825e-03 6.7084849e-03 8.1049162e-03 2.6677096e-03 4.6345484e-03 1.7447861e-03 7.4893653e-03 2.9062036e-03 5.4610122e-03 3.0610024e-03 2.8530196e-03 9.9111331e-03 5.6471487e-03 8.9070297e-03 6.1633735e-03 5.5983777e-03 9.1911029e-03 4.0109945e-03 4.6927139e-03 7.3914201e-03 7.0233589e-03 3.0645626e-03 6.1283907e-03 1.0939049e-02 7.1096522e-03 3.7615860e-03 5.7439366e-03 2.3434979e-04 2.1625792e-03 4.1436319e-03 1.4386017e-03 3.2647240e-03 4.5819077e-03 2.2920937e-03 1.0610016e-03 6.9564930e-05 3.4218787e-03 2.8472107e-03 1.7665885e-03 1.7134963e-03 1.2022963e-03 8.8958832e-03 3.1032776e-03 5.2642601e-03 3.2432815e-03 4.6032265e-03 6.0005810e-03 3.1610099e-03 3.7408101e-03 6.4052558e-03 4.8371122e-03 1.3589516e-03 3.3936977e-03 9.2321743e-03 4.6963171e-03 5.0386940e-04 4.2984797e-03 8.6973796e-04 3.5526497e-04 1.7666127e-03 7.2192984e-04 6.5802622e-04 6.6868074e-04 3.2431480e-03 2.9035420e-03 1.6594087e-03 6.4854242e-04 2.1612810e-03 4.9124735e-03 1.3725331e-03 1.5050690e-03 9.8777331e-04 7.5260407e-04 4.1403958e-04 1.4453279e-04 1.2645371e-03 4.6600326e-04 2.4792089e-04 4.5627999e-04 4.7560079e-04 1.1214322e-03 2.2091820e-04 1.5167980e-03 7.9735767e-04 5.8276719e-03 2.5886189e-03 4.3280198e-04 2.9205975e-03 2.2021650e-03 1.3597486e-03 9.5466012e-04 5.1915037e-03 4.8194588e-03 2.6659023e-03 8.3877743e-04 3.6595156e-03 5.4665486e-03 3.2603866e-03 3.5246614e-04 1.2546385e-03 1.3738455e-03 1.7317120e-03 4.0173517e-04 2.8852033e-03 4.3207236e-04 2.1872967e-04 1.7262684e-04 1.6274416e-03 2.8950267e-03 1.1933707e-03 5.1057942e-04 6.2761497e-04 3.2515433e-03 4.5844712e-03 2.0397580e-03 4.4892831e-03 5.6236454e-03 2.1663608e-03 1.9457792e-03 4.4715761e-04 4.4562273e-03 2.6795180e-03 2.6855442e-03 1.3224094e-03 1.8950095e-03 8.9432615e-03 3.4981521e-03 6.3671383e-03 4.3127165e-03 4.9533188e-03 7.5137514e-03 3.1187047e-03 3.8229023e-03 6.6814123e-03 5.8612561e-03 2.0597987e-03 4.3006086e-03 9.5397630e-03 5.0775992e-03 
1.3692874e-03 9.6403546e-04 1.3966479e-04 8.6664284e-04 1.6597338e-03 1.3212159e-03 1.4578093e-03 1.0432500e-03 1.9220792e-03 8.7918940e-04 4.4168060e-03 5.0670419e-04 4.4696559e-03 1.4208846e-03 1.1004494e-03 1.6436932e-04 1.5507648e-03 1.1191058e-03 1.7495883e-03 1.6621469e-03 2.5414268e-03 8.8363295e-04 4.0096791e-04 3.3482530e-04 4.2345389e-03 1.9374158e-03 1.2770848e-03 1.0395507e-03 3.9095383e-04 9.2829494e-04 2.8992176e-03 3.2975300e-03 9.5393320e-04 1.0925514e-03 1.6776296e-03 3.8507253e-03 2.3805613e-03 1.2082502e-03 2.8135004e-04 4.7639114e-04 8.7356741e-04 7.4310336e-04 2.3860646e-03 3.8892953e-04 3.1610282e-04 8.0643344e-04 1.4728895e-03 2.0712058e-03 4.6512964e-04 1.1229948e-03 8.8791739e-05 1.1956364e-03 1.3055469e-03 1.5870433e-03 3.7223827e-04 1.0219838e-03 4.8252621e-04 2.1760282e-03 1.1580239e-04 1.5036929e-03 1.5974284e-03 4.9006747e-03 5.1331960e-04 1.6812595e-03 1.3616634e-03 2.7816530e-03 3.8434871e-03 1.4972893e-03 1.8359326e-03 3.6884416e-03 3.0552141e-03 1.4996272e-03 1.2110783e-03 4.7697841e-03 1.3648602e-03 3.9191527e-04 1.9808749e-03 1.8113262e-03 2.3841094e-03 7.8154360e-04 2.2164780e-03 1.0439462e-03 5.1766229e-03 1.1157470e-03 3.6934914e-03 1.2626931e-03 5.1659909e-04 5.8959899e-05 1.4097594e-03 7.9470954e-04 1.7881991e-03 1.5872251e-03 2.1165612e-03 7.0368642e-04 9.2823209e-04 1.5533041e-04 3.3160103e-03 1.5048956e-03 2.0235000e-03 2.4966081e-03 3.5807846e-03 4.7284012e-04 2.2730788e-03 1.2853266e-03 5.0519729e-03 2.3762808e-03 2.2444800e-03 6.5500299e-04 2.4750944e-05 4.4815197e-04 1.3292061e-03 1.4780836e-03 1.3932149e-03 1.1924273e-03 1.5277304e-03 1.1896691e-03 2.0673918e-03 2.5400150e-04 1.8225589e-03 5.4227610e-04 2.9880363e-03 1.8314313e-03 2.5238977e-03 5.5757663e-05 2.3698503e-03 2.8213423e-03 1.3040881e-03 2.4190319e-03 1.1221589e-03 2.3332880e-03 1.5386845e-03 7.3215728e-04 3.3801838e-03 1.6264024e-04 2.8457958e-04 1.3051710e-03 1.8856170e-03 1.1599202e-03 1.2252261e-03 2.8396493e-03 1.3307965e-03 7.9081662e-04 1.1208231e-03 3.7513747e-03 2.2532467e-04 1.8915999e-03 1.8066444e-03 7.8138393e-03 1.7396624e-03 2.9887611e-03 2.1626603e-03 4.5716343e-03 4.7705983e-03 3.1616233e-03 3.6125170e-03 5.9695278e-03 4.2653532e-03 1.8208998e-03 2.2513824e-03 7.5822521e-03 3.0950854e-03 2.6638425e-03 2.3329339e-03 1.2833498e-03 1.8503904e-03 7.6221882e-04 7.7332789e-03 2.4545666e-03 4.1819304e-03 2.3678329e-03 3.7122984e-03 4.8118232e-03 2.5553184e-03 3.0167737e-03 5.3596333e-03 3.8038292e-03 8.6990936e-04 2.5148680e-03 7.9707274e-03 3.8479364e-03 3.0845972e-03 3.4545687e-04 3.2912795e-03 2.5772857e-03 4.0614792e-03 4.9082550e-04 6.4992316e-04 1.0688017e-03 2.7640538e-03 2.9796108e-03 1.9652949e-03 2.0639503e-03 3.2701588e-03 2.6492756e-03 2.3500195e-03 8.8549039e-04 3.5901884e-03 9.2048730e-04 3.0276112e-03 3.6195589e-03 1.4632965e-03 2.1763560e-03 1.5224813e-03 2.5475111e-03 1.6768950e-03 5.4331181e-04 3.2527051e-03 2.4129313e-04 2.7799169e-04 1.0453548e-03 1.7267901e-03 1.3027778e-03 1.3633707e-03 2.6503970e-03 1.5759024e-03 2.1731558e-03 1.7887862e-03 5.7263386e-03 8.5148931e-04 1.6383081e-03 1.3458097e-03 3.3594814e-03 3.6389958e-03 2.2301817e-03 2.5225544e-03 4.3534713e-03 3.1787209e-03 1.6954559e-03 1.3061945e-03 5.4111697e-03 1.7803664e-03 4.1741292e-03 7.9197469e-03 2.2821957e-03 5.7150571e-03 5.2344879e-03 5.9478982e-03 9.4860449e-03 3.0218268e-03 3.9157169e-03 7.0424278e-03 7.6315025e-03 4.1845322e-03 4.7751866e-03 8.3019483e-03 3.6628005e-03 5.6294536e-03 2.4239872e-03 2.7647556e-03 9.2421646e-04 1.7874511e-03 2.0292522e-03 1.9168834e-03 
1.9458286e-03 3.1481713e-03 1.3908818e-03 1.3324310e-05 1.1782972e-03 5.7639460e-03 3.2058410e-03 2.4140579e-03 2.0956665e-03 3.1463866e-03 1.2261330e-03 4.0134358e-03 1.5054952e-03 1.1240475e-03 4.7360391e-04 2.7309553e-03 5.1301270e-03 2.4137049e-03 8.9922816e-05 1.2592279e-03 8.6164876e-04 1.2616309e-03 1.6936265e-03 3.4914911e-03 6.4024254e-04 8.0798030e-04 1.9823430e-03 2.5037231e-03 2.1768005e-03 8.4515239e-04 2.3117307e-03 2.1993248e-04 5.6812764e-04 1.3931392e-03 1.4308209e-03 1.6084351e-03 1.3403960e-03 1.4811963e-03 1.2106398e-03 2.4235208e-03 3.5787719e-04 1.6247178e-03 6.1681362e-04 9.2162383e-04 6.4491663e-04 1.4082859e-03 1.1774797e-03 1.5785221e-03 4.0122185e-04 7.3237681e-04 6.4530654e-05 2.8852212e-03 1.3991525e-03 1.5175131e-03 5.9336508e-04 2.9036861e-04 2.0099198e-04 5.5129609e-04 1.5179733e-03 6.8841417e-04 1.3866758e-03 1.2880812e-03 3.2376306e-03 2.6196338e-03 2.0860993e-03 2.5676122e-04 1.7880288e-03 9.1594192e-04 3.5976292e-03 3.2249197e-03 6.3653022e-05 8.5949029e-04 1.8263557e-03 1.6881339e-03 9.7411877e-04 1.7845049e-03 6.1330704e-04 4.6521065e-04 1.3489557e-03 1.6857799e-03 7.7549379e-04 1.3461987e-03 6.0478443e-04 1.0485877e-03 2.7784351e-03 1.1708179e-03 5.9587066e-04 1.2049026e-03 1.1592978e-03 4.9960727e-04 2.5830270e-03 2.2390567e-03 9.5053595e-04 5.2396832e-03 2.8622802e-03 2.1952710e-03 8.7416055e-04 1.1307517e-03
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-cosine-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-cosine-ml.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7c6b67fa43c5fef11101d28dd46f4c1b325b65ee
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-cosine-ml.txt
@@ -0,0 +1 @@
+ 2.5695885e-01 2.6882042e-01 2.3470353e-01 2.9299329e-01 2.2742702e-01 3.1253572e-01 2.4986352e-01 3.0770122e-01 2.5191977e-01 2.7931567e-01 2.8133743e-01 2.6316239e-01 2.6067201e-01 3.2982339e-01 2.8993002e-01 2.5506356e-01 2.8728051e-01 2.4952121e-01 2.8613379e-01 2.6894157e-01 2.3606353e-01 2.1670935e-01 2.3470242e-01 2.4294172e-01 2.4376454e-01 2.3228195e-01 2.3554918e-01 2.4851241e-01 2.0917546e-01 2.4971488e-01 2.4264224e-01 2.7405461e-01 1.9086415e-01 2.6346574e-01 2.5908801e-01 2.2138495e-01 2.2910721e-01 2.2169919e-01 2.0660065e-01 2.3207102e-01 2.5554688e-01 2.5153751e-01 2.6073682e-01 2.0919640e-01 3.3984433e-01 2.7503792e-01 2.1709889e-01 2.7068095e-01 3.0307041e-01 2.4529612e-01 2.2987015e-01 2.7736967e-01 3.0310708e-01 3.0544316e-01 1.9205388e-01 2.7098021e-01 2.0722466e-01 2.6387343e-01 2.8998308e-01 2.2633010e-01 2.5177075e-01 1.6347011e-01 2.4036389e-01 2.6485871e-01 2.8491965e-01 2.2273619e-01 2.4511873e-01 2.5930533e-01 2.6589995e-01 2.7797191e-01 2.3357373e-01 2.4279909e-01 2.3544532e-01 1.9447286e-01 2.3993534e-01 2.0856243e-01 2.2125251e-01 2.1988206e-01 2.0590152e-01 2.6441952e-01 2.0052739e-01 2.2978496e-01 2.4483670e-01 2.3879510e-01 2.9398425e-01 2.7541852e-01 2.3777469e-01 2.9151131e-01 2.0672752e-01 2.4584031e-01 2.7475025e-01 2.7064343e-01 2.5603684e-01 2.6165327e-01 2.4233155e-01 1.7892657e-01 2.6111203e-01 1.9965682e-01 2.4201634e-01 2.6281353e-01 3.1928221e-01 1.9731963e-01 2.7752862e-01 2.2633080e-01 2.6783167e-01 2.5447186e-01 2.6424243e-01 2.1960672e-01 2.2984242e-01 2.8788736e-01 2.8681630e-01 2.6949787e-01 2.3993685e-01 2.4440073e-01 2.5010397e-01 2.3230769e-01 2.9879682e-01 2.4200592e-01 2.6957748e-01 2.6073240e-01 2.6355347e-01 2.3403674e-01 2.2411413e-01 2.2956729e-01 2.8105976e-01 2.2913304e-01 2.4898608e-01 2.3304000e-01 2.2692988e-01 2.3728251e-01 2.2552243e-01 2.0364084e-01 2.3359511e-01 2.6619167e-01 2.6666588e-01 2.3666880e-01 2.7239113e-01 2.0146697e-01 2.3045559e-01 2.1695523e-01 2.1387991e-01 2.2366404e-01 2.2809635e-01 2.0901297e-01 2.2441100e-01 2.3418882e-01 2.8552218e-01 2.4609015e-01 2.0282492e-01 2.5940295e-01 2.7407006e-01 2.3344890e-01 2.1179142e-01 2.7047821e-01 2.9832768e-01 2.0859082e-01 2.8881331e-01 1.8384598e-01 2.5286491e-01 2.2012615e-01 2.3615775e-01 2.6845565e-01 2.3356355e-01 2.7164193e-01 2.4179380e-01 2.5247973e-01 2.5637548e-01 3.2126483e-01 2.3100774e-01 2.8832546e-01 2.0043257e-01 2.7918333e-01 2.4884522e-01 2.2904723e-01 2.3738940e-01 2.9461278e-01 2.9782005e-01 3.0332073e-01 2.5175971e-01 3.1203784e-01 2.6611535e-01 2.3713507e-01 2.2203585e-01 2.3602325e-01 2.5093670e-01 2.6860434e-01 3.0137874e-01 2.3759606e-01 2.6840346e-01 1.9200556e-01
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-double-inp.txt b/voice_bridge/scipy/spatial/tests/data/pdist-double-inp.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7a77021775ddb61d226aa8c4ba60f0af013e4a6c
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-double-inp.txt
@@ -0,0 +1,20 @@
+8.278938049410748956e-01 9.035293984476246987e-01 1.862188994679486731e-01 8.921151312310462433e-01 2.061859119379583216e-02 3.440636727385729676e-01 1.533779912830328662e-01 5.701372300009802663e-01 5.510020730211558915e-01 1.792362258426003496e-01 8.086175120876580857e-01 6.115487184317183189e-01 1.233471787164852618e-02 1.441643531871039663e-03 4.044309209045688913e-01 3.561398959499905148e-01 1.281985712929750720e-01 8.663300833847481508e-01 8.696027786291581352e-01 3.611727370363766454e-01 5.283537658772616830e-01 1.440241088090119526e-01 3.112457227138950566e-01 6.031280796897889873e-01 9.230324792742518047e-01 2.332121881136874908e-01 3.192652267403439659e-02 3.466206294995559656e-01 2.988687728046366399e-01 5.116749542048093513e-02 2.584975830914494344e-01 4.302023478042227289e-01 8.003972751713522849e-01 9.364931911368097328e-01 9.737098649964673891e-01 4.718038453972229762e-01 4.526591686607864817e-01 1.056485678520797666e-01 5.883019714285405710e-01 3.846092237676981274e-01 6.461500053435473845e-01 1.013239729848824933e-01 1.216151561651189761e-01 5.159668929484659827e-01 8.452074473510227115e-01 9.885170962247968873e-01 7.623883073490128615e-01 2.291163243615434997e-02 5.775530980802381364e-01 7.820699896828091635e-01 8.239186345842965942e-01 3.391800105260227571e-01 9.546318451614538292e-01 3.789677917867695367e-01 4.526533399649290690e-02 8.366786473238587707e-01 3.082636811049858094e-01 1.173936820793450853e-01 7.631994969169442200e-02 2.997416650722183329e-01 5.795208655160232203e-01 3.942350892542011431e-01 1.175126383297261379e-01 4.928232513950027149e-01 9.421293996225950096e-01 8.365391053841342295e-02 6.868059693571844093e-01 3.589527962429440722e-01 7.592939427166059962e-01 5.623849466131448649e-01 2.110746828032050715e-01 9.824683704668600859e-01 2.661230142246236996e-01 6.162272315007123469e-01 5.023254536607497656e-01 5.202854476669782624e-02 5.835090668842095596e-01 7.864642118889143552e-01 2.504012386867506823e-01 6.728308641135989365e-01 4.610793534576096420e-01 4.820508770515909980e-01 9.720403251022265989e-01 3.100069285263498120e-01 7.681017126461753275e-01 7.956539306007082146e-02 2.593389637887737464e-01 1.137852590403054531e-01 3.885303073284454012e-01 8.599094660075957686e-01 5.215167875918280682e-02 1.620908248572288102e-01 1.859236090457663249e-01 6.247716512610480555e-01 3.415128495520775020e-01 7.034903368378029320e-01
6.037564640019568163e-01 2.338969434423310290e-01 1.002104885609900187e-02 7.866058403969036217e-01 +8.033694116033356369e-01 8.653264545544031572e-01 7.468340410754038539e-01 6.362430919910603278e-01 5.120006306625468628e-02 9.503348372633585450e-01 4.697732609626817935e-01 4.221305288459429317e-01 3.153452119838391354e-01 2.991014843442657556e-01 1.190667967280257811e-01 3.486567714509342109e-01 8.289493649885054660e-01 8.454811050800014049e-01 9.149673018211901265e-01 7.708707837193897738e-01 2.640157732122547785e-01 2.107897022189605396e-01 4.207633055054439408e-01 6.719500284654699174e-01 1.458031684893063007e-01 1.800412735886125493e-02 8.402733435220011149e-02 4.206760156883160295e-02 1.376933515041314227e-01 1.716717341022133692e-01 1.788220727652158892e-01 8.224310433402118869e-01 7.729093666867475898e-01 2.064223621025984556e-01 9.592092036227207741e-01 8.312490243754996344e-01 6.673289360369902834e-01 4.632847903690773261e-02 7.643954098358983762e-01 9.359341525615098023e-01 1.914966319163026176e-01 4.536590469402868031e-01 8.640836016538007147e-01 3.941529178175462444e-02 5.602101995205478469e-01 9.263806161941660067e-01 1.555995325944817820e-01 6.172208102950116348e-01 6.335576752812099866e-01 9.766975460368043649e-02 4.475795689539874278e-02 3.248842796104995934e-01 5.700377122149502540e-01 9.066962967256807504e-01 5.458460621505676347e-01 6.833401285581487405e-01 2.887244409544044155e-01 1.316338647016834784e-01 2.325673305245992140e-01 4.150121963188406760e-01 3.834845466366055833e-01 8.149365773968725302e-01 1.867003849450201702e-01 3.170322173543018707e-01 6.832093662682684476e-01 1.729728518929105618e-01 9.236557359702636250e-01 9.152941252150086360e-01 7.224879983096620384e-01 8.557920626598064517e-01 5.344883059251644974e-01 4.876873274449112783e-01 8.308277804506420949e-01 3.916624489322212410e-01 3.459695122273966916e-01 4.033512499027409604e-01 6.555726444913008155e-01 7.138452409380238173e-01 1.683937314599968094e-01 1.769382143486440961e-01 7.588683655178136700e-01 3.750589892880819010e-01 7.525176245126207197e-01 6.083961152538303052e-01 1.145972309907993258e-01 6.239614485809552580e-01 1.307655482065895880e-01 8.530458750670916190e-01 4.801602070124768584e-01 8.168122189863546989e-02 3.793139622744635675e-01 1.496986997776840189e-01 7.129023878302899186e-01 6.830979237438047358e-01 7.635375943876505644e-01 1.824004963251233402e-01 5.764695848992339444e-01 8.865113248731604223e-01 5.784337085544002388e-01 9.700026628755119562e-01 7.318207347905059112e-01 3.851401393936705331e-01 1.774291851193399161e-01 9.763423229242296220e-01 +9.287178470949695175e-01 1.748282433617460718e-01 9.238531711586964734e-01 8.291274445125006443e-01 9.513259272578692416e-01 7.486316801165745494e-01 6.257378457524477300e-01 2.062711693536473101e-01 3.970721244184766130e-01 2.738325225026445597e-01 8.735038948299954642e-01 5.415282140033768066e-01 5.176317904298315398e-01 5.347036264518250093e-01 7.482056965410627258e-01 4.140672582824351800e-01 8.709067272363142376e-01 9.499605569181273079e-01 5.380266748336398619e-01 4.369252161707162241e-01 8.235722216228258397e-03 4.308187193646527691e-01 6.030581482859224129e-01 7.316831195156517920e-01 5.540499846834291420e-01 2.044203040111662872e-01 8.645251782981867583e-01 1.816095717570278545e-01 9.639119168018674966e-01 3.572031072322333634e-01 5.580226816834680248e-01 5.586629875016585478e-01 7.213854320902782780e-01 8.513998260042524580e-01 6.308764347277173723e-02 4.299855362100638567e-01 8.789303907444128150e-01 9.178850359236285783e-01 
2.275205845091231582e-01 1.899395443939643213e-01 7.103070862773533944e-01 9.450015289553428399e-01 1.691856364522159595e-01 7.368719616877857925e-01 9.600189536623833231e-01 5.128846522932454244e-01 6.209162727118655578e-02 7.992250598838029907e-01 9.141050280518014937e-01 1.471297785256820978e-01 7.466162372930541524e-01 4.656107650642931084e-01 6.399324135161845728e-01 2.023617619481610230e-01 1.019104648900100996e-01 4.390693688536728700e-02 9.822620353006089600e-01 2.881951852926285529e-01 6.191575015960482098e-02 8.989580763251467932e-01 4.635958631890454429e-01 1.781973138114967270e-02 7.906911683818984571e-02 6.525270776225711167e-02 3.620583622807886925e-01 2.651673718940715796e-01 5.829372395929610651e-01 2.118159824373908595e-01 5.900287159143694504e-01 9.405929925178391215e-01 9.262415619063500971e-01 5.639581506302312475e-01 4.529556154689695635e-02 2.873819210518682166e-01 5.718545934306838996e-01 9.877670791317306742e-01 4.120364488714320927e-01 9.896078045634184583e-01 3.796586997026456523e-01 1.178183652203194098e-01 6.641068305236120795e-01 4.045960610587706618e-03 2.262690437428437340e-01 7.839938005832693957e-01 7.695391333937223743e-01 3.713918392552509884e-01 4.245533341514018399e-01 1.475072494020331915e-01 6.011975181419888514e-01 5.158174017998343741e-01 1.788706151398071764e-01 8.880707130134481986e-01 6.463351030474082659e-01 6.499920635615744624e-01 8.570273676455353318e-01 6.055019270899113515e-01 2.123561211054603159e-02 2.027688787664126968e-01 1.930834215328548487e-01 5.131906052747271518e-01 +2.599990881903107010e-01 6.767857524909899336e-01 7.188217446352963558e-01 3.037178903357997672e-01 4.252381412838680541e-01 4.070924411439535984e-02 8.426710493038247485e-02 8.301517457289483426e-01 8.254603255702420705e-01 7.258533909453509514e-01 9.958706809470796451e-01 1.323408451651194584e-01 8.523995455245143571e-01 2.572405385832454705e-02 4.715363690065482727e-01 7.920130365690022378e-01 7.613745641534582775e-01 5.108305991695683002e-01 7.908714335912382376e-01 4.641131983754837043e-01 3.112627109831845873e-01 4.218013908715474436e-01 3.291577909008427394e-01 2.538715054071232213e-01 1.362470842487485401e-01 2.716429790290709745e-01 1.485325814161112534e-01 4.514539027544387517e-01 6.900835128673067365e-01 7.793407072946112457e-02 5.938024345270752624e-01 1.497853829906865553e-01 5.399567982652856424e-01 1.419209916759478496e-03 7.719776132867679497e-01 3.130795105576239523e-01 6.670071611167494030e-01 8.900596881158256979e-01 8.011158503301568645e-01 7.089295605187424520e-01 4.671116382997058114e-01 6.682965170673403899e-01 6.524835265739736823e-02 5.454288420771494783e-01 7.751910790556310049e-01 8.192595541387335256e-01 3.098855848167891835e-01 3.689971355659119601e-01 8.666507475054133769e-01 2.749042684253171220e-01 3.566565602478318775e-01 4.838173174723044978e-01 1.032975933616413489e-01 5.063065339610417492e-02 5.791168455729079900e-01 3.573337411289496668e-01 6.714098909652352898e-01 2.917057662433912846e-01 2.654964332620638467e-01 7.171804039048814694e-01 3.314488637898249657e-01 5.230399837442840649e-01 6.866534136026025692e-02 1.252966394621071178e-01 5.349397882659551184e-01 2.841423847455760709e-01 4.158473635710734362e-01 7.197062989831272128e-01 5.123869045047864113e-01 8.675622821594339840e-01 8.097441845042540054e-01 7.317178252133832439e-01 3.300847596465853462e-01 5.922311859141077273e-01 8.852619511417836318e-02 2.673412917259408994e-01 6.878259052441990651e-01 3.223000927116328462e-01 8.859387123976615319e-01 5.722722388300067742e-01 
8.254877606669521750e-01 5.705299682290687624e-01 7.046478734972855262e-01 1.316324413616759559e-01 3.056358395675535800e-01 2.396516834600909140e-01 2.041201422493257311e-01 1.610755140653103989e-01 1.617012564641111538e-01 4.449920510036902144e-01 2.731012972755201274e-01 7.826874666257994662e-01 5.193612375350010746e-01 8.688804522977213729e-01 3.742157602758655610e-02 6.649628920608219307e-01 5.978149424619171315e-01 5.345645500553952711e-01 9.443202650415919441e-01 6.105837075491723498e-01 +6.387761328141735584e-01 4.210087412162694109e-01 3.777306694964789324e-01 3.576349403292201634e-01 7.272699618880260619e-01 9.173392803607671731e-02 1.212535698300880593e-01 3.871229381194544183e-01 7.735150198351389284e-01 4.687200483013695962e-01 5.161778571874678923e-01 9.839646447226980674e-01 8.626932748911960713e-01 9.618485576577924245e-01 2.997996427525421170e-01 3.955404657388794654e-01 8.480126027102616870e-01 8.194992325050480808e-01 2.800213436873294492e-01 7.188391466620779324e-01 2.289766105875049584e-01 3.838547514028287644e-01 1.363553401061209369e-01 2.131328253542326134e-01 2.666779468144075960e-02 3.252883844200405994e-01 4.207860197469600605e-01 2.991365385037647595e-01 9.180779845534067229e-01 8.787338732192649937e-01 5.404510999105649471e-01 1.735493827761729335e-01 7.405224640747264386e-01 3.927355563629583157e-01 3.957109873399460298e-01 1.313029813325972128e-01 6.434498219738993274e-01 7.162213694578050127e-01 6.454998257494671821e-01 3.808124530008022424e-01 2.027201015737234435e-01 6.667632842770417900e-01 1.609491052365198405e-01 1.192413785409307536e-02 4.546773323526854815e-01 7.733541911050207940e-01 3.902525737195561284e-01 4.006023779897505133e-01 5.156517815815246930e-01 6.135685498584592112e-01 7.062153114980724844e-01 5.505858882117883324e-01 3.541308807182554919e-01 5.237151122342533771e-01 5.230649229131387745e-01 1.973541027697351957e-01 7.940327858595511712e-01 9.998588700623055603e-01 3.878271015153827994e-01 4.455006584967207139e-01 8.376414508056347907e-01 3.310833863524501597e-01 8.020469097392601832e-01 1.890327633084128989e-01 3.830289472395409511e-01 8.605040171046141051e-02 9.978185524023941433e-01 8.333890591892906263e-01 4.509013468741837061e-01 6.355778557686052599e-01 1.422515991097305088e-01 9.549891485963732940e-01 7.535776302868563148e-01 9.306005301880662106e-01 2.444330347211679522e-01 5.828218427569508142e-01 1.261938242968304591e-01 2.829188731405173352e-01 8.100246952078660190e-01 2.032739130996042975e-01 3.997268448390065565e-01 3.882777703107541667e-01 1.102505652624736765e-01 5.826634725328041498e-01 6.508734477956333864e-01 1.777287661702166011e-01 4.857051012052149286e-02 6.850537712379254351e-01 5.012281307761055071e-01 3.329154880061502286e-01 5.006261767216675374e-01 4.542081454976160115e-01 6.777801995399822532e-01 4.271303586474960445e-01 7.820470659692947413e-01 5.143462618485082904e-01 4.071273891563575997e-02 8.503383643856671226e-01 6.877485768345151795e-01 6.498843855014626580e-01 +5.539512747016193117e-01 6.329206647391879548e-01 2.798533500321682688e-01 4.825977295850051307e-01 7.625297023172977751e-01 9.081309101427640362e-01 4.124792086535029600e-01 3.647019658319609059e-01 7.529595202332928228e-02 3.072404010876803593e-01 7.890673660964639957e-01 4.079781478915127657e-01 1.440519120695739064e-01 2.538968953804546791e-01 1.595028243568367143e-01 9.066545851872198636e-02 6.367601114674349416e-01 7.622263643880089479e-02 3.015728236404162654e-01 2.424070469873378375e-01 5.711440390241000475e-01 5.717001375511508998e-01 
2.237853674032181939e-01 7.112101625753678436e-01 4.321054197012103026e-01 2.505322169010260058e-02 5.877307077139551916e-01 4.415771174397812304e-01 3.766022855145171322e-01 9.803490652619811785e-01 1.229258314111529860e-01 8.108351868714478439e-01 8.558595456964329662e-01 2.168217533833206589e-01 2.034022719386595623e-01 8.687457137579783772e-01 9.013327195854559104e-01 8.156766512673154779e-01 2.717576187546973943e-01 1.756417893371479133e-01 7.555856977566548505e-01 6.708809351312817748e-01 8.998789237886926085e-01 1.936367585946979775e-01 7.949724635465026390e-01 3.164799312763589834e-01 5.493048513173155456e-01 1.608917269168268493e-01 3.048667492191803330e-01 5.599401537727016764e-01 5.779501360842279611e-01 1.296714605309662316e-01 9.160752328055997706e-01 8.058674476110374574e-01 4.385508937505578908e-01 9.212419718012100356e-01 2.249887451242467140e-01 6.283927745352599903e-01 3.778992451536005159e-01 3.571958698867505611e-03 7.276526470528231760e-01 9.051678673805297892e-01 8.465837072484881931e-01 4.548317505393462135e-02 3.189318261926020748e-01 4.446388607398673587e-01 4.292356336344156365e-01 4.203980977718795309e-01 4.698059253071955599e-01 6.151991200848159203e-01 8.479986139404802614e-01 9.870993262459623052e-01 3.164206525899861955e-01 6.464672171639846976e-01 8.508781429592480183e-01 4.733667503354813677e-01 8.076014176740163863e-01 6.671443255679101458e-01 6.639213267047979761e-01 3.681688930741919830e-01 4.679870252651611162e-01 1.790041740686979521e-01 8.446070273663058847e-01 3.350737544979878191e-01 6.600272349677447359e-01 4.356083218487936115e-01 7.995134167346013010e-01 9.083660261041469619e-01 9.743975306734570241e-01 8.144839650654719376e-01 6.865011984586443239e-01 1.709747281999153268e-01 8.534933687161740945e-01 9.494753729726415070e-01 8.140124992294850426e-01 8.936241255316055287e-01 9.087976860818796077e-01 9.030687493451383663e-02 4.025785149840914734e-01 9.592005611533803711e-01 +5.714058727476275523e-01 7.913573761505965365e-02 9.301773447377043036e-01 4.302822433307075256e-01 4.618892554175407783e-01 1.882471300213742760e-01 6.231472878215863487e-01 2.350437450940777717e-01 8.483410480771292894e-01 8.580803842040533036e-01 4.246398783388435350e-01 5.667321565946502604e-01 7.247417018955526480e-02 5.373984417482219333e-01 8.794242091541510931e-01 9.699025554453030162e-01 8.254197752548814160e-01 7.739723972867470492e-01 6.365819416181199841e-01 3.451230687021222820e-02 1.829102490094791644e-02 9.179618383026147965e-01 4.481667270072077214e-01 4.771270250445739380e-01 1.588469404953456454e-01 3.766332499200618633e-01 5.057026248713025751e-02 9.125900914275182352e-01 8.438133644246305076e-01 3.282972411719701222e-01 6.042003956122835584e-01 7.423456085393266290e-01 1.389012737541106546e-02 3.674754266702850991e-02 2.126646727703802586e-01 3.085666164246750887e-01 4.303440338750976757e-01 1.749037978865556342e-01 2.177699993322510519e-01 6.675614739991906355e-01 1.926533336347433512e-01 8.032010572660308600e-01 4.611412981769049679e-01 9.907201268457492827e-01 8.973785930837320235e-01 6.286342392657409128e-01 8.111266245859546364e-01 1.154230969025437092e-01 8.382880466301794176e-01 1.053753927827069115e-01 9.921712862234919328e-01 9.041662667920956631e-01 3.626267376021269362e-01 2.262225368932846425e-02 8.669003741626111204e-01 7.597054897704164089e-01 4.700318514995387442e-01 4.338185014241978665e-01 1.205425463362067573e-01 2.413879270602589111e-01 5.483334840461459025e-01 2.042653841254596925e-01 5.452588940366013270e-01 3.164646091706100339e-01 
1.878958248945691301e-01 2.188622304737641855e-01 2.970982599823450698e-01 5.952148400199362976e-01 9.614251220149501176e-01 5.446813400697393392e-01 5.900748097930779146e-01 2.653062526715309621e-01 5.459933097767216692e-01 3.174185404661935550e-01 1.412133354129242457e-01 1.487441669790685594e-01 3.953776242211952674e-01 5.274261039692862418e-01 1.756132307607755072e-01 4.481942852746899630e-01 6.390660088765629521e-01 2.860380430081067571e-01 5.866902519902850166e-03 3.026687645174785946e-02 1.952533570196290924e-01 2.154769096186736066e-01 8.920573593276575064e-01 5.644513191915436767e-01 5.551464696654353492e-01 4.378199413349500579e-01 8.685737643974280608e-01 7.493934764293597173e-02 9.556749726352036234e-01 6.386433482536227890e-01 8.714694524097754691e-02 1.722786161701279628e-01 6.526867532768643176e-01 8.950304705281527662e-01 6.158198776753203152e-01 9.587176904005377809e-01 +7.705718397401561948e-01 3.165816092999733655e-01 4.334200859975760878e-01 8.639807015515663657e-01 5.576514209532534849e-03 2.456745447057938625e-01 1.664686313299922338e-01 9.637084729617834133e-01 1.083448720752323569e-01 1.865218070380464388e-01 3.730358890475884426e-01 5.015351872138350542e-01 7.420710795841709562e-01 4.919420674769692248e-01 3.426558201886464872e-02 8.669984854934246199e-01 2.204243734202966376e-01 4.109792246853891662e-01 4.361732572946559472e-01 6.819306998053020763e-02 9.986304248057148447e-01 4.119289455392274313e-01 8.533050041845835487e-01 3.416914861912183632e-01 6.522191951039880697e-01 4.162803668786793088e-01 9.051674379917418189e-02 4.552378661306888397e-02 2.122677193466918633e-01 7.461518531655018105e-01 4.654688019259497489e-01 7.877564083548750373e-01 4.518328005682387127e-01 7.173857464237374248e-01 6.940056370290903498e-02 2.804574410412373764e-01 6.095681113112718652e-01 3.680596478602831123e-01 1.814569150719304025e-01 6.505055517979729807e-01 2.759585245701871026e-01 1.429501104786028431e-01 7.813891153083207808e-02 8.925314279991185540e-01 6.692056941902108091e-01 1.915141341107173822e-01 5.750233129581091562e-01 2.051961006251528108e-01 3.849013692629975614e-01 9.503788222043518807e-01 7.690419386411734282e-01 9.978147530014782607e-01 1.719584162437415298e-01 4.890758882401113894e-01 7.195660736040896399e-01 2.485818040997200828e-01 9.706486601870933928e-01 5.182604282071262558e-01 8.082072245463804983e-01 4.889961284821118248e-01 8.042893959057633158e-01 3.200685313413229593e-01 8.983245016887355661e-01 2.811495336955205371e-01 3.986095833814048417e-01 8.607229214132059436e-01 4.827620119717191960e-01 6.715610252037491623e-01 9.330824374137768329e-01 7.537710530085762750e-01 9.840804224010484269e-01 2.319352541177217564e-01 9.569114943157627229e-01 5.821928104654411351e-01 6.700479524814679788e-01 5.663434680086896211e-01 8.851091082101365526e-01 6.800562815862243315e-01 3.578475213752868589e-01 2.900164669281133367e-01 8.379170683569914235e-02 9.929972839740475177e-02 5.946248553621906741e-01 1.991332889320840405e-01 8.115065723822508792e-01 2.023388190440008616e-01 4.056545651129230823e-01 2.966825350250481552e-01 7.457176343507545546e-01 9.856015771246517954e-01 2.264338016147812160e-01 8.366528670045663141e-01 6.116829813603242849e-01 2.605933184296719274e-01 5.765962146558850643e-01 5.064075092266390188e-01 5.499615769589756287e-01 9.240234698632640020e-01 7.169900155229913530e-02 3.544181364560751168e-01 +8.154844535553099627e-01 4.797965609394789777e-01 7.476703385713100447e-01 9.086708404761600910e-01 3.191752505450355937e-01 7.611128630021511965e-01 
6.246790343299296611e-01 1.942001426217137006e-01 2.789860414631386565e-01 3.236359785042408621e-02 3.178191288741717413e-01 8.372264298357038337e-01 8.872692914664047636e-01 9.589758852077276963e-01 3.123722260380168425e-01 8.980164015338999439e-01 7.260784140459818348e-01 6.567013512265649222e-01 1.028743505926521529e-01 6.821705410750319443e-01 6.889838995316139858e-01 5.587525493094736007e-02 6.921487028366646310e-01 3.616312929861494885e-01 1.673758008792780583e-01 6.626504595920326146e-01 9.125680913222075086e-01 1.424077784972291871e-01 6.508496429060767197e-01 6.615417385775157477e-01 9.654167310675311198e-01 5.536662974550183858e-01 7.092622144968085962e-03 6.694595400455760625e-01 1.828533619119211417e-01 3.421514408394116247e-01 1.242580151818144518e-01 9.888774797458224075e-01 9.777955172739735135e-01 4.271370765628749178e-01 1.211608384809655936e-01 1.580132417172936954e-01 3.242705395708289640e-01 3.268994391754735940e-01 5.213767653645562383e-03 4.475169480357120699e-01 9.593245219293577986e-01 6.994304536782350867e-01 7.063863152769014331e-01 8.381620829497931080e-01 2.760441799736219615e-01 3.755200946645842475e-01 3.627729621737311172e-01 9.518310606719182498e-01 3.577273025276901386e-01 3.991159901003488164e-01 4.187060513068554535e-01 7.422605403637314581e-01 6.697944269780702342e-01 6.020599837037767799e-01 1.571185850817550245e-01 7.519860911185742847e-01 6.635775704496444938e-01 9.487848173531471252e-01 7.900030232338028924e-01 4.143783957270819052e-01 5.618429740858444932e-01 3.737804619062014000e-01 6.179941187802344693e-01 6.553638605616040058e-01 1.009709416658691739e-01 4.935037098582963910e-01 5.485489972455533936e-01 1.024147956480448984e-01 1.195764707555347917e-01 4.910516327810896531e-01 3.551185778630389089e-01 3.857601645798814927e-01 2.074975219600547760e-01 2.084038664460790002e-01 5.268616653491025037e-01 6.948014877618717833e-01 6.179744044618615817e-01 7.063658085955483168e-01 7.925757227686872630e-01 6.199016959584816577e-01 1.163676037434490107e-01 7.425752264755586252e-01 5.403115665133301215e-01 2.546191951391015840e-01 6.961300925345208501e-01 4.003013072125547467e-01 5.906120962720950995e-02 5.879915846330325824e-01 1.213602408288709800e-01 3.801780679842765576e-01 1.731477742402802722e-01 4.624568816669496485e-01 3.304453744619206823e-01 8.810445876116090869e-02 +5.140190515373614932e-01 1.419225260054487459e-01 7.777845802285945354e-01 3.327562899409282071e-01 8.916875699762913943e-01 7.212852862736146564e-01 5.727327199433507321e-01 5.897820225918504189e-01 7.318614954542906892e-01 7.393985144455500480e-01 4.531340740296823100e-01 9.903061584426188224e-01 4.213350938331624773e-01 4.542342471963995987e-01 9.788786426453045530e-01 1.881707000343846303e-02 8.005433413647761176e-01 1.523502822273363755e-01 5.630164732287495921e-01 5.946603842470724599e-01 1.225547698678740582e-01 1.531136594724622491e-01 8.157973612638946825e-02 2.752046015644330490e-01 6.809045821946161370e-01 6.455289724528190387e-01 3.830356726830793646e-01 4.446144649678575034e-01 4.969038423960672191e-01 5.497873820641221432e-01 9.471879627821714331e-01 5.933046675329255448e-01 4.099233758501530378e-02 5.790409810134594659e-01 9.546095885251496549e-01 2.608616052375664074e-01 6.910160339170060562e-01 1.293709850476291168e-01 6.407264616302255078e-03 6.186037089828009261e-01 5.537861302543241049e-01 3.527421038298221845e-01 8.033232052121624944e-01 8.128114152830284711e-01 8.319982582278713235e-01 5.939566376046836460e-01 2.291090283499520597e-01 5.438101817725821130e-01 
6.881146379117278888e-01 2.421968586304659166e-01 5.874047918543783275e-01 6.210102709484541794e-01 7.041387566450251212e-01 6.959223476278774134e-01 9.133877300988062498e-01 9.230647706207778525e-01 6.856884219815310155e-01 6.997988808693775820e-01 6.177944932528769417e-01 5.512902545683161515e-01 5.818280341729102911e-01 6.538267999985679646e-01 6.946673485935980219e-01 4.817938258357623571e-02 9.352008817207906333e-01 4.774162142215661042e-01 5.768063588692976529e-01 4.589648891483899540e-02 7.998946815651652997e-01 4.434260476954369201e-01 9.850053510925722566e-01 6.648626681529369309e-01 4.606293826856903140e-01 3.309042418210563774e-01 1.438901922508034614e-01 7.986559119276418484e-01 7.037818421334554042e-01 3.605119534240813772e-01 3.785959549258922641e-01 9.562491516841659100e-01 4.997955143590974147e-01 1.029540300938682762e-01 1.819017177001992502e-01 3.665425750262368831e-01 1.688063588370778412e-01 7.030735208313992901e-01 8.922375654244527610e-01 1.055706412056253152e-01 2.664739907746691561e-01 9.906029568647586325e-01 6.043845090140997911e-03 3.495786295043534775e-01 5.989441999519146131e-01 6.776147193866479679e-01 7.012991789852640601e-01 1.825838783477321536e-01 7.612293578749116385e-01 1.564769891240175292e-01 2.762157292905387251e-01 7.641900040015234818e-01 +4.746013333880729768e-01 7.609202966712714788e-01 2.537820854162747830e-01 1.709362234877408460e-01 1.886635378734374813e-01 2.439567014093724229e-02 7.640304718272151741e-01 3.483216170435471382e-01 7.744289278738043514e-01 4.190437573644867353e-01 5.319091476394965934e-02 8.580130976087452233e-01 6.259446446786639529e-01 8.793213970773006150e-01 2.441023074890465994e-01 7.753405549489799098e-01 8.760187573193888300e-01 5.946480724009295393e-02 2.873093046571124631e-01 8.710837851946537924e-01 9.103181731924696596e-01 6.534637257615111272e-01 4.128420398577182793e-01 4.905858108576378607e-01 6.178275806701372108e-02 6.368043900016381320e-01 2.865296941219959148e-01 6.371773028539067241e-01 4.924322796636745325e-01 1.709313290387282080e-01 1.856892551689268700e-01 9.592782603102242289e-01 5.402593764193130976e-02 7.287312244390512506e-01 5.679467572000697073e-01 6.255587794305905724e-02 3.069660218141317953e-01 1.089960430557104232e-01 5.550748245336984965e-01 2.555948886689661803e-01 4.140925514039996980e-01 1.180376445052062628e-01 8.832322629884041820e-01 7.784546946701487169e-02 3.177678935473182698e-01 6.681804863429485764e-02 7.047099396645268854e-01 4.133897376851528582e-01 5.600656990480865627e-01 3.883995683475501837e-01 4.459430113152932362e-01 4.214077227574740681e-01 4.763369230200156235e-01 2.701480661168440545e-01 4.296286564389811824e-01 9.601402258758658936e-01 6.326999441846863359e-01 2.442086919688498670e-01 8.407708423957936938e-01 3.626867985638081437e-01 3.641441713291436733e-01 7.932397565989488530e-01 8.902073520619256941e-01 1.929173010337000838e-01 7.309376779324568973e-01 7.305852858337777977e-01 6.510197444582447313e-01 9.512661608643838695e-01 8.461467164366111016e-01 9.245490147941206605e-01 2.658844813385705663e-01 9.538758859344749208e-01 8.215517204998477041e-01 8.217795540390903097e-01 7.569662091300560780e-01 6.262685322871274218e-01 5.597770510574888725e-01 8.155720175123675197e-01 8.545688745180864965e-01 8.986051518529034610e-01 2.477911506572628708e-01 8.462580108996445860e-01 6.065941220995090255e-01 6.500490804973033665e-01 1.120463882674053169e-01 9.299049132942927010e-02 1.388364074229719858e-02 5.901199124540731367e-01 2.795110110544174464e-02 1.644097083463245124e-01 
5.341029647603202646e-01 5.276816677181681570e-01 5.439849107754858304e-01 5.371677986392331405e-02 4.515163125788429488e-01 5.036243367087100964e-01 5.721818679625961801e-01 5.271368612400184617e-03 7.720961020546839304e-01 9.015383457479009266e-01 +8.301526916287945701e-01 8.704609696144033348e-01 2.955689129581380303e-01 1.762209253489944727e-01 2.698172933050072553e-01 1.138095349991521399e-01 4.092588531860634760e-01 8.202978121681584467e-01 2.822241377079557356e-01 6.117376205659387223e-01 7.169923068016897938e-01 9.310256256264415331e-02 3.989664052931106708e-01 1.651874953308862803e-02 7.890202597932294282e-02 9.068686774810821305e-01 5.203866694486933842e-01 4.297748572844445336e-01 5.208786995443430712e-01 2.163224881365530816e-01 7.274307306357226111e-01 1.675784956180090823e-01 5.969822786565782691e-01 8.959750832846602453e-02 1.253794151891943764e-01 5.352628522116801291e-01 2.562706125890066300e-01 6.030433202137867044e-01 8.330717547440393833e-01 9.603613683422040914e-02 7.569714244468559450e-01 3.184801677796517128e-01 1.667069341164499896e-01 3.132470247801235619e-01 6.417752836394801097e-01 6.433909425912354152e-02 4.056860213146201710e-01 3.166772891331335327e-01 9.574059746098845247e-01 1.492907964460536974e-01 8.311513764927496162e-01 6.652928354977717396e-01 2.396804722185036374e-01 5.812361618600220270e-01 9.724228681350225445e-01 2.853983236378453414e-01 5.337719354896472979e-01 6.779446197712412081e-01 5.485102006140557540e-01 9.010109155962182648e-01 5.724439967467525037e-01 5.965540527411405947e-01 1.598667990086183321e-01 1.363934512727023041e-01 5.327536522697270405e-01 4.123866715061276222e-01 4.617251396918636841e-01 6.935944951381239898e-01 4.300337419593377453e-01 1.892407993760835128e-01 1.666936825594794724e-01 4.625634184864588772e-01 4.805197636774838355e-02 7.003542850133466224e-01 2.130226006716084974e-03 8.678863343041013367e-01 4.874478520451258623e-01 7.043560228741558848e-01 6.317719270475393722e-01 5.372392256296196766e-01 2.982649812986511995e-01 1.272558612133412037e-01 2.467337555730741983e-01 6.546893200021091097e-01 6.291921159383098150e-01 8.505920470407707379e-01 4.046520490181828578e-01 3.875732096593392795e-01 8.551517214319142024e-01 4.152602284179877090e-01 9.587779137989138611e-01 6.977437468944928112e-01 3.240620775541913634e-02 4.025873770391376061e-01 5.485549335619134270e-01 7.146066156157020455e-01 3.012702534568838519e-01 3.526414480395153594e-01 3.309707144485515284e-01 4.315687014460974913e-01 6.641934530697197747e-01 2.172886798352815507e-01 4.807480925564590057e-01 5.006795397998469177e-01 5.818100901154411586e-01 2.107716091585690732e-01 6.606606051140029301e-01 9.317629042790995797e-01 9.840326342340242061e-01 5.752000964817773898e-01 +9.843444595454536872e-01 1.339523968066913540e-02 6.082172659959028671e-03 7.828244785439336662e-01 5.069653703872761819e-01 2.804896494365415327e-01 2.112385836660957139e-02 6.016479440778699228e-02 7.457477935084961818e-01 3.445503949245375397e-01 4.063494277166557200e-01 8.630275274433116817e-01 5.948396018456146850e-01 1.400867933474212457e-01 6.997522422654076646e-01 5.766519767930851081e-01 5.419976500582250889e-01 7.474121304089603735e-01 2.951600193008566686e-01 7.980170422334191827e-01 1.829036799578199757e-01 6.317636496261300749e-01 2.812612231140887431e-02 5.464747656105657381e-01 3.909873503320924204e-01 4.940850205957293406e-01 8.157850130814222611e-01 5.111092739445756150e-01 9.336823640685747439e-01 7.157105167170837445e-01 7.778989455994214097e-01 1.398722535910470466e-01 
5.642653936300449091e-01 3.218717164845980028e-01 9.717427501967056402e-01 3.665791984428700134e-01 3.874321311211759156e-02 9.437600858738082188e-02 5.679526822961932231e-01 5.141385991358327079e-01 7.497840799582222715e-02 5.736515309094968318e-01 1.928132849879083954e-01 6.924244068001785823e-01 1.748389677952593146e-01 4.469577663506929532e-01 1.738527450963387455e-01 7.195287763517190793e-01 8.861150811892871682e-01 1.058443750714600506e-01 1.941789362229970894e-01 9.188374820700584422e-02 7.706736301449305104e-01 6.718642548609364828e-01 5.981029087121966237e-01 4.832880127232569434e-01 3.073688779938709148e-01 5.156312334804930009e-01 1.777418420119527553e-01 8.885462205165685079e-01 4.486254681289014723e-02 1.345398129556140132e-01 7.467627984379916484e-01 4.384565546058830643e-01 7.217750080760946263e-01 3.949550352625393890e-01 4.307950907642028593e-01 6.087680934849041270e-01 3.294516167246774874e-01 1.316682090209408962e-01 1.824857738754404046e-01 5.332379826483617524e-01 3.567136182864261151e-02 1.976220743086236631e-01 5.849349042822560296e-01 1.133174406357483344e-01 7.711522754393199675e-01 8.557306786807005183e-01 3.038353471344266143e-01 4.422747047768413875e-01 2.537160404215925702e-01 2.372714099723788328e-01 5.906462765375103396e-01 4.849909323133470007e-01 2.692576210504484813e-01 4.540849506602829821e-01 9.664935719107857759e-01 2.044371576459835804e-01 4.505417469690352616e-01 7.110722322201217249e-01 3.051357995214963870e-01 8.978937034341526457e-01 6.090501112506481185e-01 6.595415779178889215e-01 6.565426836745864581e-01 6.565608489824376059e-01 2.679102664248229626e-01 3.819533138204529443e-01 6.609794961162380744e-01 2.289558446859882856e-01 +9.274935298374649140e-01 1.174096651033715855e-01 3.030761852629033637e-01 1.605508209527917174e-01 9.601854834873225775e-01 4.341959513718630648e-01 6.320768160802121560e-01 4.213429090614078110e-01 3.695553969042019160e-01 5.965457437116089556e-01 3.520335041155040479e-01 7.702703502247409961e-01 8.571112772962534709e-01 7.904077282532658844e-01 2.247339318352784554e-01 6.823720204503556097e-01 5.883435710582129996e-02 6.786037033312407596e-01 9.721137137641507886e-01 2.042576970668320557e-01 8.394085754806240862e-01 7.433082729552867862e-01 4.072614159870893147e-01 7.451483066617257123e-01 1.699472962789440045e-01 1.753052015584344314e-01 2.255269204788400428e-01 7.794755643807432799e-01 8.407732260470973662e-01 9.301182862857163558e-01 3.701995309382508648e-01 4.481909027604019657e-01 1.261889085033987001e-01 5.600591735875248833e-01 8.244692493969552061e-01 8.969188061645969601e-01 4.802217973423368313e-01 3.556164122713412201e-02 3.393317823164623270e-01 2.491242957582864292e-01 9.863253789366602797e-01 5.585415885291432625e-01 3.702350606362231344e-01 6.766101432620400535e-01 6.999259389475386284e-01 6.676108316872160220e-01 7.870681827507105544e-01 8.746765411259082024e-01 9.125268371282718727e-01 6.638849997061806452e-01 3.253268113800632522e-01 7.968625619248901337e-01 7.584122525443606211e-01 9.028886850768532701e-01 5.381622293189292083e-02 8.097562873320752752e-01 7.092942088208666895e-01 9.915538877968065323e-01 4.319294903327922652e-01 4.307127933969153721e-01 2.768507739641907772e-01 8.076253078288621046e-01 2.569233696442670967e-01 7.595893829724666979e-01 5.768081727897018673e-01 2.537536777625452045e-01 8.874419624636734616e-01 5.091705681832693342e-01 4.811826624992353585e-01 2.794462461940371290e-01 3.846927898276129021e-01 5.129562951959991679e-01 8.515004062224775794e-01 7.103144978683579858e-01 
9.526388607201888847e-01 2.367905569592337889e-01 9.137336039323161740e-01 5.722969943101696710e-02 2.019723935481291255e-01 3.098764675203513619e-02 1.121146613918624357e-01 9.937693067724532314e-01 8.476717958861412772e-02 2.059652110343795917e-01 2.139791918759540446e-01 9.137860316709250919e-01 9.530862653366889425e-03 2.027843281683039400e-03 2.506229951837134484e-01 6.244523528392044165e-01 5.523937894075592325e-01 3.712168074031840792e-01 4.218847794299319665e-01 4.827576239387890711e-01 5.244634168840578425e-01 5.182241092381567604e-01 3.308639956263292881e-03 9.370528021570383448e-01 4.694554875029453012e-01 4.950447554541728135e-01 +1.525818111800841814e-01 4.708012184002630107e-02 3.899035965341954846e-01 3.928304521031263929e-01 5.602286661727436945e-01 9.738256658043862313e-01 9.404465779766183475e-01 5.750862754958349088e-01 9.547546956257608741e-01 2.750275291553152535e-01 1.682773435862793265e-01 5.865928471016079726e-04 8.543378154943809255e-01 3.547649971465383079e-01 5.058056647397523031e-01 9.116332486700751137e-02 7.534666421106954726e-01 3.082429494433007733e-01 4.527145111847344916e-01 5.456680635225539255e-01 2.504131242494785914e-01 2.509240770568589296e-01 3.949236999582302898e-01 8.782959620323271821e-03 2.474641132111736752e-01 8.229417958971670943e-01 3.444225768479134420e-01 4.000027489436257522e-01 4.247741954177396417e-01 2.497745404169693373e-02 4.325768602588443423e-01 7.336592463477830117e-01 7.667663267650381975e-02 4.179022553581047683e-01 8.745172741480690126e-01 9.417705509525042817e-02 2.807522782799587446e-01 8.212710101351362590e-01 2.211181944001613386e-01 4.319929503523877168e-01 1.858636923768219873e-02 6.737037795085246694e-01 7.997187114913413275e-01 2.976552505976116647e-01 3.272347030789168887e-01 5.550935453236346406e-01 9.224109746648162522e-01 3.192827922106745708e-01 3.500098324549234530e-01 7.821988386980260888e-01 4.478417135239194380e-01 1.580956175222456572e-01 5.300807813550156844e-01 5.806154798468634581e-01 9.456842911054151868e-01 7.688127895655872956e-01 8.456527833650537840e-01 1.784229089865225770e-01 8.114517450321339087e-01 8.062506298824222428e-01 2.113482500442499523e-01 2.629226789210241666e-01 6.478686221690072022e-01 6.006672861605766300e-02 7.013679843242253131e-01 8.784753961212666828e-01 3.487138165323044880e-02 4.928426758517070461e-01 5.976224683315064512e-01 7.629063997052759616e-01 2.761721278953045422e-01 7.240740503283805696e-01 6.131065729985127888e-01 1.630885615792579957e-01 8.473783868551159060e-01 8.347614542399306448e-02 8.137265626844719657e-01 8.512508664918938539e-01 2.777097816703766320e-01 1.729154355214796990e-01 2.203382750835449766e-01 6.134780912629795857e-01 3.524352564238901753e-01 5.370314860129862256e-01 8.013986113284543578e-02 2.555842138998117852e-01 6.553915758947851389e-01 9.679125599178584061e-01 2.549566319678178150e-01 4.008180804370896633e-01 9.145789951670967310e-01 2.787926039163850511e-01 8.599455912576436933e-02 9.637558000691170967e-02 8.274101203974880692e-01 1.803747268179315411e-01 2.175735407836230095e-01 7.825994939720237742e-01 7.928519890958951599e-02 8.707949373106749213e-01 +6.398420210047787160e-01 5.739624494012524059e-01 3.359672805578653998e-01 1.130399363175038641e-02 3.349439685346782269e-01 2.315484030880912147e-01 4.575228302577399875e-01 1.149494135594463229e-01 2.888244352925943836e-01 3.625470995156252485e-01 3.795973190611611203e-01 6.567047810450010736e-01 1.484039742710284715e-01 9.273251916560719676e-01 4.334256728976307871e-01 6.734771102219323513e-01 
9.125080197222198430e-01 4.974393931097168542e-01 8.301481563280355136e-01 4.526450714147856047e-01 2.414236092573898151e-01 8.070129698367667359e-02 7.260400697427102923e-01 1.396509691839398215e-02 2.496450588391967429e-01 4.335741205447194435e-01 3.089314419194891803e-01 9.543503534526003307e-01 5.457977547458532364e-01 3.139663643587058406e-01 5.034762326753475792e-01 4.756788330475764104e-01 6.849334942793482428e-01 3.880666613022351052e-01 6.483446580176778218e-01 5.217503801099343530e-01 5.371145824070304720e-01 3.121260159429154468e-01 8.314121854062171968e-01 4.538695969561833410e-01 8.598896961203845724e-01 9.961993522734106099e-01 8.865717795946430613e-01 7.828987966783660379e-01 3.412415531643435695e-01 7.421170530151157685e-01 4.484104178639959359e-01 6.793217012099640462e-01 3.756179958191659951e-01 7.821287098222597933e-01 6.227726265188193722e-02 8.552983413221663112e-01 4.824668768009222619e-01 2.241531065858231031e-01 4.939536577599041856e-01 5.129566641128722182e-01 1.057984177672518511e-01 9.541452507300716146e-01 3.396646181755047511e-01 7.452588103611947901e-01 5.315559265659929311e-01 5.493475179850665358e-01 5.214824278139198466e-01 5.150075718147916204e-01 1.196075368500321146e-01 9.035665331176232495e-01 7.522653903639873185e-01 6.638708679914825384e-01 5.584174553800479446e-01 5.015819402508836511e-01 5.507698483308445248e-01 5.978677577011723976e-01 8.450418028759657529e-01 3.266677322748618995e-01 1.321610045897971819e-01 2.394354042746985600e-01 2.723972163557076831e-01 5.523301747352814539e-01 5.518043850608547185e-01 5.283968096837132755e-02 8.192733312104071297e-01 2.277106024970321219e-02 1.414998099027269252e-01 6.517281615256080851e-01 1.811694734825117781e-01 9.472370614713256920e-01 5.454497319021770485e-01 1.364119913158231556e-01 8.446142008509562871e-01 7.671725984742419069e-01 2.461161648406858804e-01 1.421724627107351369e-01 6.290652581179481118e-01 7.094144689448004248e-01 4.419656923472803367e-02 6.614741876652251440e-01 8.712193265403500586e-02 4.734931280852430202e-01 5.382037050480286133e-01 1.396459758005891283e-01 +9.709329844415439670e-01 8.998575745276288229e-01 9.151313462895852568e-01 6.920489275523904471e-01 2.892231405199537919e-01 6.750679746268205550e-01 5.515642485826798280e-01 1.065253097812824956e-01 2.957026803465776510e-01 8.937347659632134400e-01 9.800016515925590310e-01 7.745900896182087436e-01 1.570977683146633774e-01 1.482028765821026273e-01 2.111147779712029271e-01 9.683759902485811200e-01 6.550951580826911425e-01 8.728324682592377703e-01 5.044803166579884257e-01 8.285704754811143991e-01 1.693574499337324735e-02 6.032669995180495182e-02 1.687026879086964692e-01 7.701554026145973619e-01 1.429888016593102718e-01 5.881172815379975827e-02 9.704206919487038396e-01 4.450807650730836951e-01 1.597445784258376689e-01 9.849229394397314152e-01 4.220083573536804744e-01 9.357693600374825671e-01 2.313199262338369033e-01 4.556443403861323294e-01 2.590791012828855822e-01 8.438664994487065085e-01 5.519045677502344427e-01 4.702170125676508050e-01 6.814723205638187897e-01 7.418295483665861001e-01 3.684921032028853904e-01 1.501895844844561845e-01 4.214513377519605308e-01 8.600279963652578408e-01 6.625616611189292238e-01 5.200151456470966105e-01 7.881072743086801058e-01 2.771703241081423519e-01 9.034135930616548071e-01 5.848441705791300738e-01 8.341698181274771473e-01 1.966638677318299777e-01 7.059747894371543042e-01 7.013854316067694716e-01 1.828430942760242983e-01 4.745548949934464966e-01 6.306422394641082452e-01 7.760751707194470939e-01 
9.813187212598396547e-01 2.293595795266353266e-01 7.749261876107090830e-01 2.384106107787011819e-01 9.721209688979495223e-01 2.715569353686980714e-01 2.915573577694993146e-01 3.579601509630966349e-01 3.085697512342830962e-01 4.070219981627976047e-01 1.989632411372218579e-01 7.330003339460906542e-01 5.397259604481572381e-01 6.931009942216573849e-01 1.385457419653816080e-01 1.140339999976658358e-01 3.980752590866034613e-01 9.471822621683767540e-01 5.476643721405823895e-01 6.824131903515884279e-02 5.844099130744569992e-01 2.346881692012994236e-01 9.436439228519653000e-01 4.855518260479008141e-02 8.157036123302675579e-01 1.169761256455048581e-01 5.532962903488753970e-01 1.100965596251435308e-01 9.789490602992410029e-01 8.433487462016989733e-01 1.272410782852178013e-01 2.885715258680641160e-01 7.990943955388217779e-01 1.565305358979097727e-01 9.160846960406943129e-02 8.521842244411678147e-01 4.474243106736998099e-01 3.843945818845087015e-01 4.710645906071458944e-01 2.398348154123419729e-01 6.435351435258193087e-01 7.656897921129046658e-01 +4.894328120406804539e-01 7.881019629214267574e-01 6.974585354155089512e-01 2.023858939857701156e-01 1.660984914264745926e-01 4.854517801734643534e-01 2.789848572630315715e-01 2.311636522410289718e-01 9.821076233980715608e-01 1.220641257408076052e-01 2.614036146663852866e-01 7.657560715165320220e-01 3.968360577545695378e-01 4.566023622802184434e-02 1.049701948619241598e-02 9.281162949127452766e-01 4.490137965769909201e-01 2.095846458383606725e-01 9.195504656719085679e-01 9.683515436855471004e-01 9.800174878114910060e-01 5.517610861380117804e-01 6.711570559348770670e-01 5.125258050287277989e-01 2.105581493613526423e-01 8.281813206544574868e-01 4.964783994807770995e-01 7.284974208756571645e-01 1.320629592816270348e-01 6.652194518096135045e-01 9.430156297917950958e-01 7.477263137894260003e-01 2.054087806450300979e-01 4.248209124837907247e-01 7.657518666018259257e-02 1.031614100713345028e-01 4.122242287567021712e-01 4.919658859336810686e-01 3.752650167259050651e-01 4.175771429986683270e-01 6.131376293448997927e-01 5.463797405837259591e-01 3.119918548921774004e-01 6.331762507678504459e-01 5.484632429281035559e-01 6.815448032785871302e-01 8.065695507425107991e-02 8.720129122297424207e-01 8.318188557125294480e-03 2.199323537180564170e-02 8.933872719887463454e-01 1.953120287872067706e-02 2.478721941404590234e-01 5.994061179859005994e-01 6.588362611693047155e-01 6.332808851020984564e-01 3.823849348043323326e-01 5.111091324899629251e-01 7.034808459110406531e-01 4.347681568463539481e-01 4.316973576672314961e-01 9.620411080123215664e-01 6.247837467655984467e-01 8.196961678222113301e-01 5.574601810887074294e-01 8.800635018469276094e-01 8.772255241161972528e-01 5.075275933138404527e-01 8.022583187266906224e-01 2.320670802521890286e-01 1.165626629103270195e-01 4.623759662685936744e-01 7.938327000737943617e-02 7.986374689793115378e-01 6.728842751465858862e-01 8.133909095059230765e-01 1.202639390769081329e-01 1.052937257108800262e-01 8.717600467040409473e-02 2.163819956545051104e-01 6.596483385763984852e-01 1.202843170392309258e-02 1.538789195854695091e-01 3.120247727263308901e-01 3.408168327248596308e-01 3.241861797851740556e-01 3.637074533655986208e-01 1.533669345890729119e-01 4.455921334699539660e-01 5.619140093874478437e-01 1.881731359879111887e-01 9.416670800570559052e-01 1.740018593664415247e-01 7.030242331869680505e-01 5.922055553954849172e-01 9.326211623391688077e-01 6.608322881013140027e-01 7.009721551241574478e-01 1.079126054675583202e-01 6.158176671761947940e-01 
+5.185079639625639336e-01 9.613742991518259284e-01 5.555312825626229634e-01 2.647628827924735084e-01 6.003697207460141350e-01 5.392112376769145898e-01 6.781186965667050925e-01 9.908971748181496508e-01 4.124155872095397468e-01 9.814941401724619485e-02 2.684237785531295994e-02 1.774652505962848181e-01 1.707589529595294753e-01 4.640932098465534450e-01 2.882179883914587348e-01 7.276822905806898945e-01 6.145789546745269449e-01 1.100959863917608805e-01 6.798859723042820491e-01 9.096984032948918220e-01 3.971368455178179158e-01 2.959494950971321980e-01 3.742088799298171065e-02 1.960739526210202310e-01 7.536102695342027369e-01 6.680915510628401277e-01 4.136507204312135366e-01 3.613996339406737590e-01 3.605422038261204554e-01 7.098503555159476619e-01 8.093719147087541366e-01 6.344097009128880638e-01 3.990082448083617228e-01 2.805918009906902544e-01 7.078488167363675698e-01 9.969917259866583059e-01 9.442054998992396309e-01 1.329075240769165278e-01 6.810681350588387861e-02 8.503491437913293094e-01 8.347117439165431252e-01 2.381858201903953587e-01 7.884260706938626129e-01 7.109907917419661105e-01 6.390916681983604963e-02 6.174365227062991179e-01 5.085733343630816083e-01 1.716846139694149231e-01 9.065664924270055991e-02 5.625330757196970177e-01 3.539663480209681579e-01 8.937139525947165319e-01 3.981380511900556307e-02 7.403597927449541150e-01 3.803872284089604427e-02 6.729519695709765825e-01 5.306080908840085097e-01 2.091237680402112664e-01 5.902903662907804661e-01 2.094778612095482551e-01 7.323447855684165342e-01 3.644574495843493356e-01 2.006215478057034041e-01 3.737617545555030896e-01 5.253471759602216240e-01 4.287889547869583318e-01 7.086098806190446187e-01 4.510792335515292351e-01 6.383187180169215269e-01 8.779355722397681472e-01 4.221338898667141848e-01 6.375840144651815367e-01 8.683057298299173832e-01 6.093730356952498095e-01 9.297141161056151626e-01 7.770838342807246946e-01 6.549661287008456956e-02 2.835060738158660110e-01 4.474138867374952699e-01 8.530336387421445510e-01 3.160209657891883683e-01 8.301538680518486535e-01 6.646903097549101691e-01 7.187130118106234145e-01 1.651862041735395747e-01 9.578252676762609719e-01 6.490273812885494209e-02 9.777273484666341163e-01 8.930729829254262508e-01 9.851054752118463265e-01 4.094323402286751401e-01 1.139176240124337713e-01 7.612865863899589414e-01 2.266379302491570158e-01 6.998882496157835531e-01 9.945043379099228753e-01 7.111578056749194854e-01 7.806190603886183910e-01 3.410170920712443099e-01 9.446084168886822452e-01 +5.015172758330755931e-01 5.569527971282052237e-01 1.122406928736449094e-01 8.960352822124777461e-01 6.049568585854003810e-02 1.202196001338627918e-01 1.870314295763603196e-01 9.017590029396971296e-01 3.597904628087450485e-01 2.130941062746317671e-01 2.556281834629479111e-01 5.123669364829196438e-01 4.754061129282013409e-01 9.764470380372083369e-01 8.038663983900646848e-01 6.960491266420890666e-01 2.940135977911654264e-01 2.857282759910040326e-03 4.599343225832352999e-02 5.597554495210212977e-01 7.445266674304001908e-01 3.387528030535971180e-01 6.429542922125383031e-01 2.123331785532429627e-01 5.302332654117811739e-01 7.262555377662539557e-01 3.982425859900724507e-01 3.243388301740235402e-01 6.191064123738921898e-01 8.988047781373914580e-01 7.819700328765150088e-01 7.664269102804815992e-01 6.734095355422575757e-03 2.904762329148526945e-01 5.097537644843168625e-01 9.524734606001823423e-01 4.812869576591960463e-01 6.236868013640477493e-01 1.459170943214320726e-01 9.874505139403206844e-01 7.561708982837871407e-01 3.798591332432484924e-01 
6.056633451375117438e-01 7.935708170258731764e-01 1.458141583518740569e-01 7.082511296391911237e-01 1.098798009731616343e-02 3.655618484905173160e-01 9.551862303858617009e-01 8.148959351152762487e-02 4.739306219219985294e-02 7.963357515359494876e-01 6.208332695202813944e-01 3.884182264923189409e-01 4.589167647950288531e-01 6.496652974138312775e-01 2.467528128074852889e-01 5.309593064844935206e-01 5.364606369543487574e-01 2.421352989851309756e-01 3.776834556696828660e-02 1.564861233558080267e-01 5.197231021782636740e-01 8.725375120634637494e-01 2.441225493455024820e-01 2.320363366041028330e-01 5.026358683423555185e-01 7.035766000474735771e-01 8.347805591467084563e-01 2.303229841813967393e-01 6.908373419683054850e-01 2.646662377366995056e-01 1.259467197942290007e-01 9.372770922994989595e-01 6.674216272867254940e-01 1.027944489143072238e-01 5.686267290346079806e-01 3.948222804451942958e-01 4.689706944496729868e-01 4.446117700449114807e-02 6.817992275557515081e-01 9.084821829413957106e-01 9.184021015315092518e-01 3.045815734169987632e-01 2.204958624923980537e-03 7.542672057172502553e-01 9.460844786545006269e-01 3.373139094575949848e-02 9.059565314915285494e-01 9.938525461318854504e-01 2.542072661725306437e-01 9.685734112479216229e-02 8.223629541824816203e-01 1.057429056898460118e-01 8.080679390260248063e-01 5.823014244609205914e-01 6.413551528031806725e-01 1.787341975438894170e-01 1.250471413912357388e-01 8.390281297596062782e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt new file mode 100644 index 0000000000000000000000000000000000000000..86de3c7592893bdb4438c9a0e7e60b2e7e5e1727 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-euclidean-ml-iris.txt @@ -0,0 +1 @@ + 5.3851648e-01 5.0990195e-01 6.4807407e-01 1.4142136e-01 6.1644140e-01 5.1961524e-01 1.7320508e-01 9.2195445e-01 4.6904158e-01 3.7416574e-01 3.7416574e-01 5.9160798e-01 9.9498744e-01 8.8317609e-01 1.1045361e+00 5.4772256e-01 1.0000000e-01 7.4161985e-01 3.3166248e-01 4.3588989e-01 3.0000000e-01 6.4807407e-01 4.6904158e-01 5.9160798e-01 5.4772256e-01 3.1622777e-01 1.4142136e-01 1.4142136e-01 5.3851648e-01 5.3851648e-01 3.8729833e-01 6.2449980e-01 8.0622577e-01 4.6904158e-01 3.7416574e-01 4.1231056e-01 4.6904158e-01 8.6602540e-01 1.4142136e-01 1.7320508e-01 1.3490738e+00 7.6811457e-01 4.5825757e-01 6.1644140e-01 5.9160798e-01 3.6055513e-01 5.8309519e-01 3.0000000e-01 2.2360680e-01 4.0037482e+00 3.6166283e+00 4.1641326e+00 3.0935417e+00 3.7920970e+00 3.4161382e+00 3.7854986e+00 2.3452079e+00 3.7496667e+00 2.8879058e+00 2.7037012e+00 3.2280025e+00 3.1464265e+00 3.7000000e+00 2.5806976e+00 3.6276714e+00 3.4351128e+00 3.0099834e+00 3.7682887e+00 2.8827071e+00 3.8535698e+00 3.0757113e+00 4.0472213e+00 3.6578682e+00 3.4161382e+00 3.5972211e+00 4.0472213e+00 4.2449971e+00 3.5312887e+00 2.4939928e+00 2.8178006e+00 2.7018512e+00 2.8948230e+00 4.1352146e+00 3.4117444e+00 3.5199432e+00 3.9115214e+00 3.6180105e+00 3.0000000e+00 3.0215890e+00 3.3120990e+00 3.5958309e+00 3.0099834e+00 2.3874673e+00 3.1527766e+00 3.0740852e+00 3.1256999e+00 3.3451457e+00 2.0904545e+00 3.0577770e+00 5.2848841e+00 4.2083251e+00 5.3018865e+00 4.6904158e+00 5.0566788e+00 6.0950800e+00 3.5916570e+00 5.6364883e+00 5.0477718e+00 5.6391489e+00 4.3566042e+00 4.5199558e+00 4.8538644e+00 4.1904654e+00 4.4170126e+00 4.6260134e+00 4.6454279e+00 6.2401923e+00 6.4984614e+00 4.1412558e+00 5.1215232e+00 4.0286474e+00 6.2112801e+00 4.1097445e+00 4.9699095e+00 
5.3122500e+00 3.9774364e+00 4.0074930e+00 4.8404545e+00 5.0970580e+00 5.5461698e+00 6.0141500e+00 4.8805737e+00 4.1605288e+00 4.5705580e+00 5.7887823e+00 4.8918299e+00 4.6065171e+00 3.8961519e+00 4.7968740e+00 5.0199602e+00 4.6368092e+00 4.2083251e+00 5.2573758e+00 5.1361464e+00 4.6540305e+00 4.2766810e+00 4.4598206e+00 4.6508064e+00 4.1400483e+00 3.0000000e-01 3.3166248e-01 6.0827625e-01 1.0908712e+00 5.0990195e-01 4.2426407e-01 5.0990195e-01 1.7320508e-01 8.6602540e-01 4.5825757e-01 1.4142136e-01 6.7823300e-01 1.3601471e+00 1.6278821e+00 1.0535654e+00 5.4772256e-01 1.1747340e+00 8.3666003e-01 7.0710678e-01 7.6157731e-01 7.8102497e-01 5.5677644e-01 6.4807407e-01 2.2360680e-01 5.0000000e-01 5.9160798e-01 5.0000000e-01 3.4641016e-01 2.4494897e-01 6.7823300e-01 1.1489125e+00 1.3416408e+00 1.7320508e-01 3.0000000e-01 7.8740079e-01 1.7320508e-01 5.0990195e-01 4.5825757e-01 5.2915026e-01 8.1853528e-01 5.4772256e-01 6.7823300e-01 9.8488578e-01 1.4142136e-01 8.4852814e-01 3.6055513e-01 8.1240384e-01 3.1622777e-01 4.0963398e+00 3.6864617e+00 4.2367440e+00 2.9698485e+00 3.8118237e+00 3.3911650e+00 3.8600518e+00 2.1470911e+00 3.7881394e+00 2.8053520e+00 2.4617067e+00 3.2449961e+00 3.0413813e+00 3.7121422e+00 2.5592968e+00 3.7000000e+00 3.4336569e+00 2.9715316e+00 3.6918830e+00 2.7928480e+00 3.8935845e+00 3.0740852e+00 4.0187063e+00 3.6565011e+00 3.4467376e+00 3.6510273e+00 4.0804412e+00 4.2953463e+00 3.5383612e+00 2.4186773e+00 2.7000000e+00 2.5787594e+00 2.8548205e+00 4.1170378e+00 3.3985291e+00 3.5972211e+00 3.9786933e+00 3.5580894e+00 2.9983329e+00 2.9291637e+00 3.2434549e+00 3.6221541e+00 2.9546573e+00 2.1794495e+00 3.1032241e+00 3.0789609e+00 3.1144823e+00 3.3645208e+00 1.9131126e+00 3.0298515e+00 5.3385391e+00 4.1809090e+00 5.3572381e+00 4.7085029e+00 5.0911688e+00 6.1595454e+00 3.4799425e+00 5.6868269e+00 5.0408333e+00 5.7471732e+00 4.4192760e+00 4.5210618e+00 4.9020404e+00 4.1340053e+00 4.4022721e+00 4.6808119e+00 4.6829478e+00 6.3694584e+00 6.5314623e+00 4.0620192e+00 5.1903757e+00 4.0024992e+00 6.2617889e+00 4.1060930e+00 5.0428167e+00 5.3898052e+00 3.9812058e+00 4.0311289e+00 4.8518038e+00 5.1584882e+00 5.5919585e+00 6.1546730e+00 4.8918299e+00 4.1689327e+00 4.5475268e+00 5.8600341e+00 4.9598387e+00 4.6508064e+00 3.9153544e+00 4.8600412e+00 5.0724747e+00 4.7021272e+00 4.1809090e+00 5.3207142e+00 5.2067264e+00 4.7000000e+00 4.2497059e+00 4.4988888e+00 4.7180504e+00 4.1533119e+00 2.4494897e-01 5.0990195e-01 1.0862780e+00 2.6457513e-01 4.1231056e-01 4.3588989e-01 3.1622777e-01 8.8317609e-01 3.7416574e-01 2.6457513e-01 5.0000000e-01 1.3638182e+00 1.5874508e+00 1.0099505e+00 5.1961524e-01 1.2369317e+00 7.5498344e-01 8.3066239e-01 7.0000000e-01 5.0990195e-01 6.4807407e-01 6.4031242e-01 4.6904158e-01 5.0990195e-01 6.1644140e-01 5.4772256e-01 3.0000000e-01 3.3166248e-01 7.8102497e-01 1.0535654e+00 1.2845233e+00 3.1622777e-01 3.1622777e-01 8.5440037e-01 3.1622777e-01 3.6055513e-01 4.8989795e-01 4.3588989e-01 9.2736185e-01 3.0000000e-01 6.5574385e-01 9.5916630e-01 2.6457513e-01 7.8102497e-01 1.4142136e-01 8.0622577e-01 3.3166248e-01 4.2766810e+00 3.8496753e+00 4.4158804e+00 3.1543621e+00 3.9974992e+00 3.5510562e+00 4.0112342e+00 2.3065125e+00 3.9749214e+00 2.9495762e+00 2.6476405e+00 3.4029399e+00 3.2588341e+00 3.8794329e+00 2.7202941e+00 3.8807216e+00 3.5749126e+00 3.1527766e+00 3.8961519e+00 2.9782545e+00 4.0311289e+00 3.2588341e+00 4.2071368e+00 3.8314488e+00 3.6318040e+00 3.8340579e+00 4.2731721e+00 4.4698993e+00 3.7027017e+00 2.6153394e+00 2.8879058e+00 2.7712813e+00 3.0364453e+00 
4.2825226e+00 3.5298725e+00 3.7322915e+00 4.1545156e+00 3.7669616e+00 3.1464265e+00 3.1032241e+00 3.4073450e+00 3.7854986e+00 3.1400637e+00 2.3537205e+00 3.2680269e+00 3.2326460e+00 3.2726136e+00 3.5425979e+00 2.0856654e+00 3.1953091e+00 5.4726593e+00 4.3347434e+00 5.5290144e+00 4.8682646e+00 5.2469038e+00 6.3364028e+00 3.6083237e+00 5.8660038e+00 5.2249402e+00 5.8940648e+00 4.5738387e+00 4.6936127e+00 5.0695167e+00 4.2918527e+00 4.5442271e+00 4.8270074e+00 4.8456166e+00 6.5207362e+00 6.7178866e+00 4.2508823e+00 5.3488316e+00 4.1436699e+00 6.4467046e+00 4.2813549e+00 5.1942276e+00 5.5587768e+00 4.1496988e+00 4.1856899e+00 5.0149776e+00 5.3385391e+00 5.7775427e+00 6.3126856e+00 5.0537115e+00 4.3416587e+00 4.7169906e+00 6.0406953e+00 5.0921508e+00 4.8062459e+00 4.0669399e+00 5.0269275e+00 5.2287666e+00 4.8682646e+00 4.3347434e+00 5.4753995e+00 5.3535035e+00 4.8641546e+00 4.4305756e+00 4.6615448e+00 4.8487112e+00 4.2988371e+00 6.4807407e-01 1.1661904e+00 3.3166248e-01 5.0000000e-01 3.0000000e-01 3.1622777e-01 1.0000000e+00 3.7416574e-01 2.6457513e-01 5.1961524e-01 1.5297059e+00 1.7146428e+00 1.1661904e+00 6.5574385e-01 1.3228757e+00 8.6602540e-01 8.7749644e-01 8.0622577e-01 7.0710678e-01 6.4807407e-01 5.3851648e-01 4.2426407e-01 5.4772256e-01 7.2111026e-01 6.7823300e-01 1.7320508e-01 2.2360680e-01 8.7749644e-01 1.1704700e+00 1.4247807e+00 3.1622777e-01 5.0990195e-01 1.0049876e+00 3.1622777e-01 3.0000000e-01 5.8309519e-01 6.0827625e-01 8.3666003e-01 3.0000000e-01 7.0000000e-01 9.6953597e-01 2.6457513e-01 8.6602540e-01 1.4142136e-01 9.2195445e-01 4.5825757e-01 4.1773197e+00 3.7336309e+00 4.3058100e+00 2.9849623e+00 3.8729833e+00 3.3926391e+00 3.8897301e+00 2.1118712e+00 3.8548671e+00 2.7784888e+00 2.4515301e+00 3.2680269e+00 3.1080541e+00 3.7376463e+00 2.5806976e+00 3.7762415e+00 3.4205263e+00 3.0000000e+00 3.7496667e+00 2.8160256e+00 3.8923001e+00 3.1304952e+00 4.0620192e+00 3.6851052e+00 3.5114100e+00 3.7229021e+00 4.1545156e+00 4.3497126e+00 3.5623026e+00 2.4698178e+00 2.7202941e+00 2.6038433e+00 2.8913665e+00 4.1279535e+00 3.3674916e+00 3.6069378e+00 4.0422766e+00 3.6262929e+00 2.9966648e+00 2.9376862e+00 3.2357379e+00 3.6482873e+00 2.9899833e+00 2.1633308e+00 3.1080541e+00 3.0838288e+00 3.1224990e+00 3.4132096e+00 1.9157244e+00 3.0446675e+00 5.3357286e+00 4.1773197e+00 5.4064776e+00 4.7222876e+00 5.1097945e+00 6.2153037e+00 3.4205263e+00 5.7384667e+00 5.0813384e+00 5.7844619e+00 4.4519659e+00 4.5530210e+00 4.9457052e+00 4.1303753e+00 4.3965896e+00 4.7010637e+00 4.7095647e+00 6.4140471e+00 6.5901442e+00 4.0877867e+00 5.2297227e+00 3.9862263e+00 6.3229740e+00 4.1436699e+00 5.0695167e+00 5.4387499e+00 4.0124805e+00 4.0472213e+00 4.8733972e+00 5.2172790e+00 5.6550862e+00 6.2153037e+00 4.9132474e+00 4.1988094e+00 4.5552168e+00 5.9321160e+00 4.9628621e+00 4.6690470e+00 3.9268308e+00 4.9101935e+00 5.1048996e+00 4.7602521e+00 4.1773197e+00 5.3497664e+00 5.2325902e+00 4.7455242e+00 4.2883563e+00 4.5332108e+00 4.7191101e+00 4.1496988e+00 6.1644140e-01 4.5825757e-01 2.2360680e-01 9.2195445e-01 5.2915026e-01 4.2426407e-01 3.4641016e-01 6.4031242e-01 9.7467943e-01 9.1651514e-01 1.0862780e+00 5.4772256e-01 1.7320508e-01 7.9372539e-01 2.6457513e-01 5.3851648e-01 2.6457513e-01 5.6568542e-01 5.2915026e-01 5.7445626e-01 6.3245553e-01 3.4641016e-01 2.4494897e-01 2.8284271e-01 5.3851648e-01 5.7445626e-01 5.0000000e-01 5.5677644e-01 7.8102497e-01 5.2915026e-01 4.4721360e-01 5.1961524e-01 5.2915026e-01 8.5440037e-01 2.4494897e-01 1.7320508e-01 1.4000000e+00 7.2801099e-01 4.5825757e-01 5.8309519e-01 
6.4031242e-01 3.0000000e-01 5.6568542e-01 3.3166248e-01 3.0000000e-01 4.0607881e+00 3.6633318e+00 4.2190046e+00 3.1480152e+00 3.8496753e+00 3.4568772e+00 3.8249183e+00 2.3874673e+00 3.8078866e+00 2.9223278e+00 2.7586228e+00 3.2710854e+00 3.2186954e+00 3.7456642e+00 2.6267851e+00 3.6851052e+00 3.4669872e+00 3.0626786e+00 3.8340579e+00 2.9376862e+00 3.8845849e+00 3.1336879e+00 4.1036569e+00 3.7067506e+00 3.4741906e+00 3.6551334e+00 4.1085277e+00 4.2965102e+00 3.5763109e+00 2.5573424e+00 2.8740216e+00 2.7604347e+00 2.9495762e+00 4.1785165e+00 3.4380227e+00 3.5510562e+00 3.9648455e+00 3.6864617e+00 3.0364453e+00 3.0708305e+00 3.3541020e+00 3.6400549e+00 3.0659419e+00 2.4372115e+00 3.1968735e+00 3.1128765e+00 3.1670175e+00 3.3985291e+00 2.1424285e+00 3.1032241e+00 5.3131911e+00 4.2461747e+00 5.3507009e+00 4.7307505e+00 5.0960769e+00 6.1457302e+00 3.6166283e+00 5.6877060e+00 5.1009803e+00 5.6762664e+00 4.3977267e+00 4.5683695e+00 4.9010203e+00 4.2308392e+00 4.4508426e+00 4.6626173e+00 4.6882833e+00 6.2785349e+00 6.5536250e+00 4.1964271e+00 5.1643005e+00 4.0607881e+00 6.2657801e+00 4.1605288e+00 5.0079936e+00 5.3591044e+00 4.0249224e+00 4.0472213e+00 4.8836462e+00 5.1497573e+00 5.6017854e+00 6.0572271e+00 4.9234135e+00 4.2083251e+00 4.6141088e+00 5.8438001e+00 4.9203658e+00 4.6454279e+00 3.9344631e+00 4.8445846e+00 5.0616203e+00 4.6861498e+00 4.2461747e+00 5.2971691e+00 5.1730069e+00 4.7010637e+00 4.3301270e+00 4.5044423e+00 4.6786750e+00 4.1737274e+00 9.9498744e-01 7.0000000e-01 1.4594520e+00 1.0099505e+00 3.4641016e-01 8.1240384e-01 1.1618950e+00 1.5716234e+00 6.7823300e-01 6.1644140e-01 4.0000000e-01 5.9160798e-01 3.3166248e-01 3.8729833e-01 5.3851648e-01 4.1231056e-01 1.1224972e+00 6.7823300e-01 8.3066239e-01 1.0099505e+00 6.4807407e-01 5.2915026e-01 6.4807407e-01 1.0148892e+00 1.0246951e+00 5.3851648e-01 4.5825757e-01 4.7958315e-01 1.0099505e+00 9.6953597e-01 6.0827625e-01 1.0099505e+00 1.4177447e+00 6.4807407e-01 7.0000000e-01 1.8814888e+00 1.3000000e+00 6.0827625e-01 3.7416574e-01 1.1269428e+00 3.8729833e-01 1.1224972e+00 3.6055513e-01 8.0622577e-01 3.6124784e+00 3.2465366e+00 3.7868192e+00 2.9444864e+00 3.4698703e+00 3.1543621e+00 3.4073450e+00 2.3280893e+00 3.4146742e+00 2.7055499e+00 2.7147744e+00 2.9189039e+00 2.9832868e+00 3.3896903e+00 2.3366643e+00 3.2588341e+00 3.1464265e+00 2.7784888e+00 3.5468296e+00 2.7073973e+00 3.5085610e+00 2.7928480e+00 3.7709415e+00 3.3674916e+00 3.0935417e+00 3.2465366e+00 3.7121422e+00 3.8832976e+00 3.2264532e+00 2.3194827e+00 2.6758176e+00 2.5729361e+00 2.6608269e+00 3.8470768e+00 3.1400637e+00 3.1448370e+00 3.5411862e+00 3.3867388e+00 2.7239677e+00 2.8407745e+00 3.1032241e+00 3.2726136e+00 2.7892651e+00 2.3748684e+00 2.9223278e+00 2.7910571e+00 2.8548205e+00 3.0347982e+00 2.0566964e+00 2.8053520e+00 4.9061186e+00 3.9255573e+00 4.9223978e+00 4.3566042e+00 4.6978719e+00 5.7052607e+00 3.4263683e+00 5.2659282e+00 4.7349762e+00 5.2057660e+00 3.9774364e+00 4.2011903e+00 4.4833024e+00 3.9370039e+00 4.1146081e+00 4.2497059e+00 4.2918527e+00 5.7913729e+00 6.1343296e+00 3.9179076e+00 4.7275787e+00 3.7483330e+00 5.8360946e+00 3.8013156e+00 4.5760245e+00 4.9173163e+00 3.6633318e+00 3.6742346e+00 4.5066617e+00 4.7222876e+00 5.1788030e+00 5.5596762e+00 4.5453273e+00 3.8457769e+00 4.2883563e+00 5.3916602e+00 4.5022217e+00 4.2473521e+00 3.5693137e+00 4.4124823e+00 4.6411206e+00 4.2497059e+00 3.9255573e+00 4.8682646e+00 4.7391982e+00 4.2848571e+00 3.9887341e+00 4.1024383e+00 4.2649736e+00 3.8183766e+00 4.2426407e-01 5.4772256e-01 4.7958315e-01 8.6602540e-01 
3.0000000e-01 4.8989795e-01 6.1644140e-01 1.3601471e+00 1.4933185e+00 9.5393920e-01 5.0990195e-01 1.2083046e+00 6.4807407e-01 8.6023253e-01 6.0000000e-01 4.5825757e-01 6.2449980e-01 5.4772256e-01 6.0827625e-01 4.5825757e-01 6.2449980e-01 6.0827625e-01 3.1622777e-01 4.2426407e-01 8.1240384e-01 9.4868330e-01 1.2083046e+00 4.7958315e-01 5.0000000e-01 9.1651514e-01 4.7958315e-01 4.6904158e-01 5.1961524e-01 4.2426407e-01 1.1090537e+00 3.1622777e-01 5.4772256e-01 8.1853528e-01 4.4721360e-01 6.7823300e-01 2.2360680e-01 7.7459667e-01 4.2426407e-01 4.2308392e+00 3.7854986e+00 4.3669211e+00 3.1272992e+00 3.9560081e+00 3.4899857e+00 3.9344631e+00 2.2781571e+00 3.9357337e+00 2.8827071e+00 2.6495283e+00 3.3361655e+00 3.2634338e+00 3.8209946e+00 2.6627054e+00 3.8353618e+00 3.4942810e+00 3.1160873e+00 3.8794329e+00 2.9495762e+00 3.9420807e+00 3.2202484e+00 4.1701319e+00 3.7828561e+00 3.5916570e+00 3.7907783e+00 4.2391037e+00 4.4147480e+00 3.6414283e+00 2.5980762e+00 2.8653098e+00 2.7549955e+00 2.9983329e+00 4.2225585e+00 3.4423829e+00 3.6414283e+00 4.1024383e+00 3.7549967e+00 3.0740852e+00 3.0626786e+00 3.3555923e+00 3.7229021e+00 3.1064449e+00 2.3388031e+00 3.2140317e+00 3.1654384e+00 3.2093613e+00 3.4957117e+00 2.0639767e+00 3.1400637e+00 5.3758720e+00 4.2638011e+00 5.4680892e+00 4.7989582e+00 5.1710734e+00 6.2801274e+00 3.5312887e+00 5.8137767e+00 5.1797683e+00 5.8077534e+00 4.4977772e+00 4.6368092e+00 5.0049975e+00 4.2272923e+00 4.4609416e+00 4.7423623e+00 4.7780749e+00 6.4397205e+00 6.6708320e+00 4.2190046e+00 5.2744668e+00 4.0620192e+00 6.3992187e+00 4.2284749e+00 5.1137071e+00 5.4963624e+00 4.0902323e+00 4.1121770e+00 4.9477268e+00 5.2886671e+00 5.7314920e+00 6.2401923e+00 4.9849774e+00 4.2871902e+00 4.6626173e+00 5.9883220e+00 4.9939964e+00 4.7318073e+00 3.9912404e+00 4.9618545e+00 5.1526692e+00 4.8031240e+00 4.2638011e+00 5.3972215e+00 5.2678269e+00 4.7968740e+00 4.3840620e+00 4.5934736e+00 4.7497368e+00 4.2178193e+00 7.8740079e-01 3.3166248e-01 5.0000000e-01 2.2360680e-01 4.6904158e-01 9.0553851e-01 1.0440307e+00 1.2369317e+00 7.0000000e-01 2.0000000e-01 8.3666003e-01 4.2426407e-01 4.4721360e-01 3.7416574e-01 6.7082039e-01 3.8729833e-01 4.4721360e-01 4.1231056e-01 2.2360680e-01 2.2360680e-01 2.2360680e-01 3.7416574e-01 3.7416574e-01 4.4721360e-01 7.3484692e-01 9.4868330e-01 3.3166248e-01 3.6055513e-01 5.4772256e-01 3.3166248e-01 7.4833148e-01 1.0000000e-01 2.4494897e-01 1.2288206e+00 6.6332496e-01 4.2426407e-01 6.0827625e-01 4.6904158e-01 4.2426407e-01 4.5825757e-01 4.2426407e-01 1.4142136e-01 3.9648455e+00 3.5623026e+00 4.1170378e+00 2.9866369e+00 3.7296112e+00 3.3256578e+00 3.7282704e+00 2.2113344e+00 3.6918830e+00 2.7802878e+00 2.5690465e+00 3.1543621e+00 3.0545049e+00 3.6249138e+00 2.4959968e+00 3.5818989e+00 3.3481338e+00 2.9206164e+00 3.6837481e+00 2.7820855e+00 3.7815341e+00 3.0049958e+00 3.9686270e+00 3.5791060e+00 3.3555923e+00 3.5454196e+00 3.9912404e+00 4.1892720e+00 3.4554305e+00 2.4020824e+00 2.7110883e+00 2.5942244e+00 2.8089144e+00 4.0509258e+00 3.3181320e+00 3.4583233e+00 3.8613469e+00 3.5383612e+00 2.9137605e+00 2.9189039e+00 3.2093613e+00 3.5242020e+00 2.9206164e+00 2.2561028e+00 3.0577770e+00 2.9899833e+00 3.0397368e+00 3.2771939e+00 1.9697716e+00 2.9698485e+00 5.2191953e+00 4.1206796e+00 5.2478567e+00 4.6162756e+00 4.9899900e+00 6.0448325e+00 3.4741906e+00 5.5803226e+00 4.9749372e+00 5.5973208e+00 4.3000000e+00 4.4474712e+00 4.7968740e+00 4.0975602e+00 4.3358967e+00 4.5661800e+00 4.5793013e+00 6.2040309e+00 6.4420494e+00 4.0472213e+00 5.0695167e+00 3.9395431e+00 
6.1587336e+00 4.0373258e+00 4.9142650e+00 5.2621288e+00 3.9051248e+00 3.9357337e+00 4.7686476e+00 5.0447993e+00 5.4927225e+00 5.9849812e+00 4.8093659e+00 4.0865633e+00 4.4833024e+00 5.7463032e+00 4.8311489e+00 4.5398238e+00 3.8223030e+00 4.7455242e+00 4.9628621e+00 4.5902070e+00 4.1206796e+00 5.2009614e+00 5.0823223e+00 4.5989129e+00 4.2000000e+00 4.3977267e+00 4.5891176e+00 4.0607881e+00 5.5677644e-01 1.2845233e+00 6.7082039e-01 4.2426407e-01 3.4641016e-01 1.7916473e+00 1.9974984e+00 1.4317821e+00 9.2736185e-01 1.6124515e+00 1.1489125e+00 1.1575837e+00 1.0862780e+00 8.3066239e-01 9.1104336e-01 8.1240384e-01 6.4031242e-01 8.3066239e-01 1.0049876e+00 9.4339811e-01 4.6904158e-01 4.8989795e-01 1.1401754e+00 1.4491377e+00 1.7029386e+00 5.5677644e-01 7.0000000e-01 1.2569805e+00 5.5677644e-01 1.4142136e-01 8.6602540e-01 8.6023253e-01 6.2449980e-01 3.1622777e-01 9.5916630e-01 1.2609520e+00 4.2426407e-01 1.1575837e+00 3.6055513e-01 1.2083046e+00 7.2111026e-01 4.3794977e+00 3.9230090e+00 4.4977772e+00 3.0886890e+00 4.0435133e+00 3.5383612e+00 4.0767634e+00 2.1794495e+00 4.0360872e+00 2.8930952e+00 2.4939928e+00 3.4336569e+00 3.2326460e+00 3.9012818e+00 2.7367864e+00 3.9711459e+00 3.5707142e+00 3.1511903e+00 3.8768544e+00 2.9427878e+00 4.0570926e+00 3.2969683e+00 4.2083251e+00 3.8457769e+00 3.6905284e+00 3.9102430e+00 4.3324358e+00 4.5287967e+00 3.7229021e+00 2.6134269e+00 2.8337255e+00 2.7184554e+00 3.0413813e+00 4.2720019e+00 3.5085610e+00 3.7920970e+00 4.2320208e+00 3.7656341e+00 3.1543621e+00 3.0561414e+00 3.3615473e+00 3.8183766e+00 3.1320920e+00 2.2293497e+00 3.2449961e+00 3.2465366e+00 3.2771939e+00 3.5860842e+00 2.0049938e+00 3.1937439e+00 5.4972721e+00 4.3104524e+00 5.5821143e+00 4.8795492e+00 5.2706736e+00 6.3953108e+00 3.5028560e+00 5.9143892e+00 5.2316345e+00 5.9757845e+00 4.6292548e+00 4.7053161e+00 5.1176166e+00 4.2485292e+00 4.5276926e+00 4.8692915e+00 4.8774994e+00 6.6174013e+00 6.7557383e+00 4.2071368e+00 5.4074023e+00 4.1158231e+00 6.4984614e+00 4.2965102e+00 5.2488094e+00 5.6258333e+00 4.1677332e+00 4.2083251e+00 5.0259327e+00 5.4009258e+00 5.8300943e+00 6.4265076e+00 5.0645829e+00 4.3588989e+00 4.6968074e+00 6.1155539e+00 5.1322510e+00 4.8383882e+00 4.0853396e+00 5.0892043e+00 5.2735187e+00 4.9386233e+00 4.3104524e+00 5.5235858e+00 5.4064776e+00 4.9142650e+00 4.4294469e+00 4.7010637e+00 4.8887626e+00 4.3023250e+00 7.8740079e-01 3.4641016e-01 1.7320508e-01 7.2801099e-01 1.3114877e+00 1.5556349e+00 1.0099505e+00 5.0000000e-01 1.1000000e+00 7.5498344e-01 6.2449980e-01 7.0000000e-01 7.7459667e-01 5.2915026e-01 5.1961524e-01 2.0000000e-01 4.4721360e-01 5.0990195e-01 4.4721360e-01 2.6457513e-01 1.7320508e-01 6.5574385e-01 1.0440307e+00 1.2609520e+00 0.0000000e+00 3.4641016e-01 7.5498344e-01 0.0000000e+00 5.5677644e-01 3.7416574e-01 5.0000000e-01 9.3808315e-01 5.5677644e-01 6.5574385e-01 8.8317609e-01 2.6457513e-01 7.4161985e-01 3.4641016e-01 7.2801099e-01 2.6457513e-01 4.0435133e+00 3.6359318e+00 4.1856899e+00 2.9478806e+00 3.7709415e+00 3.3421550e+00 3.8065733e+00 2.1307276e+00 3.7389838e+00 2.7748874e+00 2.4556058e+00 3.2031235e+00 3.0133038e+00 3.6619667e+00 2.5258662e+00 3.6523965e+00 3.3852622e+00 2.9223278e+00 3.6687873e+00 2.7586228e+00 3.8457769e+00 3.0364453e+00 3.9799497e+00 3.6027767e+00 3.4014703e+00 3.6055513e+00 4.0348482e+00 4.2497059e+00 3.4942810e+00 2.3874673e+00 2.6720778e+00 2.5495098e+00 2.8178006e+00 4.0718546e+00 3.3496268e+00 3.5425979e+00 3.9293765e+00 3.5284558e+00 2.9495762e+00 2.9000000e+00 3.1984371e+00 3.5707142e+00 2.9189039e+00 2.1679483e+00 
3.0626786e+00 3.0248967e+00 3.0675723e+00 3.3181320e+00 1.9104973e+00 2.9883106e+00 5.2924474e+00 4.1436699e+00 5.3113087e+00 4.6583259e+00 5.0467812e+00 6.1081912e+00 3.4525353e+00 5.6329388e+00 4.9979996e+00 5.6973678e+00 4.3749286e+00 4.4821870e+00 4.8600412e+00 4.1060930e+00 4.3760713e+00 4.6411206e+00 4.6324939e+00 6.3071388e+00 6.4876806e+00 4.0286474e+00 5.1468437e+00 3.9686270e+00 6.2112801e+00 4.0706265e+00 4.9919936e+00 5.3329167e+00 3.9446166e+00 3.9874804e+00 4.8114447e+00 5.1029403e+00 5.5443665e+00 6.0917978e+00 4.8538644e+00 4.1194660e+00 4.4933284e+00 5.8180753e+00 4.9142650e+00 4.5978256e+00 3.8729833e+00 4.8176758e+00 5.0338852e+00 4.6690470e+00 4.1436699e+00 5.2744668e+00 5.1652686e+00 4.6669048e+00 4.2201896e+00 4.4575778e+00 4.6722586e+00 4.1060930e+00 6.7823300e-01 9.3273791e-01 1.3674794e+00 5.8309519e-01 7.8740079e-01 3.4641016e-01 3.8729833e-01 3.8729833e-01 3.3166248e-01 3.6055513e-01 3.6055513e-01 9.4868330e-01 6.1644140e-01 7.8102497e-01 8.1240384e-01 5.4772256e-01 2.8284271e-01 3.7416574e-01 8.6602540e-01 8.5440037e-01 3.6055513e-01 4.5825757e-01 5.1961524e-01 7.8740079e-01 7.0710678e-01 3.0000000e-01 7.8740079e-01 1.2369317e+00 4.2426407e-01 5.0000000e-01 1.6792856e+00 1.1357817e+00 6.0827625e-01 5.4772256e-01 9.3273791e-01 3.3166248e-01 9.4868330e-01 1.0000000e-01 5.7445626e-01 3.8065733e+00 3.4554305e+00 3.9824616e+00 3.0708305e+00 3.6496575e+00 3.3331667e+00 3.6290495e+00 2.4124676e+00 3.5916570e+00 2.8705400e+00 2.7730849e+00 3.1176915e+00 3.0822070e+00 3.5791060e+00 2.5099801e+00 3.4496377e+00 3.3496268e+00 2.9257478e+00 3.6851052e+00 2.8372522e+00 3.7349699e+00 2.9597297e+00 3.9370039e+00 3.5411862e+00 3.2695565e+00 3.4322005e+00 3.8858718e+00 4.0841156e+00 3.4190642e+00 2.4372115e+00 2.7928480e+00 2.6795522e+00 2.8142495e+00 4.0348482e+00 3.3436507e+00 3.3778692e+00 3.7389838e+00 3.5199432e+00 2.9154759e+00 2.9849623e+00 3.2603681e+00 3.4684290e+00 2.9359837e+00 2.4494897e+00 3.0886890e+00 2.9782545e+00 3.0380915e+00 3.2140317e+00 2.1424285e+00 2.9782545e+00 5.1487863e+00 4.1243181e+00 5.1332251e+00 4.5628938e+00 4.9183331e+00 5.9118525e+00 3.5972211e+00 5.4635154e+00 4.9173163e+00 5.4497706e+00 4.2023803e+00 4.3965896e+00 4.6968074e+00 4.1255303e+00 4.3324358e+00 4.4833024e+00 4.5011110e+00 6.0282667e+00 6.3300869e+00 4.0681691e+00 4.9547957e+00 3.9560081e+00 6.0315835e+00 3.9912404e+00 4.8062459e+00 5.1283526e+00 3.8600518e+00 3.8858718e+00 4.7148701e+00 4.9173163e+00 5.3721504e+00 5.7887823e+00 4.7560488e+00 4.0336088e+00 4.4665423e+00 5.5991071e+00 4.7486840e+00 4.4631827e+00 3.7815341e+00 4.6292548e+00 4.8682646e+00 4.4698993e+00 4.1243181e+00 5.0970580e+00 4.9779514e+00 4.5033321e+00 4.1701319e+00 4.3162484e+00 4.5110974e+00 4.0323690e+00 4.5825757e-01 8.1853528e-01 1.2328828e+00 1.3638182e+00 8.6023253e-01 3.8729833e-01 9.9498744e-01 5.1961524e-01 6.0827625e-01 4.7958315e-01 6.6332496e-01 4.4721360e-01 3.0000000e-01 4.4721360e-01 2.8284271e-01 4.2426407e-01 4.4721360e-01 2.2360680e-01 3.0000000e-01 6.4031242e-01 8.1853528e-01 1.0816654e+00 3.4641016e-01 4.8989795e-01 7.6811457e-01 3.4641016e-01 6.4031242e-01 3.1622777e-01 3.8729833e-01 1.1832160e+00 5.3851648e-01 4.5825757e-01 6.1644140e-01 4.5825757e-01 5.0000000e-01 3.4641016e-01 5.9160798e-01 3.0000000e-01 3.9912404e+00 3.5637059e+00 4.1327957e+00 2.9444864e+00 3.7336309e+00 3.2848135e+00 3.7188708e+00 2.1307276e+00 3.7013511e+00 2.7166155e+00 2.5000000e+00 3.1336879e+00 3.0463092e+00 3.6041643e+00 2.4698178e+00 3.6027767e+00 3.3015148e+00 2.8948230e+00 3.6742346e+00 2.7477263e+00 
3.7483330e+00 3.0033315e+00 3.9547440e+00 3.5580894e+00 3.3630343e+00 3.5608988e+00 4.0049969e+00 4.1928511e+00 3.4336569e+00 2.3874673e+00 2.6720778e+00 2.5573424e+00 2.7892651e+00 4.0174619e+00 3.2588341e+00 3.4365681e+00 3.8729833e+00 3.5369478e+00 2.8740216e+00 2.8757608e+00 3.1575307e+00 3.5057096e+00 2.8982753e+00 2.1863211e+00 3.0166206e+00 2.9546573e+00 3.0049958e+00 3.2726136e+00 1.9157244e+00 2.9376862e+00 5.1874849e+00 4.0779897e+00 5.2488094e+00 4.5891176e+00 4.9689033e+00 6.0506198e+00 3.3882149e+00 5.5812185e+00 4.9618545e+00 5.5982140e+00 4.2918527e+00 4.4305756e+00 4.7937459e+00 4.0521599e+00 4.2953463e+00 4.5497253e+00 4.5628938e+00 6.2112801e+00 6.4459289e+00 4.0162171e+00 5.0665570e+00 3.8897301e+00 6.1660360e+00 4.0236799e+00 4.9030603e+00 5.2649786e+00 3.8884444e+00 3.9115214e+00 4.7465777e+00 5.0517324e+00 5.5009090e+00 6.0041652e+00 4.7874837e+00 4.0681691e+00 4.4463468e+00 5.7645468e+00 4.8052055e+00 4.5188494e+00 3.7947332e+00 4.7486840e+00 4.9537864e+00 4.6000000e+00 4.0779897e+00 5.1903757e+00 5.0714889e+00 4.5978256e+00 4.1844952e+00 4.3874822e+00 4.5617979e+00 4.0224371e+00 5.8309519e-01 1.4317821e+00 1.6941074e+00 1.1269428e+00 6.1644140e-01 1.2569805e+00 8.8317609e-01 7.8740079e-01 8.2462113e-01 7.5498344e-01 6.5574385e-01 6.4807407e-01 3.0000000e-01 5.7445626e-01 6.5574385e-01 5.7445626e-01 3.1622777e-01 2.4494897e-01 7.8740079e-01 1.1747340e+00 1.3928388e+00 1.7320508e-01 3.6055513e-01 8.7177979e-01 1.7320508e-01 4.2426407e-01 5.1961524e-01 5.8309519e-01 7.9372539e-01 4.6904158e-01 7.6157731e-01 1.0344080e+00 2.0000000e-01 8.8317609e-01 3.0000000e-01 8.7177979e-01 3.7416574e-01 4.1785165e+00 3.7643060e+00 4.3162484e+00 3.0298515e+00 3.8897301e+00 3.4496377e+00 3.9344631e+00 2.1886069e+00 3.8639358e+00 2.8618176e+00 2.5019992e+00 3.3181320e+00 3.1064449e+00 3.7788887e+00 2.6324893e+00 3.7828561e+00 3.4942810e+00 3.0315013e+00 3.7643060e+00 2.8530685e+00 3.9623226e+00 3.1511903e+00 4.0877867e+00 3.7188708e+00 3.5242020e+00 3.7322915e+00 4.1581246e+00 4.3737855e+00 3.6083237e+00 2.4879711e+00 2.7586228e+00 2.6362853e+00 2.9240383e+00 4.1797129e+00 3.4539832e+00 3.6687873e+00 4.0583248e+00 3.6304270e+00 3.0610456e+00 2.9899833e+00 3.2954514e+00 3.6905284e+00 3.0215890e+00 2.2248595e+00 3.1638584e+00 3.1400637e+00 3.1780497e+00 3.4380227e+00 1.9748418e+00 3.0951575e+00 5.4092513e+00 4.2449971e+00 5.4350713e+00 4.7738873e+00 5.1633323e+00 6.2353829e+00 3.5256205e+00 5.7584720e+00 5.1097945e+00 5.8283788e+00 4.4977772e+00 4.5934736e+00 4.9809638e+00 4.1988094e+00 4.4743715e+00 4.7592016e+00 4.7528939e+00 6.4459289e+00 6.6075714e+00 4.1231056e+00 5.2706736e+00 4.0669399e+00 6.3364028e+00 4.1809090e+00 5.1176166e+00 5.4635154e+00 4.0558600e+00 4.1024383e+00 4.9234135e+00 5.2316345e+00 5.6683331e+00 6.2337790e+00 4.9648766e+00 4.2355637e+00 4.6021734e+00 5.9447456e+00 5.0338852e+00 4.7191101e+00 3.9862263e+00 4.9416596e+00 5.1526692e+00 4.7906158e+00 4.2449971e+00 5.3972215e+00 5.2867760e+00 4.7843495e+00 4.3243497e+00 4.5760245e+00 4.7916594e+00 4.2178193e+00 1.8083141e+00 2.0420578e+00 1.4662878e+00 1.0099505e+00 1.7320508e+00 1.2165525e+00 1.3190906e+00 1.1747340e+00 6.8556546e-01 1.1180340e+00 1.0295630e+00 8.6602540e-01 9.9498744e-01 1.1090537e+00 1.0344080e+00 6.7823300e-01 7.2111026e-01 1.2727922e+00 1.4764823e+00 1.7262677e+00 7.2801099e-01 7.4161985e-01 1.3190906e+00 7.2801099e-01 2.4494897e-01 9.8488578e-01 9.0553851e-01 7.8102497e-01 3.1622777e-01 1.1135529e+00 1.4177447e+00 6.1644140e-01 1.2409674e+00 4.7958315e-01 1.2884099e+00 8.2462113e-01 
4.6882833e+00 4.2391037e+00 4.8135226e+00 3.4322005e+00 4.3692105e+00 3.8729833e+00 4.3931765e+00 2.5238859e+00 4.3577517e+00 3.2295511e+00 2.8390139e+00 3.7589892e+00 3.5707142e+00 4.2308392e+00 3.0643107e+00 4.2836900e+00 3.9000000e+00 3.4856850e+00 4.2154478e+00 3.2832910e+00 4.3794977e+00 3.6235342e+00 4.5442271e+00 4.1773197e+00 4.0124805e+00 4.2272923e+00 4.6551047e+00 4.8507731e+00 4.0521599e+00 2.9478806e+00 3.1764760e+00 3.0610456e+00 3.3749074e+00 4.6076024e+00 3.8379682e+00 4.1060930e+00 4.5486262e+00 4.1012193e+00 3.4828150e+00 3.3970576e+00 3.7013511e+00 4.1448764e+00 3.4684290e+00 2.5748786e+00 3.5818989e+00 3.5749126e+00 3.6083237e+00 3.9115214e+00 2.3452079e+00 3.5270384e+00 5.8189346e+00 4.6454279e+00 5.9059292e+00 5.2105662e+00 5.5982140e+00 6.7186308e+00 3.8379682e+00 6.2401923e+00 5.5668663e+00 6.2872888e+00 4.9487372e+00 5.0378567e+00 5.4415071e+00 4.5858478e+00 4.8559242e+00 5.1894123e+00 5.2048055e+00 6.9260378e+00 7.0851958e+00 4.5497253e+00 5.7271284e+00 4.4474712e+00 6.8242216e+00 4.6281746e+00 5.5686623e+00 5.9455866e+00 4.4977772e+00 4.5354162e+00 5.3572381e+00 5.7227616e+00 6.1554854e+00 6.7305275e+00 5.3953684e+00 4.6904158e+00 5.0338852e+00 6.4342832e+00 5.4497706e+00 5.1643005e+00 4.4124823e+00 5.4092513e+00 5.5955339e+00 5.2545219e+00 4.6454279e+00 5.8455111e+00 5.7245087e+00 5.2354560e+00 4.7644517e+00 5.0259327e+00 5.2057660e+00 4.6314145e+00 5.4772256e-01 4.6904158e-01 8.8881944e-01 5.5677644e-01 7.9372539e-01 8.7749644e-01 8.4261498e-01 1.2806248e+00 1.1489125e+00 1.3601471e+00 1.3416408e+00 1.0954451e+00 8.3666003e-01 8.7177979e-01 1.4177447e+00 1.4035669e+00 8.0622577e-01 6.8556546e-01 4.1231056e-01 1.3114877e+00 1.1313708e+00 5.9160798e-01 1.3114877e+00 1.7233688e+00 9.6953597e-01 9.5393920e-01 2.1447611e+00 1.6155494e+00 1.1000000e+00 1.0295630e+00 1.4317821e+00 8.3066239e-01 1.4560220e+00 6.5574385e-01 1.0816654e+00 3.9711459e+00 3.6851052e+00 4.1713307e+00 3.4684290e+00 3.8961519e+00 3.6810325e+00 3.8665230e+00 2.9017236e+00 3.8236109e+00 3.2832910e+00 3.2511536e+00 3.4205263e+00 3.4292856e+00 3.8716921e+00 2.8670542e+00 3.6469165e+00 3.6905284e+00 3.2771939e+00 3.9974992e+00 3.2233523e+00 4.0211939e+00 3.2526912e+00 4.2284749e+00 3.8444766e+00 3.5199432e+00 3.6496575e+00 4.1036569e+00 4.3011626e+00 3.7188708e+00 2.8106939e+00 3.1968735e+00 3.0886890e+00 3.1591138e+00 4.3474130e+00 3.7067506e+00 3.6400549e+00 3.9446166e+00 3.8196859e+00 3.2649655e+00 3.3749074e+00 3.6455452e+00 3.7536649e+00 3.2863353e+00 2.9291637e+00 3.4554305e+00 3.3181320e+00 3.3808283e+00 3.4914181e+00 2.6057628e+00 3.3271610e+00 5.3916602e+00 4.4485953e+00 5.3282267e+00 4.8352870e+00 5.1623638e+00 6.0835845e+00 4.0249224e+00 5.6595053e+00 5.1749396e+00 5.6053546e+00 4.4249294e+00 4.6636895e+00 4.9091751e+00 4.4654227e+00 4.6357308e+00 4.7138095e+00 4.7476310e+00 6.1562976e+00 6.5169011e+00 4.4056782e+00 5.1487863e+00 4.2906876e+00 6.2080593e+00 4.2649736e+00 5.0159745e+00 5.3103672e+00 4.1376322e+00 4.1641326e+00 4.9769469e+00 5.1068581e+00 5.5587768e+00 5.8932164e+00 5.0159745e+00 4.3116122e+00 4.7801674e+00 5.7471732e+00 4.9809638e+00 4.7138095e+00 4.0693980e+00 4.8238988e+00 5.0813384e+00 4.6518813e+00 4.4485953e+00 5.3047149e+00 5.1807335e+00 4.7138095e+00 4.4530888e+00 4.5530210e+00 4.7507894e+00 4.3335897e+00 6.1644140e-01 1.0908712e+00 6.4031242e-01 8.5440037e-01 1.0816654e+00 9.2195445e-01 1.4628739e+00 1.2727922e+00 1.4177447e+00 1.5811388e+00 1.2247449e+00 1.0488088e+00 1.1401754e+00 1.5779734e+00 1.5968719e+00 1.0440307e+00 6.5574385e-01 3.6055513e-01 
1.5556349e+00 1.4352700e+00 9.6436508e-01 1.5556349e+00 1.9313208e+00 1.1832160e+00 1.1618950e+00 2.4289916e+00 1.7916473e+00 1.1618950e+00 9.3808315e-01 1.6703293e+00 8.7749644e-01 1.6431677e+00 8.3066239e-01 1.3228757e+00 3.7907783e+00 3.4842503e+00 3.9874804e+00 3.3926391e+00 3.7443290e+00 3.5171011e+00 3.6400549e+00 2.8705400e+00 3.6715120e+00 3.1464265e+00 3.2572995e+00 3.2403703e+00 3.3970576e+00 3.6945906e+00 2.7349589e+00 3.4785054e+00 3.4899857e+00 3.1654384e+00 3.9115214e+00 3.1416556e+00 3.7854986e+00 3.1272992e+00 4.0914545e+00 3.6878178e+00 3.3749074e+00 3.4899857e+00 3.9572718e+00 4.1109610e+00 3.5425979e+00 2.7568098e+00 3.1336879e+00 3.0397368e+00 3.0495901e+00 4.1689327e+00 3.5014283e+00 3.3955854e+00 3.7603191e+00 3.7403208e+00 3.0886890e+00 3.2726136e+00 3.5114100e+00 3.5679126e+00 3.1843367e+00 2.9154759e+00 3.3166248e+00 3.1448370e+00 3.2171416e+00 3.3391616e+00 2.5903668e+00 3.1827661e+00 5.1215232e+00 4.2555846e+00 5.1156622e+00 4.6238512e+00 4.9325450e+00 5.8711157e+00 3.8652296e+00 5.4598535e+00 5.0059964e+00 5.3347915e+00 4.1952354e+00 4.4799554e+00 4.6968074e+00 4.2918527e+00 4.4192760e+00 4.4698993e+00 4.5343136e+00 5.8855756e+00 6.3253458e+00 4.2883563e+00 4.9122296e+00 4.0853396e+00 6.0133186e+00 4.0951190e+00 4.7686476e+00 5.0892043e+00 3.9572718e+00 3.9547440e+00 4.7696960e+00 4.9132474e+00 5.3721504e+00 5.6364883e+00 4.8062459e+00 4.1340053e+00 4.6054316e+00 5.5434646e+00 4.7085029e+00 4.4877611e+00 3.8600518e+00 4.6076024e+00 4.8476799e+00 4.4384682e+00 4.2555846e+00 5.0616203e+00 4.9254441e+00 4.5011110e+00 4.2976738e+00 4.3416587e+00 4.4799554e+00 4.1133928e+00 5.1961524e-01 5.1961524e-01 3.8729833e-01 6.7082039e-01 4.1231056e-01 9.2736185e-01 7.8740079e-01 1.0049876e+00 1.0488088e+00 7.0710678e-01 5.2915026e-01 5.8309519e-01 1.0535654e+00 1.0630146e+00 5.3851648e-01 4.5825757e-01 3.8729833e-01 1.0099505e+00 8.3666003e-01 4.5825757e-01 1.0099505e+00 1.3601471e+00 6.4807407e-01 5.7445626e-01 1.8384776e+00 1.2369317e+00 6.7082039e-01 6.7823300e-01 1.0908712e+00 4.7958315e-01 1.0862780e+00 3.6055513e-01 7.5498344e-01 3.9509493e+00 3.5972211e+00 4.1303753e+00 3.2664966e+00 3.8105118e+00 3.5142567e+00 3.7643060e+00 2.6191602e+00 3.7603191e+00 3.0397368e+00 2.9949958e+00 3.2680269e+00 3.3015148e+00 3.7483330e+00 2.6720778e+00 3.5972211e+00 3.5071356e+00 3.1304952e+00 3.8704005e+00 3.0413813e+00 3.8665230e+00 3.1304952e+00 4.1158231e+00 3.7282704e+00 3.4365681e+00 3.5860842e+00 4.0521599e+00 4.2284749e+00 3.5791060e+00 2.6419690e+00 3.0000000e+00 2.8948230e+00 3.0000000e+00 4.2047592e+00 3.5014283e+00 3.5057096e+00 3.8858718e+00 3.7134889e+00 3.0822070e+00 3.1733263e+00 3.4568772e+00 3.6318040e+00 3.1272992e+00 2.6608269e+00 3.2710854e+00 3.1543621e+00 3.2109189e+00 3.3837849e+00 2.3302360e+00 3.1543621e+00 5.2602281e+00 4.2766810e+00 5.2678269e+00 4.7180504e+00 5.0507425e+00 6.0522723e+00 3.7603191e+00 5.6187187e+00 5.0852729e+00 5.5479726e+00 4.3243497e+00 4.5486262e+00 4.8270074e+00 4.2778499e+00 4.4508426e+00 4.5934736e+00 4.6497312e+00 6.1400326e+00 6.4768820e+00 4.2602817e+00 5.0705029e+00 4.0951190e+00 6.1822326e+00 4.1436699e+00 4.9295030e+00 5.2706736e+00 4.0074930e+00 4.0274061e+00 4.8569538e+00 5.0734604e+00 5.5226805e+00 5.9016947e+00 4.8928519e+00 4.2035699e+00 4.6551047e+00 5.7227616e+00 4.8528342e+00 4.6086874e+00 3.9217343e+00 4.7528939e+00 4.9819675e+00 4.5760245e+00 4.2766810e+00 5.2172790e+00 5.0813384e+00 4.6173586e+00 4.3255058e+00 4.4485953e+00 4.6162756e+00 4.1785165e+00 7.3484692e-01 3.1622777e-01 4.4721360e-01 2.4494897e-01 
6.5574385e-01 4.1231056e-01 6.0000000e-01 5.5677644e-01 2.6457513e-01 1.7320508e-01 1.7320508e-01 5.4772256e-01 5.4772256e-01 3.4641016e-01 6.4807407e-01 8.1240384e-01 5.0000000e-01 3.8729833e-01 4.2426407e-01 5.0000000e-01 8.7177979e-01 1.7320508e-01 1.4142136e-01 1.3453624e+00 7.7459667e-01 3.7416574e-01 5.9160798e-01 5.8309519e-01 3.7416574e-01 5.9160798e-01 3.1622777e-01 2.4494897e-01 3.9749214e+00 3.5818989e+00 4.1340053e+00 3.0594117e+00 3.7589892e+00 3.3852622e+00 3.7496667e+00 2.3130067e+00 3.7215588e+00 2.8478062e+00 2.6758176e+00 3.1890437e+00 3.1224990e+00 3.6687873e+00 2.5396850e+00 3.5958309e+00 3.3985291e+00 2.9849623e+00 3.7349699e+00 2.8530685e+00 3.8131352e+00 3.0413813e+00 4.0162171e+00 3.6318040e+00 3.3852622e+00 3.5651087e+00 4.0187063e+00 4.2107007e+00 3.4957117e+00 2.4637370e+00 2.7874720e+00 2.6739484e+00 2.8618176e+00 4.1024383e+00 3.3749074e+00 3.4813790e+00 3.8794329e+00 3.5888717e+00 2.9647934e+00 2.9866369e+00 3.2832910e+00 3.5637059e+00 2.9782545e+00 2.3558438e+00 3.1192948e+00 3.0430248e+00 3.0919250e+00 3.3136083e+00 2.0493902e+00 3.0232433e+00 5.2421370e+00 4.1689327e+00 5.2668776e+00 4.6572524e+00 5.0179677e+00 6.0646517e+00 3.5510562e+00 5.6089215e+00 5.0169712e+00 5.5991071e+00 4.3162484e+00 4.4833024e+00 4.8155997e+00 4.1484937e+00 4.3680659e+00 4.5814845e+00 4.6119410e+00 6.2088646e+00 6.4668385e+00 4.1109610e+00 5.0813384e+00 3.9849718e+00 6.1830413e+00 4.0718546e+00 4.9325450e+00 5.2829916e+00 3.9382737e+00 3.9686270e+00 4.8020829e+00 5.0705029e+00 5.5163394e+00 5.9849812e+00 4.8404545e+00 4.1303753e+00 4.5453273e+00 5.7532599e+00 4.8476799e+00 4.5727453e+00 3.8561639e+00 4.7581509e+00 4.9769469e+00 4.5923850e+00 4.1689327e+00 5.2182373e+00 5.0921508e+00 4.6097722e+00 4.2379240e+00 4.4204072e+00 4.6065171e+00 4.1024383e+00 6.3245553e-01 5.0990195e-01 6.4807407e-01 1.3228757e+00 8.0622577e-01 1.0099505e+00 1.0723805e+00 8.1853528e-01 6.2449980e-01 7.1414284e-01 1.1747340e+00 1.1489125e+00 5.4772256e-01 6.4807407e-01 5.4772256e-01 1.1000000e+00 1.0535654e+00 5.4772256e-01 1.1000000e+00 1.5811388e+00 7.5498344e-01 8.6023253e-01 1.9621417e+00 1.4899664e+00 8.2462113e-01 6.4031242e-01 1.2409674e+00 6.1644140e-01 1.2922848e+00 4.6904158e-01 9.1651514e-01 3.5014283e+00 3.1827661e+00 3.6891733e+00 2.9291637e+00 3.3896903e+00 3.1368774e+00 3.3615473e+00 2.3769729e+00 3.3211444e+00 2.7404379e+00 2.7313001e+00 2.8930952e+00 2.9034462e+00 3.3436507e+00 2.3302360e+00 3.1606961e+00 3.1511903e+00 2.7331301e+00 3.4770677e+00 2.6795522e+00 3.5014283e+00 2.7294688e+00 3.7054015e+00 3.3120990e+00 3.0099834e+00 3.1543621e+00 3.6097091e+00 3.8065733e+00 3.1906112e+00 2.2737634e+00 2.6551836e+00 2.5475478e+00 2.6210685e+00 3.8144462e+00 3.1638584e+00 3.1272992e+00 3.4539832e+00 3.3015148e+00 2.7221315e+00 2.8319605e+00 3.0951575e+00 3.2280025e+00 2.7477263e+00 2.4062419e+00 2.9103264e+00 2.7748874e+00 2.8390139e+00 2.9698485e+00 2.0928450e+00 2.7856777e+00 4.8928519e+00 3.9166312e+00 4.8456166e+00 4.3162484e+00 4.6583259e+00 5.6124861e+00 3.4828150e+00 5.1749396e+00 4.6636895e+00 5.1468437e+00 3.9306488e+00 4.1496988e+00 4.4192760e+00 3.9331921e+00 4.1206796e+00 4.2201896e+00 4.2391037e+00 5.7105166e+00 6.0398675e+00 3.8704005e+00 4.6690470e+00 3.7603191e+00 5.7349804e+00 3.7496667e+00 4.5265881e+00 4.8321838e+00 3.6207734e+00 3.6455452e+00 4.4654227e+00 4.6249324e+00 5.0803543e+00 5.4607692e+00 4.5066617e+00 3.7894591e+00 4.2449971e+00 5.2915026e+00 4.4877611e+00 4.2035699e+00 3.5482390e+00 4.3428102e+00 4.5945620e+00 4.1821047e+00 3.9166312e+00 4.8176758e+00 
4.7000000e+00 4.2296572e+00 3.9370039e+00 4.0521599e+00 4.2544095e+00 3.8065733e+00 5.4772256e-01 1.4142136e-01 7.4161985e-01 5.7445626e-01 6.4807407e-01 8.1853528e-01 4.3588989e-01 3.3166248e-01 4.3588989e-01 7.3484692e-01 7.7459667e-01 5.0990195e-01 3.7416574e-01 5.8309519e-01 7.5498344e-01 6.8556546e-01 5.4772256e-01 7.5498344e-01 1.0862780e+00 4.1231056e-01 3.7416574e-01 1.6278821e+00 9.4868330e-01 4.4721360e-01 4.1231056e-01 8.6023253e-01 1.4142136e-01 7.9372539e-01 2.4494897e-01 5.2915026e-01 3.9268308e+00 3.5341194e+00 4.0902323e+00 3.1080541e+00 3.7429935e+00 3.3704599e+00 3.6905284e+00 2.3937418e+00 3.6972963e+00 2.8618176e+00 2.7820855e+00 3.1638584e+00 3.1796226e+00 3.6414283e+00 2.5436195e+00 3.5594943e+00 3.3660065e+00 2.9916551e+00 3.7696154e+00 2.8879058e+00 3.7603191e+00 3.0413813e+00 4.0162171e+00 3.6124784e+00 3.3674916e+00 3.5369478e+00 3.9987498e+00 4.1725292e+00 3.4727511e+00 2.5079872e+00 2.8372522e+00 2.7294688e+00 2.8757608e+00 4.0828911e+00 3.3421550e+00 3.4146742e+00 3.8379682e+00 3.6193922e+00 2.9410882e+00 3.0166206e+00 3.2893768e+00 3.5298725e+00 2.9983329e+00 2.4474477e+00 3.1224990e+00 3.0166206e+00 3.0757113e+00 3.2954514e+00 2.1400935e+00 3.0199338e+00 5.1749396e+00 4.1496988e+00 5.2191953e+00 4.6162756e+00 4.9699095e+00 6.0116553e+00 3.5623026e+00 5.5623736e+00 4.9989999e+00 5.5181519e+00 4.2626283e+00 4.4609416e+00 4.7717921e+00 4.1460825e+00 4.3428102e+00 4.5265881e+00 4.5661800e+00 6.1163715e+00 6.4311741e+00 4.1303753e+00 5.0239427e+00 3.9623226e+00 6.1392182e+00 4.0570926e+00 4.8672374e+00 5.2220686e+00 3.9179076e+00 3.9306488e+00 4.7686476e+00 5.0229473e+00 5.4781384e+00 5.8940648e+00 4.8072861e+00 4.1036569e+00 4.5232732e+00 5.7061370e+00 4.7770284e+00 4.5199558e+00 3.8196859e+00 4.7095647e+00 4.9264592e+00 4.5486262e+00 4.1496988e+00 5.1584882e+00 5.0289164e+00 4.5705580e+00 4.2355637e+00 4.3794977e+00 4.5365185e+00 4.0607881e+00 5.0990195e-01 1.0816654e+00 4.3588989e-01 6.3245553e-01 5.7445626e-01 4.5825757e-01 3.0000000e-01 3.6055513e-01 7.3484692e-01 6.7823300e-01 2.8284271e-01 7.6157731e-01 8.6023253e-01 6.2449980e-01 6.7082039e-01 4.2426407e-01 6.2449980e-01 1.1489125e+00 3.6055513e-01 5.8309519e-01 1.4798649e+00 1.0954451e+00 5.8309519e-01 5.7445626e-01 7.8740079e-01 5.0990195e-01 8.7749644e-01 3.7416574e-01 5.0990195e-01 3.6110940e+00 3.2511536e+00 3.7775654e+00 2.7784888e+00 3.4161382e+00 3.0822070e+00 3.4322005e+00 2.1095023e+00 3.3630343e+00 2.6095977e+00 2.4494897e+00 2.8896367e+00 2.7802878e+00 3.3436507e+00 2.2605309e+00 3.2419130e+00 3.1192948e+00 2.6551836e+00 3.4073450e+00 2.5495098e+00 3.5298725e+00 2.7110883e+00 3.6810325e+00 3.2939338e+00 3.0364453e+00 3.2140317e+00 3.6565011e+00 3.8716921e+00 3.1843367e+00 2.1470911e+00 2.4959968e+00 2.3769729e+00 2.5475478e+00 3.7907783e+00 3.1128765e+00 3.1874755e+00 3.5312887e+00 3.2434549e+00 2.6776856e+00 2.7055499e+00 2.9899833e+00 3.2403703e+00 2.6627054e+00 2.1377558e+00 2.8266588e+00 2.7386128e+00 2.7928480e+00 2.9765752e+00 1.8439089e+00 2.7239677e+00 4.9598387e+00 3.8858718e+00 4.9295030e+00 4.3393548e+00 4.7095647e+00 5.7113921e+00 3.3391616e+00 5.2516664e+00 4.6765372e+00 5.2848841e+00 4.0062451e+00 4.1641326e+00 4.4911023e+00 3.8768544e+00 4.1133928e+00 4.2906876e+00 4.2860238e+00 5.8694122e+00 6.1139185e+00 3.7920970e+00 4.7644517e+00 3.7255872e+00 5.8215118e+00 3.7549967e+00 4.6162756e+00 4.9325450e+00 3.6290495e+00 3.6674242e+00 4.4922155e+00 4.7085029e+00 5.1584882e+00 5.6338264e+00 4.5354162e+00 3.7973675e+00 4.2166337e+00 5.4055527e+00 4.5672749e+00 4.2532341e+00 
3.5623026e+00 4.4317040e+00 4.6722586e+00 4.2790186e+00 3.8858718e+00 4.9040799e+00 4.7947888e+00 4.3023250e+00 3.9242834e+00 4.1060930e+00 4.3289722e+00 3.8118237e+00 7.4161985e-01 4.5825757e-01 6.1644140e-01 7.4161985e-01 3.3166248e-01 3.0000000e-01 3.8729833e-01 6.7823300e-01 7.0710678e-01 4.2426407e-01 5.0990195e-01 6.7823300e-01 7.0000000e-01 6.2449980e-01 5.2915026e-01 7.0000000e-01 1.0295630e+00 3.6055513e-01 3.1622777e-01 1.5394804e+00 9.0553851e-01 3.1622777e-01 4.1231056e-01 7.7459667e-01 2.4494897e-01 7.4161985e-01 2.8284271e-01 4.6904158e-01 3.8858718e+00 3.4856850e+00 4.0459857e+00 3.0298515e+00 3.6864617e+00 3.3136083e+00 3.6441734e+00 2.3086793e+00 3.6482873e+00 2.7874720e+00 2.6944387e+00 3.1032241e+00 3.1096624e+00 3.5888717e+00 2.4718414e+00 3.5114100e+00 3.3090784e+00 2.9342802e+00 3.6972963e+00 2.8178006e+00 3.7067506e+00 2.9782545e+00 3.9560081e+00 3.5623026e+00 3.3136083e+00 3.4856850e+00 3.9484174e+00 4.1218928e+00 3.4146742e+00 2.4351591e+00 2.7622455e+00 2.6551836e+00 2.8089144e+00 4.0261644e+00 3.2848135e+00 3.3674916e+00 3.7907783e+00 3.5524639e+00 2.8827071e+00 2.9427878e+00 3.2280025e+00 3.4785054e+00 2.9308702e+00 2.3600847e+00 3.0577770e+00 2.9631065e+00 3.0166206e+00 3.2403703e+00 2.0445048e+00 2.9563491e+00 5.1244512e+00 4.0865633e+00 5.1710734e+00 4.5661800e+00 4.9173163e+00 5.9699246e+00 3.4885527e+00 5.5208695e+00 4.9446941e+00 5.4763126e+00 4.2107007e+00 4.4022721e+00 4.7191101e+00 4.0755368e+00 4.2731721e+00 4.4710178e+00 4.5177428e+00 6.0868711e+00 6.3827894e+00 4.0644803e+00 4.9739320e+00 3.8961519e+00 6.0967204e+00 3.9949969e+00 4.8218254e+00 5.1836281e+00 3.8561639e+00 3.8742741e+00 4.7116876e+00 4.9829710e+00 5.4323107e+00 5.8668561e+00 4.7486840e+00 4.0521599e+00 4.4743715e+00 5.6586217e+00 4.7265209e+00 4.4732538e+00 3.7616486e+00 4.6583259e+00 4.8713448e+00 4.4911023e+00 4.0865633e+00 5.1097945e+00 4.9769469e+00 4.5110974e+00 4.1689327e+00 4.3243497e+00 4.4855323e+00 4.0062451e+00 9.5916630e-01 9.4339811e-01 9.3808315e-01 7.7459667e-01 7.8740079e-01 7.4833148e-01 7.2801099e-01 8.0622577e-01 9.8488578e-01 9.3273791e-01 1.1532563e+00 7.7459667e-01 6.0000000e-01 9.5393920e-01 7.7459667e-01 7.0000000e-01 7.3484692e-01 5.1961524e-01 1.3416408e+00 5.3851648e-01 8.3066239e-01 1.0677078e+00 7.5498344e-01 8.0622577e-01 5.6568542e-01 8.6602540e-01 6.4031242e-01 4.5880279e+00 4.1641326e+00 4.7370877e+00 3.5651087e+00 4.3474130e+00 3.9127995e+00 4.3162484e+00 2.7313001e+00 4.3197222e+00 3.3196385e+00 3.1000000e+00 3.7389838e+00 3.6823905e+00 4.2272923e+00 3.0757113e+00 4.2023803e+00 3.9115214e+00 3.5355339e+00 4.2965102e+00 3.3808283e+00 4.3416587e+00 3.6193922e+00 4.5825757e+00 4.1928511e+00 3.9786933e+00 4.1665333e+00 4.6216880e+00 4.7979162e+00 4.0484565e+00 3.0166206e+00 3.3015148e+00 3.1906112e+00 3.4146742e+00 4.6411206e+00 3.8652296e+00 4.0261644e+00 4.4766059e+00 4.1653331e+00 3.4899857e+00 3.4971417e+00 3.7907783e+00 4.1243181e+00 3.5270384e+00 2.7892651e+00 3.6414283e+00 3.5791060e+00 3.6262929e+00 3.8923001e+00 2.5039968e+00 3.5594943e+00 5.7680153e+00 4.6850827e+00 5.8506410e+00 5.2057660e+00 5.5686623e+00 6.6580778e+00 3.9749214e+00 6.1991935e+00 5.5874860e+00 6.1692787e+00 4.8805737e+00 5.0428167e+00 5.3907328e+00 4.6540305e+00 4.8713448e+00 5.1283526e+00 5.1749396e+00 6.7926431e+00 7.0590368e+00 4.6486557e+00 5.6524331e+00 4.4821870e+00 6.7808554e+00 4.6335731e+00 5.4954527e+00 5.8719673e+00 4.4944410e+00 4.5144213e+00 5.3525695e+00 5.6674509e+00 6.1139185e+00 6.5825527e+00 5.3888774e+00 4.6936127e+00 5.0842895e+00 6.3553127e+00 
5.3786615e+00 5.1283526e+00 4.3954522e+00 5.3394756e+00 5.5371473e+00 5.1730069e+00 4.6850827e+00 5.7810034e+00 5.6462377e+00 5.1788030e+00 4.7947888e+00 4.9849774e+00 5.1351728e+00 4.6281746e+00 4.7958315e-01 4.4721360e-01 2.0000000e-01 4.2426407e-01 4.4721360e-01 5.1961524e-01 4.7958315e-01 3.8729833e-01 9.2195445e-01 1.0723805e+00 5.2915026e-01 6.0000000e-01 6.7082039e-01 5.2915026e-01 9.1104336e-01 3.7416574e-01 5.0000000e-01 1.2489996e+00 8.6602540e-01 2.6457513e-01 5.4772256e-01 5.5677644e-01 5.9160798e-01 6.6332496e-01 5.7445626e-01 4.3588989e-01 3.6646964e+00 3.2465366e+00 3.8105118e+00 2.6627054e+00 3.4088121e+00 3.0149627e+00 3.4132096e+00 1.9131126e+00 3.3852622e+00 2.4535688e+00 2.2781571e+00 2.8248894e+00 2.7495454e+00 3.3120990e+00 2.1587033e+00 3.2710854e+00 3.0298515e+00 2.6191602e+00 3.3555923e+00 2.4677925e+00 3.4568772e+00 2.6795522e+00 3.6496575e+00 3.2771939e+00 3.0413813e+00 3.2310989e+00 3.6823905e+00 3.8704005e+00 3.1320920e+00 2.0832667e+00 2.3958297e+00 2.2847319e+00 2.4859606e+00 3.7336309e+00 3.0033315e+00 3.1416556e+00 3.5496479e+00 3.2202484e+00 2.5961510e+00 2.5942244e+00 2.9034462e+00 3.2109189e+00 2.6000000e+00 1.9544820e+00 2.7386128e+00 2.6814175e+00 2.7221315e+00 2.9614186e+00 1.6401219e+00 2.6476405e+00 4.8918299e+00 3.7907783e+00 4.9284886e+00 4.3011626e+00 4.6636895e+00 5.7367238e+00 3.1559468e+00 5.2773099e+00 4.6583259e+00 5.2782573e+00 3.9724048e+00 4.1194660e+00 4.4698993e+00 3.7603191e+00 3.9887341e+00 4.2308392e+00 4.2638011e+00 5.9076222e+00 6.1261734e+00 3.7296112e+00 4.7423623e+00 3.6041643e+00 5.8532043e+00 3.7054015e+00 4.5956501e+00 4.9598387e+00 3.5721142e+00 3.6083237e+00 4.4395946e+00 4.7455242e+00 5.1826634e+00 5.6947344e+00 4.4766059e+00 3.7749172e+00 4.1844952e+00 5.4267854e+00 4.5022217e+00 4.2261093e+00 3.4928498e+00 4.4192760e+00 4.6281746e+00 4.2520583e+00 3.7907783e+00 4.8764741e+00 4.7497368e+00 4.2591079e+00 3.8639358e+00 4.0681691e+00 4.2602817e+00 3.7389838e+00 5.3851648e-01 4.1231056e-01 5.7445626e-01 6.4031242e-01 3.7416574e-01 4.2426407e-01 7.4833148e-01 9.0553851e-01 1.1747340e+00 5.1961524e-01 7.5498344e-01 9.2736185e-01 5.1961524e-01 8.2462113e-01 5.0000000e-01 6.4807407e-01 1.2922848e+00 7.4833148e-01 5.4772256e-01 5.3851648e-01 6.4807407e-01 5.8309519e-01 5.7445626e-01 7.0710678e-01 5.4772256e-01 3.7629775e+00 3.3241540e+00 3.8974351e+00 2.7055499e+00 3.4971417e+00 3.0232433e+00 3.4727511e+00 1.9000000e+00 3.4626579e+00 2.4677925e+00 2.2803509e+00 2.8896367e+00 2.8160256e+00 3.3496268e+00 2.2338308e+00 3.3749074e+00 3.0413813e+00 2.6400758e+00 3.4423829e+00 2.5019992e+00 3.4957117e+00 2.7694765e+00 3.7080992e+00 3.3000000e+00 3.1272992e+00 3.3301652e+00 3.7696154e+00 3.9534795e+00 3.1843367e+00 2.1563859e+00 2.4310492e+00 2.3173260e+00 2.5475478e+00 3.7589892e+00 2.9949958e+00 3.1874755e+00 3.6373067e+00 3.3045423e+00 2.6172505e+00 2.6305893e+00 2.8948230e+00 3.2526912e+00 2.6551836e+00 1.9621417e+00 2.7622455e+00 2.6944387e+00 2.7495454e+00 3.0298515e+00 1.7088007e+00 2.6870058e+00 4.9355851e+00 3.8236109e+00 5.0059964e+00 4.3301270e+00 4.7180504e+00 5.8051701e+00 3.1352831e+00 5.3310412e+00 4.7106263e+00 5.3600373e+00 4.0509258e+00 4.1833001e+00 4.5530210e+00 3.8039453e+00 4.0546270e+00 4.3092923e+00 4.3092923e+00 5.9674115e+00 6.2016127e+00 3.7656341e+00 4.8270074e+00 3.6386811e+00 5.9203040e+00 3.7815341e+00 4.6551047e+00 5.0169712e+00 3.6455452e+00 3.6619667e+00 4.4966654e+00 4.8052055e+00 5.2583267e+00 5.7671483e+00 4.5398238e+00 3.8131352e+00 4.1785165e+00 5.5335341e+00 4.5585085e+00 4.2626283e+00 
3.5454196e+00 4.5122057e+00 4.7148701e+00 4.3760713e+00 3.8236109e+00 4.9446941e+00 4.8321838e+00 4.3669211e+00 3.9446166e+00 4.1448764e+00 4.3150898e+00 3.7643060e+00 4.4721360e-01 5.4772256e-01 4.8989795e-01 3.6055513e-01 2.2360680e-01 6.0827625e-01 1.1269428e+00 1.3152946e+00 2.0000000e-01 4.4721360e-01 7.6811457e-01 2.0000000e-01 6.7082039e-01 4.2426407e-01 5.9160798e-01 9.1651514e-01 7.0000000e-01 6.4031242e-01 8.8317609e-01 3.0000000e-01 8.0622577e-01 4.8989795e-01 7.6811457e-01 3.6055513e-01 3.8845849e+00 3.4785054e+00 4.0249224e+00 2.7766887e+00 3.6027767e+00 3.1859065e+00 3.6537652e+00 1.9748418e+00 3.5749126e+00 2.6191602e+00 2.2912878e+00 3.0430248e+00 2.8354894e+00 3.5028560e+00 2.3622024e+00 3.4899857e+00 3.2341923e+00 2.7604347e+00 3.4899857e+00 2.5903668e+00 3.6945906e+00 2.8670542e+00 3.8105118e+00 3.4438351e+00 3.2357379e+00 3.4409301e+00 3.8678159e+00 4.0865633e+00 3.3331667e+00 2.2135944e+00 2.5019992e+00 2.3790755e+00 2.6495283e+00 3.9115214e+00 3.2031235e+00 3.3955854e+00 3.7682887e+00 3.3511192e+00 2.7964263e+00 2.7331301e+00 3.0413813e+00 3.4132096e+00 2.7495454e+00 2.0049938e+00 2.9017236e+00 2.8722813e+00 2.9103264e+00 3.1543621e+00 1.7406895e+00 2.8266588e+00 5.1410116e+00 3.9837169e+00 5.1487863e+00 4.5011110e+00 4.8877398e+00 5.9472683e+00 3.3045423e+00 5.4726593e+00 4.8311489e+00 5.5443665e+00 4.2166337e+00 4.3162484e+00 4.6968074e+00 3.9420807e+00 4.2154478e+00 4.4833024e+00 4.4743715e+00 6.1595454e+00 6.3206012e+00 3.8587563e+00 4.9869831e+00 3.8118237e+00 6.0481402e+00 3.9025633e+00 4.8373546e+00 5.1768716e+00 3.7788887e+00 3.8288379e+00 4.6486557e+00 4.9436828e+00 5.3795911e+00 5.9439044e+00 4.6904158e+00 3.9585351e+00 4.3370497e+00 5.6524331e+00 4.7634021e+00 4.4429720e+00 3.7148351e+00 4.6551047e+00 4.8723711e+00 4.5033321e+00 3.9837169e+00 5.1166395e+00 5.0079936e+00 4.5011110e+00 4.0484565e+00 4.2953463e+00 4.5221676e+00 3.9522146e+00 3.1622777e-01 3.4641016e-01 4.1231056e-01 4.1231056e-01 4.1231056e-01 7.9372539e-01 9.8488578e-01 4.4721360e-01 4.8989795e-01 6.2449980e-01 4.4721360e-01 8.0622577e-01 2.4494897e-01 3.3166248e-01 1.2489996e+00 7.2801099e-01 2.2360680e-01 5.0990195e-01 5.0000000e-01 4.5825757e-01 5.2915026e-01 4.7958315e-01 3.0000000e-01 3.8275318e+00 3.4088121e+00 3.9749214e+00 2.8337255e+00 3.5805028e+00 3.1733263e+00 3.5707142e+00 2.0639767e+00 3.5524639e+00 2.6115130e+00 2.4351591e+00 2.9899833e+00 2.9257478e+00 3.4741906e+00 2.3280893e+00 3.4380227e+00 3.1843367e+00 2.7820855e+00 3.5355339e+00 2.6362853e+00 3.6124784e+00 2.8530685e+00 3.8209946e+00 3.4380227e+00 3.2109189e+00 3.4000000e+00 3.8522721e+00 4.0373258e+00 3.2969683e+00 2.2583180e+00 2.5651511e+00 2.4535688e+00 2.6570661e+00 3.8961519e+00 3.1527766e+00 3.2939338e+00 3.7148351e+00 3.3985291e+00 2.7531800e+00 2.7622455e+00 3.0610456e+00 3.3719431e+00 2.7712813e+00 2.1118712e+00 2.9017236e+00 2.8372522e+00 2.8827071e+00 3.1288976e+00 1.8083141e+00 2.8124722e+00 5.0467812e+00 3.9534795e+00 5.0941143e+00 4.4609416e+00 4.8259714e+00 5.9000000e+00 3.3045423e+00 5.4396691e+00 4.8270074e+00 5.4350713e+00 4.1352146e+00 4.2883563e+00 4.6368092e+00 3.9268308e+00 4.1533119e+00 4.3931765e+00 4.4249294e+00 6.0580525e+00 6.2952363e+00 3.9000000e+00 4.9061186e+00 3.7643060e+00 6.0183054e+00 3.8768544e+00 4.7539457e+00 5.1185936e+00 3.7416574e+00 3.7709415e+00 4.6054316e+00 4.9071377e+00 5.3497664e+00 5.8455111e+00 4.6432747e+00 3.9382737e+00 4.3416587e+00 5.5955339e+00 4.6572524e+00 4.3840620e+00 3.6551334e+00 4.5858478e+00 4.7937459e+00 4.4226689e+00 3.9534795e+00 5.0378567e+00 
4.9112117e+00 4.4294469e+00 4.0385641e+00 4.2343831e+00 4.4147480e+00 3.8961519e+00 1.4142136e-01 5.9160798e-01 5.7445626e-01 3.0000000e-01 6.0827625e-01 7.6811457e-01 5.0990195e-01 4.6904158e-01 3.6055513e-01 5.0990195e-01 9.6436508e-01 1.4142136e-01 3.0000000e-01 1.4071247e+00 8.7749644e-01 4.5825757e-01 5.4772256e-01 6.5574385e-01 3.3166248e-01 6.7823300e-01 2.2360680e-01 3.0000000e-01 3.8742741e+00 3.4957117e+00 4.0373258e+00 2.9983329e+00 3.6715120e+00 3.3090784e+00 3.6674242e+00 2.2759613e+00 3.6249138e+00 2.8000000e+00 2.6324893e+00 3.1176915e+00 3.0364453e+00 3.5846897e+00 2.4779023e+00 3.5014283e+00 3.3316662e+00 2.8982753e+00 3.6578682e+00 2.7802878e+00 3.7456642e+00 2.9597297e+00 3.9319207e+00 3.5411862e+00 3.2939338e+00 3.4727511e+00 3.9217343e+00 4.1231056e+00 3.4190642e+00 2.3874673e+00 2.7202941e+00 2.6038433e+00 2.7856777e+00 4.0249224e+00 3.3136083e+00 3.4073450e+00 3.7868192e+00 3.5028560e+00 2.8948230e+00 2.9240383e+00 3.2109189e+00 3.4799425e+00 2.9017236e+00 2.3151674e+00 3.0495901e+00 2.9647934e+00 3.0182777e+00 3.2264532e+00 2.0174241e+00 2.9512709e+00 5.1759057e+00 4.1048752e+00 5.1797683e+00 4.5760245e+00 4.9426713e+00 5.9690870e+00 3.5128336e+00 5.5108983e+00 4.9295030e+00 5.5190579e+00 4.2402830e+00 4.4056782e+00 4.7349762e+00 4.0914545e+00 4.3185646e+00 4.5144213e+00 4.5276926e+00 6.1139185e+00 6.3741666e+00 4.0336088e+00 5.0029991e+00 3.9306488e+00 6.0844063e+00 3.9962482e+00 4.8518038e+00 5.1865210e+00 3.8652296e+00 3.8961519e+00 4.7275787e+00 4.9699095e+00 5.4203321e+00 5.8847260e+00 4.7686476e+00 4.0435133e+00 4.4575778e+00 5.6630381e+00 4.7822589e+00 4.4899889e+00 3.7868192e+00 4.6765372e+00 4.9050994e+00 4.5188494e+00 4.1048752e+00 5.1400389e+00 5.0219518e+00 4.5387223e+00 4.1653331e+00 4.3439613e+00 4.5420260e+00 4.0323690e+00 5.7445626e-01 5.3851648e-01 3.0000000e-01 7.1414284e-01 8.5440037e-01 4.4721360e-01 3.4641016e-01 3.3166248e-01 4.4721360e-01 9.0000000e-01 1.4142136e-01 2.6457513e-01 1.3114877e+00 8.3066239e-01 5.0000000e-01 6.7823300e-01 5.7445626e-01 4.5825757e-01 6.3245553e-01 3.3166248e-01 2.2360680e-01 3.9509493e+00 3.5749126e+00 4.1133928e+00 3.0446675e+00 3.7389838e+00 3.3808283e+00 3.7509999e+00 2.3108440e+00 3.6959437e+00 2.8600699e+00 2.6551836e+00 3.1906112e+00 3.0789609e+00 3.6592349e+00 2.5416530e+00 3.5749126e+00 3.4088121e+00 2.9631065e+00 3.7067506e+00 2.8337255e+00 3.8275318e+00 3.0232433e+00 3.9949969e+00 3.6138622e+00 3.3630343e+00 3.5440090e+00 3.9899875e+00 4.1976184e+00 3.4914181e+00 2.4372115e+00 2.7676705e+00 2.6495283e+00 2.8460499e+00 4.0963398e+00 3.3911650e+00 3.4942810e+00 3.8626416e+00 3.5538711e+00 2.9698485e+00 2.9782545e+00 3.2756679e+00 3.5566838e+00 2.9597297e+00 2.3452079e+00 3.1144823e+00 3.0413813e+00 3.0903074e+00 3.2969683e+00 2.0469489e+00 3.0182777e+00 5.2602281e+00 4.1749251e+00 5.2564246e+00 4.6540305e+00 5.0209561e+00 6.0473135e+00 3.5721142e+00 5.5883808e+00 4.9979996e+00 5.6053546e+00 4.3197222e+00 4.4754888e+00 4.8104054e+00 4.1545156e+00 4.3874822e+00 4.5934736e+00 4.6065171e+00 6.2048368e+00 6.4459289e+00 4.0902323e+00 5.0823223e+00 4.0012498e+00 6.1595454e+00 4.0632499e+00 4.9355851e+00 5.2687759e+00 3.9344631e+00 3.9724048e+00 4.8010416e+00 5.0477718e+00 5.4936327e+00 5.9741108e+00 4.8414874e+00 4.1170378e+00 4.5310043e+00 5.7367238e+00 4.8672374e+00 4.5716518e+00 3.8626416e+00 4.7528939e+00 4.9819675e+00 4.5912961e+00 4.1749251e+00 5.2211110e+00 5.1029403e+00 4.6108568e+00 4.2272923e+00 4.4192760e+00 4.6270941e+00 4.1109610e+00 1.4142136e-01 7.6157731e-01 1.0392305e+00 1.2961481e+00 
2.6457513e-01 5.0000000e-01 9.0553851e-01 2.6457513e-01 4.6904158e-01 4.5825757e-01 5.2915026e-01 9.7467943e-01 4.2426407e-01 5.8309519e-01 8.0622577e-01 3.1622777e-01 7.2111026e-01 2.2360680e-01 7.8740079e-01 3.7416574e-01 4.0422766e+00 3.6041643e+00 4.1749251e+00 2.9017236e+00 3.7536649e+00 3.2832910e+00 3.7603191e+00 2.0518285e+00 3.7296112e+00 2.6888659e+00 2.4041631e+00 3.1511903e+00 3.0149627e+00 3.6193922e+00 2.4718414e+00 3.6455452e+00 3.3090784e+00 2.8896367e+00 3.6537652e+00 2.7202941e+00 3.7735925e+00 3.0149627e+00 3.9534795e+00 3.5679126e+00 3.3882149e+00 3.5958309e+00 4.0311289e+00 4.2249260e+00 3.4467376e+00 2.3685439e+00 2.6324893e+00 2.5159491e+00 2.7838822e+00 4.0187063e+00 3.2603681e+00 3.4785054e+00 3.9127995e+00 3.5242020e+00 2.8827071e+00 2.8460499e+00 3.1368774e+00 3.5270384e+00 2.8861739e+00 2.1047565e+00 3.0049958e+00 2.9664794e+00 3.0099834e+00 3.2924155e+00 1.8493242e+00 2.9359837e+00 5.2172790e+00 4.0743098e+00 5.2820451e+00 4.6054316e+00 4.9919936e+00 6.0876925e+00 3.3451457e+00 5.6124861e+00 4.9689033e+00 5.6524331e+00 4.3278170e+00 4.4407207e+00 4.8238988e+00 4.0360872e+00 4.2965102e+00 4.5814845e+00 4.5880279e+00 6.2745518e+00 6.4699304e+00 3.9924930e+00 5.1048996e+00 3.8858718e+00 6.1975802e+00 4.0323690e+00 4.9426713e+00 5.3075418e+00 3.9000000e+00 3.9306488e+00 4.7602521e+00 5.0882217e+00 5.5308227e+00 6.0728906e+00 4.8010416e+00 4.0816663e+00 4.4452222e+00 5.8051701e+00 4.8414874e+00 4.5464272e+00 3.8118237e+00 4.7853944e+00 4.9849774e+00 4.6378875e+00 4.0743098e+00 5.2258971e+00 5.1097945e+00 4.6270941e+00 4.1833001e+00 4.4136153e+00 4.5978256e+00 4.0360872e+00 7.0710678e-01 1.0862780e+00 1.3190906e+00 1.7320508e-01 4.5825757e-01 8.6023253e-01 1.7320508e-01 5.0990195e-01 4.3588989e-01 5.4772256e-01 9.1104336e-01 5.0990195e-01 6.0000000e-01 8.4261498e-01 2.4494897e-01 7.6157731e-01 3.0000000e-01 7.8740079e-01 3.4641016e-01 3.9874804e+00 3.5594943e+00 4.1218928e+00 2.8460499e+00 3.6972963e+00 3.2434549e+00 3.7229021e+00 2.0074860e+00 3.6728735e+00 2.6551836e+00 2.3452079e+00 3.1096624e+00 2.9410882e+00 3.5749126e+00 2.4269322e+00 3.5902646e+00 3.2787193e+00 2.8372522e+00 3.5874782e+00 2.6645825e+00 3.7443290e+00 2.9580399e+00 3.8974351e+00 3.5199432e+00 3.3316662e+00 3.5397740e+00 3.9711459e+00 4.1749251e+00 3.4029399e+00 2.3043437e+00 2.5748786e+00 2.4556058e+00 2.7294688e+00 3.9761791e+00 3.2357379e+00 3.4496377e+00 3.8613469e+00 3.4554305e+00 2.8478062e+00 2.7964263e+00 3.0951575e+00 3.4842503e+00 2.8301943e+00 2.0518285e+00 2.9614186e+00 2.9291637e+00 2.9698485e+00 3.2403703e+00 1.7944358e+00 2.8913665e+00 5.1903757e+00 4.0373258e+00 5.2345009e+00 4.5661800e+00 4.9537864e+00 6.0382117e+00 3.3211444e+00 5.5623736e+00 4.9162994e+00 5.6169387e+00 4.2883563e+00 4.3931765e+00 4.7780749e+00 3.9962482e+00 4.2638011e+00 4.5464272e+00 4.5464272e+00 6.2377881e+00 6.4156060e+00 3.9370039e+00 5.0635956e+00 3.8548671e+00 6.1441029e+00 3.9824616e+00 4.9061186e+00 5.2621288e+00 3.8535698e+00 3.8923001e+00 4.7180504e+00 5.0368641e+00 5.4763126e+00 6.0315835e+00 4.7592016e+00 4.0348482e+00 4.4022721e+00 5.7515215e+00 4.8145612e+00 4.5088801e+00 3.7749172e+00 4.7391982e+00 4.9446941e+00 4.5902070e+00 4.0373258e+00 5.1874849e+00 5.0744458e+00 4.5814845e+00 4.1303753e+00 4.3703547e+00 4.5716518e+00 4.0037482e+00 7.8740079e-01 8.3666003e-01 6.5574385e-01 5.7445626e-01 3.1622777e-01 6.5574385e-01 1.1135529e+00 3.6055513e-01 4.6904158e-01 1.4387495e+00 1.0583005e+00 4.6904158e-01 6.4031242e-01 7.3484692e-01 5.4772256e-01 8.5440037e-01 3.7416574e-01 4.6904158e-01 
3.7202150e+00 3.3541020e+00 3.8871583e+00 2.8774989e+00 3.5199432e+00 3.2031235e+00 3.5355339e+00 2.2022716e+00 3.4799425e+00 2.7000000e+00 2.5455844e+00 2.9849623e+00 2.9000000e+00 3.4612137e+00 2.3473389e+00 3.3451457e+00 3.2264532e+00 2.7874720e+00 3.5057096e+00 2.6645825e+00 3.6249138e+00 2.8124722e+00 3.7934153e+00 3.4249088e+00 3.1464265e+00 3.3181320e+00 3.7696154e+00 3.9736633e+00 3.2893768e+00 2.2561028e+00 2.6057628e+00 2.4919872e+00 2.6551836e+00 3.9051248e+00 3.2202484e+00 3.2863353e+00 3.6373067e+00 3.3526109e+00 2.7874720e+00 2.8071338e+00 3.1144823e+00 3.3555923e+00 2.7730849e+00 2.2293497e+00 2.9376862e+00 2.8600699e+00 2.9051678e+00 3.0886890e+00 1.9078784e+00 2.8319605e+00 5.0477718e+00 3.9824616e+00 5.0299105e+00 4.4530888e+00 4.8062459e+00 5.8223707e+00 3.4278273e+00 5.3721504e+00 4.7906158e+00 5.3712196e+00 4.0951190e+00 4.2638011e+00 4.5836667e+00 3.9635842e+00 4.1809090e+00 4.3692105e+00 4.3965896e+00 5.9774577e+00 6.2209324e+00 3.9064050e+00 4.8518038e+00 3.8105118e+00 5.9371710e+00 3.8496753e+00 4.7148701e+00 5.0487622e+00 3.7215588e+00 3.7643060e+00 4.5891176e+00 4.8301139e+00 5.2697249e+00 5.7428216e+00 4.6270941e+00 3.9166312e+00 4.3520110e+00 5.4972721e+00 4.6497312e+00 4.3646306e+00 3.6565011e+00 4.5210618e+00 4.7528939e+00 4.3485630e+00 3.9824616e+00 4.9969991e+00 4.8733972e+00 4.3760713e+00 4.0149720e+00 4.1976184e+00 4.4113490e+00 3.9153544e+00 3.4641016e-01 1.0440307e+00 9.7467943e-01 7.0710678e-01 1.0440307e+00 1.3784049e+00 7.1414284e-01 6.9282032e-01 1.9519221e+00 1.2247449e+00 8.1240384e-01 5.9160798e-01 1.1916375e+00 3.4641016e-01 1.0908712e+00 4.2426407e-01 8.3666003e-01 3.9974992e+00 3.6345564e+00 4.1725292e+00 3.3196385e+00 3.8665230e+00 3.5185224e+00 3.7868192e+00 2.6514147e+00 3.8013156e+00 3.0675723e+00 3.0430248e+00 3.3090784e+00 3.3630343e+00 3.7656341e+00 2.7294688e+00 3.6537652e+00 3.5114100e+00 3.1448370e+00 3.9458839e+00 3.0789609e+00 3.8832976e+00 3.1921779e+00 4.1581246e+00 3.7349699e+00 3.4871192e+00 3.6428011e+00 4.1024383e+00 4.2743421e+00 3.6110940e+00 2.7037012e+00 3.0446675e+00 2.9376862e+00 3.0479501e+00 4.2201896e+00 3.4942810e+00 3.5185224e+00 3.9306488e+00 3.7815341e+00 3.0935417e+00 3.2155870e+00 3.4583233e+00 3.6496575e+00 3.1733263e+00 2.7073973e+00 3.2939338e+00 3.1559468e+00 3.2280025e+00 3.4234486e+00 2.4124676e+00 3.1843367e+00 5.2782573e+00 4.3034870e+00 5.3084838e+00 4.7275787e+00 5.0793700e+00 6.0811183e+00 3.7696154e+00 5.6373753e+00 5.1176166e+00 5.5830099e+00 4.3669211e+00 4.5912961e+00 4.8754487e+00 4.3208795e+00 4.5055521e+00 4.6400431e+00 4.6679760e+00 6.1473572e+00 6.5192024e+00 4.2965102e+00 5.1166395e+00 4.1255303e+00 6.2120850e+00 4.1976184e+00 4.9527770e+00 5.2867760e+00 4.0583248e+00 4.0583248e+00 4.8928519e+00 5.0941143e+00 5.5614746e+00 5.9160798e+00 4.9345719e+00 4.2213742e+00 4.6432747e+00 5.7844619e+00 4.8785244e+00 4.6184413e+00 3.9534795e+00 4.8062459e+00 5.0348784e+00 4.6572524e+00 4.3034870e+00 5.2507142e+00 5.1273775e+00 4.6893496e+00 4.3886217e+00 4.4944410e+00 4.6411206e+00 4.1892720e+00 1.2609520e+00 1.1357817e+00 7.0710678e-01 1.2609520e+00 1.6309506e+00 9.0000000e-01 8.7177979e-01 2.1517435e+00 1.4899664e+00 9.6953597e-01 7.8102497e-01 1.3928388e+00 6.0000000e-01 1.3453624e+00 5.4772256e-01 1.0295630e+00 3.9471509e+00 3.6207734e+00 4.1364236e+00 3.4029399e+00 3.8587563e+00 3.5805028e+00 3.7815341e+00 2.8017851e+00 3.7881394e+00 3.1670175e+00 3.1843367e+00 3.3361655e+00 3.4132096e+00 3.7920970e+00 2.7838822e+00 3.6180105e+00 3.5707142e+00 3.2046841e+00 3.9736633e+00 3.1559468e+00 
3.9089641e+00 3.2078030e+00 4.1797129e+00 3.7696154e+00 3.4813790e+00 3.6180105e+00 4.0804412e+00 4.2532341e+00 3.6386811e+00 2.7658633e+00 3.1320920e+00 3.0282008e+00 3.0967725e+00 4.2602817e+00 3.5707142e+00 3.5298725e+00 3.9025633e+00 3.8026307e+00 3.1543621e+00 3.2954514e+00 3.5440090e+00 3.6715120e+00 3.2264532e+00 2.8478062e+00 3.3630343e+00 3.2124757e+00 3.2832910e+00 3.4351128e+00 2.5337719e+00 3.2403703e+00 5.2820451e+00 4.3497126e+00 5.2782573e+00 4.7465777e+00 5.0793700e+00 6.0415230e+00 3.8871583e+00 5.6124861e+00 5.1234754e+00 5.5344376e+00 4.3508620e+00 4.6000000e+00 4.8528342e+00 4.3737855e+00 4.5365185e+00 4.6292548e+00 4.6701178e+00 6.0901560e+00 6.4853681e+00 4.3474130e+00 5.0852729e+00 4.1785165e+00 6.1749494e+00 4.2071368e+00 4.9345719e+00 5.2545219e+00 4.0706265e+00 4.0755368e+00 4.9010203e+00 5.0645829e+00 5.5272054e+00 5.8446557e+00 4.9406477e+00 4.2402830e+00 4.6904158e+00 5.7253821e+00 4.8744230e+00 4.6249324e+00 3.9761791e+00 4.7728398e+00 5.0129831e+00 4.6119410e+00 4.3497126e+00 5.2297227e+00 5.1019604e+00 4.6615448e+00 4.4022721e+00 4.4855323e+00 4.6411206e+00 4.2249260e+00 3.4641016e-01 7.5498344e-01 0.0000000e+00 5.5677644e-01 3.7416574e-01 5.0000000e-01 9.3808315e-01 5.5677644e-01 6.5574385e-01 8.8317609e-01 2.6457513e-01 7.4161985e-01 3.4641016e-01 7.2801099e-01 2.6457513e-01 4.0435133e+00 3.6359318e+00 4.1856899e+00 2.9478806e+00 3.7709415e+00 3.3421550e+00 3.8065733e+00 2.1307276e+00 3.7389838e+00 2.7748874e+00 2.4556058e+00 3.2031235e+00 3.0133038e+00 3.6619667e+00 2.5258662e+00 3.6523965e+00 3.3852622e+00 2.9223278e+00 3.6687873e+00 2.7586228e+00 3.8457769e+00 3.0364453e+00 3.9799497e+00 3.6027767e+00 3.4014703e+00 3.6055513e+00 4.0348482e+00 4.2497059e+00 3.4942810e+00 2.3874673e+00 2.6720778e+00 2.5495098e+00 2.8178006e+00 4.0718546e+00 3.3496268e+00 3.5425979e+00 3.9293765e+00 3.5284558e+00 2.9495762e+00 2.9000000e+00 3.1984371e+00 3.5707142e+00 2.9189039e+00 2.1679483e+00 3.0626786e+00 3.0248967e+00 3.0675723e+00 3.3181320e+00 1.9104973e+00 2.9883106e+00 5.2924474e+00 4.1436699e+00 5.3113087e+00 4.6583259e+00 5.0467812e+00 6.1081912e+00 3.4525353e+00 5.6329388e+00 4.9979996e+00 5.6973678e+00 4.3749286e+00 4.4821870e+00 4.8600412e+00 4.1060930e+00 4.3760713e+00 4.6411206e+00 4.6324939e+00 6.3071388e+00 6.4876806e+00 4.0286474e+00 5.1468437e+00 3.9686270e+00 6.2112801e+00 4.0706265e+00 4.9919936e+00 5.3329167e+00 3.9446166e+00 3.9874804e+00 4.8114447e+00 5.1029403e+00 5.5443665e+00 6.0917978e+00 4.8538644e+00 4.1194660e+00 4.4933284e+00 5.8180753e+00 4.9142650e+00 4.5978256e+00 3.8729833e+00 4.8176758e+00 5.0338852e+00 4.6690470e+00 4.1436699e+00 5.2744668e+00 5.1652686e+00 4.6669048e+00 4.2201896e+00 4.4575778e+00 4.6722586e+00 4.1060930e+00 5.9160798e-01 3.4641016e-01 6.4031242e-01 3.7416574e-01 3.3166248e-01 1.0392305e+00 6.0827625e-01 6.4031242e-01 9.4868330e-01 3.6055513e-01 7.2801099e-01 4.4721360e-01 6.5574385e-01 2.2360680e-01 4.2059482e+00 3.8131352e+00 4.3588989e+00 3.1796226e+00 3.9572718e+00 3.5707142e+00 3.9887341e+00 2.3874673e+00 3.9268308e+00 3.0033315e+00 2.7147744e+00 3.3970576e+00 3.2372828e+00 3.8716921e+00 2.7239677e+00 3.8183766e+00 3.6027767e+00 3.1527766e+00 3.8755645e+00 2.9916551e+00 4.0410395e+00 3.2280025e+00 4.1904654e+00 3.8236109e+00 3.5874782e+00 3.7788887e+00 4.2190046e+00 4.4294469e+00 3.6972963e+00 2.6038433e+00 2.9086079e+00 2.7892651e+00 3.0298515e+00 4.2918527e+00 3.5749126e+00 3.7269290e+00 4.1036569e+00 3.7349699e+00 3.1654384e+00 3.1288976e+00 3.4423829e+00 3.7749172e+00 3.1368774e+00 2.4207437e+00 
3.2893768e+00 3.2449961e+00 3.2848135e+00 3.5142567e+00 2.1330729e+00 3.2046841e+00 5.4799635e+00 4.3577517e+00 5.4909016e+00 4.8682646e+00 5.2392748e+00 6.2904690e+00 3.6932371e+00 5.8266629e+00 5.2057660e+00 5.8566202e+00 4.5497253e+00 4.6808119e+00 5.0378567e+00 4.3197222e+00 4.5661800e+00 4.8145612e+00 4.8311489e+00 6.4730209e+00 6.6745786e+00 4.2579338e+00 5.3169540e+00 4.1773197e+00 6.3984373e+00 4.2649736e+00 5.1730069e+00 5.5172457e+00 4.1376322e+00 4.1833001e+00 5.0089919e+00 5.2915026e+00 5.7288742e+00 6.2489999e+00 5.0477718e+00 4.3301270e+00 4.7296934e+00 5.9791304e+00 5.0921508e+00 4.7979162e+00 4.0693980e+00 4.9869831e+00 5.2057660e+00 4.8207883e+00 4.3577517e+00 5.4534393e+00 5.3329167e+00 4.8311489e+00 4.4170126e+00 4.6400431e+00 4.8507731e+00 4.3150898e+00 7.5498344e-01 1.2083046e+00 4.5825757e-01 5.0990195e-01 1.5652476e+00 1.1401754e+00 7.0710678e-01 8.0622577e-01 8.7177979e-01 5.8309519e-01 9.5393920e-01 3.4641016e-01 5.4772256e-01 3.9166312e+00 3.5818989e+00 4.0951190e+00 3.1527766e+00 3.7509999e+00 3.4612137e+00 3.7682887e+00 2.4919872e+00 3.6972963e+00 2.9883106e+00 2.8248894e+00 3.2419130e+00 3.1416556e+00 3.7040518e+00 2.6210685e+00 3.5566838e+00 3.4914181e+00 3.0347982e+00 3.7563280e+00 2.9291637e+00 3.8807216e+00 3.0577770e+00 4.0360872e+00 3.6619667e+00 3.3734256e+00 3.5369478e+00 3.9837169e+00 4.1988094e+00 3.5411862e+00 2.5159491e+00 2.8757608e+00 2.7586228e+00 2.9137605e+00 4.1581246e+00 3.4914181e+00 3.5298725e+00 3.8535698e+00 3.5916570e+00 3.0512293e+00 3.0822070e+00 3.3793490e+00 3.5972211e+00 3.0315013e+00 2.5159491e+00 3.2046841e+00 3.1144823e+00 3.1654384e+00 3.3256578e+00 2.2045408e+00 3.0951575e+00 5.2971691e+00 4.2497059e+00 5.2516664e+00 4.6957428e+00 5.0497525e+00 6.0299254e+00 3.7215588e+00 5.5821143e+00 5.0249378e+00 5.5883808e+00 4.3324358e+00 4.5099889e+00 4.8155997e+00 4.2391037e+00 4.4564560e+00 4.6162756e+00 4.6314145e+00 6.1717096e+00 6.4358372e+00 4.1617304e+00 5.0813384e+00 4.0865633e+00 6.1424751e+00 4.0987803e+00 4.9446941e+00 5.2564246e+00 3.9736633e+00 4.0162171e+00 4.8373546e+00 5.0348784e+00 5.4799635e+00 5.9245253e+00 4.8774994e+00 4.1545156e+00 4.5934736e+00 5.7043843e+00 4.8969378e+00 4.6010868e+00 3.9127995e+00 4.7476310e+00 4.9929951e+00 4.5793013e+00 4.2497059e+00 5.2297227e+00 5.1117512e+00 4.6162756e+00 4.2684892e+00 4.4384682e+00 4.6604721e+00 4.1725292e+00 5.5677644e-01 3.7416574e-01 5.0000000e-01 9.3808315e-01 5.5677644e-01 6.5574385e-01 8.8317609e-01 2.6457513e-01 7.4161985e-01 3.4641016e-01 7.2801099e-01 2.6457513e-01 4.0435133e+00 3.6359318e+00 4.1856899e+00 2.9478806e+00 3.7709415e+00 3.3421550e+00 3.8065733e+00 2.1307276e+00 3.7389838e+00 2.7748874e+00 2.4556058e+00 3.2031235e+00 3.0133038e+00 3.6619667e+00 2.5258662e+00 3.6523965e+00 3.3852622e+00 2.9223278e+00 3.6687873e+00 2.7586228e+00 3.8457769e+00 3.0364453e+00 3.9799497e+00 3.6027767e+00 3.4014703e+00 3.6055513e+00 4.0348482e+00 4.2497059e+00 3.4942810e+00 2.3874673e+00 2.6720778e+00 2.5495098e+00 2.8178006e+00 4.0718546e+00 3.3496268e+00 3.5425979e+00 3.9293765e+00 3.5284558e+00 2.9495762e+00 2.9000000e+00 3.1984371e+00 3.5707142e+00 2.9189039e+00 2.1679483e+00 3.0626786e+00 3.0248967e+00 3.0675723e+00 3.3181320e+00 1.9104973e+00 2.9883106e+00 5.2924474e+00 4.1436699e+00 5.3113087e+00 4.6583259e+00 5.0467812e+00 6.1081912e+00 3.4525353e+00 5.6329388e+00 4.9979996e+00 5.6973678e+00 4.3749286e+00 4.4821870e+00 4.8600412e+00 4.1060930e+00 4.3760713e+00 4.6411206e+00 4.6324939e+00 6.3071388e+00 6.4876806e+00 4.0286474e+00 5.1468437e+00 3.9686270e+00 
6.2112801e+00 4.0706265e+00 4.9919936e+00 5.3329167e+00 3.9446166e+00 3.9874804e+00 4.8114447e+00 5.1029403e+00 5.5443665e+00 6.0917978e+00 4.8538644e+00 4.1194660e+00 4.4933284e+00 5.8180753e+00 4.9142650e+00 4.5978256e+00 3.8729833e+00 4.8176758e+00 5.0338852e+00 4.6690470e+00 4.1436699e+00 5.2744668e+00 5.1652686e+00 4.6669048e+00 4.2201896e+00 4.4575778e+00 4.6722586e+00 4.1060930e+00 8.3066239e-01 7.8740079e-01 7.1414284e-01 2.0000000e-01 9.2736185e-01 1.2369317e+00 4.2426407e-01 1.1045361e+00 3.0000000e-01 1.1575837e+00 6.7823300e-01 4.4497191e+00 3.9962482e+00 4.5727453e+00 3.1937439e+00 4.1267421e+00 3.6304270e+00 4.1496988e+00 2.2912878e+00 4.1170378e+00 2.9883106e+00 2.6153394e+00 3.5142567e+00 3.3361655e+00 3.9874804e+00 2.8195744e+00 4.0435133e+00 3.6565011e+00 3.2449961e+00 3.9761791e+00 3.0430248e+00 4.1352146e+00 3.3808283e+00 4.3023250e+00 3.9357337e+00 3.7709415e+00 3.9862263e+00 4.4147480e+00 4.6076024e+00 3.8078866e+00 2.7073973e+00 2.9376862e+00 2.8231188e+00 3.1320920e+00 4.3646306e+00 3.5958309e+00 3.8626416e+00 4.3069711e+00 3.8626416e+00 3.2388269e+00 3.1559468e+00 3.4612137e+00 3.9012818e+00 3.2264532e+00 2.3430749e+00 3.3391616e+00 3.3316662e+00 3.3645208e+00 3.6687873e+00 2.1071308e+00 3.2832910e+00 5.5749439e+00 4.4022721e+00 5.6621551e+00 4.9668904e+00 5.3535035e+00 6.4761099e+00 3.6041643e+00 5.9983331e+00 5.3244718e+00 6.0440053e+00 4.7042534e+00 4.7937459e+00 5.1971146e+00 4.3439613e+00 4.6130250e+00 4.9446941e+00 4.9608467e+00 6.6850580e+00 6.8425142e+00 4.3104524e+00 5.4827001e+00 4.2047592e+00 6.5825527e+00 4.3840620e+00 5.3244718e+00 5.7035077e+00 4.2532341e+00 4.2906876e+00 5.1127292e+00 5.4817880e+00 5.9135438e+00 6.4915329e+00 5.1507281e+00 4.4474712e+00 4.7937459e+00 6.1919302e+00 5.2057660e+00 4.9203658e+00 4.1677332e+00 5.1652686e+00 5.3507009e+00 5.0109879e+00 4.4022721e+00 5.6008928e+00 5.4799635e+00 4.9909919e+00 4.5210618e+00 4.7812132e+00 4.9618545e+00 4.3874822e+00 2.6457513e-01 1.2727922e+00 7.5498344e-01 4.3588989e-01 6.0000000e-01 5.1961524e-01 4.1231056e-01 5.4772256e-01 3.6055513e-01 1.7320508e-01 3.9153544e+00 3.5242020e+00 4.0718546e+00 2.9715316e+00 3.6905284e+00 3.3060551e+00 3.6945906e+00 2.2181073e+00 3.6496575e+00 2.7748874e+00 2.5709920e+00 3.1272992e+00 3.0232433e+00 3.5958309e+00 2.4738634e+00 3.5355339e+00 3.3316662e+00 2.8948230e+00 3.6523965e+00 2.7622455e+00 3.7589892e+00 2.9698485e+00 3.9370039e+00 3.5496479e+00 3.3151169e+00 3.5014283e+00 3.9471509e+00 4.1496988e+00 3.4278273e+00 2.3748684e+00 2.6944387e+00 2.5768197e+00 2.7820855e+00 4.0274061e+00 3.3075671e+00 3.4307434e+00 3.8183766e+00 3.5028560e+00 2.8948230e+00 2.9034462e+00 3.1953091e+00 3.4942810e+00 2.8948230e+00 2.2583180e+00 3.0397368e+00 2.9681644e+00 3.0182777e+00 3.2419130e+00 1.9672316e+00 2.9478806e+00 5.1951901e+00 4.1024383e+00 5.2086467e+00 4.5891176e+00 4.9608467e+00 6.0024995e+00 3.4785054e+00 5.5398556e+00 4.9416596e+00 5.5587768e+00 4.2661458e+00 4.4170126e+00 4.7602521e+00 4.0816663e+00 4.3185646e+00 4.5365185e+00 4.5475268e+00 6.1611687e+00 6.4007812e+00 4.0236799e+00 5.0328918e+00 3.9255573e+00 6.1155539e+00 4.0062451e+00 4.8805737e+00 5.2211110e+00 3.8755645e+00 3.9089641e+00 4.7402532e+00 5.0019996e+00 5.4497706e+00 5.9371710e+00 4.7812132e+00 4.0558600e+00 4.4598206e+00 5.7000000e+00 4.8052055e+00 4.5099889e+00 3.7973675e+00 4.7063787e+00 4.9295030e+00 4.5497253e+00 4.1024383e+00 5.1672043e+00 5.0497525e+00 4.5628938e+00 4.1701319e+00 4.3646306e+00 4.5639895e+00 4.0398020e+00 1.3000000e+00 6.7823300e-01 4.2426407e-01 6.8556546e-01 
5.4772256e-01 4.4721360e-01 5.1961524e-01 4.2426407e-01 2.4494897e-01 4.1060930e+00 3.7054015e+00 4.2626283e+00 3.1591138e+00 3.8820098e+00 3.4957117e+00 3.8704005e+00 2.3895606e+00 3.8483763e+00 2.9410882e+00 2.7531800e+00 3.3030289e+00 3.2357379e+00 3.7868192e+00 2.6476405e+00 3.7242449e+00 3.5057096e+00 3.1000000e+00 3.8483763e+00 2.9597297e+00 3.9242834e+00 3.1606961e+00 4.1340053e+00 3.7509999e+00 3.5099858e+00 3.6918830e+00 4.1460825e+00 4.3347434e+00 3.6110940e+00 2.5748786e+00 2.8896367e+00 2.7766887e+00 2.9748950e+00 4.2154478e+00 3.4770677e+00 3.5972211e+00 4.0062451e+00 3.7067506e+00 3.0740852e+00 3.0886890e+00 3.3882149e+00 3.6823905e+00 3.0903074e+00 2.4351591e+00 3.2264532e+00 3.1559468e+00 3.2031235e+00 3.4351128e+00 2.1307276e+00 3.1336879e+00 5.3535035e+00 4.2755117e+00 5.3907328e+00 4.7738873e+00 5.1341991e+00 6.1919302e+00 3.6345564e+00 5.7358522e+00 5.1371198e+00 5.7210139e+00 4.4350874e+00 4.6000000e+00 4.9365980e+00 4.2508823e+00 4.4698993e+00 4.6957428e+00 4.7318073e+00 6.3364028e+00 6.5924199e+00 4.2213742e+00 5.2019227e+00 4.0865633e+00 6.3111013e+00 4.1880783e+00 5.0527220e+00 5.4101756e+00 4.0533936e+00 4.0828911e+00 4.9173163e+00 5.1990384e+00 5.6435804e+00 6.1155539e+00 4.9547957e+00 4.2497059e+00 4.6604721e+00 5.8804762e+00 4.9598387e+00 4.6914816e+00 3.9686270e+00 4.8805737e+00 5.0941143e+00 4.7127487e+00 4.2755117e+00 5.3376025e+00 5.2086467e+00 4.7275787e+00 4.3520110e+00 4.5387223e+00 4.7180504e+00 4.2130749e+00 9.1104336e-01 1.3674794e+00 1.7262677e+00 7.6811457e-01 1.6462078e+00 9.1651514e-01 1.6278821e+00 1.1269428e+00 4.4530888e+00 4.0124805e+00 4.5607017e+00 3.0479501e+00 4.0718546e+00 3.5958309e+00 4.1821047e+00 2.1587033e+00 4.0816663e+00 2.9359837e+00 2.3811762e+00 3.5071356e+00 3.1685959e+00 3.9610605e+00 2.8035692e+00 4.0373258e+00 3.6578682e+00 3.1906112e+00 3.8183766e+00 2.9410882e+00 4.1557190e+00 3.3316662e+00 4.2047592e+00 3.8961519e+00 3.7376463e+00 3.9648455e+00 4.3588989e+00 4.5803930e+00 3.7802116e+00 2.6191602e+00 2.8106939e+00 2.6944387e+00 3.0692019e+00 4.3058100e+00 3.6027767e+00 3.9230090e+00 4.2988371e+00 3.7215588e+00 3.2465366e+00 3.0545049e+00 3.3926391e+00 3.8923001e+00 3.1432467e+00 2.1771541e+00 3.2832910e+00 3.3391616e+00 3.3481338e+00 3.6400549e+00 1.9824228e+00 3.2449961e+00 5.5830099e+00 4.3416587e+00 5.6258333e+00 4.9335586e+00 5.3244718e+00 6.4366140e+00 3.5213634e+00 5.9539903e+00 5.2325902e+00 6.0712437e+00 4.7053161e+00 4.7254629e+00 5.1633323e+00 4.2497059e+00 4.5596052e+00 4.9416596e+00 4.9376108e+00 6.7275553e+00 6.7594378e+00 4.1701319e+00 5.4708317e+00 4.1605288e+00 6.5222695e+00 4.3139309e+00 5.3329167e+00 5.6956123e+00 4.2000000e+00 4.2731721e+00 5.0586559e+00 5.4516053e+00 5.8532043e+00 6.5352888e+00 5.0950957e+00 4.4011362e+00 4.7275787e+00 6.1457302e+00 5.2297227e+00 4.9132474e+00 4.1521079e+00 5.1429563e+00 5.3272882e+00 4.9839743e+00 4.3416587e+00 5.5910643e+00 5.4808758e+00 4.9537864e+00 4.4192760e+00 4.7528939e+00 4.9909919e+00 4.3749286e+00 8.3666003e-01 1.1180340e+00 4.6904158e-01 9.6953597e-01 2.2360680e-01 1.0488088e+00 6.1644140e-01 4.4452222e+00 3.9912404e+00 4.5727453e+00 3.2434549e+00 4.1412558e+00 3.6469165e+00 4.1400483e+00 2.3515952e+00 4.1267421e+00 3.0149627e+00 2.6981475e+00 3.5199432e+00 3.3896903e+00 3.9974992e+00 2.8337255e+00 4.0435133e+00 3.6619667e+00 3.2695565e+00 4.0211939e+00 3.0822070e+00 4.1303753e+00 3.3985291e+00 4.3301270e+00 3.9509493e+00 3.7815341e+00 3.9912404e+00 4.4283180e+00 4.6119410e+00 3.8183766e+00 2.7440845e+00 2.9849623e+00 2.8722813e+00 3.1575307e+00 
4.3829214e+00 3.6013886e+00 3.8470768e+00 4.3069711e+00 3.9038443e+00 3.2449961e+00 3.1937439e+00 3.4899857e+00 3.9064050e+00 3.2572995e+00 2.4103942e+00 3.3630343e+00 3.3376639e+00 3.3763886e+00 3.6796739e+00 2.1633308e+00 3.3015148e+00 5.5677644e+00 4.4204072e+00 5.6656862e+00 4.9749372e+00 5.3572381e+00 6.4791975e+00 3.6373067e+00 6.0049979e+00 5.3469618e+00 6.0274373e+00 4.7000000e+00 4.8104054e+00 5.2009614e+00 4.3714986e+00 4.6260134e+00 4.9406477e+00 4.9648766e+00 6.6640828e+00 6.8571131e+00 4.3520110e+00 5.4790510e+00 4.2190046e+00 6.5916614e+00 4.4022721e+00 5.3169540e+00 5.7000000e+00 4.2673177e+00 4.2953463e+00 5.1244512e+00 5.4854353e+00 5.9236813e+00 6.4699304e+00 5.1623638e+00 4.4609416e+00 4.8145612e+00 6.1951594e+00 5.1942276e+00 4.9203658e+00 4.1725292e+00 5.1652686e+00 5.3507009e+00 5.0109879e+00 4.4204072e+00 5.5973208e+00 5.4726593e+00 4.9949975e+00 4.5475268e+00 4.7853944e+00 4.9497475e+00 4.3920383e+00 4.7958315e-01 6.4807407e-01 5.0990195e-01 6.7082039e-01 5.4772256e-01 4.8989795e-01 3.7868192e+00 3.3570821e+00 3.9331921e+00 2.8178006e+00 3.5425979e+00 3.1432467e+00 3.5128336e+00 2.0663978e+00 3.5227830e+00 2.5709920e+00 2.4535688e+00 2.9376862e+00 2.9342802e+00 3.4380227e+00 2.2825424e+00 3.3955854e+00 3.1352831e+00 2.7730849e+00 3.5142567e+00 2.6267851e+00 3.5468296e+00 2.8195744e+00 3.7934153e+00 3.4161382e+00 3.1780497e+00 3.3600595e+00 3.8223030e+00 3.9887341e+00 3.2526912e+00 2.2516660e+00 2.5592968e+00 2.4556058e+00 2.6324893e+00 3.8587563e+00 3.1032241e+00 3.2280025e+00 3.6701499e+00 3.3852622e+00 2.7110883e+00 2.7386128e+00 3.0430248e+00 3.3316662e+00 2.7513633e+00 2.1189620e+00 2.8722813e+00 2.8035692e+00 2.8460499e+00 3.0951575e+00 1.7944358e+00 2.7784888e+00 4.9699095e+00 3.9012818e+00 5.0398413e+00 4.4147480e+00 4.7644517e+00 5.8532043e+00 3.2603681e+00 5.4018515e+00 4.7927028e+00 5.3581713e+00 4.0681691e+00 4.2402830e+00 4.5771170e+00 3.8742741e+00 4.0767634e+00 4.3162484e+00 4.3760713e+00 5.9958319e+00 6.2513998e+00 3.8807216e+00 4.8373546e+00 3.7013511e+00 5.9791304e+00 3.8288379e+00 4.6893496e+00 5.0724747e+00 3.6891733e+00 3.7134889e+00 4.5497253e+00 4.8713448e+00 5.3094256e+00 5.7879185e+00 4.5836667e+00 3.9038443e+00 4.3197222e+00 5.5389530e+00 4.5760245e+00 4.3324358e+00 3.5958309e+00 4.5232732e+00 4.7212287e+00 4.3485630e+00 3.9012818e+00 4.9709154e+00 4.8321838e+00 4.3577517e+00 3.9924930e+00 4.1737274e+00 4.3335897e+00 3.8405729e+00 9.9498744e-01 3.6055513e-01 9.4868330e-01 5.0000000e-01 7.4161985e-01 3.5791060e+00 3.1654384e+00 3.7336309e+00 2.7622455e+00 3.3852622e+00 2.9883106e+00 3.3120990e+00 2.0784610e+00 3.3406586e+00 2.4939928e+00 2.4839485e+00 2.7892651e+00 2.8530685e+00 3.2634338e+00 2.1817424e+00 3.2093613e+00 2.9765752e+00 2.6267851e+00 3.4263683e+00 2.5357445e+00 3.3719431e+00 2.6870058e+00 3.6523965e+00 3.2372828e+00 3.0116441e+00 3.1843367e+00 3.6469165e+00 3.8078866e+00 3.0967725e+00 2.1725561e+00 2.4939928e+00 2.3916521e+00 2.5179357e+00 3.7013511e+00 2.9495762e+00 3.0282008e+00 3.4785054e+00 3.2787193e+00 2.5573424e+00 2.6589472e+00 2.9137605e+00 3.1511903e+00 2.6419690e+00 2.1400935e+00 2.7495454e+00 2.6324893e+00 2.6962938e+00 2.9308702e+00 1.8411953e+00 2.6476405e+00 4.7864392e+00 3.7669616e+00 4.8507731e+00 4.2308392e+00 4.5880279e+00 5.6453521e+00 3.1906112e+00 5.1932649e+00 4.6281746e+00 5.1478151e+00 3.8884444e+00 4.0877867e+00 4.4022721e+00 3.7709415e+00 3.9661064e+00 4.1496988e+00 4.1856899e+00 5.7480431e+00 6.0671245e+00 3.7669616e+00 4.6529560e+00 3.5791060e+00 5.7758116e+00 3.6891733e+00 4.4877611e+00 
[large numeric payload omitted: many lines of space-separated floating-point values in scientific notation, the body of a data file added by this diff; no human-readable content beyond the raw values]
1.8165902e+00 1.9824228e+00 2.3452079e+00 2.3832751e+00 9.2736185e-01 1.8761663e+00 1.8947295e+00 2.6172505e+00 1.5811388e+00 1.6522712e+00 1.8083141e+00 2.7055499e+00 1.3820275e+00 1.5588457e+00 1.9000000e+00 2.4939928e+00 2.0099751e+00 2.0346990e+00 2.5238859e+00 8.6602540e-01 8.7749644e-01 1.4106736e+00 6.4031242e-01 5.0990195e-01 1.0000000e+00 6.2449980e-01 4.6904158e-01 7.7459667e-01 8.4261498e-01 6.4807407e-01 6.6332496e-01 5.4772256e-01 7.4161985e-01 5.0000000e-01 6.7082039e-01 8.3666003e-01 5.8309519e-01 1.9078784e+00 1.1916375e+00 5.9160798e-01 5.5677644e-01 9.4868330e-01 1.1445523e+00 1.0440307e+00 6.4807407e-01 1.3000000e+00 1.3304135e+00 9.2195445e-01 5.0990195e-01 5.8309519e-01 1.0488088e+00 5.3851648e-01 1.9442222e+00 1.2961481e+00 7.1414284e-01 9.8488578e-01 1.1916375e+00 1.2688578e+00 1.3964240e+00 7.7459667e-01 1.3228757e+00 1.4387495e+00 1.2206556e+00 8.1240384e-01 9.1651514e-01 1.2247449e+00 7.8102497e-01 1.5427249e+00 1.5198684e+00 2.1977261e+00 1.0862780e+00 1.1269428e+00 1.2845233e+00 2.2045408e+00 9.4339811e-01 1.1357817e+00 1.3453624e+00 1.8920888e+00 1.5297059e+00 1.7029386e+00 2.1189620e+00 6.8556546e-01 1.1180340e+00 7.6157731e-01 5.0000000e-01 8.4261498e-01 1.1135529e+00 6.2449980e-01 4.3588989e-01 7.0000000e-01 1.1916375e+00 7.2111026e-01 2.4494897e-01 9.6436508e-01 8.1240384e-01 5.9160798e-01 6.7823300e-01 8.1240384e-01 8.3066239e-01 7.6157731e-01 8.1240384e-01 6.6332496e-01 7.9372539e-01 3.8729833e-01 6.2449980e-01 6.4807407e-01 1.1269428e+00 1.2247449e+00 1.0770330e+00 4.7958315e-01 1.4628739e+00 1.3711309e+00 9.4868330e-01 6.2449980e-01 6.7082039e-01 9.0000000e-01 3.1622777e-01 4.1231056e-01 3.6055513e-01 1.2247449e+00 5.5677644e-01 5.7445626e-01 3.6055513e-01 9.5916630e-01 4.6904158e-01 7.8740079e-01 1.0908712e+00 5.4772256e-01 1.2124356e+00 3.4641016e-01 2.4494897e-01 4.2426407e-01 1.0630146e+00 6.0827625e-01 6.2449980e-01 1.1224972e+00 1.2369317e+00 8.1240384e-01 6.9282032e-01 2.4494897e-01 9.4339811e-01 5.1961524e-01 8.1853528e-01 1.1224972e+00 1.4317821e+00 1.3747727e+00 1.0344080e+00 5.4772256e-01 7.7459667e-01 9.4868330e-01 3.3166248e-01 3.1622777e-01 7.3484692e-01 1.3076697e+00 8.4261498e-01 8.0622577e-01 1.3190906e+00 6.1644140e-01 1.2845233e+00 7.9372539e-01 6.2449980e-01 1.2569805e+00 7.8102497e-01 3.6055513e-01 6.7082039e-01 9.4868330e-01 5.8309519e-01 1.0677078e+00 6.5574385e-01 6.1644140e-01 6.4031242e-01 7.6811457e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-euclidean-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-euclidean-ml.txt new file mode 100644 index 0000000000000000000000000000000000000000..1b7552021bf9a0606c36628f9432e8f0afe8d765 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-euclidean-ml.txt @@ -0,0 +1 @@ + 4.0515260e+00 4.2121458e+00 3.7357405e+00 4.2313317e+00 3.9136009e+00 4.3843298e+00 3.9811426e+00 4.3624182e+00 4.0642508e+00 4.2105933e+00 4.0747226e+00 3.9068586e+00 4.1637004e+00 4.4303203e+00 4.1841564e+00 4.1063279e+00 4.1862390e+00 4.0719925e+00 4.2227579e+00 4.3173531e+00 3.8811067e+00 3.7577567e+00 4.0623722e+00 3.9882453e+00 4.0432671e+00 3.9085109e+00 4.0283414e+00 4.0846110e+00 3.6459235e+00 3.9544001e+00 4.1134244e+00 4.1805752e+00 3.5121011e+00 4.2747789e+00 4.1048323e+00 3.9269426e+00 3.8932032e+00 3.8281172e+00 3.7288430e+00 4.0863477e+00 4.1527428e+00 4.1646409e+00 4.2027433e+00 3.8441594e+00 4.8419117e+00 4.2455384e+00 3.7622220e+00 4.3967923e+00 4.4663183e+00 4.0435853e+00 4.0421692e+00 4.3124625e+00 4.6499961e+00 4.5595743e+00 3.4230430e+00 4.2612266e+00 3.5676603e+00 
4.0866580e+00 4.2307103e+00 3.8521940e+00 3.9951183e+00 3.1022409e+00 3.7290193e+00 4.1931517e+00 4.1127027e+00 3.6633651e+00 4.0235815e+00 3.9729858e+00 4.1980132e+00 4.1579993e+00 3.9948955e+00 3.9081966e+00 3.9031152e+00 3.5069036e+00 4.0015727e+00 3.6763496e+00 3.6614339e+00 3.6227109e+00 3.7357992e+00 4.0170026e+00 3.5216829e+00 3.9322227e+00 3.9094621e+00 4.0170286e+00 4.3264246e+00 4.3435483e+00 4.0788635e+00 4.4761765e+00 3.8468186e+00 4.1490333e+00 4.2800007e+00 4.2260191e+00 4.3031858e+00 4.1897413e+00 4.0530244e+00 3.5893641e+00 4.2186615e+00 3.7979503e+00 4.0915473e+00 4.1343073e+00 4.5063851e+00 3.6394889e+00 4.2508448e+00 3.7160826e+00 4.0105262e+00 4.1578269e+00 4.0290590e+00 3.6971819e+00 3.9414087e+00 4.2522313e+00 4.4091714e+00 4.1542292e+00 3.9594691e+00 4.0923600e+00 4.0855497e+00 3.8253075e+00 4.3034717e+00 4.0976731e+00 4.1316523e+00 4.0872717e+00 4.2643353e+00 3.8887280e+00 3.9411273e+00 3.8848001e+00 4.3481996e+00 3.8716733e+00 3.9084684e+00 3.7546361e+00 3.9354816e+00 3.8293694e+00 3.7568515e+00 3.7184961e+00 3.8404278e+00 4.2570811e+00 4.1423777e+00 4.0291411e+00 4.2094682e+00 3.6127418e+00 4.0459839e+00 3.7737985e+00 3.7647653e+00 3.9762006e+00 3.8999512e+00 3.8509090e+00 3.8975941e+00 3.8432839e+00 4.2109046e+00 4.1339124e+00 3.5898873e+00 4.0794519e+00 4.3504966e+00 3.8862612e+00 3.8332931e+00 4.2190310e+00 4.1366595e+00 3.7220268e+00 4.1250795e+00 3.3169452e+00 4.0757181e+00 3.6487114e+00 3.9513724e+00 4.0735549e+00 3.9137880e+00 3.9656942e+00 3.7724953e+00 4.0505153e+00 3.9062302e+00 4.5671852e+00 3.7542175e+00 4.3731708e+00 3.6733907e+00 4.4667545e+00 4.1004635e+00 4.0530038e+00 4.0346958e+00 4.2145752e+00 4.4298637e+00 4.2982360e+00 4.0878239e+00 4.4061563e+00 4.2115971e+00 3.8263277e+00 3.8603258e+00 3.8572375e+00 4.1051910e+00 4.3787786e+00 4.5309659e+00 4.0047055e+00 4.1308854e+00 3.6283561e+00 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-hamming-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-hamming-ml.txt new file mode 100644 index 0000000000000000000000000000000000000000..bc4e1ddcb6e1e8699570ecc410e2cb0cfdba2507 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-hamming-ml.txt @@ -0,0 +1 @@ + 4.6000000e-01 4.3000000e-01 4.3000000e-01 5.4000000e-01 4.1000000e-01 5.3000000e-01 4.3000000e-01 5.9000000e-01 4.8000000e-01 4.7000000e-01 4.6000000e-01 4.9000000e-01 4.5000000e-01 5.5000000e-01 5.3000000e-01 4.5000000e-01 4.8000000e-01 4.7000000e-01 4.8000000e-01 5.1000000e-01 4.9000000e-01 4.4000000e-01 4.9000000e-01 4.7000000e-01 4.9000000e-01 4.7000000e-01 5.2000000e-01 4.7000000e-01 4.2000000e-01 4.9000000e-01 4.7000000e-01 5.5000000e-01 3.9000000e-01 5.5000000e-01 4.6000000e-01 4.5000000e-01 4.0000000e-01 4.8000000e-01 4.5000000e-01 4.8000000e-01 4.8000000e-01 5.0000000e-01 4.8000000e-01 4.5000000e-01 6.4000000e-01 5.7000000e-01 4.6000000e-01 5.4000000e-01 5.6000000e-01 4.8000000e-01 4.8000000e-01 5.3000000e-01 5.4000000e-01 5.3000000e-01 4.5000000e-01 5.8000000e-01 4.2000000e-01 5.4000000e-01 6.0000000e-01 5.1000000e-01 4.6000000e-01 4.1000000e-01 4.4000000e-01 5.6000000e-01 5.4000000e-01 4.8000000e-01 4.8000000e-01 5.1000000e-01 5.2000000e-01 5.5000000e-01 4.5000000e-01 4.3000000e-01 4.7000000e-01 4.7000000e-01 5.6000000e-01 4.9000000e-01 4.8000000e-01 4.5000000e-01 4.9000000e-01 4.7000000e-01 4.5000000e-01 4.5000000e-01 5.6000000e-01 4.9000000e-01 5.8000000e-01 5.4000000e-01 4.6000000e-01 5.8000000e-01 5.3000000e-01 5.4000000e-01 5.5000000e-01 5.0000000e-01 5.2000000e-01 4.8000000e-01 5.0000000e-01 3.8000000e-01 
5.3000000e-01 4.8000000e-01 5.1000000e-01 4.8000000e-01 5.2000000e-01 4.7000000e-01 5.0000000e-01 4.3000000e-01 4.8000000e-01 5.2000000e-01 5.0000000e-01 4.2000000e-01 4.2000000e-01 4.7000000e-01 5.4000000e-01 5.1000000e-01 5.4000000e-01 5.1000000e-01 4.8000000e-01 4.7000000e-01 5.2000000e-01 5.2000000e-01 5.4000000e-01 5.4000000e-01 5.0000000e-01 4.5000000e-01 4.4000000e-01 4.1000000e-01 5.7000000e-01 4.6000000e-01 5.1000000e-01 5.2000000e-01 5.0000000e-01 4.8000000e-01 5.0000000e-01 4.4000000e-01 5.3000000e-01 5.2000000e-01 4.9000000e-01 5.7000000e-01 5.8000000e-01 4.9000000e-01 5.1000000e-01 4.5000000e-01 5.3000000e-01 4.5000000e-01 4.4000000e-01 3.5000000e-01 4.2000000e-01 5.3000000e-01 5.2000000e-01 5.0000000e-01 3.8000000e-01 5.2000000e-01 5.6000000e-01 4.7000000e-01 4.4000000e-01 5.1000000e-01 5.7000000e-01 4.5000000e-01 5.7000000e-01 4.3000000e-01 5.1000000e-01 3.8000000e-01 5.3000000e-01 4.8000000e-01 4.4000000e-01 5.0000000e-01 4.8000000e-01 5.0000000e-01 4.7000000e-01 6.4000000e-01 4.9000000e-01 5.2000000e-01 4.8000000e-01 5.6000000e-01 4.3000000e-01 4.8000000e-01 4.7000000e-01 6.0000000e-01 5.4000000e-01 5.5000000e-01 4.0000000e-01 5.5000000e-01 5.6000000e-01 4.9000000e-01 5.0000000e-01 4.3000000e-01 5.7000000e-01 5.0000000e-01 5.7000000e-01 4.9000000e-01 4.2000000e-01 3.9000000e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-jaccard-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-jaccard-ml.txt new file mode 100644 index 0000000000000000000000000000000000000000..a7570d8c3fbdf63bb2240c964941d9e48bc2ad3c --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-jaccard-ml.txt @@ -0,0 +1 @@ + 6.5714286e-01 6.0563380e-01 6.3235294e-01 7.3972603e-01 6.0294118e-01 7.3611111e-01 6.4179104e-01 7.7631579e-01 6.4000000e-01 6.6197183e-01 6.6666667e-01 7.0000000e-01 6.4285714e-01 7.7464789e-01 7.1621622e-01 6.4285714e-01 6.8571429e-01 6.4383562e-01 6.6666667e-01 6.5384615e-01 6.6216216e-01 6.1971831e-01 6.5333333e-01 6.5277778e-01 6.7123288e-01 6.4383562e-01 6.5000000e-01 6.3513514e-01 6.0000000e-01 6.7123288e-01 6.3513514e-01 7.4324324e-01 5.5714286e-01 7.0512821e-01 6.3888889e-01 6.0000000e-01 5.6338028e-01 6.3157895e-01 6.0810811e-01 6.2337662e-01 6.4000000e-01 6.5789474e-01 6.3157895e-01 5.6962025e-01 7.5294118e-01 7.1250000e-01 6.2162162e-01 6.7500000e-01 7.2727273e-01 6.2337662e-01 6.2337662e-01 6.7948718e-01 6.5853659e-01 6.6250000e-01 6.3380282e-01 7.3417722e-01 6.0869565e-01 7.2000000e-01 7.5949367e-01 6.4556962e-01 6.3013699e-01 5.9420290e-01 6.2857143e-01 7.1794872e-01 7.3972603e-01 6.4864865e-01 6.4864865e-01 6.8918919e-01 6.6666667e-01 7.0512821e-01 6.2500000e-01 6.2318841e-01 6.6197183e-01 6.5277778e-01 6.9135802e-01 6.6216216e-01 6.6666667e-01 6.4285714e-01 6.6216216e-01 6.8115942e-01 6.2500000e-01 6.2500000e-01 7.3684211e-01 6.4473684e-01 7.3417722e-01 7.1052632e-01 6.3888889e-01 7.3417722e-01 6.5432099e-01 6.9230769e-01 7.1428571e-01 6.7567568e-01 6.7532468e-01 6.7605634e-01 6.5789474e-01 5.4285714e-01 6.9736842e-01 6.2337662e-01 6.6233766e-01 6.7605634e-01 7.0270270e-01 6.1842105e-01 6.7567568e-01 6.2318841e-01 6.7605634e-01 6.9333333e-01 7.1428571e-01 6.0000000e-01 6.0000000e-01 6.6197183e-01 6.9230769e-01 6.8000000e-01 7.2000000e-01 6.5384615e-01 6.5753425e-01 6.6197183e-01 7.1232877e-01 6.9333333e-01 7.5000000e-01 7.1052632e-01 6.7567568e-01 6.4285714e-01 6.0273973e-01 5.8571429e-01 6.9512195e-01 6.3013699e-01 6.8918919e-01 7.0270270e-01 6.6666667e-01 6.8571429e-01 6.6666667e-01 6.1111111e-01 7.0666667e-01 6.6666667e-01 6.5333333e-01 
6.8674699e-01 7.0731707e-01 6.3636364e-01 6.3750000e-01 6.1643836e-01 6.5432099e-01 5.8441558e-01 5.8666667e-01 4.7297297e-01 5.5263158e-01 6.9736842e-01 6.9333333e-01 6.5789474e-01 5.7575758e-01 6.7532468e-01 7.0886076e-01 6.4383562e-01 5.8666667e-01 6.6233766e-01 7.5000000e-01 6.2500000e-01 7.7027027e-01 6.0563380e-01 6.8000000e-01 5.6716418e-01 6.7948718e-01 6.4864865e-01 6.1971831e-01 7.1428571e-01 6.5753425e-01 6.7567568e-01 6.6197183e-01 7.7108434e-01 6.6216216e-01 7.1232877e-01 6.4000000e-01 7.0886076e-01 6.0563380e-01 6.2337662e-01 6.2666667e-01 7.7922078e-01 7.2972973e-01 7.5342466e-01 5.7971014e-01 7.3333333e-01 7.0886076e-01 6.6216216e-01 6.4102564e-01 5.8904110e-01 7.3076923e-01 6.4102564e-01 7.1250000e-01 6.4473684e-01 5.9154930e-01 5.3424658e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt new file mode 100644 index 0000000000000000000000000000000000000000..da698cf511e6983e1db1bbed6e2974f3480a6903 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-jensenshannon-ml-iris.txt @@ -0,0 +1 @@ +0.0211635609063 0.00455072769716 0.0230610904531 0.0076674324982 0.037571216894 0.029561354778 0.0115281186735 0.0225809070507 0.0346801442638 0.00321049176948 0.0232839774828 0.0321124517082 0.0244179197971 0.0331466156799 0.0373949302575 0.0411984503375 0.0218945865519 0.0198268474453 0.02395616278 0.0254898420418 0.0394901943037 0.0396613298853 0.0621120626322 0.0458316817045 0.0334832834948 0.0445794256135 0.00775602650151 0.00770188279153 0.0277044809709 0.0292851269343 0.0408206135002 0.0384837880405 0.0229145225178 0.0346801442638 0.017155900548 0.0186865079856 0.0346801442638 0.011778601551 0.0109570078484 0.0244803928224 0.0609237090857 0.0125274661674 0.0713079215249 0.05072181848 0.0329702193648 0.018118079192 0.0129441131729 0.00461235131171 0.0077204189151 0.190781073622 0.196235539354 0.202381067744 0.215567934651 0.208951210729 0.208395222197 0.202977772245 0.185995150024 0.197488445796 0.207754344906 0.207231776416 0.200895721783 0.202370257926 0.208839694602 0.184151316466 0.18979501874 0.211864590783 0.187854399445 0.231134943838 0.195836809821 0.220082308081 0.191451897661 0.227329222402 0.204586412278 0.192443163994 0.193684928543 0.206841791869 0.21612354251 0.209268704489 0.175345617131 0.197941218478 0.190625337098 0.191065854858 0.2317867516 0.214902945463 0.200203629275 0.200116177839 0.216845735492 0.194043462589 0.208271280018 0.210985756639 0.203659220099 0.197020978632 0.188475437149 0.205376756706 0.190989482965 0.1980172695 0.194499957306 0.176357663314 0.198492265793 0.255232551639 0.244270007892 0.240469208946 0.23837051036 0.248414598574 0.246817944636 0.244919131711 0.239985639856 0.249741998486 0.238825041235 0.224881988223 0.240479022193 0.236919216915 0.253134884385 0.257281336145 0.239143010746 0.231252341207 0.230464321684 0.26688757037 0.243702807017 0.239741931201 0.243722504108 0.250777226737 0.230838149152 0.233779458319 0.227365052742 0.226996712309 0.224529623997 0.248876761269 0.223504306637 0.241222705588 0.218359591571 0.251783757155 0.22178058433 0.237922879169 0.243259140467 0.243757719218 0.229784431814 0.223834512884 0.231417699032 0.245855002542 0.23260880661 0.244270007892 0.24412721812 0.245364556958 0.239069534798 0.242476824971 0.231950344971 0.238821966724 0.23119389821 0.0191037071088 0.0192689125135 0.0279599225292 0.0400056720024 0.0369973592153 0.0168401225107 0.0156647141811 0.0317569365649 0.0209196472407 
0.0261095572505 0.0295911292198 0.0387034711929 0.047943015361 0.0502801086558 0.0505076233319 0.0276985510653 0.0178344200842 0.0354647529806 0.0132311775272 0.0439058315568 0.0587698655317 0.0548749800113 0.0420211683289 0.0158475875817 0.0410205075223 0.0159821398213 0.0151652303532 0.0238719428054 0.0190485214781 0.0351129659726 0.0545720530766 0.0439213935019 0.0317569365649 0.0241815879409 0.0259117209292 0.0317569365649 0.0173211662353 0.0138864344566 0.0338981856531 0.0429537854169 0.027658485714 0.0691172321094 0.0500059403041 0.0236011037029 0.0316644090037 0.0201837094834 0.0228863104939 0.0134861623597 0.176517909274 0.183281450302 0.188265462996 0.20119658342 0.194697326208 0.195209942657 0.190674274015 0.172425697578 0.182894065786 0.195529592383 0.192026007893 0.188346396823 0.186051479032 0.19538200246 0.171605310091 0.175766658214 0.199864266754 0.173408334646 0.215875435175 0.181358449055 0.208526316653 0.177419239992 0.212710197663 0.190468934035 0.178153770213 0.179556589614 0.19192211318 0.202468559318 0.196181327665 0.160572766746 0.183327527516 0.175701303909 0.177066663037 0.218355819513 0.203310616701 0.188843124957 0.186323930321 0.201194011774 0.18173319992 0.194510318604 0.197386404553 0.190483994401 0.182718294182 0.174255518011 0.19205782762 0.178239088983 0.18513760318 0.180597338326 0.163063102971 0.185284857996 0.243887274146 0.231624187925 0.22689829944 0.225289762668 0.235796354542 0.23269059518 0.23306609907 0.225602345189 0.235244584021 0.226892475392 0.212543999067 0.226908361887 0.223757309255 0.240296170501 0.245569279789 0.227339668618 0.2180947426 0.217889650333 0.252259964829 0.228879839341 0.227150610245 0.231836962279 0.236076260811 0.217244760533 0.221438124324 0.213671912157 0.213819376581 0.212040793299 0.235827974309 0.209053467969 0.226615210725 0.205271111175 0.238869689413 0.207939309627 0.223898822147 0.229226063436 0.232630777303 0.217026651272 0.211532427176 0.218347693285 0.233460132247 0.219891922362 0.231624187925 0.231652249658 0.233525349836 0.226377769419 0.228579062405 0.219098380821 0.227789794229 0.219023772057 0.0212386043587 0.00993825296406 0.0336976816686 0.0262325070397 0.0107568964379 0.0200399473123 0.0369530877134 0.00706121377315 0.0225180018442 0.0345620434634 0.0288649599558 0.0352792061476 0.0352526366714 0.0385518621347 0.0178346265809 0.0156087458393 0.0212893461987 0.0244762891905 0.0356848493955 0.0412118431681 0.0577499958146 0.0449060313177 0.0316682156452 0.0402876009485 0.00791017688182 0.00777689270989 0.0262859463939 0.0275691765497 0.0363841779992 0.0426229028778 0.0258430808451 0.0369530877134 0.0171494695134 0.0197038085728 0.0369530877134 0.00856221073911 0.010146504576 0.0213442662268 0.0572142712511 0.0116574543859 0.0671597340405 0.047388301579 0.0285557929122 0.0192111185981 0.0112749020632 0.00781127840012 0.00637566729419 0.187210918471 0.192623672543 0.198833727144 0.212008654042 0.205367416257 0.204958566039 0.199402528264 0.182409867249 0.193987091024 0.204155295526 0.20374075502 0.197257634781 0.198980554413 0.205364330922 0.180426094029 0.186163209648 0.208359044162 0.184479344493 0.227592539467 0.192334488284 0.216529219774 0.187806950452 0.223855994439 0.201226354807 0.188864300604 0.190066520684 0.203335651256 0.212548662023 0.205705049083 0.171742464799 0.194417377469 0.187136601809 0.187472552212 0.228344421279 0.211429948522 0.196622478767 0.196530972542 0.21335054643 0.190497911091 0.204702393397 0.207598118603 0.200155023454 0.193467129006 0.184880183147 0.201857382862 
0.187517863683 0.194486736804 0.1909406597 0.172553592377 0.194927231832 0.251743344729 0.240762357546 0.236947962879 0.234942968898 0.244909063494 0.243388177909 0.24143957892 0.236617606758 0.24633077256 0.235257992461 0.221275459366 0.23695229759 0.23334624948 0.249598367646 0.253700089415 0.235542233543 0.227782636512 0.227013807453 0.263469639225 0.240293112196 0.236160757703 0.240179126335 0.247395319298 0.227257451928 0.23025606552 0.223937789122 0.223399862869 0.220967129276 0.245363860699 0.220116506797 0.237779687596 0.214888164759 0.248252329967 0.218347705592 0.234681014401 0.239690494574 0.240217402996 0.22632629355 0.220260809644 0.227813787503 0.242262375922 0.228918020841 0.240762357546 0.240592325032 0.241778783496 0.235418026861 0.23890352284 0.228365058816 0.235271978928 0.227703766766 0.0251699105319 0.0326109277538 0.029278801732 0.0117605763436 0.00495713045566 0.032971458625 0.022273270218 0.00845866705418 0.0338325850006 0.0418060793613 0.0560571569439 0.0475989809058 0.0526432986362 0.0287512950603 0.0156470261333 0.0286833299321 0.0125318521449 0.039628171731 0.0605879245284 0.051592881751 0.0247889558 0.0185457610439 0.0360813596602 0.0162897964903 0.0233731835598 0.00555113851114 0.00851608036789 0.039350861172 0.0494281040416 0.0428445793936 0.032971458625 0.0361084743768 0.0382083791018 0.032971458625 0.0135564720949 0.0133460812565 0.0359059029584 0.0525998249753 0.020448013216 0.0653401649712 0.0366222774812 0.0276779812669 0.0199994530533 0.0115734561992 0.0214333278479 0.0186461579961 0.171344593808 0.177166238774 0.182950771401 0.19643196185 0.18986301898 0.188443393542 0.18388535541 0.166562175459 0.177783941311 0.188923487579 0.187724110496 0.182156573883 0.18272910062 0.189078094755 0.165787292858 0.170722554665 0.192539814536 0.167433166664 0.21234151671 0.17610118578 0.201339136255 0.172496446246 0.207869320447 0.184234771078 0.173084677744 0.174586008872 0.187343267555 0.197067369338 0.190065322687 0.155992913972 0.178370192697 0.170834599332 0.171756811444 0.21215391799 0.195543956872 0.181382419503 0.180878423055 0.197648250997 0.174630568935 0.189049602981 0.190809942592 0.184007315234 0.177572802284 0.169158986096 0.1858235382 0.171054750208 0.178472858167 0.175024341592 0.15843173338 0.179125844986 0.236753289776 0.225336913605 0.221457751336 0.21877528886 0.229546376269 0.227338714151 0.226000082599 0.220106179064 0.230241030133 0.220420771483 0.206398010187 0.221506177578 0.218254053746 0.234550382603 0.23950257292 0.221077875617 0.211789394262 0.210958467875 0.247726054061 0.224160699687 0.221293898641 0.225242596712 0.231164838975 0.212084684776 0.21483758197 0.207582855086 0.208324546741 0.205668770285 0.22999948579 0.203519866038 0.221791807814 0.198729512666 0.233119883684 0.201955632148 0.21744774227 0.224658382044 0.22554531899 0.210282327898 0.205086194314 0.212901784527 0.227685054369 0.214992030153 0.225336913605 0.225426421062 0.227294020009 0.22122180599 0.223891772502 0.213328572484 0.220627825622 0.212027719306 0.036692370716 0.0274221847228 0.014614486903 0.0257905543192 0.038185072385 0.00854173398239 0.0228846598272 0.0362880440778 0.0247966202157 0.0338575762449 0.0341810302892 0.0403839419732 0.023192138006 0.0228332335778 0.0207802721216 0.030246984472 0.0385095591836 0.036116458124 0.063670015249 0.0456331337141 0.0387265099713 0.0454622939774 0.0128829753328 0.0153687384621 0.0290580106985 0.032571197138 0.0439484983687 0.0345946330706 0.0178789804494 0.038185072385 0.0229066226947 0.0243703411158 0.038185072385 
0.0145921399018 0.0157758022112 0.0246899780508 0.0667942861058 0.00859371091119 0.0713780561444 0.0493343894065 0.0370078971529 0.0136450272229 0.0138896306008 0.00617904291603 0.0146027872105 0.19304474601 0.19797909852 0.204564547012 0.217893788171 0.211240589753 0.210085035577 0.204424468033 0.187966151726 0.199846283114 0.209201380502 0.209876418912 0.202509063074 0.205495616388 0.210676243708 0.185852822947 0.192014733821 0.213129345848 0.190059083655 0.233894763867 0.198141527364 0.221234996275 0.193689434624 0.229701750232 0.206631480423 0.194727732257 0.195938025333 0.209362001884 0.218147207364 0.211029564766 0.177869147322 0.200327033561 0.193124484677 0.193233901741 0.233604044389 0.215980508747 0.201289146742 0.20219168761 0.219726845187 0.195458087393 0.2103246028 0.212820091698 0.205396680761 0.199290845186 0.190742394929 0.207189399227 0.192521418757 0.199651552643 0.196594932336 0.178468769167 0.200295584792 0.256297768876 0.245841451519 0.242438509968 0.240033073248 0.249980802879 0.248953385462 0.246144223104 0.242178797272 0.252036489845 0.240176361939 0.226412773569 0.242454491215 0.238768549206 0.254841315698 0.258625650226 0.240494667361 0.232976534038 0.231927166971 0.269283834821 0.246140263748 0.241374087503 0.245035624054 0.253144665918 0.232868387014 0.235226383286 0.229279397026 0.228861516729 0.226064745168 0.250630665997 0.225726655668 0.243583203961 0.220042394187 0.253511231548 0.2237596548 0.239872842457 0.245497904196 0.24478307529 0.231328796826 0.225307213023 0.233256809435 0.247442229489 0.234444929147 0.245841451519 0.245661961251 0.246724980668 0.240848644418 0.244659004118 0.233675626419 0.239814102397 0.232532592403 0.010524058973 0.0347497758249 0.0310026365608 0.0636683508445 0.039628310023 0.0339732471818 0.0632599742009 0.0607622080336 0.0610552648732 0.0274939888458 0.0321794296029 0.0203038890155 0.0225566538528 0.0181521200419 0.0426282898435 0.00971744861179 0.0573638408809 0.0304112802741 0.0463628234513 0.044509344874 0.0125060810098 0.0372012029006 0.0399195029174 0.0351572922113 0.0381714857339 0.0236066809352 0.0675880085279 0.0489234884353 0.0636683508445 0.0451372647506 0.0495218266562 0.0636683508445 0.028718795999 0.0363358460459 0.0247409309981 0.0536494939568 0.0291783067693 0.0362475310016 0.0206193569389 0.0238060051702 0.0365127412376 0.0303247636874 0.0382706081182 0.036679138207 0.165252454208 0.168745392776 0.176697416201 0.190075579294 0.183193768644 0.182210296577 0.174895117306 0.159516185843 0.172780618769 0.179424300801 0.183362052205 0.17275366841 0.180884475296 0.18272692365 0.155645508044 0.163596126812 0.183814684533 0.163944686708 0.206939183794 0.170981324059 0.191238016882 0.165171506195 0.202635613449 0.180157192696 0.16688029242 0.167662828735 0.182473473654 0.189606814944 0.182144568907 0.150494922446 0.17311022882 0.166528964674 0.165043072843 0.205731384902 0.18659197245 0.17105591364 0.173799739615 0.193545885155 0.166211621507 0.181959703917 0.185636136233 0.177047042786 0.171588556623 0.162758165885 0.178831153511 0.164197394115 0.170920859204 0.168528635782 0.148460744764 0.171573948268 0.226522867074 0.216861787556 0.214094925757 0.211961277469 0.220987866614 0.22168305754 0.216782699249 0.215609692292 0.225208444508 0.210312198283 0.19661028383 0.214074476024 0.209773195915 0.225810880474 0.228551359916 0.210341321513 0.204691741777 0.203336697222 0.242485382573 0.21963599647 0.211892193047 0.215282949931 0.226705435666 0.20418234927 0.205942096193 0.201754356567 0.199749686219 0.196656401255 
0.221913123953 0.199165134473 0.216656797893 0.191759529503 0.224569670715 0.196349964299 0.213992874847 0.217230566883 0.214548114667 0.2028246956 0.195690210734 0.20400781198 0.217750478797 0.204445613083 0.216861787556 0.216379424168 0.21668692278 0.211041069144 0.216249554038 0.204373397921 0.209459569208 0.203359671893 0.0284940982012 0.0283835072119 0.0582564069663 0.0317209110303 0.0290062929149 0.0575581968066 0.0518675712793 0.0525497818792 0.0214648817816 0.0289820919427 0.0151981567712 0.0194868108708 0.00772151270155 0.0396885548083 0.0133170749495 0.0480498930165 0.0406834920646 0.0454173876936 0.04374952779 0.0223043370554 0.0304932603619 0.0333669476098 0.0323541790178 0.036462611527 0.0283937271683 0.0581656862224 0.0386849486073 0.0582564069663 0.0386824403573 0.0427767064205 0.0582564069663 0.0225670168648 0.0303996112373 0.0186317385432 0.0589532282724 0.020129496567 0.0456219811369 0.0271705146906 0.0266114081755 0.0281840659315 0.0237276312035 0.0300017258072 0.0304084467621 0.174689674341 0.17830904459 0.186122916272 0.199541551099 0.192698821274 0.191439303409 0.184399484383 0.168993025372 0.182083678728 0.188980455315 0.192641208359 0.182376656778 0.189869605957 0.192034662906 0.165406381187 0.173160863593 0.193190156667 0.172952807322 0.216356837462 0.18028635421 0.200707489616 0.174765006311 0.211946253402 0.189197004074 0.176338357547 0.17720731475 0.191796191467 0.199121585044 0.191634817003 0.159923924784 0.182459504488 0.175769947329 0.174536678956 0.215011640541 0.195898920801 0.180555890973 0.183304141459 0.202828623711 0.175640593326 0.191448592698 0.194762075203 0.186406863824 0.181008054094 0.172240816201 0.188223844291 0.17346231425 0.180325575914 0.177965844821 0.158332658267 0.181046361448 0.235882031296 0.226280867303 0.2235366831 0.221221709251 0.230404936567 0.23094802366 0.226136073658 0.224736021115 0.234431734165 0.219824894986 0.20619515856 0.223526372128 0.219308937798 0.235287233784 0.23807753807 0.219908336162 0.214027462834 0.212631745032 0.251732773763 0.228838671706 0.221440978506 0.224753704594 0.235866097652 0.213724598783 0.215380278296 0.210994508636 0.209321687173 0.206162323946 0.231346876087 0.208300913946 0.225928825898 0.201080283138 0.234037288778 0.20558958438 0.222856381147 0.226751911512 0.223989137906 0.212135251626 0.205214537461 0.213595258296 0.2273163389 0.21417146415 0.226280867303 0.225846388312 0.226229092302 0.220705781832 0.225781259333 0.213928177909 0.218913775078 0.212729498479 0.0122225873329 0.0303322491715 0.0105162282326 0.0130161869455 0.0294681125528 0.031169289575 0.0445674903412 0.0424428041907 0.0471592401326 0.0242919299586 0.0151911030837 0.0250458377851 0.0159588466859 0.0393890228389 0.0501712969027 0.0573610210315 0.0347635318983 0.0244162409612 0.040396894165 0.00489128055225 0.0130158047868 0.0162739668734 0.0181749821427 0.0399100872217 0.0416234365466 0.032217222914 0.0303322491715 0.0260818708259 0.0277812891175 0.0303322491715 0.0068887146948 0.00350057866506 0.0298024545939 0.056647752597 0.0131029257559 0.0689399130874 0.0438694576111 0.0295733202179 0.0148293487362 0.005187559828 0.00994170320116 0.00857601759458 0.181761248694 0.187453410696 0.193365623784 0.206702523841 0.20011934013 0.199105672278 0.194191029325 0.176999836949 0.188302627791 0.199111466491 0.198126208998 0.192301121465 0.193121251528 0.199652739338 0.175765384319 0.180985589904 0.202947496477 0.178262931918 0.22241465847 0.18663850283 0.211489015658 0.182706386145 0.218266373994 0.195039865626 0.18346554188 0.184856636434 
0.197755074291 0.207324505424 0.200404409787 0.166355990223 0.188830148216 0.181378679541 0.182125858248 0.222660838403 0.205968099039 0.191576935017 0.19121911341 0.207887116649 0.185081160877 0.199380829938 0.201554837448 0.194543615891 0.187991163757 0.179524780118 0.19631300534 0.181726234556 0.188970696545 0.185468734774 0.168191837054 0.18953883133 0.246767069406 0.235545780286 0.231684267956 0.229269594484 0.239724337717 0.237749571543 0.236212316891 0.230682790909 0.240645700931 0.230405224861 0.216420479968 0.231715542723 0.228337424754 0.244594612682 0.249193956399 0.230909382279 0.222229420235 0.221427946934 0.257965809127 0.234573740528 0.231288571496 0.235259005866 0.241607850531 0.22220070955 0.225060949621 0.218149643482 0.218415896436 0.21585883122 0.24017332069 0.214149237729 0.232167950695 0.20925074028 0.243199182069 0.21253980165 0.228281003551 0.23468620257 0.235446691383 0.22074773222 0.215228869367 0.222922571895 0.237553097068 0.224606387466 0.235545780286 0.235534393045 0.23712429287 0.230940484692 0.233917063813 0.223401305554 0.230524046304 0.222353817706 0.0346270873711 0.0222428303765 0.0130402644662 0.0348466257078 0.042829178942 0.0549568638218 0.0464424131108 0.050271964255 0.0260503884653 0.0122224116333 0.0282180922789 0.011864843965 0.0376053109772 0.060340947297 0.0487164281844 0.0285013587126 0.0163929694658 0.0334500762426 0.015996373074 0.0216948948211 0.00981357617646 0.00897169179454 0.0351677992971 0.0524329239391 0.0436506734112 0.0346270873711 0.0336931600411 0.03612022228 0.0346270873711 0.0123155461924 0.0128144174355 0.033561339537 0.0480641708164 0.0212422700005 0.0628734973132 0.0366806543871 0.0231396795589 0.0230741684915 0.0123098386274 0.0219470818919 0.0170687771133 0.170097980345 0.176050748737 0.181762587161 0.195138740624 0.188553715994 0.187634353283 0.182931238354 0.165421712546 0.176600578903 0.187912731345 0.186408453678 0.181027641681 0.181322068987 0.188146844457 0.16448627936 0.169388121003 0.191757508168 0.166524283083 0.21084441712 0.17494262847 0.200480456222 0.171127562134 0.206682857942 0.183402734812 0.171811556423 0.173255302855 0.186064001811 0.195873759085 0.189008503285 0.154586199139 0.177136590503 0.16961214015 0.170511848112 0.211247624618 0.194873193684 0.18053463387 0.179673493778 0.196193226513 0.173758955601 0.187869379999 0.19001738823 0.183074417621 0.176342969787 0.167870586049 0.184832516957 0.170273119593 0.177542554096 0.173865162824 0.156824879402 0.178069468543 0.235952767235 0.224377142544 0.220334312937 0.217941307072 0.228582134377 0.226295522057 0.225204079848 0.219149120214 0.229153457047 0.219423743811 0.20530168342 0.220370010221 0.217084867796 0.233463162138 0.238411309618 0.220004052761 0.210872881275 0.210171938201 0.24654768975 0.223020715486 0.220176358035 0.224293060856 0.230089603615 0.210845620347 0.213914309618 0.206670271948 0.207126565579 0.204661896322 0.228953899388 0.202543633745 0.220637247329 0.197849243585 0.232039444595 0.201016080923 0.216786754649 0.223327252818 0.224678101179 0.209454984614 0.204079198003 0.211687536154 0.226538202286 0.213579234366 0.224377142544 0.224425108032 0.226231350025 0.219899730556 0.222577135927 0.212192019374 0.21976273048 0.211209202307 0.0316629149872 0.033505533415 0.00599304401229 0.03147088677 0.0569379974042 0.0709823822809 0.0748230813085 0.0531369685873 0.043263968218 0.0546416139704 0.0249055105449 0.0688386171987 0.0687094353901 0.0814189949505 0.0424145094995 0.0312264925083 0.0671690041442 0.0297074253675 0.0326159908833 
0.0331671559449 0.0312681130066 0.0653641254742 0.040141041955 0.0490986378456 0.0 0.0426893927085 0.0408370899258 0.0 0.0368318621027 0.029200211213 0.0578784327643 0.069522041979 0.0411420259607 0.0955605905857 0.0684916485028 0.0536985636371 0.0361283833003 0.0349236407736 0.0326544016244 0.0320790517512 0.190734279342 0.198011093099 0.202165335147 0.215193803756 0.208989236133 0.207943677959 0.205067684162 0.186855165408 0.196324830352 0.210203923195 0.205342231011 0.203427359944 0.198526873832 0.208498767129 0.187766259392 0.1906844311 0.213518747065 0.185598548292 0.229746939288 0.194805777978 0.222821572969 0.192481412892 0.225723793976 0.202375384198 0.192457483521 0.194300696695 0.205376522136 0.216660688755 0.21030774241 0.175307222553 0.197000980201 0.189066511629 0.191539232379 0.231058906578 0.21669613367 0.203523564636 0.200666530244 0.214595210264 0.195827171333 0.208641958794 0.209561099462 0.203960485003 0.196717491572 0.188766309975 0.205678014795 0.191492051166 0.198961164508 0.194672996958 0.180152450124 0.19944864987 0.257583228235 0.245178524187 0.240481186058 0.23786630337 0.249341975217 0.245210894146 0.246468005973 0.237456371003 0.247563146561 0.241300303041 0.22729752986 0.240555698528 0.237993038095 0.254193700204 0.26033321069 0.24220764546 0.231128442611 0.230799434891 0.264694157473 0.241194058993 0.241588310222 0.245985985156 0.248083056968 0.231516408669 0.235215479132 0.226198260689 0.228320459149 0.22623565919 0.249391170367 0.221152204365 0.239284874476 0.218351963925 0.252686589685 0.220530747351 0.234477098822 0.243363395844 0.246969168578 0.229990071021 0.225895881072 0.232959826282 0.248113114083 0.23570223574 0.245178524187 0.245562733947 0.248226439579 0.241702239505 0.242775171886 0.233515238499 0.242256602421 0.232453320509 0.0221776604768 0.0292076409984 0.0225953199419 0.0344078822964 0.0402556862186 0.0443490345462 0.0247422055695 0.0212582761966 0.0262237412261 0.0239779350196 0.0420497572884 0.0414593565817 0.0638943455501 0.0444256278823 0.0324613404347 0.0464717210574 0.0064847144584 0.00810593532016 0.026613709743 0.0281809240896 0.0431707040779 0.0364933096346 0.0236549105265 0.0316629149872 0.0188389154871 0.0197111271958 0.0316629149872 0.0126413659503 0.00989228745945 0.0276229976961 0.0619054176788 0.0136582341261 0.0735855911944 0.0517713161376 0.0346942423053 0.0171636471405 0.0128780143687 0.00330427938484 0.00787705209324 0.191124461218 0.196709270957 0.202716444922 0.215924187936 0.209335641351 0.208652638742 0.203456445375 0.186385899088 0.197740844714 0.208277146555 0.207460837635 0.201437849638 0.202451222155 0.209128071387 0.184787202976 0.190226937612 0.212288039878 0.187958804478 0.231460569504 0.196093364986 0.220613648389 0.191903262961 0.227580912528 0.204703238288 0.192798319471 0.194099784682 0.207098250381 0.21653588119 0.209689452484 0.175692486622 0.198220686642 0.190843207535 0.19145100341 0.232063526554 0.215320322312 0.200757345911 0.200512766736 0.217080327154 0.194471953875 0.2086592247 0.211165769014 0.203992872465 0.197349559187 0.188849659131 0.205722712353 0.191296031353 0.198394567468 0.194851113014 0.177048060287 0.198893335106 0.255751598489 0.244696550693 0.240850735029 0.238657915403 0.248846650839 0.247061872969 0.245367004129 0.240135191988 0.249950401792 0.239377278919 0.225435027987 0.240867925442 0.23738991955 0.253602763959 0.257909304325 0.239755581779 0.231578379023 0.230802663983 0.267123052444 0.243889033036 0.240266927764 0.244253437447 0.250943098487 0.231284290644 0.234232349856 
0.227603903464 0.227483301065 0.225010355342 0.249291083828 0.223656027478 0.241455021635 0.21867583866 0.252237135554 0.222012069338 0.237944922778 0.2436864771 0.244348021388 0.230119265967 0.224342678419 0.231928535106 0.246422479994 0.233277995952 0.244696550693 0.244602643666 0.245964063641 0.239692017082 0.242916090829 0.232449793011 0.23942553099 0.231611684555 0.0349328180738 0.0397488289561 0.055917859907 0.0466657680267 0.0537525131124 0.0312322620487 0.0204661899168 0.0273925121636 0.0188335643607 0.0409848131484 0.0582072876054 0.056071962696 0.0229170555223 0.0261517042618 0.0397135231185 0.0177392332242 0.0260266328079 0.00803879586998 0.0153514357558 0.0448019039458 0.0436873202641 0.0393784019825 0.033505533415 0.038918396453 0.040710313775 0.033505533415 0.0158373818638 0.0159973219202 0.0374102115892 0.0608056403948 0.0178404535904 0.0681680501025 0.0368277650919 0.0342153028534 0.0136005609896 0.0114725513671 0.0202654226306 0.0215111320227 0.175502940168 0.180846739943 0.186993890383 0.200622200655 0.194047326964 0.191871659177 0.187230724864 0.170446969951 0.181958118895 0.192296146604 0.192168805538 0.185737701873 0.187567790504 0.1926934047 0.169562979332 0.174903941965 0.195627236518 0.171332536122 0.216941277953 0.180224282629 0.204375817171 0.176708588212 0.212007481629 0.187940100384 0.177271729963 0.178782860675 0.191678059195 0.200974345147 0.193702992688 0.160469688943 0.182595805427 0.175146841545 0.17584337797 0.215706304999 0.198413689274 0.184386485081 0.184856323095 0.20232915317 0.177913816132 0.192982627694 0.194328346433 0.187558388958 0.181714052911 0.173349805963 0.189464804214 0.174374123803 0.181953747446 0.178997842818 0.162709307879 0.18281187754 0.239644460978 0.228724428122 0.225246554022 0.222159074766 0.232927392687 0.231188577573 0.229024014707 0.22394884582 0.234228075319 0.223668927512 0.209861178703 0.225307285274 0.221985913786 0.238104571085 0.242774657148 0.224369518131 0.215280858443 0.214174032708 0.25182022511 0.228290074367 0.224826938646 0.228427350702 0.235193041024 0.216001866886 0.218122104545 0.211215702011 0.212099059638 0.209083204086 0.233569980649 0.207419375116 0.225880589953 0.202184861075 0.236687676647 0.205660738951 0.22091809464 0.228766193737 0.228457617824 0.2135856342 0.208453819381 0.216659972673 0.231192094033 0.218856194539 0.228724428122 0.228811270004 0.230578167797 0.224983578773 0.22794936407 0.216954366886 0.223518672063 0.215168993038 0.0278789967436 0.0521696459286 0.0689795584164 0.0720246274977 0.0510464689946 0.0421745394626 0.053582971262 0.0252834975098 0.0677158937278 0.0652657188093 0.081318384881 0.0463957610979 0.0317140261778 0.0668416555065 0.0279143378417 0.0293650428541 0.0349789935753 0.032697569141 0.0634994948437 0.0398508689254 0.0464805914619 0.00599304401229 0.0382168939676 0.0359888514026 0.00599304401229 0.0356302780251 0.0278103138868 0.0553135387293 0.0681148770417 0.0401494228711 0.0950529579724 0.0698373790182 0.0522664102967 0.0364245082526 0.034367526255 0.0307005764611 0.0296340954647 0.193320448722 0.200616479264 0.204805927895 0.217738543992 0.211505662739 0.210916031972 0.207785188245 0.189492055249 0.199011305592 0.212849559963 0.207930120156 0.205973985295 0.201080290256 0.211364107387 0.190111543422 0.193149888648 0.216396870129 0.188564248953 0.232124397575 0.197509520178 0.225563508571 0.194908404083 0.228408782968 0.205404773753 0.195016829794 0.196780548006 0.207983584103 0.219251719916 0.21299620972 0.177753842585 0.19963357851 0.19173429638 0.194103048534 
0.233951642516 0.219652294406 0.206249008964 0.203257559758 0.217055047586 0.19863330849 0.211258733702 0.212590735452 0.206794766477 0.199327419787 0.191301214309 0.208462930841 0.194441169877 0.201765843334 0.197324504463 0.182225897174 0.202147597456 0.260363424846 0.247928312505 0.243139798114 0.240805098305 0.252082485905 0.248010938223 0.249316339351 0.240374954583 0.250342905364 0.243926059402 0.229857687769 0.243199557315 0.240557269117 0.256813113392 0.262812568913 0.244726906585 0.233983032632 0.233739246967 0.267372287242 0.243942332388 0.244149194945 0.248660562989 0.250891907974 0.234045856002 0.23797517084 0.229108542601 0.230855414047 0.228916694642 0.252083674667 0.224056856634 0.242001030284 0.221239323161 0.255322806495 0.223424493307 0.237685672547 0.245823282675 0.249646734812 0.232903558602 0.228555612396 0.235462898266 0.250614129287 0.237926457957 0.247928312505 0.248245352669 0.250761236826 0.244030479155 0.2452417146 0.236085335476 0.244925556651 0.235313123462 0.0296919493074 0.0549103775111 0.0590022859872 0.0450958795243 0.0436647014416 0.0451669494409 0.0401697758406 0.0620690815544 0.0404063812339 0.0856236153109 0.0591687282212 0.0490473517415 0.0683619493373 0.0275851844543 0.0264002504943 0.0446945676427 0.0461763497794 0.0638640733472 0.0232220049486 0.0233353107646 0.03147088677 0.029201781247 0.02614478673 0.03147088677 0.0350355717384 0.030428654072 0.0452764113597 0.0802967236314 0.0328354116031 0.0941098407344 0.0724718577687 0.0562014612922 0.0313029110017 0.0340889421853 0.0234757814211 0.028925036543 0.210407140423 0.216202187825 0.221880775368 0.234954371989 0.228487320903 0.227668142527 0.22288748901 0.205844703323 0.216768667446 0.227711464843 0.226263348504 0.220970493431 0.220784685064 0.228189601628 0.204608420295 0.209669658318 0.231537789973 0.206793805641 0.250139733355 0.215157813925 0.239903556316 0.211352210266 0.246303099122 0.223431243026 0.212079033799 0.213468334673 0.225996065584 0.235701634712 0.228967589943 0.195079844754 0.217288259849 0.209835756349 0.210818201018 0.25087781482 0.234509225035 0.220284749566 0.219817558162 0.235704379779 0.213924917847 0.22786813663 0.229992970272 0.223192642095 0.216540668547 0.208207378637 0.224921456698 0.210576921517 0.217728441709 0.214138769918 0.196963968339 0.21825274764 0.274633184392 0.263620373951 0.259724001387 0.257434131046 0.267733388911 0.265627371082 0.264299751314 0.258574475793 0.268388644672 0.258492710734 0.244710620006 0.259750243754 0.256426981572 0.272472244255 0.27686351493 0.258914849135 0.250487159092 0.249742739323 0.285410306304 0.262307563551 0.259345413586 0.26329314732 0.269271128247 0.250334873532 0.253300294048 0.246383898221 0.246645581278 0.244211963707 0.268141898451 0.242252606427 0.260005586654 0.237699783889 0.271104326635 0.240825446457 0.256147548483 0.26253800174 0.263413026859 0.249061635853 0.243583443282 0.251070491042 0.265469741093 0.25252673858 0.263620373951 0.263585528386 0.265041995624 0.258842865142 0.261802853748 0.251583944502 0.25855292944 0.250657339544 0.0450102465608 0.0439198537957 0.0420878714528 0.0489743500788 0.0458798154287 0.0561577170329 0.0578224624858 0.0224176386409 0.0849412968827 0.0784422731853 0.0630254855321 0.0686145922194 0.0402128784166 0.0342259139129 0.0607491308771 0.0616063147121 0.059057582132 0.0469260494382 0.0248556117621 0.0569379974042 0.0246670677105 0.0228986845775 0.0569379974042 0.0437638412118 0.0433305015411 0.0375189879884 0.0808012493057 0.0411311531308 0.089015155435 0.0787045494767 0.0568803303557 
0.0468151856286 0.0457099341419 0.0358166687321 0.0382966404493 0.218181156993 0.223322073096 0.229705702275 0.242392790988 0.23577882008 0.236467641295 0.230139466345 0.213525276303 0.225126391191 0.234563959305 0.23447409507 0.227593305984 0.22968375989 0.236629811874 0.210530415308 0.216784944512 0.23930372472 0.216469414915 0.257297585894 0.22354090813 0.246798396633 0.218282233397 0.254457430072 0.233053108231 0.219729861221 0.220661425884 0.234112165808 0.242944509178 0.236406584132 0.202708452811 0.225406734885 0.218378296681 0.218342011887 0.259279663879 0.242430236301 0.227193609953 0.227261836804 0.24357213448 0.22167035443 0.235341335514 0.239265397892 0.231386700827 0.224381906232 0.215751708357 0.232947742221 0.219218926008 0.225693206986 0.221969156729 0.202171293512 0.225894654473 0.281426284779 0.270908280604 0.267090828143 0.26583132699 0.274949583689 0.273930107263 0.271660675229 0.267616788632 0.276815360824 0.264923167376 0.251110814116 0.267050687079 0.263175899022 0.279239724537 0.282486166675 0.264820551157 0.258570659963 0.257930961428 0.293412787858 0.270816835713 0.265787467516 0.269896000193 0.277962792777 0.257183140616 0.260538043514 0.255036127386 0.253324298915 0.251219664681 0.275354649609 0.251349757403 0.268235077166 0.245978035775 0.277988121804 0.249516964722 0.266387651101 0.269264891567 0.269706692396 0.257221982881 0.25041421232 0.257531332229 0.271532129624 0.257608408712 0.270908280604 0.270491687514 0.271003440471 0.264326332823 0.268486578685 0.258229704408 0.264793848318 0.258283260113 0.0140423845301 0.0233496953457 0.0357817640834 0.0194594148752 0.0561842963236 0.0207161808072 0.0344686045315 0.0532665757163 0.0656105748711 0.0611304155975 0.0376479555399 0.0422417749013 0.0418045042693 0.0514369987371 0.0551924216967 0.0370785412495 0.0618826263881 0.03552992519 0.0709823822809 0.0406476055096 0.0445130802003 0.0709823822809 0.0367370595656 0.0436433053629 0.0176515558758 0.0714383400943 0.0313405988749 0.050902359898 0.0456873150128 0.0406736463161 0.0404939048403 0.0386201712918 0.0390017962403 0.0411563720519 0.190383467974 0.193150151335 0.201647820189 0.214732592431 0.207848794505 0.207203203998 0.199017728171 0.184517441065 0.198117570692 0.203251095462 0.208611510757 0.196706601418 0.206520396425 0.207659337014 0.179547279859 0.188402996857 0.207901541524 0.18984126915 0.231436366396 0.196328732424 0.214604717337 0.189868309751 0.227504737805 0.2057488472 0.191940214233 0.192467873609 0.207646738607 0.21397919575 0.206530920079 0.175883310987 0.198349858418 0.192082967323 0.190004696209 0.230325216181 0.21052421867 0.194679583814 0.198537353627 0.218586319181 0.19070413186 0.206580728089 0.210890308648 0.201869808993 0.196665077614 0.187817228574 0.203604184545 0.189253631177 0.195640755776 0.193545341254 0.172360847921 0.196255157011 0.249220192749 0.240515221189 0.238164043403 0.236348677214 0.244540315717 0.246252369823 0.240132469075 0.240621741867 0.249888930273 0.233326376677 0.219986372028 0.238112203328 0.233455949013 0.249164983161 0.25077275089 0.233019708703 0.229068571188 0.227556818352 0.266813634827 0.244493984417 0.235169888951 0.238293435462 0.251527743788 0.228140449415 0.22959779378 0.226587756924 0.223543468869 0.220396683922 0.245620426642 0.22441324809 0.241383050303 0.216373265669 0.24802824928 0.221314439433 0.239346728325 0.241091845042 0.237050180919 0.227111645095 0.219282181885 0.227591404442 0.240665010845 0.227138628142 0.240515221189 0.239782829874 0.239319392462 0.23385508337 0.24006053231 0.227964392353 
0.231962468731 0.227162089828 0.0243523838323 0.0383436294161 0.0281380877138 0.0591143980174 0.023507742642 0.0377968944731 0.0517794728194 0.0731209286425 0.0626942772583 0.0386453724111 0.0461236944312 0.0433886463071 0.057222198299 0.0591551673076 0.0319956615202 0.0701506857494 0.0422353525103 0.0748230813085 0.0387325777977 0.0428806382505 0.0748230813085 0.0409657262208 0.047385502674 0.0175421295668 0.0657295550805 0.0386908764688 0.0489968109439 0.0522686619491 0.0387311691946 0.0491554938498 0.0442532671202 0.0441014308007 0.0436449779054 0.189947462454 0.193087358051 0.201270443197 0.213957860003 0.207091706444 0.207812220084 0.199357780455 0.184445706942 0.197758177741 0.203406879958 0.207785745488 0.196588475912 0.205402521621 0.207935882191 0.179126107639 0.187763747972 0.208638518258 0.190249811145 0.22994679767 0.196055336563 0.215105618909 0.189122223025 0.226906503622 0.206245399814 0.191418910324 0.191795589288 0.206939834769 0.213506326546 0.206505332082 0.175157210084 0.197857598212 0.191658112797 0.189588115864 0.230508642351 0.211553886467 0.195374420383 0.198156887541 0.217288762795 0.191332101287 0.206198782834 0.211486527696 0.202202827305 0.19621322526 0.187298385594 0.203764973183 0.190101114406 0.196046751208 0.193325009164 0.171164264517 0.19631141813 0.249660253112 0.240527632421 0.23771162293 0.236702658605 0.244516745342 0.245937622906 0.240621381598 0.240549902948 0.249413960629 0.233305723303 0.219774348622 0.237625603888 0.232924324305 0.248768260048 0.250380844287 0.232789805762 0.22924781342 0.228117670835 0.26599273559 0.24389404773 0.234785533178 0.238373023947 0.251062908639 0.227440481837 0.229791016779 0.226764157548 0.223002288166 0.220422035353 0.245352612988 0.224391473735 0.240777024278 0.216738025139 0.247654798968 0.221447853711 0.240056634641 0.240035093698 0.237386138185 0.227558813593 0.219321439562 0.226978987712 0.240167124817 0.22597544824 0.240527632421 0.2396924683 0.239078218682 0.232902821621 0.239060496039 0.227569835839 0.232339810664 0.227659905228 0.0141262101806 0.0139426926457 0.0350538423884 0.0193552608508 0.0431621025158 0.0440916637552 0.050896637678 0.0388189182549 0.0267566278127 0.024110231253 0.0232035130335 0.0336464874245 0.0349533341335 0.0219308383764 0.0572621322316 0.0349099564127 0.0531369685873 0.0252590392656 0.0298245046475 0.0531369685873 0.0175869571119 0.0244711956848 0.00818635303924 0.0514811245358 0.0194723442375 0.0509148981601 0.038941584783 0.0196470806889 0.0301479553419 0.021545096414 0.0245644202822 0.0217515624084 0.179463055971 0.184039565703 0.191058390156 0.204133065196 0.197353411589 0.197439799103 0.1907070948 0.174350095292 0.18671934138 0.195225461177 0.19666314681 0.188285080293 0.192912863971 0.197716084636 0.17106321505 0.177948538218 0.199885832792 0.178001143095 0.220003811923 0.185034059424 0.207478284578 0.179492397587 0.21650741229 0.194560853934 0.181055185718 0.181934852801 0.196069170377 0.204292703623 0.197354097622 0.164139489747 0.187014857055 0.180129327357 0.179477085752 0.220692949894 0.202951258529 0.187497059139 0.188415486818 0.206311756974 0.182082560815 0.196599300923 0.200515380101 0.192263706104 0.185773424375 0.176983033355 0.193914579783 0.179773695055 0.186365398569 0.183085748173 0.163066216827 0.18672092116 0.242702110891 0.232274391832 0.228788679171 0.227187602483 0.236388804345 0.235969651611 0.232778101812 0.229721372697 0.23914299349 0.226115326683 0.212181006801 0.228757804803 0.224715842477 0.240932159296 0.244193462731 0.226111600887 0.219855639469 
0.218968698082 0.256162056313 0.233278273993 0.227208648321 0.231128251362 0.240452766308 0.218806638317 0.221623839858 0.216518120679 0.214708469871 0.212229381986 0.23697508889 0.213254322456 0.230501952576 0.207030794754 0.239662752997 0.210997516317 0.22840539478 0.231383198724 0.230834439598 0.218316993527 0.211375460714 0.218990694003 0.233092437868 0.219339375552 0.232274391832 0.231855607362 0.232408829329 0.226038981221 0.230519762371 0.219567778072 0.225824086027 0.219230412501 0.0195919993919 0.0221059172753 0.0271326704392 0.0528905379198 0.0431586822848 0.0385447030867 0.0255010287112 0.0262337606525 0.0170321830128 0.0195302099147 0.0206521036595 0.0209833716129 0.0252645250533 0.0550811186683 0.0394746523074 0.043263968218 0.0280088992059 0.0316978756724 0.043263968218 0.00932281475944 0.0154549635915 0.0220221996956 0.04619442006 0.0176558634743 0.0549187053198 0.0347531723104 0.0154010655624 0.0249961358537 0.0130624709681 0.021086975526 0.0157582275316 0.172494143831 0.17784850126 0.184169405165 0.197413015598 0.190717679771 0.190383314088 0.184688114733 0.167622718241 0.179385018024 0.189454833136 0.189254205537 0.182497405034 0.184783379021 0.190756581821 0.165553322788 0.171383967321 0.193742345805 0.169993474917 0.213189630746 0.17771520654 0.201947261672 0.173029090008 0.209395719039 0.186777092552 0.174149245532 0.175318744446 0.188797436083 0.197902799323 0.191019058185 0.156992121883 0.179797341784 0.172560525004 0.172720884671 0.213854773981 0.196871318612 0.181938976962 0.181811193063 0.198969640765 0.17576606357 0.19002752539 0.193102069722 0.185493626505 0.178790364297 0.170132074883 0.187191891105 0.172843021691 0.179777298661 0.176222723299 0.157641466463 0.180195116525 0.237394679003 0.226280310947 0.222447934209 0.220486140395 0.230450693082 0.229006895923 0.227001818993 0.222289337207 0.232008422548 0.22075701885 0.20666547313 0.222449960987 0.218801277251 0.235156691974 0.239347508119 0.221054539901 0.213261323555 0.212504634094 0.249237472588 0.225979854618 0.221636903122 0.225713392695 0.233125083698 0.212685206963 0.215719410439 0.209455227207 0.208792490533 0.206361366722 0.230902205737 0.20571314342 0.223403407555 0.200311655198 0.233802587518 0.20384593219 0.22048704521 0.225216247618 0.225812871456 0.211806271505 0.205652934938 0.213231447371 0.227786843309 0.214377387143 0.226280310947 0.226104307704 0.22732908173 0.220904746241 0.224415757753 0.21378854989 0.220845147853 0.213178189936 0.0385029210536 0.0191331286834 0.0412210559221 0.0479132957057 0.0465498029846 0.0439952648454 0.0294155998226 0.0263022819525 0.0288814535105 0.0321813612841 0.0366154046916 0.0324760763417 0.0515944791638 0.0310049526439 0.0546416139704 0.0338597056505 0.0376153728239 0.0546416139704 0.019563854062 0.0269885058553 0.0157505744896 0.0625368339241 0.0143844790412 0.0526157254036 0.0338348827289 0.0297234721938 0.0233631827985 0.0204547357021 0.0243203298193 0.026443786847 0.18161798851 0.18540629351 0.193056292253 0.206460086953 0.199649422865 0.198358533748 0.191516052491 0.176005251549 0.188914967022 0.196119500233 0.199392563722 0.189524653623 0.196344377596 0.19896940691 0.172616127427 0.180167039066 0.200264745011 0.179650753185 0.223145799502 0.187130729579 0.207841346365 0.181781654149 0.218763807382 0.195938920487 0.183272877978 0.184194077304 0.198599402775 0.206127033545 0.198682577368 0.166796826532 0.189310437328 0.182538488189 0.181515358431 0.221921151125 0.202966250725 0.187732725258 0.190303161279 0.209524247949 0.182729260129 0.198439370358 
0.201593038612 0.193392410161 0.187922687561 0.179193994752 0.195211914017 0.180441261226 0.187358375156 0.184927566912 0.165517695891 0.188083050798 0.242939707589 0.233299699182 0.23049760933 0.228144979761 0.237419943564 0.237777043946 0.233178399857 0.23147925362 0.241203272786 0.226929567318 0.213318762235 0.230491320835 0.22634238089 0.242302330141 0.245163744774 0.227035678455 0.220985219764 0.21961554026 0.258484704802 0.235572463293 0.228516087928 0.231839786454 0.242586485114 0.220732170317 0.222439032331 0.217865646283 0.216378419732 0.213247698263 0.238336887242 0.215052759841 0.232719204269 0.208046041848 0.241045913549 0.212450914632 0.229495311164 0.23369764439 0.231101958719 0.219113353825 0.212320151032 0.220660185426 0.234399345217 0.221293359798 0.233299699182 0.23289508713 0.233336932661 0.227805970609 0.232744111879 0.220997609023 0.226042688917 0.219773770977 0.0487507947542 0.0650775470184 0.057802762893 0.0304021403296 0.00977564963899 0.0440829229362 0.0178656109143 0.0223657443481 0.0145412613726 0.00880579384939 0.0433599813023 0.0513293545711 0.0472054655688 0.0249055105449 0.0346274013058 0.0356855998153 0.0249055105449 0.0197624673252 0.0147740519949 0.0419438715965 0.0491548979957 0.0286642180871 0.0731736765881 0.0474587573553 0.0307413987988 0.0281173399333 0.0193986394444 0.0247050709585 0.0190246608336 0.173083486457 0.179910481276 0.184747521108 0.19795391577 0.191518465927 0.190900207622 0.18706914222 0.168885152715 0.179192090216 0.192134059909 0.188561922702 0.185169221179 0.182516578227 0.191341288573 0.168884190059 0.172666616329 0.195883736852 0.16891577804 0.213018522015 0.177613800133 0.204946600226 0.174427531668 0.209108112252 0.18591519295 0.174796071764 0.17642769541 0.188436163643 0.199161967465 0.19263437332 0.157387090389 0.179769711315 0.171986018804 0.173714059835 0.214316704261 0.199152104404 0.185242989169 0.182953007753 0.198045578814 0.177854485907 0.191103831493 0.192902410163 0.18652468984 0.179244739552 0.170950373519 0.188227253613 0.173938849831 0.181251003885 0.177034455953 0.160977434421 0.181661357468 0.240293898195 0.228034626835 0.223480275138 0.221169492968 0.232239647508 0.228872448909 0.229260803782 0.221433214134 0.231450162781 0.223643381382 0.209415344527 0.223528489107 0.220601809879 0.237045012584 0.242726403737 0.224366911478 0.21417342735 0.213777217999 0.24873924373 0.225124576112 0.224039797111 0.228474279829 0.232185938187 0.214138987375 0.217816173262 0.209554716543 0.210717178803 0.208592377005 0.232371287988 0.204873194809 0.222962175481 0.201230387345 0.23556954796 0.203837920113 0.218977104053 0.226289614933 0.229286064361 0.212971252918 0.208146125074 0.215343203422 0.230520772996 0.217614435398 0.228034626835 0.228243837858 0.230522424537 0.223850190554 0.225634530741 0.215935214332 0.224460188266 0.215143400901 0.0529022439667 0.0328752197893 0.0555054115849 0.0509289255649 0.0173017354019 0.040824127775 0.0418711547682 0.0429127577978 0.0455543968368 0.0222834470095 0.0700549868865 0.0480571802481 0.0688386171987 0.0442386440219 0.048830469586 0.0688386171987 0.0328274300443 0.0405886029945 0.0207406620607 0.0561033512724 0.0322786585229 0.0341942563123 0.0291137581017 0.0267606033099 0.0411628160219 0.0351933008888 0.0409598634365 0.0395653722748 0.170381496838 0.173513369262 0.181763628401 0.194913312213 0.188005094963 0.187597093173 0.17963993246 0.164619720579 0.178109956535 0.183964962678 0.188586214824 0.177259498897 0.186421186364 0.18797705999 0.159970598511 0.168451386355 0.188698976448 
0.169860506279 0.211615177079 0.176329007245 0.195682012245 0.169937526308 0.20770892717 0.185927394166 0.171947667368 0.172526710735 0.187654893417 0.194331077481 0.186965121806 0.155693053512 0.178340485408 0.171986662107 0.170058435784 0.210831670623 0.191498228781 0.175622787806 0.178712133706 0.198589692402 0.171227708945 0.186830515333 0.191211132234 0.182219297429 0.176703062328 0.167807776518 0.183929379926 0.16962686739 0.17602523241 0.173642929989 0.152568053554 0.176555503125 0.230732284086 0.221405569072 0.21873883425 0.217008773205 0.22548132942 0.226701619289 0.221306518291 0.22096326048 0.23027897199 0.21448417882 0.200865540743 0.218691208961 0.214158679542 0.230111756153 0.232289297369 0.214283536851 0.209653108975 0.208315017711 0.24732302577 0.224778673487 0.216104443077 0.219500448511 0.231884032027 0.208661452368 0.210499772687 0.206990362059 0.20415679031 0.201176497026 0.226427503413 0.204626996142 0.221691815993 0.19686908479 0.228928610762 0.20163871009 0.219806895143 0.22162291443 0.218587303851 0.207804967644 0.200129766869 0.208295752101 0.221764477978 0.208127749312 0.221405569072 0.220756657587 0.220625985458 0.214861357139 0.220620889076 0.208723632287 0.213494112425 0.208081504586 0.0844496882978 0.080315980567 0.0726849367116 0.0675030089859 0.0473153931529 0.0443066374773 0.0646752376508 0.0680246382237 0.0621370759065 0.0478849585017 0.0214355468841 0.0687094353901 0.0384979333887 0.0389190825118 0.0687094353901 0.0480894783324 0.0504544201599 0.0366660205236 0.0908834253131 0.0410486802894 0.0843383154277 0.0744888741312 0.0620377042787 0.0466247584157 0.0490662247741 0.0410809318858 0.0466044522535 0.220241217585 0.22387872958 0.231567441105 0.244549509651 0.237801597966 0.237271702597 0.229957304486 0.214803305719 0.2275731622 0.234289966045 0.237659204867 0.227701989162 0.234344570768 0.237727323575 0.210730852647 0.218580436475 0.238765139748 0.218842181584 0.260585713889 0.225852980679 0.24576980875 0.220083510611 0.25691340974 0.234991633089 0.221816502308 0.222561867727 0.236939418607 0.244255467135 0.237073335368 0.205416544089 0.227870869354 0.221241762596 0.220094090971 0.260317043933 0.241447551617 0.226019474356 0.228752673734 0.247333244315 0.221529692284 0.236780633698 0.24056209021 0.232170242771 0.226474325634 0.217767662464 0.233885787279 0.219595234935 0.226160651319 0.223600212873 0.203287025744 0.22672247966 0.280139937217 0.271046573965 0.268296536378 0.26646171863 0.275059767728 0.275799812567 0.270911764394 0.269815722172 0.27914751079 0.264301608642 0.250949666543 0.26825814336 0.263926860434 0.279625941989 0.2816892989 0.264092216423 0.259287884013 0.257990452056 0.295945646215 0.273550874886 0.265902495939 0.269211685027 0.28055869892 0.258431707188 0.260338655828 0.256388822593 0.254096852299 0.25120005211 0.275959599762 0.25362897924 0.27069009133 0.246633770083 0.278453866291 0.251048870754 0.268244717954 0.271056483898 0.268212813274 0.257487053623 0.250194192056 0.258191856723 0.27145388403 0.257986657626 0.271046573965 0.2704506544 0.270310564507 0.264635332559 0.270115829289 0.258629632749 0.263204545545 0.257890609764 0.0600044713459 0.054351041098 0.0187381084361 0.0598557442646 0.0614469307754 0.0531453686192 0.0525559397425 0.0279315026787 0.094761292833 0.0773971393828 0.0814189949505 0.0655620569523 0.0700255275487 0.0814189949505 0.0520201647503 0.0579117136152 0.0488956649362 0.0419109171187 0.0567137285928 0.0208614229037 0.0306610868801 0.0327090237565 0.063272034339 0.0543984191577 0.0634480421324 
0.0583685669148 0.139330779418 0.142986837848 0.150850053523 0.163810831232 0.156868051014 0.157669598862 0.149679456655 0.133871337528 0.147188409072 0.153968295801 0.157417010427 0.14689076955 0.155379198936 0.157713790383 0.129254210759 0.137253284329 0.159227064857 0.139693977567 0.180206197156 0.145463442392 0.166351059616 0.138682374547 0.176816693259 0.155870230339 0.140840856643 0.141328264666 0.156536274226 0.163539791349 0.156545980147 0.124345584519 0.147307265103 0.141019689746 0.139034333305 0.180682036747 0.1624502749 0.146314826392 0.147811298228 0.167221256708 0.141400345074 0.156004354912 0.161263479124 0.152006654871 0.145684418167 0.136675034398 0.153561404536 0.139874017863 0.145889334783 0.142806737922 0.121109447078 0.146069682444 0.201853472765 0.191504661606 0.188213817168 0.18712354961 0.1956147304 0.196211110416 0.19205227926 0.19059963575 0.199675257584 0.184878502918 0.170804202645 0.188147863911 0.18370042204 0.200019806255 0.20290821197 0.184684396544 0.179556147674 0.178663646288 0.216648538359 0.194032671841 0.185998827234 0.190015746161 0.201299319844 0.177907547001 0.180731098286 0.176760615884 0.17358596254 0.171156072474 0.196260390732 0.174185108777 0.190914661822 0.166819613367 0.198798087344 0.171315193398 0.190200635461 0.190703604042 0.189694426068 0.178004531696 0.170196606131 0.177783458136 0.19179082566 0.177644952 0.191504661606 0.190881968726 0.191068709685 0.184514101166 0.189756786146 0.178407147108 0.184631515769 0.178548671216 0.0324892056253 0.0484887435869 0.0393579503362 0.0472627188893 0.0192847169709 0.0232472318507 0.0583393494412 0.0585791036352 0.0611786752117 0.0424145094995 0.0604358052165 0.0620651155443 0.0424145094995 0.037432002363 0.036885144463 0.057932237922 0.0663347563067 0.0402054510934 0.0745837939532 0.0379481013657 0.0474009418109 0.0343119409392 0.0339409730754 0.0428079642087 0.0427340086613 0.16139435599 0.167055199688 0.172679145019 0.186588672878 0.18018023169 0.176484226078 0.173177363378 0.15635152751 0.167324305619 0.178559723881 0.177731707828 0.172313382954 0.172981201837 0.177656055027 0.157022451257 0.161367937512 0.181005231952 0.155624650937 0.203264920982 0.165557822324 0.190457367134 0.163325089652 0.197312648791 0.172046353812 0.163261037864 0.165144860989 0.177247951111 0.186978615056 0.179504291425 0.146841839918 0.168174392768 0.160512412099 0.161906209121 0.200546381048 0.18360708151 0.170619997941 0.170849280459 0.188248015891 0.163499893332 0.178863271671 0.178580924982 0.172741578769 0.16748001244 0.159472149167 0.174802823761 0.159227769495 0.167358477359 0.164720514491 0.151099028747 0.168548224145 0.225620984689 0.214383995774 0.210968404368 0.206923044041 0.218629792329 0.216162853062 0.214556733684 0.208383921822 0.219163632298 0.210075568948 0.196412311391 0.211088115641 0.208225582965 0.224211015109 0.229824216763 0.21125046128 0.200361910494 0.199102744226 0.237092122358 0.213257676298 0.21126821314 0.214648561609 0.219950980033 0.202248505777 0.203853239286 0.195906673599 0.198467217831 0.195092454721 0.219319110187 0.191906900165 0.211043854216 0.187166023617 0.222703227226 0.190377176965 0.20432458647 0.215124925944 0.214931156714 0.198560742198 0.194613360236 0.203185148475 0.217932610212 0.206623397839 0.214383995774 0.214797954846 0.217376030313 0.212326968407 0.214327312553 0.203281369316 0.21006441216 0.200554878155 0.0431984510448 0.0262542814372 0.0292765976694 0.0197548242369 0.011556553192 0.0420248516295 0.0609848282508 0.0559281666693 0.0312264925083 0.0398958448421 
0.0413044920489 0.0312264925083 0.0264369738429 0.0230439739916 0.0462734733613 0.0414691510708 0.0363662025945 0.071550533242 0.0478849716213 0.0291862789915 0.0369373377392 0.0270714127462 0.0334218260536 0.0262390277171 0.165544038214 0.172891912863 0.17727312869 0.190354892388 0.183958646909 0.183879480011 0.180336742003 0.161635841791 0.171548979047 0.185413091762 0.180663093483 0.178298498301 0.174219977581 0.184181187608 0.161990271375 0.165191063063 0.189309277337 0.161423020955 0.205054842396 0.170020243784 0.198514733988 0.166940816324 0.201451973118 0.178556962169 0.167238778058 0.168913749918 0.180647428918 0.191850429195 0.185573186785 0.149626920851 0.172104358108 0.164216849771 0.166272040954 0.207167511585 0.192763981398 0.178906141081 0.175593499508 0.189955336349 0.171151190119 0.183759195847 0.185737635433 0.179470325207 0.171692649892 0.163416602952 0.18110112813 0.167100843514 0.174311352225 0.169649991905 0.153736698586 0.174561747041 0.233945473753 0.221154951746 0.216216020369 0.214168470951 0.225368727897 0.22143399553 0.222723035108 0.213934293134 0.223870685298 0.217024022208 0.202620827758 0.216260896263 0.213476245827 0.230060267172 0.236168299839 0.217779153489 0.207119858677 0.206977473585 0.241093686538 0.217416199365 0.217140656671 0.221883614074 0.224541883596 0.206836488872 0.211062122527 0.202310625247 0.203579796415 0.201763166599 0.225319290313 0.197343747333 0.215321920645 0.194216980733 0.228558415828 0.196533366942 0.211711953148 0.218804815465 0.22300715182 0.206091661798 0.201387938181 0.208241563837 0.223688965104 0.210605474313 0.221154951746 0.221416959944 0.223922313478 0.216868041009 0.218202757079 0.208934179216 0.218220121204 0.208484033719 0.0428988991781 0.0449811252912 0.038369173012 0.0392012066073 0.0184411809504 0.0770158611592 0.0591685049971 0.0671690041442 0.0499388239897 0.0544215157144 0.0671690041442 0.0346140690794 0.0413101969584 0.0321994698404 0.0440967531043 0.0383047062906 0.0298070621094 0.0211708137004 0.0201129458309 0.0453565917011 0.0368886973605 0.0457257341 0.0417220412167 0.154888546678 0.158813196403 0.166441835812 0.179672088583 0.172784191158 0.17260621351 0.165322327284 0.149403283235 0.162445964052 0.169819706795 0.172790429714 0.162919256631 0.17010598628 0.17291506116 0.145591206314 0.153172124666 0.174534430185 0.15398983773 0.196191934512 0.160693736573 0.182015889553 0.154710886954 0.192325902157 0.170351903358 0.156482289316 0.157229639215 0.171983255038 0.179460677481 0.172268582678 0.139849276668 0.162711676043 0.156097286205 0.154728919391 0.195984213978 0.177559464338 0.161865850494 0.163584898864 0.182768842876 0.156718657607 0.171791912863 0.175973376944 0.167287390852 0.161244255475 0.152349968293 0.168972557751 0.154707872737 0.161224201041 0.158337859598 0.137882544324 0.161653779265 0.217498297387 0.207268525118 0.204089016051 0.202387117484 0.211405750921 0.211656794766 0.207586435206 0.20562940887 0.215067539532 0.200874708618 0.186922162094 0.204055283767 0.199813802793 0.216054855672 0.219141272905 0.20087129472 0.194993585948 0.193928687775 0.232251037143 0.209370889696 0.202145172225 0.205917531589 0.216546254935 0.194031863501 0.196450064794 0.191939021682 0.189735506736 0.187016932169 0.212133736422 0.189132050882 0.20640648386 0.182100826622 0.214796842045 0.186466235268 0.20445164291 0.206912827034 0.205504201388 0.193322781156 0.186101115818 0.194015369065 0.208047565025 0.194373832921 0.207268525118 0.20679029341 0.207231461284 0.201073842632 0.205977385627 0.194517602543 
0.200437951551 0.194023674059 0.00847168263945 0.0208969725179 0.0218078109788 0.0404437599311 0.0404194345362 0.0297098891496 0.0297074253675 0.0216763215811 0.0230614305273 0.0297074253675 0.00860159575866 0.00340785264521 0.0287758992091 0.0571909561512 0.0141236699669 0.0709946391549 0.0479055513747 0.0306293087394 0.0170492128659 0.00888265836541 0.00729150357409 0.00480661141663 0.185289458233 0.191085055337 0.196910034804 0.210145347321 0.203571296435 0.202882056675 0.197912476038 0.180625340213 0.191835734795 0.202787038295 0.201526915714 0.195916323017 0.19637797116 0.203353873438 0.179306027922 0.184472082084 0.206754233077 0.181958697279 0.225649748274 0.19019500565 0.21522963857 0.186167360837 0.221735787446 0.198765098442 0.186973637291 0.18833349586 0.201196357535 0.21085114286 0.204041973491 0.169797982896 0.192333522046 0.184882922721 0.185665053896 0.226331622925 0.209833971543 0.195365654989 0.194769866304 0.211153682947 0.188870228152 0.202929999244 0.20531727443 0.198262875463 0.19151438955 0.183035158868 0.19999216658 0.185562442931 0.19270971507 0.189054601453 0.171539131405 0.193197031986 0.250465285504 0.239173402368 0.23520366736 0.232978158323 0.24334022521 0.241285239552 0.239942028126 0.234265108703 0.244131225098 0.234017030201 0.220001112132 0.235226825533 0.231841004595 0.248118070359 0.252690451382 0.234464488853 0.225903274535 0.225189555796 0.261354129729 0.238022768459 0.234821661628 0.238891339667 0.245085625014 0.225666229038 0.228735290931 0.22181584168 0.221922833386 0.219499858076 0.243732621444 0.217749348385 0.235627115362 0.212972717548 0.246730042299 0.216195919385 0.232029501648 0.238052252824 0.239120375862 0.224484292562 0.218871170879 0.226408868705 0.241049615623 0.227943328268 0.239173402368 0.239135618661 0.240673256099 0.234329166611 0.237299131316 0.226939754937 0.234208016817 0.226098822397 0.028402190164 0.0277439229346 0.038946334352 0.0434213038922 0.0291676419928 0.0326159908833 0.0134911389606 0.0149220924049 0.0326159908833 0.0134816006656 0.0104081517294 0.0265932070523 0.0554420028459 0.0189257472352 0.0720390188254 0.0531694571518 0.0303305078503 0.0242591305562 0.0161087615284 0.011068875208 0.00482735633945 0.188743108882 0.194722342773 0.200407653053 0.213430694909 0.206860229667 0.206917746824 0.201757002065 0.18426566592 0.195342886433 0.206526515083 0.204777293747 0.199508643938 0.199425600774 0.207211939812 0.182702196026 0.187804249003 0.210815779569 0.185880854144 0.228533589019 0.193748347964 0.21913688541 0.189440254064 0.225129338462 0.202750726696 0.19038113591 0.191653574921 0.204515978518 0.214295374936 0.207718295645 0.173066791358 0.195766348142 0.188344856347 0.189125824359 0.230149564934 0.214039936134 0.199354571186 0.198257058594 0.214135463178 0.192868986061 0.206423136152 0.209356843732 0.20214026995 0.194966833942 0.186438002469 0.20377859508 0.189699928978 0.196612341236 0.192629244536 0.174503529883 0.196914127595 0.254336052592 0.242870282486 0.238667846113 0.23688365824 0.247015930548 0.244839615824 0.243870689439 0.237954961473 0.247598002179 0.237655720218 0.223546452001 0.238671394435 0.23524462783 0.251589019272 0.256100611398 0.237973407633 0.229710932979 0.229189966161 0.264623863258 0.241418386796 0.238285878732 0.24258651618 0.248557401899 0.228985563027 0.232518055884 0.22563381547 0.2253177885 0.223189662674 0.247286475663 0.221462369267 0.239019854286 0.21687609732 0.250217711381 0.219989961903 0.23614243802 0.241179806044 0.242913875985 0.228427928816 0.222559090866 0.229759973658 
0.244438457877 0.230953863516 0.242870282486 0.24276526561 0.244179265793 0.237465205634 0.24045622955 0.230407994296 0.238017253402 0.230045288889 0.0082487413397 0.0434871339043 0.050183840328 0.0462946366306 0.0331671559449 0.0413860283481 0.0432754400997 0.0331671559449 0.018728526127 0.0181135727037 0.0407852007406 0.0552412031248 0.0241124765906 0.0672915663182 0.036051090003 0.0318240646756 0.0214815502931 0.0159437959725 0.0254901889174 0.0237539942631 0.169264556488 0.175091115152 0.180818741393 0.194404360875 0.187867354301 0.185963216048 0.181699481724 0.164435072081 0.175589139186 0.186821392192 0.185627969522 0.180155862933 0.18064011609 0.186712516664 0.164002900607 0.168780956251 0.190185205445 0.164923288811 0.210475012613 0.173888729012 0.199146471591 0.170598213889 0.205685349652 0.18167215365 0.171034804287 0.172628140609 0.185232711805 0.19500403819 0.187902476327 0.154050585176 0.176234935944 0.168650273349 0.169706859713 0.209769138775 0.193110247226 0.179199013537 0.178806517751 0.195681872133 0.172338947581 0.186953230165 0.188260629529 0.181677967227 0.175468629894 0.167135725316 0.183547713996 0.168593151982 0.176177479228 0.172880341359 0.156935998004 0.176943539051 0.234533528357 0.223126396017 0.219327759561 0.216348929156 0.227347260416 0.225044163552 0.22369577874 0.21767535265 0.227960059154 0.218355967949 0.204394726122 0.219392188471 0.216234012192 0.232478694207 0.237601395838 0.219130755018 0.209451421522 0.208536786671 0.245551542374 0.221905162641 0.219288471778 0.22312655653 0.228847315313 0.210092965448 0.212618708514 0.205172062073 0.20633435877 0.203530506813 0.227844403478 0.201098042061 0.219576265526 0.196351165226 0.231028528894 0.199557870197 0.214705957114 0.222733230705 0.223437599857 0.207886214359 0.202974139337 0.210949037245 0.225748458168 0.213347187038 0.223126396017 0.223291835541 0.22533034675 0.219465519892 0.221962474599 0.211307137623 0.21852957733 0.209700938041 0.0419192621384 0.0545713872547 0.0501857282151 0.0312681130066 0.0402402021553 0.0420008929211 0.0312681130066 0.0205720172252 0.0184919891428 0.0424913715031 0.0489285308529 0.0286939286865 0.0683097144827 0.0399734095489 0.0293256914093 0.0276858239832 0.0194684720042 0.0280123772406 0.0234918354003 0.166859723597 0.173331371512 0.178503598003 0.191909917223 0.185423378094 0.184241312774 0.180294315558 0.162400014304 0.173055799755 0.185412361634 0.18273757472 0.178554599617 0.177148633502 0.184812731656 0.162323255063 0.166439616176 0.188986532191 0.162572794611 0.207449709654 0.171422042516 0.198077549025 0.168235674576 0.203119984389 0.179508471538 0.16860424661 0.170241216483 0.182496759111 0.192879913246 0.186103766816 0.151333877107 0.173669495408 0.165942821486 0.167423536544 0.20787297933 0.192134128169 0.178241744028 0.176630454655 0.192503529595 0.170982423968 0.184797056786 0.186354645979 0.179907483835 0.173050611212 0.164731478826 0.181684378357 0.167089130276 0.174542724285 0.170681059367 0.154769008136 0.175102602238 0.233527638993 0.221524217045 0.217249882254 0.214634557666 0.225748190551 0.222754126699 0.222507410575 0.215313006261 0.225475910208 0.217028556203 0.202867049651 0.217307455338 0.214308520933 0.230711314018 0.236268919515 0.217806003587 0.207670960165 0.207072717123 0.242941912447 0.219250459655 0.217623775648 0.221838573376 0.226272530404 0.207953270239 0.211176611808 0.20315950921 0.204399143496 0.201993427277 0.226020220858 0.198708591851 0.217018221505 0.194631582156 0.229236199557 0.197470332905 0.212630809879 0.22033552246 
0.22249437564 0.206324343096 0.201514537553 0.209042945777 0.224134795171 0.211456208554 0.221524217045 0.221736417649 0.223988271389 0.217633390228 0.219633609699 0.209536083539 0.217632171671 0.20838956304 0.0775399671204 0.0564468400456 0.0653641254742 0.0395849667657 0.0441943006701 0.0653641254742 0.03373111328 0.0393588608039 0.0260318537498 0.035188052496 0.0394930245033 0.0375459624937 0.0389958606199 0.0129896263362 0.048815943545 0.0379517413039 0.0436570210079 0.0373605585088 0.163502150984 0.168337389523 0.175155703097 0.187828039408 0.181028588214 0.182647655516 0.175454197959 0.158708602469 0.170954310136 0.179762949884 0.180496225683 0.172501221065 0.176828601599 0.182533215894 0.154901122018 0.161684635697 0.185118234991 0.163277975005 0.203127118484 0.169348081929 0.192443927773 0.163103682661 0.200509349698 0.179853786658 0.164993784705 0.165655282459 0.179962811264 0.188228038198 0.18173609183 0.147930089008 0.171080786567 0.164375611634 0.163483874158 0.205438086531 0.18853460221 0.172643498492 0.17244334411 0.189729944562 0.167174105351 0.180637454384 0.185804185303 0.177096116234 0.169809585441 0.16090449309 0.178551065139 0.165213876 0.171240797871 0.16732581913 0.146067131815 0.17119760345 0.227732558213 0.216775565291 0.212818856201 0.212113111804 0.220864451794 0.220275362854 0.217805693503 0.214378859193 0.223348154292 0.21056133718 0.196361313338 0.212749259626 0.208612136858 0.225027947273 0.228363013794 0.210350767964 0.204552905568 0.204069917831 0.240058060786 0.217392662406 0.211260278871 0.215701291359 0.224730833936 0.202526361799 0.206282759299 0.201292044309 0.198558189974 0.196671901675 0.221200953228 0.197951565037 0.214539723027 0.191917757906 0.223779418483 0.195733699704 0.214045901526 0.214810423689 0.215727246675 0.203293848315 0.195827563807 0.20277537878 0.217057373264 0.202583470922 0.216775565291 0.216238379997 0.216660429864 0.209514540125 0.213990052328 0.203581443905 0.210749726957 0.20422943306 0.0299803388219 0.040141041955 0.0494042382248 0.0475047073328 0.040141041955 0.0460070623429 0.0427394298505 0.0575631845334 0.0966299998606 0.0399614977189 0.101837465377 0.0750240618009 0.0697188576907 0.0332240827502 0.042809511279 0.0352142478257 0.0438103448171 0.215592317841 0.220449330264 0.226776190885 0.240218246846 0.233773884944 0.231038348034 0.2263684672 0.210465175429 0.221881119192 0.231308080404 0.231995396207 0.22508238649 0.227350588366 0.232024663305 0.209334152567 0.215020396004 0.234337406165 0.211132081266 0.256379513335 0.220156177626 0.242726282533 0.216799299323 0.251315628789 0.227298889535 0.217343977664 0.218830240402 0.231491464759 0.240364579333 0.233021642066 0.20102218505 0.222551706707 0.215242680103 0.215887525017 0.254517907032 0.236803444285 0.223122921939 0.224639449766 0.242029857582 0.217221787473 0.232566133597 0.233478733428 0.226945320336 0.221662735537 0.213507733767 0.228892766884 0.213808892567 0.221389781107 0.218899946715 0.202924474402 0.222409930936 0.277044937084 0.267058523642 0.264053120566 0.260683081125 0.271161003688 0.269928478944 0.266921380471 0.262757571495 0.272983826376 0.261803183737 0.248535164351 0.264118904646 0.260747941814 0.276361888176 0.280348586054 0.262426412526 0.254046125774 0.252677731845 0.290308221249 0.267228122274 0.26327783075 0.266417210913 0.273921992864 0.255063180539 0.256547903477 0.250174111671 0.251094888905 0.247813627121 0.2719924919 0.246585755223 0.264876118769 0.241182097008 0.275004799417 0.244784301617 0.259319666846 0.267703173929 0.266005774781 
0.252196034878 0.247119094967 0.255516011217 0.269407340011 0.257485027934 0.267058523642 0.267089999571 0.268509786469 0.263477813268 0.266862086523 0.25569939087 0.261126117571 0.253527997613 0.0490986378456 0.0295882981502 0.0295703235206 0.0490986378456 0.0321772405698 0.0329624694956 0.0320791928311 0.0819581869701 0.0243777845713 0.0815880274595 0.0630031242432 0.0521832400773 0.0270041961787 0.0316626367995 0.0225970983534 0.0306135777155 0.209138400452 0.213486068388 0.2205444529 0.233810061497 0.227135531522 0.225853564299 0.21966357841 0.203852341272 0.216088869802 0.224306040557 0.226217923772 0.217746213648 0.22224285579 0.226486749722 0.201086071725 0.207931744175 0.228270944293 0.206490835508 0.249940445551 0.214362660634 0.235990805565 0.209564571075 0.245741550277 0.222850058752 0.210799114332 0.211874421056 0.225608964554 0.23378761328 0.226547651045 0.194192088088 0.216536885951 0.209533041634 0.209201443093 0.249253656365 0.230961033564 0.21611849601 0.218017866257 0.236081990375 0.21084689547 0.226091432124 0.228781524913 0.221104911259 0.215367366824 0.206800674523 0.222911320613 0.208230596605 0.21525709201 0.2125575014 0.193883560936 0.215963986139 0.270701972881 0.26093677072 0.257916700448 0.255512820108 0.265026258573 0.264740784654 0.260931231555 0.258198696768 0.267948592812 0.254868213876 0.241356571396 0.257920982238 0.254009770883 0.269860597717 0.272941762013 0.255029971614 0.248480879151 0.247232962401 0.285073136127 0.262202525959 0.256323297606 0.259707446043 0.269155247223 0.248322830219 0.250262594392 0.245086205405 0.244160084795 0.241180503071 0.265837705385 0.241871800867 0.259549733542 0.235634358004 0.268589453275 0.239657575868 0.255932774798 0.261005427367 0.2590787222 0.246704864465 0.240323606031 0.248446059436 0.262200385961 0.249216212931 0.26093677072 0.260623330072 0.261243034875 0.255650557301 0.260116706756 0.248814449864 0.254094104786 0.247584466928 0.0426893927085 0.0408370899258 0.0 0.0368318621027 0.029200211213 0.0578784327643 0.069522041979 0.0411420259607 0.0955605905857 0.0684916485028 0.0536985636371 0.0361283833003 0.0349236407736 0.0326544016244 0.0320790517512 0.190734279342 0.198011093099 0.202165335147 0.215193803756 0.208989236133 0.207943677959 0.205067684162 0.186855165408 0.196324830352 0.210203923195 0.205342231011 0.203427359944 0.198526873832 0.208498767129 0.187766259392 0.1906844311 0.213518747065 0.185598548292 0.229746939288 0.194805777978 0.222821572969 0.192481412892 0.225723793976 0.202375384198 0.192457483521 0.194300696695 0.205376522136 0.216660688755 0.21030774241 0.175307222553 0.197000980201 0.189066511629 0.191539232379 0.231058906578 0.21669613367 0.203523564636 0.200666530244 0.214595210264 0.195827171333 0.208641958794 0.209561099462 0.203960485003 0.196717491572 0.188766309975 0.205678014795 0.191492051166 0.198961164508 0.194672996958 0.180152450124 0.19944864987 0.257583228235 0.245178524187 0.240481186058 0.23786630337 0.249341975217 0.245210894146 0.246468005973 0.237456371003 0.247563146561 0.241300303041 0.22729752986 0.240555698528 0.237993038095 0.254193700204 0.26033321069 0.24220764546 0.231128442611 0.230799434891 0.264694157473 0.241194058993 0.241588310222 0.245985985156 0.248083056968 0.231516408669 0.235215479132 0.226198260689 0.228320459149 0.22623565919 0.249391170367 0.221152204365 0.239284874476 0.218351963925 0.252686589685 0.220530747351 0.234477098822 0.243363395844 0.246969168578 0.229990071021 0.225895881072 0.232959826282 0.248113114083 0.23570223574 0.245178524187 
0.245562733947 0.248226439579 0.241702239505 0.242775171886 0.233515238499 0.242256602421 0.232453320509 0.00485178735022 0.0426893927085 0.0245520805084 0.0237321483943 0.0251494297711 0.057422445578 0.0278324002426 0.0740340192529 0.0616449531864 0.0345266403523 0.0352178925007 0.0280808122885 0.0215364121522 0.0177797202497 0.195939824645 0.201850357058 0.207610278022 0.22033675889 0.213750209485 0.214735564684 0.209038553623 0.191585516698 0.202707820911 0.213610139259 0.211883129405 0.206450810174 0.206559465565 0.214810867748 0.189369070647 0.194741318593 0.218356798216 0.193913039155 0.235021949805 0.201158850164 0.226304640977 0.196279111135 0.23226696484 0.210758787397 0.197506357802 0.198589557713 0.211650666116 0.221261000242 0.214919567324 0.180148757389 0.203013849771 0.195747568982 0.196259855385 0.237643665501 0.221708411792 0.206639148011 0.205357745264 0.220920753341 0.200419916968 0.213509698507 0.217293606067 0.209702604063 0.202159105945 0.193545439121 0.211227865592 0.19759997516 0.20414742205 0.199914048013 0.180698578828 0.204233409175 0.261355583351 0.249944219156 0.245612323364 0.244419283687 0.254044690674 0.252080319195 0.251117898529 0.245501499007 0.254799073865 0.244461494615 0.230320341653 0.245583982907 0.241986600586 0.258340089378 0.262471019952 0.244539880639 0.237114058797 0.236754636273 0.271527597514 0.248603215251 0.244975611097 0.24946627049 0.255826675762 0.23571262824 0.239663820665 0.233206292475 0.232061847402 0.230234276049 0.254229520181 0.229085095534 0.246138012907 0.224434087127 0.257016267918 0.227571722833 0.244299258458 0.247702557023 0.249766808341 0.235948695396 0.229554249737 0.236400217176 0.250952182681 0.236961759284 0.249944219156 0.249686907558 0.250743450736 0.243657848264 0.246990711027 0.237179418187 0.244877764453 0.237415960787 0.0408370899258 0.0273499265107 0.025297636378 0.0296282714156 0.0603715739764 0.030303105355 0.0787085816014 0.0655434565706 0.0387042108075 0.0365503327535 0.0303622028421 0.0226096284705 0.0196557817863 0.19925839426 0.205348957591 0.210921560248 0.223617852816 0.217069506306 0.218059817218 0.212575335145 0.194999027921 0.205926088655 0.217171129009 0.215009852018 0.21001079042 0.209452929226 0.218136520475 0.193003747556 0.198137341753 0.221866239829 0.197043564846 0.238162085485 0.204392911714 0.229893095054 0.199683809546 0.235440202166 0.213909407049 0.200827966888 0.201961005661 0.214826820158 0.224639586072 0.218358620882 0.183440825957 0.206248208954 0.198918059014 0.19962810203 0.240932955881 0.225232933618 0.210279009964 0.208738673233 0.223984027141 0.203935797463 0.216876482587 0.220527121607 0.21308659534 0.205456328664 0.196890655988 0.214607347176 0.201007750183 0.207588221909 0.203264328774 0.184313736826 0.207665740899 0.26489357675 0.253366014308 0.248941435732 0.247733075772 0.257464720601 0.255265649267 0.254603866324 0.248608337617 0.25792057227 0.248007634376 0.233863042555 0.24891723403 0.245404277418 0.261753780229 0.266030956859 0.248122124098 0.240457509587 0.240148573616 0.27462290691 0.251686093628 0.248460344614 0.252996524531 0.258897040073 0.239091218679 0.243138472416 0.236454818573 0.235503469176 0.233728803563 0.257606144443 0.232210377675 0.249277823428 0.227793973728 0.260419107201 0.230810446264 0.247340816609 0.251008018719 0.253372216044 0.239328849004 0.233077723245 0.239855175697 0.254460096493 0.240510971982 0.253366014308 0.253147704183 0.254305992093 0.247178452498 0.250316773817 0.240645130337 0.248503359474 0.240885126472 0.0368318621027 0.029200211213 
0.0578784327643 0.069522041979 0.0411420259607 0.0955605905857 0.0684916485028 0.0536985636371 0.0361283833003 0.0349236407736 0.0326544016244 0.0320790517512 0.190734279342 0.198011093099 0.202165335147 0.215193803756 0.208989236133 0.207943677959 0.205067684162 0.186855165408 0.196324830352 0.210203923195 0.205342231011 0.203427359944 0.198526873832 0.208498767129 0.187766259392 0.1906844311 0.213518747065 0.185598548292 0.229746939288 0.194805777978 0.222821572969 0.192481412892 0.225723793976 0.202375384198 0.192457483521 0.194300696695 0.205376522136 0.216660688755 0.21030774241 0.175307222553 0.197000980201 0.189066511629 0.191539232379 0.231058906578 0.21669613367 0.203523564636 0.200666530244 0.214595210264 0.195827171333 0.208641958794 0.209561099462 0.203960485003 0.196717491572 0.188766309975 0.205678014795 0.191492051166 0.198961164508 0.194672996958 0.180152450124 0.19944864987 0.257583228235 0.245178524187 0.240481186058 0.23786630337 0.249341975217 0.245210894146 0.246468005973 0.237456371003 0.247563146561 0.241300303041 0.22729752986 0.240555698528 0.237993038095 0.254193700204 0.26033321069 0.24220764546 0.231128442611 0.230799434891 0.264694157473 0.241194058993 0.241588310222 0.245985985156 0.248083056968 0.231516408669 0.235215479132 0.226198260689 0.228320459149 0.22623565919 0.249391170367 0.221152204365 0.239284874476 0.218351963925 0.252686589685 0.220530747351 0.234477098822 0.243363395844 0.246969168578 0.229990071021 0.225895881072 0.232959826282 0.248113114083 0.23570223574 0.245178524187 0.245562733947 0.248226439579 0.241702239505 0.242775171886 0.233515238499 0.242256602421 0.232453320509 0.00792313459309 0.0236297776546 0.0536541076705 0.0108412074571 0.0628560371629 0.0400426717019 0.0242916508179 0.0172043768705 0.0050674059167 0.0120480352419 0.00909567999576 0.179733982337 0.185130641595 0.191359647755 0.204666509458 0.198016972069 0.197255576955 0.19185518721 0.174855164332 0.186481505005 0.196685913747 0.196375987191 0.189833405887 0.19172374648 0.197739787295 0.173088361774 0.178764981726 0.200732295255 0.17675687262 0.220477540317 0.184804954654 0.209049846749 0.180446207623 0.21646451476 0.193507677512 0.181414841832 0.18267689502 0.195938395299 0.20515640915 0.198204629073 0.164323672477 0.186951627799 0.179636188812 0.180004434454 0.220785829324 0.203765417111 0.189073881786 0.189075562692 0.206144383335 0.182845346787 0.197260508087 0.199879524686 0.192526504243 0.186000212457 0.177429453542 0.194272636643 0.179755047797 0.186852872988 0.183420017951 0.165408476828 0.187374971211 0.244379342624 0.233352874625 0.229596132577 0.227371143644 0.2375226222 0.23596534417 0.233967797192 0.229101525181 0.238946455726 0.22794079194 0.213947298319 0.229612039594 0.226054866131 0.242325328031 0.24658946328 0.228320886035 0.220245475488 0.219410638455 0.256223296024 0.232925784221 0.228888220536 0.232841814871 0.240004491346 0.219971839687 0.222802209556 0.216357175156 0.216093165101 0.213536808192 0.238014479196 0.212556057937 0.230415334062 0.207278762186 0.240959781593 0.210762423837 0.226970181506 0.23251461018 0.23290234378 0.218739342276 0.212847200349 0.220551656017 0.235068599786 0.221910374672 0.233352874625 0.233237981586 0.234568034713 0.228343429058 0.231717374059 0.221050886534 0.227947933137 0.220165226053 0.0299322879907 0.05484354778 0.015508827535 0.0698377856312 0.0461226756997 0.0288439268589 0.0179749812614 0.00820193608475 0.0104514607363 0.00601471721254 0.182232775094 0.188144985482 0.193867682363 0.20711590793 0.200550766912 
0.199860135361 0.195016228938 0.177611309591 0.188739284934 0.199919451707 0.198412537874 0.193032154463 0.193186123937 0.200329430243 0.176446801251 0.181459762374 0.203862717343 0.178811701467 0.222599641879 0.187102653143 0.212418919228 0.183165138771 0.218668582765 0.195650642874 0.183922319896 0.18531418569 0.198099986213 0.207874048966 0.201087073424 0.166709980365 0.189246782627 0.18175655241 0.182635613511 0.223327464443 0.206968516624 0.19255440527 0.191763016548 0.208041469681 0.185943960269 0.199929552497 0.20225187193 0.195263692383 0.188456709089 0.17999008547 0.196992188976 0.182563902533 0.189736470349 0.186019084029 0.168664903455 0.190216873808 0.247702840069 0.236282170601 0.232244216334 0.230002139075 0.240457584895 0.238254033101 0.237105182737 0.231183514354 0.241076068443 0.231215767875 0.217161214732 0.232270602033 0.228935795245 0.245246208475 0.249963377759 0.231700745278 0.222930449716 0.222251471523 0.258324331925 0.234942120984 0.231973156153 0.236088516952 0.242009296663 0.222723564262 0.225860019318 0.218781249758 0.219012595092 0.216618271437 0.240821050718 0.214649402949 0.232567858569 0.209986385677 0.243846113816 0.213146433073 0.228924161819 0.235098611969 0.236391467186 0.22153411273 0.216011138881 0.223520339681 0.238239770023 0.225155295827 0.236282170601 0.236275025927 0.237908750738 0.231525069379 0.234355153325 0.224056763193 0.231485450395 0.223215654925 0.0572284762647 0.0227453897084 0.0528238726337 0.0444991634471 0.0266973769114 0.0336948312224 0.0272062333147 0.0274688740262 0.0263328670099 0.185716682176 0.189934662812 0.1972487923 0.210230033124 0.203429572326 0.203652674494 0.196491149306 0.180504651022 0.193104816748 0.200892549998 0.203064104164 0.193989394378 0.199601579765 0.203900303764 0.17670118478 0.184039790634 0.205684310622 0.1846555251 0.226113704537 0.191415178407 0.212986837934 0.185538541574 0.222740004307 0.201086740863 0.187280443615 0.188038576609 0.202414184433 0.210238732317 0.203278048935 0.170511460418 0.193354539773 0.186626010428 0.185640639557 0.226777734968 0.208697723096 0.193060716385 0.194490643156 0.212667601723 0.188009393157 0.2026405567 0.206872630148 0.198376957217 0.192017886181 0.18319542151 0.200010008529 0.185964041385 0.192410951726 0.189280207199 0.168712331151 0.192752182621 0.248024768226 0.23796255467 0.234657432476 0.23319215601 0.242043627351 0.242092609424 0.238343194449 0.236048752979 0.245336704526 0.231521041315 0.21770196943 0.234612870361 0.230403022013 0.246515101463 0.249324779094 0.231381623939 0.225835327084 0.224876205525 0.262239370649 0.239554652873 0.232726560614 0.236544998844 0.246724791051 0.22460575232 0.227286958934 0.222708966341 0.220419587826 0.21790450046 0.242699459353 0.219658545965 0.236700307439 0.213086341068 0.245286981729 0.217239309033 0.234909245332 0.237182720463 0.236050820735 0.224251890124 0.21698485009 0.224622112179 0.238475066919 0.224611061526 0.23796255467 0.237436165661 0.237675287343 0.231370754771 0.236291823604 0.225200181658 0.231029643716 0.224953381888 0.0635710838178 0.0592624543316 0.0596058092469 0.0328544820604 0.0698260168465 0.0573261822931 0.0634460229613 0.0541080958794 0.147303482686 0.155161448288 0.159136079545 0.171063643845 0.16463221595 0.168584971583 0.163624194761 0.144235266953 0.153827968777 0.168063795028 0.16179307599 0.160206770568 0.155398151635 0.167887347752 0.142965424509 0.146100735749 0.17375554158 0.146517107633 0.184102282814 0.152518687773 0.182006446517 0.147493939017 0.182975421708 0.163616999091 0.148716120222 
0.149770660992 0.161958247648 0.173135111896 0.168022513735 0.130738265008 0.153920373487 0.146537313722 0.147925357539 0.190546691654 0.177990062346 0.162980599048 0.157241520972 0.169863812022 0.155438284157 0.165389918434 0.170707644688 0.163197102596 0.153422018191 0.144858486622 0.164315178145 0.152440808389 0.158128021971 0.151920639439 0.132537826475 0.157359920068 0.217237278816 0.203578319465 0.19760287759 0.197919551969 0.207681519923 0.203644803794 0.206313583309 0.1971606419 0.205828867849 0.199064489236 0.184178187026 0.197538822775 0.194404518771 0.211337912042 0.217143803133 0.199166671372 0.190313841849 0.191119829867 0.222105237734 0.199176357415 0.198307304 0.204224647581 0.206706114847 0.187412194782 0.19389709622 0.185837912363 0.184448440446 0.184112718669 0.207064291919 0.180760878547 0.196874753064 0.177987251583 0.20993106085 0.180009687643 0.197504007472 0.198552660709 0.205899002329 0.189953313758 0.183706289376 0.188863124638 0.204480568311 0.189510910124 0.203578319465 0.203449868624 0.205307715981 0.196398052452 0.198053210544 0.190139115088 0.201200460168 0.192215464603 0.0646648846724 0.0408252785942 0.0324283536149 0.0110096321254 0.0094227049932 0.0109851717283 0.0161323040352 0.185995265043 0.190666336997 0.197500031875 0.21094278484 0.204249573992 0.202798103731 0.196994111724 0.180742040387 0.192881751338 0.201790619427 0.203110749465 0.195155158145 0.19908437034 0.203454592708 0.178505994536 0.184932353957 0.205646229373 0.183039730424 0.227253690432 0.191142107437 0.213742423108 0.186621634482 0.222820224457 0.199555206103 0.187692696694 0.188884746385 0.202513734055 0.21103182776 0.203761324581 0.170936996707 0.193374027451 0.18623653615 0.18612791719 0.226436319176 0.208431967216 0.193707164322 0.195058461584 0.2131353075 0.187987297837 0.203202157658 0.20562939159 0.19810682266 0.192261721083 0.183685994256 0.199939039445 0.185109577754 0.192293556305 0.189460942551 0.171326016098 0.193018202765 0.248893705524 0.238594153414 0.235368547023 0.232809197602 0.242745772985 0.241996085058 0.238752248121 0.235253956997 0.245180784501 0.232837883436 0.219103668122 0.235388183401 0.231636120508 0.247694764978 0.251385734184 0.233178069062 0.225757404345 0.224585782777 0.262534762143 0.239353867417 0.234157942476 0.23769286224 0.246342255252 0.225805082888 0.227891511242 0.222153883963 0.221706146684 0.218742321816 0.243483255972 0.218775046696 0.236736505477 0.212768083313 0.246367071263 0.216653745648 0.232866084107 0.238586937872 0.237342638049 0.224021718097 0.217958679267 0.226108360277 0.24024082191 0.227355271774 0.238594153414 0.238405022146 0.239431466759 0.233734880392 0.237715653864 0.226470518154 0.232346541994 0.225139690924 0.0393671458325 0.0464638919444 0.0721210062546 0.0652058473437 0.0727980489441 0.069475743339 0.146377403101 0.147841270546 0.157364583964 0.169967544666 0.162961576643 0.163688135507 0.153695971045 0.140223917866 0.154815274304 0.157460458795 0.165356253736 0.150768971529 0.165320051187 0.163744020488 0.133177827162 0.143578449734 0.163063976092 0.148488500757 0.186880468442 0.153029883361 0.168852310214 0.144817839236 0.183493098196 0.16367577587 0.147767272784 0.147687188567 0.164075138054 0.168705452589 0.161392920433 0.132534765042 0.154766041608 0.149414499828 0.145576557217 0.186084208163 0.165901662347 0.149128920765 0.153742006284 0.175201329509 0.146090630422 0.161672202019 0.168031911823 0.15768748388 0.152605178523 0.14358929792 0.159222856143 0.14586039418 0.151244472229 0.149339889326 0.125727972716 
0.151529925609 0.203450036676 0.194986217471 0.192843589773 0.192004248179 0.198946853705 0.202034655336 0.194735870429 0.197390062155 0.205967080811 0.187074482741 0.173679535364 0.192730113791 0.187498820793 0.203203985607 0.204052975425 0.186370806955 0.184428712838 0.182999565721 0.222535792393 0.200873648 0.188899014225 0.192203777807 0.208005111614 0.182384645276 0.184037705225 0.182740791406 0.177549631149 0.174643342307 0.20005255491 0.181451046073 0.197344936488 0.172060390986 0.202165775813 0.177622921879 0.197550181919 0.195373313831 0.190875275066 0.18253037262 0.173366080571 0.181382900577 0.194116360457 0.17989694523 0.194986217471 0.19390780748 0.192735187036 0.186889353803 0.194252690611 0.181881177771 0.185754237755 0.182012475847 0.0362542669235 0.0433946419111 0.0395938166279 0.0499100911085 0.0488639489089 0.15152574063 0.154669324023 0.162831337388 0.176665753207 0.169806006203 0.167122046596 0.160378494232 0.145396880889 0.158882941041 0.165181343889 0.170013544811 0.158841213295 0.168049497633 0.168033584387 0.142281111905 0.150191536414 0.168801703096 0.149137142763 0.194382185315 0.156984666205 0.176687245859 0.151908690894 0.188914920448 0.165149458367 0.153257832095 0.154263633953 0.168972071115 0.175883337588 0.16793063653 0.137286345822 0.159386690667 0.152763325671 0.151315442368 0.191108253021 0.171290308569 0.156330262179 0.159994057675 0.180780718652 0.151317533557 0.168120728135 0.170508979962 0.162341150711 0.157837504705 0.149168793021 0.164327797527 0.148926027403 0.156192253462 0.154516403486 0.136169004568 0.157264478091 0.212122717934 0.202635989676 0.200296956824 0.197168424809 0.206818081536 0.207593997357 0.202129844535 0.201194661862 0.211287068634 0.196367072918 0.182828097116 0.200324517328 0.196191886272 0.212106652571 0.215234691898 0.196754062665 0.19012565359 0.188386972878 0.22899541165 0.20586463613 0.198258080181 0.201204812212 0.21275581912 0.190751178898 0.191559171703 0.187109767024 0.186199376174 0.182482910799 0.207969239625 0.184720534783 0.20291190811 0.176982703379 0.210810911009 0.181756418439 0.198660937707 0.204191111601 0.200384835859 0.187985877445 0.181554202617 0.190580156178 0.204342491841 0.191957847677 0.202635989676 0.20235052312 0.203098119616 0.198224746232 0.20315819974 0.190687755597 0.195275148422 0.188602851388 0.0403137962831 0.0283224608052 0.0352856337592 0.0279159520938 0.163417587591 0.169024399252 0.175174902733 0.188097949492 0.181377843525 0.182339093803 0.176249889638 0.158817342465 0.170474967966 0.180845899763 0.180001842158 0.173609983768 0.175496658011 0.182387588854 0.15630806646 0.162058645803 0.185720747493 0.161911269413 0.203372519665 0.168871283524 0.193698104443 0.163602773962 0.200347540332 0.178768462478 0.164992621499 0.165987648079 0.179610648238 0.188805808549 0.182291142535 0.147635227715 0.17074779736 0.163623388142 0.163626611748 0.205461058801 0.189137857415 0.173807850394 0.172761367844 0.189359771077 0.167597338583 0.181003226921 0.18512092453 0.177130623568 0.169726367846 0.160952549622 0.178668367454 0.164953979441 0.171439694143 0.167334380068 0.147661742228 0.171525073941 0.229212799489 0.217687295638 0.213458314816 0.212251579761 0.221837277994 0.220248120289 0.218836902814 0.21380890761 0.22315649106 0.21209021333 0.197773787684 0.213427195478 0.209691510886 0.226221325989 0.230427027381 0.212200169545 0.204828617154 0.204406576244 0.240127225685 0.217034360511 0.212651201199 0.2171572787 0.224326708779 0.203421180278 0.207247009432 0.201076078606 0.199634752126 0.197699005741 
0.222082174457 0.197241869749 0.214400679015 0.192020712279 0.224886788727 0.195424460906 0.212817607264 0.215705919361 0.217495203752 0.20360497892 0.196993475509 0.204019508797 0.218720046506 0.204677118987 0.217687295638 0.217405004007 0.218489214457 0.2114130463 0.214943199157 0.204771656731 0.212548494217 0.204988233676 0.0127227257693 0.0140144534262 0.021205315625 0.187358882688 0.192125766059 0.198772205734 0.212374766999 0.205757524042 0.203466313878 0.198269360384 0.182075536022 0.193992765379 0.203217006858 0.204331991647 0.196763084721 0.20014614007 0.2043270841 0.180514052654 0.186571402116 0.206603961592 0.183553088594 0.228871000555 0.192232411854 0.214999691886 0.188338267885 0.22392031363 0.199986970587 0.189109313009 0.19048409561 0.20374267521 0.212452607057 0.205039311477 0.17250608603 0.194596025922 0.187333202847 0.187569910566 0.227244357321 0.209244321292 0.19501056691 0.196469921009 0.214533216851 0.189073691369 0.204563213894 0.206128514596 0.199072106837 0.193573357583 0.185159768848 0.200995780718 0.185854759801 0.193344854477 0.190733738309 0.173803583444 0.194263918799 0.250041858008 0.239752334137 0.236636793113 0.233547933384 0.243918830588 0.242901503454 0.239756635176 0.235879076786 0.246072511959 0.234292093183 0.220682667006 0.236686092505 0.233134556981 0.249085035733 0.25310029637 0.234845006722 0.226672837999 0.225370202913 0.263585873652 0.240269679341 0.235701118126 0.239046163418 0.247138992266 0.227341018945 0.229062028698 0.222899030248 0.223273394137 0.220071420598 0.244719204688 0.219434280561 0.237757183282 0.213627898609 0.247720909818 0.217420020786 0.232895251914 0.240194731922 0.23871780496 0.22484832656 0.219342733041 0.227741527766 0.241902288188 0.229541957119 0.239752334137 0.239709455557 0.241056941465 0.235717844261 0.239326740269 0.227988125157 0.23374803561 0.226117504317 0.0112944223112 0.0116717916683 0.180315550232 0.185624806253 0.191893759643 0.205310880338 0.198677901623 0.19742928403 0.192207723586 0.175346071899 0.186991091141 0.197103740487 0.197010559796 0.19035952375 0.192415460068 0.198031907427 0.173783350974 0.179445795208 0.200920544981 0.176980189616 0.221316628359 0.185291341794 0.209338461592 0.181164395028 0.216995536327 0.193672787851 0.182024521085 0.183353435463 0.196543976916 0.205721030365 0.198641096205 0.165041173069 0.187513275437 0.180170480519 0.180593580969 0.221064954649 0.203852710847 0.189347093282 0.189636739108 0.206917162302 0.183105626465 0.197802407018 0.200023916345 0.192826802002 0.18656768388 0.178054301819 0.194629767109 0.179904508376 0.187157083025 0.183925453374 0.166391248738 0.187799823192 0.244629672801 0.233720966397 0.230103074977 0.227589285523 0.237898238239 0.236372028201 0.234191133105 0.229406521443 0.239389014631 0.228371263352 0.214461216321 0.2301324354 0.226622699222 0.242825477857 0.247136546528 0.228837795311 0.220544275821 0.219592617307 0.256764787312 0.233410014962 0.229423132068 0.233225877209 0.240428033421 0.220595144087 0.223137647662 0.216631996383 0.216685610877 0.213950565713 0.238461242313 0.212870718316 0.230920602382 0.207535413146 0.241449474994 0.211057657029 0.226977435726 0.233226691247 0.233226664174 0.218957536154 0.213267398065 0.221167604245 0.235644719391 0.222751284499 0.233720966397 0.233655964463 0.235074683268 0.229091941731 0.232416283617 0.221592631388 0.228271321964 0.220395124759 0.00983853465038 0.190743576675 0.196127634014 0.202306832097 0.215593264431 0.208988714901 0.208014681254 0.202752493595 0.185879141861 0.197391448173 
0.207587390017 0.207246286244 0.200818808941 0.202436920976 0.208560198594 0.184214467342 0.189842910423 0.211501430369 0.187510793393 0.23133568248 0.195719341744 0.219810583637 0.19153274103 0.227260064133 0.204197085901 0.192430421495 0.193730481177 0.206830708645 0.216081015857 0.209114627207 0.175418662775 0.197889059256 0.19054759391 0.191035254803 0.231501988345 0.214455580052 0.199914298524 0.200065006525 0.216984298716 0.193736240961 0.208206242937 0.210580217034 0.203385972256 0.196973524191 0.188474338185 0.205152454389 0.190585220715 0.197746685322 0.194398694242 0.176663593729 0.198324488838 0.254937931723 0.244068220209 0.240384243666 0.238035456407 0.248220627185 0.246646653405 0.244595211757 0.23972473422 0.249602645305 0.238680104062 0.224803483418 0.240405804306 0.236887764418 0.253050369586 0.257246240499 0.239074958955 0.230985078994 0.230097737916 0.266839316033 0.24359819756 0.239685124735 0.243538571704 0.250621888643 0.230851769093 0.233547789712 0.227075078845 0.226983547477 0.224362834405 0.248743336169 0.223247552937 0.241135379781 0.21805286342 0.251689329115 0.22150661959 0.237406179341 0.243353541542 0.243526559835 0.229448331316 0.22367406721 0.231427017129 0.245837459247 0.232818717405 0.244068220209 0.243969807123 0.245289155672 0.239200193755 0.242559723366 0.231895813526 0.238589343178 0.230870674099 0.185074856292 0.190966638679 0.196734232482 0.209858428647 0.203274864625 0.203041999473 0.197926990268 0.180506604609 0.191676192961 0.202741147438 0.201237653931 0.195771907735 0.19601488206 0.203403274574 0.179018659177 0.184172394714 0.206916175569 0.182062920029 0.225161143908 0.190059350101 0.215302655172 0.18583273214 0.22154130677 0.198918819466 0.186732254392 0.188033674574 0.200939126942 0.210648020541 0.203966819203 0.169461460201 0.19212652304 0.184698761261 0.185447917615 0.226382707341 0.210090272929 0.195464472523 0.194576182922 0.210724866996 0.188972223666 0.202749851185 0.205495551665 0.198311837225 0.191308228071 0.1827836447 0.199987032182 0.185758986102 0.192762378199 0.188912535548 0.170985151469 0.193136210612 0.250560354729 0.239150130043 0.235042618318 0.233086785485 0.243308731763 0.241203268173 0.2400598963 0.234273096316 0.244008938502 0.233948328457 0.219858019146 0.235053540533 0.231632132621 0.247968147104 0.252509320758 0.234318201257 0.225941055535 0.225341346975 0.261132458323 0.237859251815 0.23464919225 0.238863315744 0.244976238699 0.225403382703 0.228750181247 0.221868483944 0.221696049716 0.219446034661 0.243630311449 0.217754591911 0.235449762585 0.213055210612 0.246589228494 0.216230229788 0.232311179578 0.237695560547 0.239155145074 0.22460119101 0.218814227997 0.226159982187 0.240839111604 0.227492031494 0.239150130043 0.239069291581 0.240533107829 0.233959974366 0.236956995194 0.226761267667 0.234246750663 0.226212511867 0.0174000566092 0.0121264323023 0.0260599074483 0.0195345881682 0.024716388707 0.0269505563041 0.00951579401108 0.00952789620934 0.0312682326376 0.021380704298 0.0249359987416 0.0321633377276 0.0221499569656 0.0250465767598 0.00649156808823 0.035247677191 0.0178478945632 0.0452164673822 0.00796037757126 0.0457327534983 0.00806434689496 0.0382898633561 0.0225581753549 0.00212523780258 0.00635071079008 0.0188691425113 0.0279463141708 0.0242753243704 0.0170805625395 0.00904309726647 0.0106287996797 0.00382969063087 0.0441552932431 0.0411601284763 0.0356126616505 0.0116846154421 0.0333280322148 0.0231237862627 0.0194233884188 0.0266405905658 0.0193741592831 0.00647009737397 0.00313195595137 
[large numeric matrix: rows of space-separated floating-point values (pairwise-distance-style, with zero diagonal entries) from a plain-text data file added under voice_bridge/; raw data omitted]
0.0351799654171 0.0460355764213 0.0372310571496 0.0466115069454 0.0500875517655 0.0367452397055 0.0406580228721 0.0534802351733 0.0469702184582 0.0259083864948 0.0383091416944 0.0609916135179 0.0430915691708 0.0113258711014 0.0432937210726 0.0175511998728 0.0124157338497 0.03212454916 0.0158561794962 0.0176896387613 0.0143093876737 0.043181186929 0.0362893217044 0.0315177649353 0.0141883840024 0.0359022033885 0.0492746305333 0.0223018781484 0.0211045193185 0.0229404408443 0.0181288833884 0.0110415500412 0.00837364985146 0.0172572597582 0.0108947935426 0.00733209366776 0.0131427681285 0.0115359384743 0.0199994152339 0.0092535804352 0.0217603350968 0.0184515208125 0.0501255669434 0.0279388443296 0.0142651752221 0.0382528821443 0.0251771073359 0.0222405400165 0.018907996804 0.050407441851 0.0445874581383 0.03644061978 0.0176520694999 0.0424414179548 0.05338489352 0.0323923780445 0.0108812710336 0.0260958051981 0.0217902651406 0.0213471493056 0.0109405604951 0.0249968133743 0.0131136689852 0.00893536297262 0.00802513881767 0.0192978256402 0.0302167707341 0.018365059649 0.0130926236805 0.0184689475202 0.0343186729394 0.0436186327673 0.0296745080858 0.0408423177906 0.0469164023153 0.0321355072481 0.0293927572919 0.0115197032526 0.0423686398976 0.036129628995 0.0335484782832 0.0259949361948 0.0269704035297 0.0607621523939 0.0380404513983 0.049798653181 0.0409231346593 0.0474755392541 0.0535032395417 0.0371795334759 0.0413092694389 0.054257484038 0.0493594143559 0.0284802931744 0.0415978746211 0.0622229836323 0.0453773607077 0.0190484002067 0.0207749590973 0.00729318592678 0.0172314224853 0.0208211767145 0.0287420582694 0.0242194382748 0.0225231335589 0.0240241380028 0.0229680498334 0.0402905798782 0.0150429891355 0.0374592753112 0.0199314954492 0.0195088829088 0.00836639606005 0.0247669201274 0.0222507048525 0.0204476192567 0.021382719754 0.0305241597933 0.0212891172726 0.0134776031268 0.0107610654817 0.0367912026131 0.0227467739819 0.0252412358335 0.0153803531506 0.0101093922522 0.0191555088988 0.0375470144869 0.0363071087647 0.0222024666973 0.021689493319 0.0288400411309 0.0426974513075 0.0286740878663 0.0220177373135 0.0125507954062 0.0111145943691 0.0144555489867 0.0195277194831 0.0258649050915 0.0118634455346 0.0120062047089 0.0205457113931 0.0226933950128 0.0265812358015 0.010518368073 0.0210005232169 0.00605049088221 0.0227443548737 0.0240543202468 0.0298445720969 0.0127076622649 0.0212764877573 0.0130936058244 0.0350960563748 0.00616350137707 0.0220053566886 0.029182207808 0.0470447673611 0.0143549158293 0.0270200109504 0.0261798064669 0.0399469917707 0.0417589617676 0.0280193213775 0.0317524179632 0.0441157592928 0.0402109444472 0.0286518076954 0.0255338040061 0.0460635600013 0.0240522825965 0.0109052701432 0.0235077295524 0.0318665250498 0.0308330797717 0.0201941994246 0.026209699134 0.024272445381 0.0437154276666 0.0215651164399 0.0334110813047 0.0182660118628 0.0127706309369 0.00547043300996 0.0236736081076 0.0193715054877 0.0206906520081 0.0206326054685 0.0278093020752 0.0196854448654 0.0196839348285 0.00687772387853 0.0318861086105 0.0187829482374 0.026787715043 0.0349795665731 0.0377560197701 0.0169430774361 0.0294826475268 0.02554791035 0.044564230482 0.0307872765104 0.0282156909894 0.0140157312571 0.00304825224888 0.013131876158 0.0254598849649 0.0242359050423 0.0208413846069 0.0206178921833 0.0265921223132 0.024263714379 0.0287692622655 0.0105411880456 0.0255889461915 0.0107722057824 0.040112260752 0.0281920691871 0.0346799127455 0.00547299312148 0.0351698801996 0.0413927767406 
0.0178561632488 0.0295704831304 0.0240442618733 0.028433519767 0.0198981190422 0.0164655446546 0.0293872770207 0.00805992628737 0.0103936152637 0.0224915115769 0.0226675446032 0.0164308167554 0.0187959028502 0.0318543120786 0.0236941138036 0.0214366809685 0.02158460699 0.0453234846239 0.0100348820296 0.0205571695519 0.0349449705288 0.0594353543617 0.0269232145172 0.0378563722335 0.0357646095878 0.0508614713562 0.0504482642845 0.0397245591306 0.0434432071263 0.055747628056 0.0497669350857 0.035138779746 0.0361053959246 0.0583555321158 0.0365217793435 0.0331608473129 0.0326376645366 0.0244797200401 0.0260351758605 0.0198255233977 0.055257342683 0.030607412773 0.0406724640229 0.0316826493706 0.0417015250124 0.044714144963 0.032104964073 0.0358439539437 0.0485265392209 0.0417397589401 0.0211023329973 0.0328093106913 0.055937896155 0.0382216948189 0.039178382683 0.0116462448464 0.0328449320294 0.0356452166019 0.0431040247851 0.0127555033307 0.0192172916283 0.0248029464024 0.0398126039657 0.0387676066679 0.0301985227624 0.0325088097853 0.0421224110282 0.0389248491362 0.0344395222458 0.0235149407659 0.0407276725105 0.0194230824244 0.0402634745964 0.0467109968717 0.0198309327046 0.0277799641062 0.0285372209353 0.0306922978338 0.0217975015009 0.0133715864954 0.0282760151712 0.0107001908888 0.0106288988311 0.0196086000788 0.0208164998238 0.0183683750797 0.0208677693371 0.0306825719595 0.0267116346537 0.0244223076771 0.0326854873354 0.0507787766717 0.0180800830921 0.0283453513368 0.0285692206738 0.0439964220236 0.0435330479692 0.0330988349255 0.0364104267178 0.047999984958 0.0430530022579 0.0322085934623 0.0282999450485 0.049271623839 0.0273166054796 0.0425206430225 0.0621747559979 0.0316259435967 0.0474306721161 0.0460270405303 0.055722110458 0.0616108763567 0.0415195660786 0.0463779068274 0.0600405303926 0.058487923658 0.0428330727641 0.0451145046508 0.0622811849491 0.040848815271 0.0429266615699 0.0304603009077 0.0328680598837 0.0193092495687 0.0257358921646 0.0276472175985 0.0230606485279 0.0242829569209 0.0335562675322 0.0240112096757 0.0023427193432 0.0215758461563 0.0440130376785 0.0333268924093 0.0336594835101 0.0267459293135 0.0300685155468 0.0190916149163 0.0302108808069 0.0236410923325 0.0197983357166 0.0116623663768 0.025981149508 0.0407083432987 0.0270506742353 0.00533601555818 0.0243947872794 0.0165388277645 0.0203993556991 0.0306449706863 0.0351570407904 0.0187109927435 0.0216936002715 0.0328125704384 0.0328483395342 0.0289711567156 0.0177290185247 0.0324417191457 0.00990245486371 0.0144435719572 0.0253854499058 0.0235470613048 0.0222023475244 0.0213735089495 0.0257216217897 0.0239916366178 0.0307770932602 0.0118559544137 0.0237036848381 0.0117425310805 0.0184555467145 0.0157796168272 0.0175662614613 0.0166625646078 0.0231975140243 0.0146299258577 0.0171445810537 0.00397360286698 0.029169512627 0.0192796522734 0.0184254094366 0.0151089228932 0.0104342480435 0.00810789324855 0.0108902089943 0.0236280097789 0.017384613998 0.0213096795304 0.0252727380791 0.0279798538754 0.0244605767611 0.021500625639 0.00796997392989 0.0256665045822 0.0176058458155 0.0292950203769 0.0310442784549 0.00504787707871 0.0188703006517 0.0219245561497 0.0212095981034 0.0149942492526 0.0252687339312 0.016225955903 0.0138989004364 0.0180767927652 0.0222193332382 0.0141002986592 0.0215877207325 0.0172352062998 0.0156454524782 0.031383424491 0.0212505739868 0.0142951058056 0.0253431933269 0.0219030212003 0.0156518397703 0.0263438271284 0.0285058962486 0.0193626993311 0.0417261059969 0.0313743437253 0.0260801912789 
0.0155074292908 0.0227467684355
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8ed5b9653f4ed88e146d2d5dc5b032537b426f8b
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-jensenshannon-ml.txt
@@ -0,0 +1 @@
+0.320369972991 0.338972466 0.308199372323 0.3452431902 0.310024768313 0.357115225615 0.311131096357 0.357391534414 0.329718053755 0.347365921475 0.335272625287 0.336451560653 0.33015370606 0.369628769749 0.344499490029 0.321622508707 0.345377707016 0.321007207534 0.350728979121 0.32809430086 0.30207071308 0.291663252492 0.30760470102 0.315976639534 0.308132467187 0.313014586878 0.310463895925 0.321091616502 0.290044394125 0.322213459935 0.315509196522 0.3331114403 0.281071919202 0.320854431887 0.332190658438 0.299342730178 0.313528775154 0.310049073937 0.288821516545 0.307662081954 0.328387688508 0.317185603454 0.332046170365 0.291912213887 0.37870970117 0.336080073379 0.304593343921 0.330138983604 0.355071759299 0.311946140607 0.302025400768 0.330940761586 0.351140062502 0.354772884287 0.272605322053 0.327957349848 0.28871110366 0.320821172951 0.340976919806 0.30757488831 0.320975346884 0.252776262329 0.314549731907 0.326876483 0.337684418756 0.296520013735 0.31493077245 0.327721982167 0.325802862624 0.341908184107 0.300481749419 0.312499767894 0.301061762121 0.27665157989 0.3082566692 0.287466396145 0.288313694552 0.296629698731 0.283556095025 0.322489360684 0.280765581604 0.297958166613 0.313189657041 0.303470399659 0.348652898212 0.331594734387 0.299446687464 0.339047458559 0.286979246044 0.316326095312 0.321618884109 0.330065896317 0.324500638067 0.328300795872 0.309002568222 0.262587468469 0.31974123777 0.286316182293 0.321162329165 0.328160620315 0.356618051635 0.289733970648 0.344507756538 0.301485561986 0.335785898715 0.322635066518 0.331480718646 0.297897604494 0.306942928189 0.350843442517 0.342585296966 0.341311053315 0.306780105123 0.313401804298 0.319978145568 0.302460397612 0.346105758567 0.312802351189 0.331552275517 0.321624157344 0.318798118247 0.301906095501 0.301585920138 0.314556178985 0.333215221158 0.306929663844 0.317083256901 0.309667679181 0.306529028004 0.30865993751 0.296031907986 0.28742420979 0.311584483038 0.319043629504 0.330278008622 0.314466433681 0.327937382021 0.296448162218 0.307033121385 0.296391953011 0.292691206116 0.297146209653 0.307929858983 0.291863681454 0.307300188104 0.306597817799 0.34718100163 0.317436210259 0.29952626739 0.330762834707 0.334951064852 0.323806678898 0.296203706701 0.33398466797 0.344931265559 0.293948734727 0.332764639313 0.272651853935 0.317324315923 0.300493570867 0.307008231016 0.333263322802 0.31390648462 0.332416491248 0.314766869708 0.321015549211 0.322909289307 0.356882966656 0.310596945263 0.343939748528 0.286269629586 0.33173459898 0.323848483719 0.305841388975 0.319266258167 0.34012363898 0.3443280395 0.353885654057 0.320544729867 0.353280499623 0.315621795536 0.312176062734 0.301562130879 0.312061680573 0.312642847966 0.326222109701 0.357417912858 0.313083593142 0.334033412713 0.295630506074
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dc396c8c16032b9657101532f7e08e6aa04b2aea
--- /dev/null
+++
b/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt @@ -0,0 +1 @@ + 5.0817745e-01 4.4535192e-01 5.6700421e-01 1.2418578e-01 4.8927739e-01 5.0180477e-01 1.4096146e-01 8.1242502e-01 4.1586001e-01 3.2586371e-01 3.2586371e-01 5.2942799e-01 8.6137722e-01 7.7039952e-01 9.7270522e-01 4.5581864e-01 1.0000000e-01 6.3861009e-01 3.0546431e-01 3.7427929e-01 2.5251796e-01 5.6700421e-01 3.8776762e-01 5.2942799e-01 5.0905001e-01 2.5651975e-01 1.2418578e-01 1.2418578e-01 4.5470518e-01 4.5470518e-01 3.2816937e-01 6.0181382e-01 7.3457830e-01 4.1586001e-01 3.2586371e-01 4.0147421e-01 4.1586001e-01 7.6752131e-01 1.2418578e-01 1.4096146e-01 1.2396136e+00 7.1462831e-01 4.1449626e-01 5.3588338e-01 5.2942799e-01 3.2352160e-01 5.2862779e-01 2.5251796e-01 2.0656129e-01 3.5031395e+00 3.2158090e+00 3.6682165e+00 2.7164367e+00 3.3288934e+00 3.1477087e+00 3.4033622e+00 2.0308266e+00 3.3209346e+00 2.5912926e+00 2.3257069e+00 2.8912179e+00 2.7273721e+00 3.3660466e+00 2.2876649e+00 3.1664710e+00 3.1642132e+00 2.7448172e+00 3.2474124e+00 2.5734684e+00 3.5025969e+00 2.6980573e+00 3.5983434e+00 3.3515288e+00 3.0113552e+00 3.1469325e+00 3.5526357e+00 3.7475562e+00 3.1812462e+00 2.1818668e+00 2.4927109e+00 2.3909738e+00 2.5729378e+00 3.7711998e+00 3.1620401e+00 3.1916270e+00 3.4478147e+00 3.1312883e+00 2.7541224e+00 2.6886547e+00 3.0483897e+00 3.2685282e+00 2.6752185e+00 2.0587064e+00 2.8619072e+00 2.8416143e+00 2.8554471e+00 2.9845926e+00 1.7697734e+00 2.7640668e+00 4.7690606e+00 3.8067806e+00 4.6866422e+00 4.2843668e+00 4.5417384e+00 5.4120246e+00 3.2161426e+00 5.0569442e+00 4.5165793e+00 4.9462324e+00 3.8595100e+00 4.0249346e+00 4.2787236e+00 3.7387507e+00 3.9160762e+00 4.0938708e+00 4.2028863e+00 5.5316487e+00 5.7297286e+00 3.6968486e+00 4.5074741e+00 3.6330985e+00 5.5146761e+00 3.6293227e+00 4.4495340e+00 4.7599229e+00 3.5255287e+00 3.6076762e+00 4.3339547e+00 4.5590471e+00 4.8997298e+00 5.2856169e+00 4.3511402e+00 3.7760534e+00 4.2460554e+00 5.0103780e+00 4.3808704e+00 4.1939019e+00 3.5087649e+00 4.2018804e+00 4.4140402e+00 3.9807996e+00 3.8067806e+00 4.6775324e+00 4.5250934e+00 4.0376133e+00 3.7473276e+00 3.9523060e+00 4.1709262e+00 3.7872951e+00 2.5251796e-01 3.0546431e-01 6.0060595e-01 9.5035453e-01 4.4535192e-01 4.0293660e-01 5.0090417e-01 1.4096146e-01 7.6752131e-01 4.1449626e-01 1.2418578e-01 6.2024833e-01 1.1845977e+00 1.4700179e+00 9.4309624e-01 5.0905001e-01 1.0003617e+00 8.0358695e-01 5.8851328e-01 7.0826681e-01 6.6384020e-01 4.3456114e-01 5.6700421e-01 2.0656129e-01 4.2667565e-01 5.2942799e-01 4.4417983e-01 2.8192292e-01 2.1269358e-01 5.7324170e-01 1.1056650e+00 1.2393677e+00 1.4096146e-01 2.5251796e-01 6.8961791e-01 1.4096146e-01 5.0090417e-01 4.1449626e-01 5.0270183e-01 7.3535471e-01 5.0905001e-01 5.7324170e-01 8.5690100e-01 1.2418578e-01 8.0587320e-01 3.2352160e-01 7.3496673e-01 3.0275928e-01 3.5601468e+00 3.2472699e+00 3.7137483e+00 2.6693888e+00 3.3563815e+00 3.1472333e+00 3.4276314e+00 1.9506288e+00 3.3563695e+00 2.5739370e+00 2.1870094e+00 2.9033014e+00 2.6860278e+00 3.3789262e+00 2.2884830e+00 3.2153154e+00 3.1667333e+00 2.7423060e+00 3.2269725e+00 2.5465772e+00 3.5123782e+00 2.7147889e+00 3.6030381e+00 3.3619470e+00 3.0427908e+00 3.1888219e+00 3.5910272e+00 3.7805671e+00 3.1921903e+00 2.1611020e+00 2.4491518e+00 2.3430978e+00 2.5700421e+00 3.7741357e+00 3.1615131e+00 3.2084454e+00 3.4884789e+00 3.1228939e+00 2.7575407e+00 2.6617768e+00 3.0343591e+00 3.2842184e+00 2.6656374e+00 1.9595652e+00 2.8539100e+00 2.8474367e+00 2.8585579e+00 3.0059712e+00 1.6867642e+00 2.7634340e+00 
4.7806735e+00 3.8055585e+00 4.7194850e+00 4.2963997e+00 4.5579706e+00 5.4507801e+00 3.1945300e+00 5.0903533e+00 4.5297786e+00 4.9814379e+00 3.8841455e+00 4.0376849e+00 4.3069372e+00 3.7284750e+00 3.9173293e+00 4.1124749e+00 4.2221165e+00 5.5759608e+00 5.7633066e+00 3.6758942e+00 4.5370189e+00 3.6312130e+00 5.5536680e+00 3.6416405e+00 4.4736906e+00 4.7961103e+00 3.5380868e+00 3.6203213e+00 4.3467079e+00 4.5977693e+00 4.9380624e+00 5.3421274e+00 4.3637834e+00 3.7899304e+00 4.2477635e+00 5.0602038e+00 4.3953045e+00 4.2110583e+00 3.5192753e+00 4.2358121e+00 4.4378207e+00 4.0189525e+00 3.8055585e+00 4.7017335e+00 4.5483787e+00 4.0656879e+00 3.7516222e+00 3.9742971e+00 4.1845313e+00 3.7939847e+00 2.1269358e-01 4.4535192e-01 8.9366705e-01 2.1845981e-01 3.4378533e-01 3.7427929e-01 2.5651975e-01 7.7039952e-01 3.2586371e-01 2.1845981e-01 4.2667565e-01 1.2113327e+00 1.3801284e+00 8.7175869e-01 4.4651726e-01 1.0719360e+00 6.5223271e-01 7.3813096e-01 5.7867728e-01 4.4535192e-01 5.2655962e-01 6.0611244e-01 3.8776762e-01 4.0176783e-01 5.3588338e-01 5.0905001e-01 3.0000000e-01 3.0546431e-01 7.1169738e-01 9.4309624e-01 1.1327825e+00 2.5651975e-01 3.0275928e-01 8.1067767e-01 2.5651975e-01 3.2352160e-01 4.2538717e-01 3.7427929e-01 9.0252542e-01 3.0000000e-01 5.1138698e-01 7.7869083e-01 2.1845981e-01 6.6384020e-01 1.2418578e-01 6.9325418e-01 3.0546431e-01 3.7098973e+00 3.3770904e+00 3.8553941e+00 2.7868575e+00 3.4895316e+00 3.2571492e+00 3.5499573e+00 2.0646687e+00 3.4944845e+00 2.6743800e+00 2.3196869e+00 3.0181476e+00 2.8270253e+00 3.4973911e+00 2.3997585e+00 3.3600102e+00 3.2716172e+00 2.8619072e+00 3.3597438e+00 2.6649106e+00 3.6203213e+00 2.8440609e+00 3.7280682e+00 3.4822008e+00 3.1786890e+00 3.3296038e+00 3.7325066e+00 3.9121945e+00 3.3084060e+00 2.2888897e+00 2.5683989e+00 2.4649412e+00 2.6906230e+00 3.8866112e+00 3.2625043e+00 3.3219248e+00 3.6264668e+00 3.2609948e+00 2.8656468e+00 2.7738624e+00 3.1430282e+00 3.4033622e+00 2.7865812e+00 2.0797392e+00 2.9638836e+00 2.9589097e+00 2.9695568e+00 3.1337459e+00 1.7991433e+00 2.8758936e+00 4.8875515e+00 3.9111857e+00 4.8490379e+00 4.4107143e+00 4.6725771e+00 5.5854254e+00 3.2933477e+00 5.2226262e+00 4.6541348e+00 5.1068487e+00 4.0049607e+00 4.1564977e+00 4.4321573e+00 3.8331006e+00 4.0161098e+00 4.2255639e+00 4.3417782e+00 5.7091264e+00 5.8970064e+00 3.7961619e+00 4.6611065e+00 3.7313856e+00 5.6903014e+00 3.7618406e+00 4.5942943e+00 4.9290197e+00 3.6553612e+00 3.7333492e+00 4.4613366e+00 4.7342792e+00 5.0749049e+00 5.4844039e+00 4.4774673e+00 3.9102500e+00 4.3611782e+00 5.2016658e+00 4.5034762e+00 4.3281161e+00 3.6300436e+00 4.3648112e+00 4.5562166e+00 4.1482002e+00 3.9111857e+00 4.8218416e+00 4.6648403e+00 4.1879434e+00 3.8717400e+00 4.0945154e+00 4.2919258e+00 3.9013483e+00 5.6700421e-01 9.9714776e-01 3.0546431e-01 4.4417983e-01 2.5251796e-01 3.0275928e-01 8.8835966e-01 3.2586371e-01 2.1845981e-01 4.4651726e-01 1.3360558e+00 1.5022608e+00 9.9714776e-01 5.6769031e-01 1.1765359e+00 7.6752131e-01 8.1354181e-01 6.9325418e-01 6.2092891e-01 5.4292906e-01 4.5470518e-01 4.0293660e-01 4.5581864e-01 6.4704320e-01 6.2024833e-01 1.4096146e-01 2.0656129e-01 8.1354181e-01 1.0574300e+00 1.2554784e+00 3.0275928e-01 4.4535192e-01 9.2264612e-01 3.0275928e-01 2.5251796e-01 5.2862779e-01 5.0592043e-01 8.0358695e-01 2.5251796e-01 5.6454040e-01 7.9878917e-01 2.1845981e-01 7.6752131e-01 1.2418578e-01 8.1242502e-01 4.1449626e-01 3.5875094e+00 3.2277825e+00 3.7190120e+00 2.6019240e+00 3.3414931e+00 3.0741797e+00 3.3904673e+00 1.8683030e+00 3.3506325e+00 2.4892190e+00 
2.1209506e+00 2.8530088e+00 2.6606291e+00 3.3264150e+00 2.2345869e+00 3.2325480e+00 3.0894572e+00 2.6859989e+00 3.1954750e+00 2.4836725e+00 3.4467337e+00 2.6928468e+00 3.5602810e+00 3.3090659e+00 3.0346426e+00 3.1953687e+00 3.5930845e+00 3.7635112e+00 3.1392617e+00 2.1242643e+00 2.3839455e+00 2.2806773e+00 2.5225548e+00 3.7070070e+00 3.0760590e+00 3.1551922e+00 3.4865435e+00 3.1033781e+00 2.6867856e+00 2.5906376e+00 2.9536363e+00 3.2348458e+00 2.6148507e+00 1.8841403e+00 2.7819255e+00 2.7801917e+00 2.7920574e+00 2.9774862e+00 1.6195190e+00 2.7001131e+00 4.7151191e+00 3.7310738e+00 4.6963107e+00 4.2348119e+00 4.5036643e+00 5.4345951e+00 3.1040223e+00 5.0660245e+00 4.4858951e+00 4.9576471e+00 3.8485743e+00 3.9894963e+00 4.2781102e+00 3.6535208e+00 3.8473084e+00 4.0656969e+00 4.1736133e+00 5.5611269e+00 5.7439963e+00 3.6142694e+00 4.5082936e+00 3.5527533e+00 5.5400450e+00 3.5988819e+00 4.4321573e+00 4.7754556e+00 3.4913787e+00 3.5638529e+00 4.2915574e+00 4.5844335e+00 4.9269527e+00 5.3501611e+00 4.3091163e+00 3.7395252e+00 4.1763853e+00 5.0687940e+00 4.3363292e+00 4.1568278e+00 3.4594086e+00 4.2175903e+00 4.4004449e+00 4.0139427e+00 3.7310738e+00 4.6611132e+00 4.5083524e+00 4.0415593e+00 3.7070350e+00 3.9354060e+00 4.1243443e+00 3.7225506e+00 4.8927739e-01 4.1449626e-01 2.0656129e-01 8.1242502e-01 5.0270183e-01 4.0293660e-01 2.8192292e-01 6.0611244e-01 8.2305664e-01 8.2899253e-01 9.3824087e-01 4.5581864e-01 1.4096146e-01 7.1840099e-01 2.1845981e-01 4.5470518e-01 2.1845981e-01 4.9674312e-01 4.2418962e-01 5.1607523e-01 6.0551856e-01 2.8192292e-01 2.1269358e-01 2.4837156e-01 4.5470518e-01 5.1607523e-01 4.2667565e-01 5.0991930e-01 6.8917100e-01 5.0270183e-01 4.1312257e-01 5.0180477e-01 5.0270183e-01 7.4549115e-01 2.1269358e-01 1.4096146e-01 1.3190071e+00 6.4755655e-01 4.1449626e-01 5.1691876e-01 6.0611244e-01 2.5251796e-01 4.9674312e-01 3.0546431e-01 3.0000000e-01 3.5310961e+00 3.2313174e+00 3.6912396e+00 2.7363446e+00 3.3486156e+00 3.1550780e+00 3.4146950e+00 2.0587064e+00 3.3422688e+00 2.6000813e+00 2.3658814e+00 2.9005672e+00 2.7581254e+00 3.3764180e+00 2.2982662e+00 3.1914715e+00 3.1684808e+00 2.7581145e+00 3.2719146e+00 2.5906376e+00 3.5076679e+00 2.7164648e+00 3.6146980e+00 3.3629944e+00 3.0317688e+00 3.1699749e+00 3.5767944e+00 3.7653940e+00 3.1912695e+00 2.2046610e+00 2.5131017e+00 2.4132939e+00 2.5882494e+00 3.7797776e+00 3.1649733e+00 3.1986968e+00 3.4685882e+00 3.1575873e+00 2.7599092e+00 2.7031874e+00 3.0575551e+00 3.2787144e+00 2.6914804e+00 2.0914773e+00 2.8714673e+00 2.8482104e+00 2.8631525e+00 3.0002861e+00 1.8009624e+00 2.7738624e+00 4.7744685e+00 3.8132783e+00 4.7036953e+00 4.2925903e+00 4.5507995e+00 5.4317036e+00 3.2245243e+00 5.0748136e+00 4.5314818e+00 4.9621679e+00 3.8715927e+00 4.0372136e+00 4.2937599e+00 3.7469906e+00 3.9213497e+00 4.1030149e+00 4.2136261e+00 5.5512721e+00 5.7499082e+00 3.7127205e+00 4.5218897e+00 3.6377830e+00 5.5357771e+00 3.6429670e+00 4.4609633e+00 4.7775824e+00 3.5373240e+00 3.6158814e+00 4.3437318e+00 4.5790474e+00 4.9211035e+00 5.3110568e+00 4.3608329e+00 3.7876656e+00 4.2543813e+00 5.0356467e+00 4.3872625e+00 4.2028863e+00 3.5161021e+00 4.2189979e+00 4.4261470e+00 4.0000622e+00 3.8132783e+00 4.6893387e+00 4.5361087e+00 4.0527696e+00 3.7622948e+00 3.9645936e+00 4.1768667e+00 3.7924679e+00 8.6137722e-01 5.7867728e-01 1.2470767e+00 8.6361309e-01 2.8192292e-01 6.9369532e-01 9.8450810e-01 1.2949422e+00 5.7324170e-01 5.3588338e-01 4.0000000e-01 4.8135521e-01 3.0546431e-01 3.2816937e-01 5.0817745e-01 3.4378533e-01 9.4558103e-01 6.2024833e-01 
6.9728513e-01 9.2288144e-01 5.6700421e-01 4.3691963e-01 5.4292906e-01 8.7202528e-01 8.9095811e-01 5.0817745e-01 3.6171588e-01 3.8934542e-01 8.6361309e-01 7.9878917e-01 5.0592043e-01 8.6361309e-01 1.1959482e+00 5.4292906e-01 5.6454040e-01 1.6807352e+00 1.1055064e+00 5.0592043e-01 3.2586371e-01 9.7779835e-01 3.2816937e-01 9.4558103e-01 2.8507955e-01 6.6827038e-01 3.1533911e+00 2.8840079e+00 3.3274872e+00 2.5335921e+00 3.0169509e+00 2.8661222e+00 3.0732956e+00 1.9492232e+00 3.0013391e+00 2.3437032e+00 2.3116343e+00 2.5873149e+00 2.5591371e+00 3.0631725e+00 2.0220740e+00 2.8270253e+00 2.8656468e+00 2.4892190e+00 3.0178921e+00 2.3656538e+00 3.1846482e+00 2.4132559e+00 3.3163294e+00 3.0590735e+00 2.6993871e+00 2.8174914e+00 3.2310326e+00 3.4162231e+00 2.8802219e+00 1.9932786e+00 2.3173648e+00 2.2314118e+00 2.3212593e+00 3.4779999e+00 2.8654680e+00 2.8662571e+00 3.1113805e+00 2.8927401e+00 2.4634131e+00 2.4685230e+00 2.7948819e+00 2.9596963e+00 2.4341346e+00 2.0039447e+00 2.6000813e+00 2.5498770e+00 2.5700421e+00 2.6813098e+00 1.7123398e+00 2.4913669e+00 4.4418755e+00 3.5123791e+00 4.3488707e+00 3.9713081e+00 4.2172545e+00 5.0700045e+00 2.9631582e+00 4.7239900e+00 4.2113881e+00 4.5979409e+00 3.5255287e+00 3.7162377e+00 3.9448212e+00 3.4598280e+00 3.6097419e+00 3.7620043e+00 3.8810240e+00 5.1822310e+00 5.3953096e+00 3.4508156e+00 4.1665786e+00 3.3353616e+00 5.1763300e+00 3.3260356e+00 4.1143832e+00 4.4201622e+00 3.2188998e+00 3.2929599e+00 4.0183758e+00 4.2229849e+00 4.5637045e+00 4.9290256e+00 4.0343724e+00 3.4708900e+00 3.9559935e+00 4.6576736e+00 4.0502252e+00 3.8718131e+00 3.1963475e+00 3.8610636e+00 4.0785553e+00 3.6345765e+00 3.5123791e+00 4.3416284e+00 4.1864302e+00 3.7018916e+00 3.4568305e+00 3.6254423e+00 3.8415026e+00 3.4775621e+00 4.0293660e-01 5.0905001e-01 3.8934542e-01 8.1130291e-01 2.5251796e-01 4.2538717e-01 4.8927739e-01 1.2406194e+00 1.3074132e+00 8.5233811e-01 5.0090417e-01 1.1185330e+00 5.6700421e-01 8.1099042e-01 5.3022554e-01 4.1449626e-01 5.3665999e-01 5.0905001e-01 5.0592043e-01 4.1449626e-01 6.0181382e-01 6.0060595e-01 2.5651975e-01 3.4583729e-01 8.0064372e-01 8.1558458e-01 1.0597541e+00 3.8934542e-01 4.2667565e-01 9.0074515e-01 3.8934542e-01 4.1586001e-01 5.0180477e-01 4.0293660e-01 1.1003197e+00 2.5651975e-01 4.5581864e-01 6.6539428e-01 4.1312257e-01 5.7324170e-01 2.0656129e-01 7.1504098e-01 4.0293660e-01 3.6583368e+00 3.3018939e+00 3.7934214e+00 2.7118627e+00 3.4196168e+00 3.1646752e+00 3.4663954e+00 1.9965608e+00 3.4302944e+00 2.5753574e+00 2.2837561e+00 2.9283888e+00 2.7788099e+00 3.4116298e+00 2.3101107e+00 3.3028359e+00 3.1719381e+00 2.7826178e+00 3.2957091e+00 2.5882494e+00 3.5217244e+00 2.7724782e+00 3.6509512e+00 3.3998935e+00 3.1126354e+00 3.2681313e+00 3.6719821e+00 3.8384263e+00 3.2202056e+00 2.2240476e+00 2.4960474e+00 2.3970928e+00 2.6119950e+00 3.7951491e+00 3.1592993e+00 3.2300555e+00 3.5604418e+00 3.2025128e+00 2.7701355e+00 2.6892823e+00 3.0524247e+00 3.3177721e+00 2.7092568e+00 2.0227167e+00 2.8731220e+00 2.8671099e+00 2.8775912e+00 3.0586720e+00 1.7332099e+00 2.7865812e+00 4.7866828e+00 3.8123695e+00 4.7714708e+00 4.3189924e+00 4.5796358e+00 5.5132325e+00 3.1921277e+00 5.1489022e+00 4.5737586e+00 5.0249531e+00 3.9185849e+00 4.0697987e+00 4.3502657e+00 3.7349501e+00 3.9102370e+00 4.1311343e+00 4.2548570e+00 5.6362307e+00 5.8240252e+00 3.7174048e+00 4.5773480e+00 3.6270581e+00 5.6209145e+00 3.6774027e+00 4.5072397e+00 4.8555167e+00 3.5675580e+00 3.6401392e+00 4.3693804e+00 4.6657186e+00 5.0062061e+00 5.4227201e+00 4.3844239e+00 3.8261182e+00 
4.2718480e+00 5.1373047e+00 4.4043123e+00 4.2383633e+00 3.5347392e+00 4.2868650e+00 4.4668117e+00 4.0716645e+00 3.8123695e+00 4.7338066e+00 4.5734052e+00 4.1036179e+00 3.7882079e+00 4.0078491e+00 4.1922661e+00 3.8027591e+00 6.8961791e-01 3.0546431e-01 4.4417983e-01 2.0656129e-01 4.1586001e-01 7.6625946e-01 8.9687438e-01 1.0919712e+00 5.7867728e-01 1.5422108e-01 7.3851529e-01 4.0293660e-01 4.1312257e-01 3.2586371e-01 5.7257017e-01 3.2816937e-01 4.1312257e-01 4.0147421e-01 2.0656129e-01 2.0656129e-01 2.0656129e-01 3.2586371e-01 3.2586371e-01 4.1312257e-01 7.0437330e-01 8.5205778e-01 3.0546431e-01 3.2352160e-01 5.0905001e-01 3.0546431e-01 6.5172743e-01 1.0000000e-01 2.1269358e-01 1.1283882e+00 6.1092863e-01 4.0293660e-01 5.0592043e-01 4.1586001e-01 4.0293660e-01 4.1449626e-01 3.7255734e-01 1.2418578e-01 3.4445326e+00 3.1392617e+00 3.6011035e+00 2.6118700e+00 3.2516941e+00 3.0511838e+00 3.3218097e+00 1.9189245e+00 3.2468925e+00 2.4924452e+00 2.2081024e+00 2.8038661e+00 2.6291264e+00 3.2767369e+00 2.1964719e+00 3.1025274e+00 3.0696611e+00 2.6485861e+00 3.1554034e+00 2.4715204e+00 3.4135983e+00 2.6151245e+00 3.5092032e+00 3.2604423e+00 2.9354140e+00 3.0782101e+00 3.4818889e+00 3.6726568e+00 3.0922811e+00 2.0843471e+00 2.3874354e+00 2.2845234e+00 2.4794505e+00 3.6775470e+00 3.0659000e+00 3.1055388e+00 3.3775462e+00 3.0430948e+00 2.6597612e+00 2.5873149e+00 2.9471553e+00 3.1807044e+00 2.5795723e+00 1.9450499e+00 2.7640668e+00 2.7473221e+00 2.7611864e+00 2.9015702e+00 1.6626642e+00 2.6693888e+00 4.6823704e+00 3.7130994e+00 4.6117428e+00 4.1946425e+00 4.4565357e+00 5.3399939e+00 3.1168466e+00 4.9805386e+00 4.4303862e+00 4.8738189e+00 3.7806643e+00 3.9387918e+00 4.2018804e+00 3.6441274e+00 3.8290120e+00 4.0132700e+00 4.1177139e+00 5.4615788e+00 5.6559440e+00 3.5983434e+00 4.4321573e+00 3.5405803e+00 5.4429455e+00 3.5441556e+00 4.3687483e+00 4.6853394e+00 3.4399664e+00 3.5203203e+00 4.2473048e+00 4.4861009e+00 4.8281381e+00 5.2242271e+00 4.2652659e+00 3.6876909e+00 4.1503255e+00 4.9488209e+00 4.2966585e+00 4.1071698e+00 3.4205830e+00 4.1292490e+00 4.3363292e+00 3.9150359e+00 3.7130994e+00 4.5977729e+00 4.4473292e+00 3.9643224e+00 3.6603913e+00 3.8715927e+00 4.0861975e+00 3.6954796e+00 5.0991930e-01 1.1327825e+00 5.7257017e-01 4.0293660e-01 3.0811765e-01 1.5771666e+00 1.7488874e+00 1.2431040e+00 8.1273630e-01 1.4170618e+00 1.0106392e+00 1.0389435e+00 9.3824087e-01 7.3813096e-01 7.5976039e-01 6.6491075e-01 6.0611244e-01 6.9728513e-01 8.8861541e-01 8.5177726e-01 3.8776762e-01 4.2538717e-01 1.0346741e+00 1.2943100e+00 1.5015203e+00 5.0991930e-01 6.2482915e-01 1.1473003e+00 5.0991930e-01 1.2418578e-01 7.6752131e-01 7.4586719e-01 6.0181382e-01 3.0275928e-01 7.7869083e-01 1.0440187e+00 4.0293660e-01 1.0120221e+00 3.2352160e-01 1.0597541e+00 6.4704320e-01 3.7504939e+00 3.3717768e+00 3.8731169e+00 2.7062054e+00 3.4865562e+00 3.1921903e+00 3.5262546e+00 1.9522524e+00 3.5018009e+00 2.5914913e+00 2.1818668e+00 2.9807120e+00 2.7874290e+00 3.4557351e+00 2.3604042e+00 3.3915488e+00 3.2027420e+00 2.8150728e+00 3.3206640e+00 2.6018930e+00 3.5642457e+00 2.8360166e+00 3.6902583e+00 3.4394878e+00 3.1847477e+00 3.3503379e+00 3.7461474e+00 3.9068076e+00 3.2666666e+00 2.2590074e+00 2.4950353e+00 2.3935209e+00 2.6534332e+00 3.8259590e+00 3.1834936e+00 3.2834077e+00 3.6377049e+00 3.2390016e+00 2.8060305e+00 2.7012392e+00 3.0647279e+00 3.3658240e+00 2.7423171e+00 1.9645331e+00 2.8984764e+00 2.9033203e+00 2.9139413e+00 3.1189900e+00 1.7118795e+00 2.8228127e+00 4.8290847e+00 3.8416142e+00 4.8350745e+00 4.3569606e+00 
4.6261788e+00 5.5774554e+00 3.1958228e+00 5.2067803e+00 4.6153241e+00 5.0947934e+00 3.9803199e+00 4.1159553e+00 4.4131159e+00 3.7587872e+00 3.9513472e+00 4.1881498e+00 4.3024754e+00 5.7071195e+00 5.8839539e+00 3.7280682e+00 4.6419531e+00 3.6578722e+00 5.6843788e+00 3.7276068e+00 4.5625120e+00 4.9179194e+00 3.6182608e+00 3.6866535e+00 4.4136707e+00 4.7307689e+00 5.0723886e+00 5.5062533e+00 4.4301849e+00 3.8690719e+00 4.2943891e+00 5.2192815e+00 4.4536259e+00 4.2828634e+00 3.5797958e+00 4.3570079e+00 4.5278761e+00 4.1542558e+00 3.8416142e+00 4.7899951e+00 4.6341170e+00 4.1740602e+00 3.8316735e+00 4.0656969e+00 4.2413113e+00 3.8376713e+00 6.8961791e-01 3.0811765e-01 1.4096146e-01 6.4755655e-01 1.1229906e+00 1.3835747e+00 8.6361309e-01 4.2667565e-01 9.4009473e-01 7.0784540e-01 5.3665999e-01 6.2482915e-01 6.3977563e-01 4.3691963e-01 4.4651726e-01 1.5422108e-01 3.7598397e-01 4.4535192e-01 3.7598397e-01 2.1845981e-01 1.4096146e-01 5.5419992e-01 1.0065841e+00 1.1474460e+00 0.0000000e+00 3.0811765e-01 6.5223271e-01 0.0000000e+00 5.0991930e-01 3.2586371e-01 4.2667565e-01 8.3172002e-01 5.0991930e-01 5.6769031e-01 7.5082357e-01 2.1845981e-01 7.0479928e-01 3.0811765e-01 6.4755655e-01 2.1845981e-01 3.4865562e+00 3.1726595e+00 3.6377960e+00 2.5987470e+00 3.2814045e+00 3.0627375e+00 3.3515846e+00 1.8841865e+00 3.2769379e+00 2.5038079e+00 2.1311468e+00 2.8311678e+00 2.6104387e+00 3.2962520e+00 2.2214438e+00 3.1433122e+00 3.0878634e+00 2.6552472e+00 3.1570103e+00 2.4668912e+00 3.4394878e+00 2.6411293e+00 3.5233648e+00 3.2747247e+00 2.9659871e+00 3.1154783e+00 3.5134741e+00 3.7059620e+00 3.1148696e+00 2.0851901e+00 2.3731428e+00 2.2655571e+00 2.4927109e+00 3.6920087e+00 3.0823446e+00 3.1337459e+00 3.4135200e+00 3.0481703e+00 2.6780487e+00 2.5874301e+00 2.9489507e+00 3.2027420e+00 2.5873149e+00 1.8973383e+00 2.7738355e+00 2.7632614e+00 2.7778954e+00 2.9269923e+00 1.6390769e+00 2.6848587e+00 4.7106706e+00 3.7313856e+00 4.6446321e+00 4.2142736e+00 4.4836580e+00 5.3716885e+00 3.1250284e+00 5.0074019e+00 4.4485220e+00 4.9128219e+00 3.8150636e+00 3.9624529e+00 4.2358121e+00 3.6602286e+00 3.8605980e+00 4.0488387e+00 4.1418643e+00 5.4970002e+00 5.6855224e+00 3.5964347e+00 4.4685630e+00 3.5634461e+00 5.4730406e+00 3.5693950e+00 4.3989089e+00 4.7150659e+00 3.4668130e+00 3.5464993e+00 4.2723380e+00 4.5155386e+00 4.8594290e+00 5.2647079e+00 4.2921213e+00 3.7064459e+00 4.1581964e+00 4.9913682e+00 4.3286007e+00 4.1303097e+00 3.4468286e+00 4.1669742e+00 4.3729308e+00 3.9624170e+00 3.7313856e+00 4.6297577e+00 4.4844827e+00 4.0056359e+00 3.6817961e+00 3.9035218e+00 4.1179678e+00 3.7164366e+00 6.2024833e-01 8.1304731e-01 1.1868139e+00 4.8036801e-01 7.1799256e-01 2.8192292e-01 3.2816937e-01 3.2816937e-01 3.0546431e-01 3.2352160e-01 3.2352160e-01 8.5205778e-01 4.8927739e-01 6.6384020e-01 7.3496673e-01 4.5581864e-01 2.4837156e-01 3.2586371e-01 7.6752131e-01 7.4549115e-01 3.2352160e-01 4.1449626e-01 5.0180477e-01 6.8961791e-01 5.8851328e-01 2.5251796e-01 6.8961791e-01 1.0919712e+00 3.7255734e-01 4.2667565e-01 1.4993782e+00 1.0344911e+00 5.0592043e-01 4.5581864e-01 8.1304731e-01 3.0546431e-01 8.5205778e-01 1.0000000e-01 4.9766035e-01 3.3472053e+00 3.0922811e+00 3.5254266e+00 2.6661987e+00 3.2094276e+00 3.0570957e+00 3.2869053e+00 2.0190980e+00 3.1913594e+00 2.5206151e+00 2.3403819e+00 2.7928582e+00 2.6680945e+00 3.2615924e+00 2.2070201e+00 3.0233425e+00 3.0716969e+00 2.6575076e+00 3.1694367e+00 2.5088543e+00 3.4030318e+00 2.5954147e+00 3.4988409e+00 3.2483608e+00 2.8891737e+00 3.0123702e+00 3.4182420e+00 3.6203759e+00 
3.0811775e+00 2.1190324e+00 2.4416796e+00 2.3440712e+00 2.4897570e+00 3.6753309e+00 3.0715435e+00 3.0851463e+00 3.3123070e+00 3.0424689e+00 2.6625505e+00 2.6241824e+00 2.9689697e+00 3.1616811e+00 2.5961850e+00 2.0559262e+00 2.7803619e+00 2.7462372e+00 2.7639489e+00 2.8736288e+00 1.7674365e+00 2.6773131e+00 4.6660957e+00 3.7173526e+00 4.5567672e+00 4.1782968e+00 4.4326194e+00 5.2720689e+00 3.1469325e+00 4.9232255e+00 4.4057732e+00 4.8164157e+00 3.7433882e+00 3.9194796e+00 4.1567419e+00 3.6582432e+00 3.8303544e+00 3.9861488e+00 4.0892044e+00 5.3882212e+00 5.5946413e+00 3.6180819e+00 4.3839191e+00 3.5469476e+00 5.3734444e+00 3.5262672e+00 4.3306501e+00 4.6237863e+00 3.4237160e+00 3.5051302e+00 4.2288456e+00 4.4201622e+00 4.7609637e+00 5.1280035e+00 4.2469785e+00 3.6684143e+00 4.1480002e+00 4.8602572e+00 4.2765700e+00 4.0824098e+00 3.4092877e+00 4.0737132e+00 4.2991233e+00 3.8524190e+00 3.7173526e+00 4.5590471e+00 4.4107160e+00 3.9202843e+00 3.6509512e+00 3.8388884e+00 4.0680120e+00 3.6894983e+00 4.1449626e-01 6.6539428e-01 1.0717668e+00 1.1847335e+00 7.0776547e-01 3.2816937e-01 9.2095040e-01 4.4651726e-01 6.0060595e-01 3.8934542e-01 6.1092863e-01 3.7598397e-01 3.0000000e-01 4.1312257e-01 2.4837156e-01 4.0293660e-01 4.1312257e-01 2.0656129e-01 3.0000000e-01 6.0611244e-01 7.3535471e-01 9.3801395e-01 3.0811765e-01 4.2538717e-01 7.1462831e-01 3.0811765e-01 5.2574978e-01 3.0275928e-01 3.2816937e-01 1.1107977e+00 4.5470518e-01 4.1449626e-01 4.8927739e-01 4.1449626e-01 4.4417983e-01 2.8192292e-01 5.2942799e-01 2.5251796e-01 3.4297053e+00 3.0906838e+00 3.5704156e+00 2.5301680e+00 3.2062204e+00 2.9663489e+00 3.2615889e+00 1.8330979e+00 3.2074600e+00 2.4030878e+00 2.1292724e+00 2.7344480e+00 2.5716369e+00 3.2053511e+00 2.1242643e+00 3.0798277e+00 2.9831836e+00 2.5729378e+00 3.0964590e+00 2.3917863e+00 3.3353616e+00 2.5635110e+00 3.4441347e+00 3.1882407e+00 2.8938821e+00 3.0477086e+00 3.4484194e+00 3.6265826e+00 3.0209783e+00 2.0203134e+00 2.3063579e+00 2.2046610e+00 2.4100833e+00 3.5972040e+00 2.9748436e+00 3.0349291e+00 3.3414931e+00 2.9918962e+00 2.5764694e+00 2.5038051e+00 2.8573838e+00 3.1113597e+00 2.5079404e+00 1.8623849e+00 2.6799601e+00 2.6656374e+00 2.6804452e+00 2.8458006e+00 1.5870088e+00 2.5906376e+00 4.6056614e+00 3.6293396e+00 4.5625120e+00 4.1184849e+00 4.3862724e+00 5.2957861e+00 3.0253131e+00 4.9300368e+00 4.3656957e+00 4.8256905e+00 3.7228356e+00 3.8717400e+00 4.1487889e+00 3.5605424e+00 3.7509165e+00 3.9489970e+00 4.0502806e+00 5.4193574e+00 5.6096505e+00 3.5201263e+00 4.3797165e+00 3.4555095e+00 5.4004015e+00 3.4805320e+00 4.3069452e+00 4.6373516e+00 3.3738930e+00 3.4478147e+00 4.1762321e+00 4.4428877e+00 4.7870294e+00 5.1982218e+00 4.1948678e+00 3.6180819e+00 4.0668114e+00 4.9227056e+00 4.2245318e+00 4.0358897e+00 3.3459883e+00 4.0835979e+00 4.2783731e+00 3.8797354e+00 3.6293396e+00 4.5370189e+00 4.3879553e+00 3.9155334e+00 3.5955337e+00 3.8113970e+00 4.0131848e+00 3.6132595e+00 5.2862779e-01 1.2431040e+00 1.5013525e+00 9.7779835e-01 5.3588338e-01 1.0669582e+00 8.1385214e-01 6.6432544e-01 7.2823007e-01 6.5223271e-01 5.1138698e-01 5.6700421e-01 2.5251796e-01 4.6472023e-01 5.6769031e-01 4.9766035e-01 2.5651975e-01 2.1269358e-01 6.6432544e-01 1.1134787e+00 1.2632199e+00 1.4096146e-01 2.8507955e-01 7.6787403e-01 1.4096146e-01 4.0293660e-01 4.4651726e-01 5.1691876e-01 7.1840099e-01 4.1586001e-01 6.3108414e-01 8.7021234e-01 2.0000000e-01 8.1385214e-01 2.5251796e-01 7.6787403e-01 3.2586371e-01 3.6025735e+00 3.2810515e+00 3.7511944e+00 2.6894009e+00 3.3904673e+00 3.1636869e+00 
3.4574937e+00 1.9666356e+00 3.3893691e+00 2.5954173e+00 2.1997395e+00 2.9322283e+00 2.7092568e+00 3.4012145e+00 2.3186758e+00 3.2568914e+00 3.1861493e+00 2.7595194e+00 3.2561045e+00 2.5646808e+00 3.5381764e+00 2.7476411e+00 3.6278993e+00 3.3809159e+00 3.0768226e+00 3.2277675e+00 3.6265617e+00 3.8150532e+00 3.2176230e+00 2.1864840e+00 2.4668912e+00 2.3596992e+00 2.5949561e+00 3.7935487e+00 3.1789378e+00 3.2360886e+00 3.5252258e+00 3.1522058e+00 2.7777040e+00 2.6819136e+00 3.0473722e+00 3.3079290e+00 2.6886547e+00 1.9757309e+00 2.8726212e+00 2.8654680e+00 2.8788483e+00 3.0349462e+00 1.7160413e+00 2.7852734e+00 4.8087107e+00 3.8282466e+00 4.7531334e+00 4.3176393e+00 4.5857287e+00 5.4831923e+00 3.2147850e+00 5.1185883e+00 4.5544260e+00 5.0194259e+00 3.9185849e+00 4.0655452e+00 4.3416283e+00 3.7535680e+00 3.9509795e+00 4.1478442e+00 4.2472736e+00 5.6096505e+00 5.7957776e+00 3.6945993e+00 4.5734622e+00 3.6568202e+00 5.5854254e+00 3.6720840e+00 4.5038991e+00 4.8262859e+00 3.5684917e+00 3.6474985e+00 4.3740189e+00 4.6282931e+00 4.9713928e+00 5.3806679e+00 4.3928114e+00 3.8121990e+00 4.2612863e+00 5.1032991e+00 4.4267055e+00 4.2347444e+00 3.5464871e+00 4.2738510e+00 4.4745238e+00 4.0663411e+00 3.8282466e+00 4.7338066e+00 4.5852690e+00 4.1075310e+00 3.7823897e+00 4.0070636e+00 4.2156933e+00 3.8157950e+00 1.6177449e+00 1.7454671e+00 1.2604558e+00 8.6361309e-01 1.4955532e+00 1.0118409e+00 1.1594648e+00 9.6204649e-01 6.2081167e-01 9.1750357e-01 8.7504951e-01 7.6752131e-01 8.0660588e-01 9.5965467e-01 9.2859317e-01 5.7324170e-01 6.2205176e-01 1.1313840e+00 1.2653669e+00 1.4930627e+00 6.4755655e-01 7.0479928e-01 1.2236003e+00 6.4755655e-01 2.1269358e-01 8.5105559e-01 7.7360126e-01 7.1169738e-01 2.5651975e-01 8.7229670e-01 1.1327578e+00 5.3588338e-01 1.0269295e+00 3.8934542e-01 1.1042097e+00 7.2823007e-01 4.0317004e+00 3.6659830e+00 4.1618561e+00 3.0123702e+00 3.7804276e+00 3.4970843e+00 3.8244351e+00 2.2591077e+00 3.7930789e+00 2.8953397e+00 2.4889124e+00 3.2809188e+00 3.0866488e+00 3.7578933e+00 2.6595288e+00 3.6754272e+00 3.5073435e+00 3.1173742e+00 3.6212723e+00 2.9065572e+00 3.8667462e+00 3.1302383e+00 3.9918403e+00 3.7416229e+00 3.4760444e+00 3.6375992e+00 4.0358101e+00 4.2016915e+00 3.5683934e+00 2.5569968e+00 2.8007817e+00 2.6989368e+00 2.9539253e+00 4.1306024e+00 3.4882801e+00 3.5831257e+00 3.9280671e+00 3.5362697e+00 3.1099883e+00 3.0065416e+00 3.3706887e+00 3.6672620e+00 3.0442126e+00 2.2719663e+00 3.2032390e+00 3.2071637e+00 3.2176230e+00 3.4155491e+00 2.0139971e+00 3.1260028e+00 5.1310217e+00 4.1456639e+00 5.1323742e+00 4.6609614e+00 4.9284761e+00 5.8739676e+00 3.4984873e+00 5.5048216e+00 4.9175276e+00 5.3898806e+00 4.2781762e+00 4.4176533e+00 4.7107211e+00 4.0621414e+00 4.2494597e+00 4.4865569e+00 4.6045396e+00 6.0012333e+00 6.1816086e+00 4.0339598e+00 4.9390284e+00 3.9601358e+00 5.9803696e+00 4.0277694e+00 4.8626276e+00 5.2147981e+00 3.9185849e+00 3.9887029e+00 4.7161140e+00 5.0257341e+00 5.3673499e+00 5.7939320e+00 4.7320534e+00 4.1713411e+00 4.5995433e+00 5.5085511e+00 4.7536729e+00 4.5857356e+00 3.8819510e+00 4.6519178e+00 4.8256399e+00 4.4430890e+00 4.1456639e+00 5.0898961e+00 4.9316646e+00 4.4680158e+00 4.1325542e+00 4.3648035e+00 4.5412859e+00 4.1418557e+00 4.5581864e-01 4.1586001e-01 7.7074935e-01 5.0991930e-01 7.1840099e-01 7.2486328e-01 7.3145860e-01 1.2122249e+00 9.2112464e-01 1.1384810e+00 1.1451403e+00 9.1163729e-01 7.0386584e-01 7.4855857e-01 1.2220203e+00 1.1947245e+00 6.6827038e-01 6.2081167e-01 3.4378533e-01 1.1229906e+00 9.9348625e-01 5.2942799e-01 1.1229906e+00 
1.5344133e+00 8.2275389e-01 8.5233811e-01 1.8985661e+00 1.4692412e+00 8.9653332e-01 8.7420176e-01 1.2431040e+00 7.3813096e-01 1.2951131e+00 5.5419992e-01 9.3801395e-01 3.5789198e+00 3.3663244e+00 3.7753619e+00 3.0049442e+00 3.4909841e+00 3.3695525e+00 3.5654259e+00 2.3989172e+00 3.4663502e+00 2.8427326e+00 2.7185849e+00 3.0894572e+00 3.0108764e+00 3.5617386e+00 2.5173832e+00 3.2758681e+00 3.3732554e+00 2.9816791e+00 3.4895316e+00 2.8451507e+00 3.6905956e+00 2.8989400e+00 3.8036776e+00 3.5549103e+00 3.1734856e+00 3.2772927e+00 3.6834126e+00 3.8868430e+00 3.3809159e+00 2.4588872e+00 2.7850016e+00 2.6918796e+00 2.8113773e+00 3.9804187e+00 3.3742776e+00 3.3692592e+00 3.5726491e+00 3.3587234e+00 2.9691171e+00 2.9539253e+00 3.2926883e+00 3.4584304e+00 2.9217347e+00 2.4321061e+00 3.0988783e+00 3.0546600e+00 3.0736357e+00 3.1699903e+00 2.1306832e+00 2.9913743e+00 4.9420128e+00 4.0177712e+00 4.8123874e+00 4.4703485e+00 4.7113827e+00 5.5147622e+00 3.4715574e+00 5.1811127e+00 4.6944291e+00 5.0587041e+00 4.0117533e+00 4.2085851e+00 4.4199792e+00 3.9616570e+00 4.1103933e+00 4.2537610e+00 4.3721243e+00 5.6218420e+00 5.8419148e+00 3.9412893e+00 4.6390186e+00 3.8408636e+00 5.6159291e+00 3.8175051e+00 4.5984929e+00 4.8771654e+00 3.7143727e+00 3.7943375e+00 4.5141564e+00 4.6734732e+00 5.0086627e+00 5.3396700e+00 4.5298770e+00 3.9647930e+00 4.4561969e+00 5.0778756e+00 4.5477422e+00 4.3671210e+00 3.6996802e+00 4.3269400e+00 4.5602465e+00 4.0901232e+00 4.0177712e+00 4.8233796e+00 4.6689006e+00 4.1757336e+00 3.9466531e+00 4.1130674e+00 4.3408596e+00 3.9840684e+00 5.3588338e-01 9.7098574e-01 6.0611244e-01 7.4549115e-01 1.0101422e+00 8.1242502e-01 1.2342162e+00 1.1486378e+00 1.1959482e+00 1.4468211e+00 1.0906388e+00 9.4287188e-01 1.0346741e+00 1.3793330e+00 1.4148192e+00 1.0065841e+00 5.5419992e-01 2.8507955e-01 1.3835747e+00 1.2681309e+00 9.0679720e-01 1.3835747e+00 1.6801917e+00 1.0588560e+00 1.0122141e+00 2.2040881e+00 1.5564198e+00 1.0122141e+00 7.7553525e-01 1.4987155e+00 7.4893123e-01 1.4320120e+00 7.3813096e-01 1.1765359e+00 3.3186105e+00 3.0934278e+00 3.5115632e+00 2.9015832e+00 3.2557855e+00 3.1381850e+00 3.2787144e+00 2.3983798e+00 3.2261964e+00 2.6655261e+00 2.7738368e+00 2.8425716e+00 2.9377092e+00 3.3097860e+00 2.3365894e+00 3.0236933e+00 3.1147370e+00 2.7988444e+00 3.3431646e+00 2.7201960e+00 3.4033622e+00 2.7009102e+00 3.5863979e+00 3.3171611e+00 2.9439491e+00 3.0327979e+00 3.4475678e+00 3.6195561e+00 3.1337459e+00 2.3758157e+00 2.6957302e+00 2.6219409e+00 2.6429556e+00 3.7305206e+00 3.1152653e+00 3.0762634e+00 3.3088561e+00 3.2115055e+00 2.7318540e+00 2.8101506e+00 3.0967820e+00 3.1998457e+00 2.7619926e+00 2.4596921e+00 2.9002737e+00 2.8148869e+00 2.8449691e+00 2.9378173e+00 2.1723936e+00 2.7843048e+00 4.6358686e+00 3.7621042e+00 4.5316360e+00 4.1928583e+00 4.4221843e+00 5.2364242e+00 3.2682245e+00 4.9072991e+00 4.4428425e+00 4.7561724e+00 3.7219581e+00 3.9498605e+00 4.1390009e+00 3.7275066e+00 3.8377652e+00 3.9569614e+00 4.0917968e+00 5.3289557e+00 5.5738695e+00 3.7540308e+00 4.3457024e+00 3.5797958e+00 5.3465693e+00 3.5720882e+00 4.3012547e+00 4.5936468e+00 3.4616111e+00 3.5205889e+00 4.2389017e+00 4.4031390e+00 4.7432976e+00 5.0569442e+00 4.2531342e+00 3.7092459e+00 4.2038056e+00 4.8064634e+00 4.2402361e+00 4.0806404e+00 3.4276314e+00 4.0438546e+00 4.2676463e+00 3.8085992e+00 3.7621042e+00 4.5272919e+00 4.3667579e+00 3.8946701e+00 3.7163265e+00 3.8338395e+00 4.0342445e+00 3.7061759e+00 4.4651726e-01 4.4651726e-01 3.2816937e-01 5.7257017e-01 3.4378533e-01 8.2384013e-01 6.6432544e-01 
8.0758367e-01 9.3048953e-01 5.8851328e-01 4.3691963e-01 5.1691876e-01 8.8062848e-01 8.9917007e-01 5.0817745e-01 3.6171588e-01 3.2816937e-01 8.6361309e-01 7.3851529e-01 4.1449626e-01 8.6361309e-01 1.1845977e+00 5.4292906e-01 4.9766035e-01 1.6754036e+00 1.0919712e+00 5.3309112e-01 6.2024833e-01 9.7098574e-01 3.8934542e-01 9.3824087e-01 2.8507955e-01 6.5223271e-01 3.5185448e+00 3.2633258e+00 3.6996953e+00 2.8710255e+00 3.3892942e+00 3.2497279e+00 3.4561374e+00 2.2371784e+00 3.3772302e+00 2.7023432e+00 2.5704711e+00 2.9638836e+00 2.8904978e+00 3.4483274e+00 2.3826791e+00 3.1954061e+00 3.2493673e+00 2.8645447e+00 3.3669805e+00 2.7184506e+00 3.5654259e+00 2.7812639e+00 3.6908684e+00 3.4451701e+00 3.0736340e+00 3.1881331e+00 3.6017790e+00 3.7914024e+00 3.2604423e+00 2.3308454e+00 2.6548674e+00 2.5630676e+00 2.6859989e+00 3.8615219e+00 3.2492317e+00 3.2498302e+00 3.4856775e+00 3.2460061e+00 2.8456767e+00 2.8220742e+00 3.1709561e+00 3.3452644e+00 2.7965957e+00 2.2780262e+00 2.9733693e+00 2.9362769e+00 2.9511072e+00 3.0600825e+00 1.9688013e+00 2.8661222e+00 4.8176767e+00 3.8889176e+00 4.7230291e+00 4.3578295e+00 4.5962942e+00 5.4442203e+00 3.3242177e+00 5.1039076e+00 4.5914416e+00 4.9653393e+00 3.8994399e+00 4.0931001e+00 4.3174903e+00 3.8262108e+00 3.9673816e+00 4.1302370e+00 4.2654212e+00 5.5551457e+00 5.7673205e+00 3.8189943e+00 4.5366342e+00 3.7059360e+00 5.5500825e+00 3.6985459e+00 4.4934936e+00 4.7995387e+00 3.5922369e+00 3.6724425e+00 4.3963259e+00 4.6010378e+00 4.9364818e+00 5.2936070e+00 4.4094664e+00 3.8558772e+00 4.3453589e+00 5.0159331e+00 4.4225169e+00 4.2579435e+00 3.5745624e+00 4.2301614e+00 4.4459076e+00 3.9875954e+00 3.8889176e+00 4.7169919e+00 4.5531173e+00 4.0619315e+00 3.8238093e+00 3.9999729e+00 4.2141826e+00 3.8611742e+00 6.3808075e-01 3.0275928e-01 3.7598397e-01 2.1269358e-01 5.6769031e-01 3.4378533e-01 5.3022554e-01 5.0991930e-01 2.1845981e-01 1.4096146e-01 1.4096146e-01 4.5581864e-01 4.5581864e-01 3.0811765e-01 6.0670504e-01 7.3496673e-01 4.2667565e-01 3.2816937e-01 4.0293660e-01 4.2667565e-01 7.6787403e-01 1.4096146e-01 1.2418578e-01 1.2394907e+00 7.1504098e-01 3.2586371e-01 5.2942799e-01 5.2862779e-01 3.2586371e-01 5.2942799e-01 2.5651975e-01 2.1269358e-01 3.4944845e+00 3.2032390e+00 3.6588207e+00 2.7040077e+00 3.3172489e+00 3.1387381e+00 3.3902207e+00 2.0195610e+00 3.3129652e+00 2.5744164e+00 2.3173648e+00 2.8753045e+00 2.7215057e+00 3.3565935e+00 2.2694598e+00 3.1556501e+00 3.1511848e+00 2.7390328e+00 3.2351115e+00 2.5646808e+00 3.4858646e+00 2.6854398e+00 3.5885398e+00 3.3452644e+00 3.0014619e+00 3.1359624e+00 3.5442447e+00 3.7351231e+00 3.1683717e+00 2.1722580e+00 2.4832809e+00 2.3831271e+00 2.5617005e+00 3.7607269e+00 3.1489919e+00 3.1764760e+00 3.4370400e+00 3.1222134e+00 2.7420671e+00 2.6759392e+00 3.0406669e+00 3.2584404e+00 2.6649106e+00 2.0477765e+00 2.8508344e+00 2.8325946e+00 2.8443188e+00 2.9745020e+00 1.7495699e+00 2.7521074e+00 4.7498176e+00 3.7908064e+00 4.6736601e+00 4.2736523e+00 4.5261084e+00 5.4025762e+00 3.1986968e+00 5.0495127e+00 4.5070435e+00 4.9284820e+00 3.8418637e+00 4.0108142e+00 4.2628455e+00 3.7198158e+00 3.8891307e+00 4.0719001e+00 4.1917075e+00 5.5215376e+00 5.7192826e+00 3.6876129e+00 4.4897240e+00 3.6129201e+00 5.5066558e+00 3.6138577e+00 4.4349731e+00 4.7514299e+00 3.5090368e+00 3.5920050e+00 4.3185209e+00 4.5521574e+00 4.8905855e+00 5.2768100e+00 4.3339547e+00 3.7672401e+00 4.2403934e+00 4.9963306e+00 4.3598652e+00 4.1826701e+00 3.4920978e+00 4.1853524e+00 4.3933832e+00 3.9574195e+00 3.7908064e+00 4.6611791e+00 4.5034762e+00 
4.0149574e+00 3.7307866e+00 3.9355645e+00 4.1498459e+00 3.7732223e+00 6.0551856e-01 4.4535192e-01 6.0670504e-01 1.1765359e+00 6.9325418e-01 9.2288144e-01 9.3637892e-01 7.3535471e-01 5.3665999e-01 5.8914551e-01 1.0576043e+00 1.0106392e+00 4.5581864e-01 5.4292906e-01 4.5581864e-01 9.4009473e-01 8.6290690e-01 4.5581864e-01 9.4009473e-01 1.3885563e+00 6.5223271e-01 7.4740267e-01 1.7041201e+00 1.3421549e+00 7.2823007e-01 6.0611244e-01 1.0653845e+00 6.0121055e-01 1.1521791e+00 4.1586001e-01 7.7919451e-01 3.1037808e+00 2.8727295e+00 3.2914954e+00 2.5112138e+00 2.9950832e+00 2.8632951e+00 3.0711321e+00 1.9332545e+00 2.9709745e+00 2.3448578e+00 2.2688022e+00 2.5914913e+00 2.5183808e+00 3.0579528e+00 2.0217308e+00 2.7906520e+00 2.8719896e+00 2.4738237e+00 2.9926636e+00 2.3440712e+00 3.1967616e+00 2.3980102e+00 3.3005331e+00 3.0483897e+00 2.6752185e+00 2.7868575e+00 3.1931777e+00 3.3968373e+00 2.8794765e+00 1.9640287e+00 2.2899742e+00 2.1990648e+00 2.3082381e+00 3.4762640e+00 2.8726212e+00 2.8752807e+00 3.0841055e+00 2.8599559e+00 2.4658566e+00 2.4543822e+00 2.7852734e+00 2.9558536e+00 2.4184985e+00 1.9730073e+00 2.5941661e+00 2.5490308e+00 2.5692104e+00 2.6676432e+00 1.6855044e+00 2.4875166e+00 4.4549901e+00 3.5192877e+00 4.3283747e+00 3.9701062e+00 4.2192020e+00 5.0364062e+00 2.9740218e+00 4.6944291e+00 4.1953299e+00 4.5849301e+00 3.5249823e+00 3.7114178e+00 3.9340128e+00 3.4663826e+00 3.6308220e+00 3.7719045e+00 3.8752244e+00 5.1489675e+00 5.3620178e+00 3.4363773e+00 4.1584261e+00 3.3485848e+00 5.1375979e+00 3.3209346e+00 4.1101337e+00 4.3926615e+00 3.2186045e+00 3.2982828e+00 4.0192434e+00 4.1885158e+00 4.5274663e+00 4.8785522e+00 4.0373008e+00 3.4619693e+00 3.9494193e+00 4.6147130e+00 4.0642632e+00 3.8698115e+00 3.2042572e+00 3.8459316e+00 4.0795080e+00 3.6227082e+00 3.5192877e+00 4.3377723e+00 4.1907472e+00 3.6992844e+00 3.4507210e+00 3.6230931e+00 3.8568809e+00 3.4855556e+00 4.5581864e-01 1.2418578e-01 6.2660376e-01 5.1607523e-01 5.2655962e-01 8.0096515e-01 4.0438741e-01 3.0546431e-01 4.0438741e-01 6.4806901e-01 7.1504098e-01 4.4535192e-01 3.2586371e-01 4.9857388e-01 7.0784540e-01 6.2081167e-01 4.5581864e-01 7.0784540e-01 9.3824087e-01 4.0147421e-01 3.2586371e-01 1.5252485e+00 8.1558458e-01 3.7598397e-01 4.0147421e-01 8.1099042e-01 1.2418578e-01 6.9006418e-01 2.1269358e-01 5.0270183e-01 3.4104878e+00 3.1150013e+00 3.5735680e+00 2.6813198e+00 3.2413086e+00 3.0598576e+00 3.2985843e+00 2.0418831e+00 3.2329790e+00 2.5171713e+00 2.3816204e+00 2.7937685e+00 2.7102198e+00 3.2724336e+00 2.2054062e+00 3.0737397e+00 3.0650685e+00 2.6738621e+00 3.1983741e+00 2.5253420e+00 3.3951420e+00 2.6185785e+00 3.5201263e+00 3.2642011e+00 2.9245132e+00 3.0559405e+00 3.4672191e+00 3.6501426e+00 3.0869409e+00 2.1449779e+00 2.4611582e+00 2.3678836e+00 2.5038051e+00 3.6798850e+00 3.0627375e+00 3.0833417e+00 3.3518108e+00 3.0810282e+00 2.6595270e+00 2.6323570e+00 2.9747184e+00 3.1719581e+00 2.6119950e+00 2.0875308e+00 2.7837517e+00 2.7481947e+00 2.7653278e+00 2.8959432e+00 1.7918633e+00 2.6810089e+00 4.6579399e+00 3.7114178e+00 4.5860934e+00 4.1845117e+00 4.4368376e+00 5.3138890e+00 3.1392617e+00 4.9608959e+00 4.4280037e+00 4.8387532e+00 3.7530908e+00 3.9304148e+00 4.1764880e+00 3.6510310e+00 3.8111653e+00 3.9838913e+00 4.1019789e+00 5.4302306e+00 5.6352651e+00 3.6334268e+00 4.4011799e+00 3.5332521e+00 5.4201202e+00 3.5378545e+00 4.3430510e+00 4.6603717e+00 3.4302376e+00 3.5053533e+00 4.2335883e+00 4.4640423e+00 4.8059592e+00 5.1881280e+00 4.2497073e+00 3.6834126e+00 4.1572779e+00 4.9128775e+00 4.2687104e+00 
4.0908836e+00 3.4061169e+00 4.0989195e+00 4.3064904e+00 3.8759115e+00 3.7114178e+00 4.5707958e+00 4.4147019e+00 3.9326468e+00 3.6624594e+00 3.8494244e+00 4.0586633e+00 3.6843892e+00 4.0176783e-01 9.3801395e-01 3.7427929e-01 6.0551856e-01 4.9766035e-01 4.1449626e-01 2.5251796e-01 3.2352160e-01 7.0437330e-01 6.2024833e-01 2.4837156e-01 7.0826681e-01 8.1099042e-01 5.3665999e-01 5.7257017e-01 4.0293660e-01 5.3665999e-01 1.0321505e+00 3.2352160e-01 4.9857388e-01 1.2654843e+00 1.0181000e+00 4.9857388e-01 4.6472023e-01 6.6432544e-01 4.4535192e-01 8.1354181e-01 3.2586371e-01 4.4535192e-01 3.1652953e+00 2.9034751e+00 3.3383293e+00 2.4277398e+00 3.0113552e+00 2.8500355e+00 3.0981427e+00 1.7594421e+00 2.9944056e+00 2.3105335e+00 2.0559262e+00 2.5987706e+00 2.4171653e+00 3.0605340e+00 2.0054351e+00 2.8372675e+00 2.8748086e+00 2.4385827e+00 2.9411544e+00 2.2761106e+00 3.2149087e+00 2.3896630e+00 3.2878368e+00 3.0415738e+00 2.6906230e+00 2.8216976e+00 3.2222602e+00 3.4304873e+00 2.8822802e+00 1.8816502e+00 2.1994544e+00 2.0961718e+00 2.2729984e+00 3.4713427e+00 2.8746311e+00 2.8977380e+00 3.1239380e+00 2.8149627e+00 2.4626518e+00 2.3988202e+00 2.7512943e+00 2.9634715e+00 2.3744738e+00 1.7861398e+00 2.5679553e+00 2.5438761e+00 2.5602722e+00 2.6724144e+00 1.5132025e+00 2.4693940e+00 4.4818617e+00 3.5188715e+00 4.3693901e+00 3.9814082e+00 4.2430316e+00 5.0846750e+00 2.9378173e+00 4.7308926e+00 4.2026915e+00 4.6369546e+00 3.5592955e+00 3.7219741e+00 3.9702025e+00 3.4565374e+00 3.6485303e+00 3.8059948e+00 3.8952692e+00 5.2045888e+00 5.4038637e+00 3.3942456e+00 4.2018404e+00 3.3550631e+00 5.1838582e+00 3.3280757e+00 4.1439346e+00 4.4349731e+00 3.2284912e+00 3.3133518e+00 4.0354963e+00 4.2293106e+00 4.5707958e+00 4.9486891e+00 4.0555804e+00 3.4667821e+00 3.9402495e+00 4.6817169e+00 4.0952984e+00 3.8891597e+00 3.2181054e+00 3.8906833e+00 4.1179678e+00 3.6792093e+00 3.5188715e+00 4.3738746e+00 4.2318888e+00 3.7415007e+00 3.4482727e+00 3.6509580e+00 3.8867812e+00 3.4956677e+00 6.2660376e-01 4.1449626e-01 4.8927739e-01 7.0479928e-01 3.0546431e-01 2.5251796e-01 3.2816937e-01 5.7324170e-01 6.2538346e-01 3.7255734e-01 4.4535192e-01 5.7324170e-01 6.2482915e-01 5.3665999e-01 4.3691963e-01 6.2482915e-01 8.7420176e-01 3.2352160e-01 2.5651975e-01 1.4293465e+00 7.7360126e-01 2.5651975e-01 4.0147421e-01 7.1504098e-01 2.1269358e-01 6.2660376e-01 2.4837156e-01 4.1586001e-01 3.4011512e+00 3.1015495e+00 3.5629124e+00 2.6446848e+00 3.2242409e+00 3.0444970e+00 3.2854349e+00 1.9922212e+00 3.2208624e+00 2.4875432e+00 2.3235341e+00 2.7738624e+00 2.6761538e+00 3.2590031e+00 2.1770137e+00 3.0609726e+00 3.0488669e+00 2.6564689e+00 3.1671679e+00 2.4967542e+00 3.3778209e+00 2.5968629e+00 3.5012162e+00 3.2523394e+00 2.9093820e+00 3.0417674e+00 3.4541339e+00 3.6357801e+00 3.0695657e+00 2.1117686e+00 2.4265922e+00 2.3324013e+00 2.4794505e+00 3.6641654e+00 3.0465086e+00 3.0686943e+00 3.3395333e+00 3.0541897e+00 2.6428278e+00 2.6018930e+00 2.9558536e+00 3.1589081e+00 2.5867906e+00 2.0334278e+00 2.7624503e+00 2.7347909e+00 2.7481947e+00 2.8804789e+00 1.7294430e+00 2.6604040e+00 4.6390099e+00 3.6904099e+00 4.5721680e+00 4.1717456e+00 4.4201622e+00 5.3038306e+00 3.1101376e+00 4.9521292e+00 4.4131751e+00 4.8218478e+00 3.7351231e+00 3.9119157e+00 4.1593600e+00 3.6239123e+00 3.7806200e+00 3.9616940e+00 4.0893976e+00 5.4208566e+00 5.6225133e+00 3.6099406e+00 4.3833809e+00 3.5087649e+00 5.4106224e+00 3.5167400e+00 4.3287713e+00 4.6517741e+00 3.4091049e+00 3.4875352e+00 4.2154392e+00 4.4559638e+00 4.7948032e+00 5.1800729e+00 4.2298456e+00 
3.6705550e+00 4.1464752e+00 4.8981257e+00 4.2482702e+00 4.0788778e+00 3.3871264e+00 4.0817153e+00 4.2852718e+00 3.8517038e+00 3.6904099e+00 4.5544260e+00 4.3933832e+00 3.9084933e+00 3.6377874e+00 3.8310704e+00 4.0381483e+00 3.6684338e+00 7.9016429e-01 9.0454394e-01 7.7553525e-01 6.5633874e-01 6.8961791e-01 6.5172743e-01 6.4755655e-01 6.9325418e-01 8.5690100e-01 7.5871717e-01 9.8800009e-01 6.3977563e-01 5.0503591e-01 9.0852141e-01 6.3977563e-01 6.2482915e-01 6.2605182e-01 4.4651726e-01 1.3039319e+00 4.5470518e-01 6.8801986e-01 9.4492923e-01 6.5223271e-01 6.9325418e-01 4.9674312e-01 7.6752131e-01 5.2574978e-01 3.9950977e+00 3.6682165e+00 4.1454421e+00 3.1171350e+00 3.7869887e+00 3.5623665e+00 3.8418637e+00 2.4093459e+00 3.7913585e+00 2.9787373e+00 2.6930133e+00 3.3123070e+00 3.1668302e+00 3.7985007e+00 2.6980573e+00 3.6472316e+00 3.5682291e+00 3.1756360e+00 3.6828708e+00 2.9891564e+00 3.9102500e+00 3.1455588e+00 4.0375495e+00 3.7881557e+00 3.4756264e+00 3.6204175e+00 4.0288578e+00 4.2043522e+00 3.6069560e+00 2.6127852e+00 2.9004348e+00 2.8010550e+00 3.0013391e+00 4.1901031e+00 3.5584876e+00 3.6126340e+00 3.9170234e+00 3.5823448e+00 3.1646752e+00 3.0923554e+00 3.4563987e+00 3.7021702e+00 3.1018442e+00 2.4341346e+00 3.2724336e+00 3.2604423e+00 3.2715632e+00 3.4335342e+00 2.1375243e+00 3.1807243e+00 5.1732049e+00 4.2085267e+00 5.1402548e+00 4.7090394e+00 4.9640507e+00 5.8783901e+00 3.5970425e+00 5.5195747e+00 4.9586651e+00 5.3905797e+00 4.2919639e+00 4.4545908e+00 4.7214902e+00 4.1323720e+00 4.2962344e+00 4.5078390e+00 4.6379987e+00 5.9985593e+00 6.1927610e+00 4.1173292e+00 4.9467209e+00 4.0217355e+00 5.9855018e+00 4.0597680e+00 4.8845913e+00 5.2228901e+00 3.9505682e+00 4.0262006e+00 4.7557226e+00 5.0295898e+00 5.3695643e+00 5.7704875e+00 4.7697480e+00 4.2124808e+00 4.6690776e+00 5.4857949e+00 4.7867344e+00 4.6237863e+00 3.9220577e+00 4.6512574e+00 4.8397561e+00 4.4244727e+00 4.2085267e+00 5.1102370e+00 4.9464234e+00 4.4688286e+00 4.1733964e+00 4.3844239e+00 4.5752087e+00 4.1957914e+00 3.8934542e-01 3.7598397e-01 1.5422108e-01 3.4583729e-01 3.7598397e-01 4.4651726e-01 3.8934542e-01 3.2816937e-01 8.2929029e-01 9.3610001e-01 4.3691963e-01 5.3022554e-01 5.3309112e-01 4.3691963e-01 7.5976039e-01 3.2586371e-01 4.2667565e-01 1.0733200e+00 7.4777660e-01 2.1845981e-01 5.0905001e-01 4.3456114e-01 5.2942799e-01 5.5492130e-01 4.6472023e-01 3.7427929e-01 3.2191540e+00 2.9033203e+00 3.3725057e+00 2.3744738e+00 3.0162346e+00 2.8254861e+00 3.0850817e+00 1.6867642e+00 3.0206125e+00 2.2489449e+00 1.9859316e+00 2.5612285e+00 2.4037412e+00 3.0483897e+00 1.9482601e+00 2.8711841e+00 2.8361445e+00 2.4280197e+00 2.9151666e+00 2.2428341e+00 3.1709561e+00 2.3770599e+00 3.2772927e+00 3.0392407e+00 2.7044574e+00 2.8456337e+00 3.2543813e+00 3.4363773e+00 2.8560815e+00 1.8517858e+00 2.1570512e+00 2.0577182e+00 2.2449618e+00 3.4472201e+00 2.8333788e+00 2.8654874e+00 3.1455372e+00 2.8102985e+00 2.4278629e+00 2.3504126e+00 2.7240842e+00 2.9511072e+00 2.3468577e+00 1.7140774e+00 2.5325623e+00 2.5220817e+00 2.5303132e+00 2.6704164e+00 1.4096199e+00 2.4355523e+00 4.4339908e+00 3.4713427e+00 4.3742064e+00 3.9639546e+00 4.2144772e+00 5.1104621e+00 2.8721481e+00 4.7555981e+00 4.1997133e+00 4.6264002e+00 3.5337158e+00 3.6986235e+00 3.9580816e+00 3.3951420e+00 3.5651172e+00 3.7578933e+00 3.8852294e+00 5.2312618e+00 5.4232729e+00 3.3678461e+00 4.1846468e+00 3.2909043e+00 5.2164277e+00 3.3005331e+00 4.1286019e+00 4.4583488e+00 3.1948184e+00 3.2785487e+00 4.0052019e+00 4.2627019e+00 4.5989546e+00 4.9978258e+00 4.0193656e+00 
3.4602150e+00 3.9315374e+00 4.7096962e+00 4.0441539e+00 3.8751788e+00 3.1769821e+00 3.8841455e+00 4.0829381e+00 3.6559398e+00 3.4713427e+00 4.3535851e+00 4.1922661e+00 3.7063225e+00 3.4135466e+00 3.6262912e+00 3.8337160e+00 3.4586921e+00 4.5470518e-01 3.4378533e-01 4.9766035e-01 5.6631629e-01 3.2586371e-01 3.7255734e-01 6.5172743e-01 7.6625946e-01 9.7356960e-01 4.4651726e-01 7.0784540e-01 8.1273630e-01 4.4651726e-01 6.8757066e-01 4.4417983e-01 6.0670504e-01 1.1521791e+00 6.5172743e-01 4.5581864e-01 4.5470518e-01 5.6700421e-01 4.8036801e-01 5.1607523e-01 5.8851328e-01 5.0905001e-01 3.1972361e+00 2.8360340e+00 3.3243092e+00 2.2696891e+00 2.9531300e+00 2.6835197e+00 2.9981183e+00 1.5916843e+00 2.9546153e+00 2.1366783e+00 1.9099663e+00 2.4717637e+00 2.3219731e+00 2.9300203e+00 1.8705419e+00 2.8448793e+00 2.7044574e+00 2.2951567e+00 2.8430073e+00 2.1220080e+00 3.0654291e+00 2.3117864e+00 3.1749624e+00 2.9091307e+00 2.6433897e+00 2.8065044e+00 3.2000771e+00 3.3714688e+00 2.7511201e+00 1.7679293e+00 2.0426611e+00 1.9423536e+00 2.1457242e+00 3.3172489e+00 2.6940968e+00 2.7682296e+00 3.0935247e+00 2.7391698e+00 2.2996943e+00 2.2360451e+00 2.5729378e+00 2.8382774e+00 2.2413445e+00 1.6312555e+00 2.4031247e+00 2.3848740e+00 2.4037412e+00 2.5843380e+00 1.3803845e+00 2.3178393e+00 4.3373464e+00 3.3555449e+00 4.3029534e+00 3.8394246e+00 4.1166152e+00 5.0345534e+00 2.7564428e+00 4.6628709e+00 4.0929284e+00 4.5724955e+00 3.4657942e+00 3.6038441e+00 3.8912788e+00 3.2935105e+00 3.4985926e+00 3.6938082e+00 3.7771525e+00 5.1600819e+00 5.3477989e+00 3.2453592e+00 4.1245629e+00 3.1885527e+00 5.1389533e+00 3.2183480e+00 4.0411872e+00 4.3734530e+00 3.1116430e+00 3.1793624e+00 3.9065553e+00 4.1815080e+00 4.5288943e+00 4.9506208e+00 3.9281139e+00 3.3421539e+00 3.7789119e+00 4.6812838e+00 3.9623295e+00 3.7603559e+00 3.0782101e+00 3.8325036e+00 4.0241284e+00 3.6474081e+00 3.3555449e+00 4.2739171e+00 4.1339606e+00 3.6720159e+00 3.3349237e+00 3.5512382e+00 3.7511837e+00 3.3364095e+00 4.1312257e-01 5.0905001e-01 4.2538717e-01 3.2352160e-01 2.0656129e-01 5.0592043e-01 1.1017858e+00 1.2234738e+00 1.5422108e-01 4.1312257e-01 6.3924842e-01 1.5422108e-01 6.1968386e-01 4.0293660e-01 5.2942799e-01 7.7919451e-01 6.2482915e-01 5.6631629e-01 8.1385214e-01 2.5251796e-01 8.0032200e-01 4.2538717e-01 7.1462831e-01 3.2352160e-01 3.3601225e+00 3.0492285e+00 3.5130686e+00 2.4784234e+00 3.1574358e+00 2.9495683e+00 3.2305582e+00 1.7639552e+00 3.1543365e+00 2.3872777e+00 2.0066796e+00 2.7104259e+00 2.4866453e+00 3.1794134e+00 2.0999661e+00 3.0164205e+00 2.9733298e+00 2.5409084e+00 3.0313923e+00 2.3496997e+00 3.3211033e+00 2.5173832e+00 3.4035007e+00 3.1599783e+00 2.8424094e+00 2.9896107e+00 3.3894792e+00 3.5821206e+00 2.9960387e+00 1.9633030e+00 2.2546134e+00 2.1472921e+00 2.3729779e+00 3.5766918e+00 2.9692947e+00 3.0147040e+00 3.2887803e+00 2.9233984e+00 2.5628362e+00 2.4694536e+00 2.8371552e+00 3.0850817e+00 2.4681135e+00 1.7749726e+00 2.6585961e+00 2.6493446e+00 2.6623632e+00 2.8060305e+00 1.5124582e+00 2.5679553e+00 4.5912390e+00 3.6144502e+00 4.5212874e+00 4.0982070e+00 4.3637125e+00 5.2494547e+00 3.0086587e+00 4.8875515e+00 4.3294620e+00 4.7880147e+00 3.6912876e+00 3.8418637e+00 4.1117747e+00 3.5413188e+00 3.7389462e+00 3.9250135e+00 4.0233529e+00 5.3754694e+00 5.5627643e+00 3.4785161e+00 4.3436980e+00 3.4455964e+00 5.3512930e+00 3.4471566e+00 4.2776053e+00 4.5941143e+00 3.3449470e+00 3.4269051e+00 4.1524717e+00 4.3946634e+00 4.7366258e+00 5.1414650e+00 4.1713411e+00 3.5895201e+00 4.0467919e+00 4.8635964e+00 4.2075047e+00 
4.0127353e+00 3.3273395e+00 4.0411872e+00 4.2480617e+00 3.8321687e+00 3.6144502e+00 4.5072985e+00 4.3598062e+00 3.8780834e+00 3.5586316e+00 3.7805671e+00 3.9971028e+00 3.6003219e+00 2.5651975e-01 2.8192292e-01 3.4378533e-01 3.4378533e-01 4.0147421e-01 7.1840099e-01 8.5690100e-01 3.7598397e-01 4.2538717e-01 5.3665999e-01 3.7598397e-01 6.6827038e-01 2.1269358e-01 3.0546431e-01 1.1320702e+00 6.2988288e-01 2.0656129e-01 4.4535192e-01 4.2667565e-01 4.1449626e-01 4.3691963e-01 3.8934542e-01 2.5251796e-01 3.3428183e+00 3.0232018e+00 3.4944845e+00 2.4950353e+00 3.1381402e+00 2.9363801e+00 3.2027420e+00 1.8084630e+00 3.1409294e+00 2.3642733e+00 2.1113036e+00 2.6784604e+00 2.5283251e+00 3.1625374e+00 2.0667297e+00 2.9950041e+00 2.9473422e+00 2.5410503e+00 3.0407257e+00 2.3596992e+00 3.2858223e+00 2.4986337e+00 3.3960124e+00 3.1519721e+00 2.8255193e+00 2.9686973e+00 3.3765772e+00 3.5575681e+00 2.9720515e+00 1.9732878e+00 2.2758449e+00 2.1765379e+00 2.3630256e+00 3.5606213e+00 2.9432282e+00 2.9813163e+00 3.2672636e+00 2.9349850e+00 2.5393641e+00 2.4678343e+00 2.8348218e+00 3.0655803e+00 2.4649412e+00 1.8381372e+00 2.6459992e+00 2.6324803e+00 2.6428278e+00 2.7886501e+00 1.5384446e+00 2.5500177e+00 4.5509522e+00 3.5863729e+00 4.4953238e+00 4.0776551e+00 4.3319640e+00 5.2308062e+00 2.9880146e+00 4.8736353e+00 4.3174903e+00 4.7496159e+00 3.6545040e+00 3.8173510e+00 4.0797075e+00 3.5129177e+00 3.6850739e+00 3.8789949e+00 4.0011311e+00 5.3516635e+00 5.5447338e+00 3.4854203e+00 4.3070102e+00 3.4066692e+00 5.3366535e+00 3.4209331e+00 4.2472818e+00 4.5769661e+00 3.3144678e+00 3.3951450e+00 4.1229838e+00 4.3815083e+00 4.7200939e+00 5.1200990e+00 4.1381062e+00 3.5750015e+00 4.0416512e+00 4.8355880e+00 4.1628805e+00 3.9898962e+00 3.2934163e+00 4.0073765e+00 4.2053647e+00 3.7839552e+00 3.5863729e+00 4.4735121e+00 4.3145846e+00 3.8316735e+00 3.5355330e+00 3.7466071e+00 3.9521135e+00 3.5718733e+00 1.2418578e-01 5.2942799e-01 4.9766035e-01 2.5251796e-01 6.0060595e-01 7.1462831e-01 4.4535192e-01 3.8776762e-01 3.2352160e-01 4.4535192e-01 8.5434758e-01 1.2418578e-01 2.5251796e-01 1.2643026e+00 8.1354181e-01 4.1449626e-01 4.5581864e-01 5.6769031e-01 3.0546431e-01 6.2024833e-01 2.0656129e-01 2.5251796e-01 3.3898078e+00 3.1105347e+00 3.5575681e+00 2.6249526e+00 3.2229246e+00 3.0488669e+00 3.3004172e+00 1.9465831e+00 3.2118493e+00 2.4993166e+00 2.2473120e+00 2.7928582e+00 2.6296047e+00 3.2639046e+00 2.1933937e+00 3.0559188e+00 3.0673749e+00 2.6440180e+00 3.1486786e+00 2.4776707e+00 3.4056271e+00 2.5954147e+00 3.4958702e+00 3.2483608e+00 2.9041534e+00 3.0378828e+00 3.4425295e+00 3.6411503e+00 3.0811775e+00 2.0851901e+00 2.3997585e+00 2.2980893e+00 2.4741664e+00 3.6714798e+00 3.0661139e+00 3.0923102e+00 3.3390355e+00 3.0288896e+00 2.6566259e+00 2.5949561e+00 2.9511072e+00 3.1662394e+00 2.5768305e+00 1.9764051e+00 2.7650187e+00 2.7420671e+00 2.7570191e+00 2.8802219e+00 1.6913411e+00 2.6662783e+00 4.6723657e+00 3.7109297e+00 4.5802461e+00 4.1829734e+00 4.4414351e+00 5.3024507e+00 3.1246867e+00 4.9479218e+00 4.4123750e+00 4.8421359e+00 3.7582429e+00 3.9238367e+00 4.1750713e+00 3.6455011e+00 3.8262353e+00 3.9966275e+00 4.0997246e+00 5.4219259e+00 5.6210533e+00 3.5985833e+00 4.4045665e+00 3.5402439e+00 5.4041845e+00 3.5288526e+00 4.3467079e+00 4.6509698e+00 3.4261305e+00 3.5087649e+00 4.2340479e+00 4.4487072e+00 4.7898591e+00 5.1726632e+00 4.2521321e+00 3.6728562e+00 4.1446028e+00 4.9002452e+00 4.2845042e+00 4.0915924e+00 3.4110551e+00 4.0971854e+00 4.3142588e+00 3.8789274e+00 3.7109297e+00 4.5753785e+00 4.4261431e+00 
3.9377466e+00 3.6482464e+00 3.8509694e+00 4.0749843e+00 3.6894983e+00 5.1607523e-01 4.5470518e-01 2.5251796e-01 7.0086313e-01 8.1067767e-01 3.7598397e-01 2.8192292e-01 3.0546431e-01 3.7598397e-01 8.2654509e-01 1.2418578e-01 2.1845981e-01 1.1754055e+00 8.0326782e-01 4.2667565e-01 5.7324170e-01 4.9766035e-01 4.1449626e-01 6.0551856e-01 3.0546431e-01 2.0656129e-01 3.4780839e+00 3.2028668e+00 3.6478832e+00 2.7001131e+00 3.3123070e+00 3.1424188e+00 3.3940268e+00 2.0081113e+00 3.3027430e+00 2.5846998e+00 2.2899742e+00 2.8843735e+00 2.7014135e+00 3.3579678e+00 2.2804674e+00 3.1447330e+00 3.1614572e+00 2.7347909e+00 3.2266676e+00 2.5600441e+00 3.4989115e+00 2.6835197e+00 3.5848035e+00 3.3425343e+00 2.9944056e+00 3.1272113e+00 3.5315985e+00 3.7321938e+00 3.1736230e+00 2.1642821e+00 2.4763086e+00 2.3729779e+00 2.5613679e+00 3.7645411e+00 3.1602773e+00 3.1861493e+00 3.4298218e+00 3.1090174e+00 2.7503801e+00 2.6773131e+00 3.0414782e+00 3.2606191e+00 2.6626548e+00 2.0308266e+00 2.8549070e+00 2.8371552e+00 2.8500790e+00 2.9720515e+00 1.7439430e+00 2.7570191e+00 4.7646254e+00 3.8019108e+00 4.6714768e+00 4.2777154e+00 4.5341669e+00 5.3940437e+00 3.2096520e+00 5.0408889e+00 4.5037829e+00 4.9318274e+00 3.8493108e+00 4.0147814e+00 4.2656755e+00 3.7323432e+00 3.9122146e+00 4.0862794e+00 4.1939019e+00 5.5136120e+00 5.7113100e+00 3.6836404e+00 4.4947801e+00 3.6298047e+00 5.4953655e+00 3.6181784e+00 4.4396174e+00 4.7440323e+00 3.5161021e+00 3.6013159e+00 4.3259034e+00 4.5411167e+00 4.8804165e+00 5.2620015e+00 4.3431589e+00 3.7666196e+00 4.2394195e+00 4.9871255e+00 4.3755778e+00 4.1864815e+00 3.5032323e+00 4.1868319e+00 4.4036244e+00 3.9638383e+00 3.8019108e+00 4.6672392e+00 4.5155386e+00 4.0245873e+00 3.7349501e+00 3.9420153e+00 4.1660972e+00 3.7835217e+00 1.2418578e-01 7.0826681e-01 9.4125538e-01 1.1340084e+00 2.1845981e-01 4.4417983e-01 8.2105460e-01 2.1845981e-01 3.8776762e-01 4.1449626e-01 4.2418962e-01 9.1075311e-01 3.7255734e-01 4.8036801e-01 6.6827038e-01 2.5651975e-01 6.4704320e-01 2.0656129e-01 6.8961791e-01 3.2586371e-01 3.4686627e+00 3.1154621e+00 3.6024772e+00 2.5108475e+00 3.2290313e+00 2.9704704e+00 3.2809332e+00 1.7903952e+00 3.2349033e+00 2.3960250e+00 2.0600195e+00 2.7476411e+00 2.5610667e+00 3.2181149e+00 2.1323638e+00 3.1154281e+00 2.9881598e+00 2.5786309e+00 3.0947136e+00 2.3839455e+00 3.3448197e+00 2.5821869e+00 3.4532666e+00 3.1998457e+00 2.9200439e+00 3.0794219e+00 3.4771054e+00 3.6510733e+00 3.0328831e+00 2.0197525e+00 2.2893157e+00 2.1858172e+00 2.4166535e+00 3.6030381e+00 2.9770247e+00 3.0492285e+00 3.3713329e+00 2.9974031e+00 2.5833316e+00 2.4944673e+00 2.8535197e+00 3.1260028e+00 2.5104998e+00 1.8109223e+00 2.6804452e+00 2.6742360e+00 2.6875169e+00 2.8656044e+00 1.5447938e+00 2.5961850e+00 4.6147552e+00 3.6320149e+00 4.5850999e+00 4.1288948e+00 4.3989089e+00 5.3208661e+00 3.0146208e+00 4.9525575e+00 4.3778283e+00 4.8485483e+00 3.7416375e+00 3.8836706e+00 4.1692394e+00 3.5584719e+00 3.7545764e+00 3.9635178e+00 4.0653249e+00 5.4465238e+00 5.6318041e+00 3.5148434e+00 4.4004449e+00 3.4571917e+00 5.4256316e+00 3.4931266e+00 4.3243671e+00 4.6617210e+00 3.3863989e+00 3.4595139e+00 4.1872611e+00 4.4691596e+00 4.8126955e+00 5.2323771e+00 4.2057897e+00 3.6309135e+00 4.0714444e+00 4.9544547e+00 4.2355968e+00 4.0495222e+00 3.3563815e+00 4.1075398e+00 4.2957899e+00 3.9065020e+00 3.6320149e+00 4.5543025e+00 4.4046818e+00 3.9362563e+00 3.6038441e+00 3.8285790e+00 4.0238937e+00 3.6204282e+00 6.2538346e-01 1.0167353e+00 1.1763980e+00 1.4096146e-01 4.1449626e-01 7.4740267e-01 1.4096146e-01 
4.4535192e-01 3.7427929e-01 4.5581864e-01 8.2135873e-01 4.4535192e-01 5.0503591e-01 7.3145860e-01 2.1269358e-01 7.1421512e-01 2.5251796e-01 6.8961791e-01 2.8192292e-01 3.4295980e+00 3.0905489e+00 3.5700123e+00 2.4944673e+00 3.2020292e+00 2.9613737e+00 3.2617087e+00 1.7750284e+00 3.2049793e+00 2.3909366e+00 2.0308266e+00 2.7326472e+00 2.5286673e+00 3.2028668e+00 2.1181082e+00 3.0792693e+00 2.9816970e+00 2.5624932e+00 3.0681391e+00 2.3677174e+00 3.3352475e+00 2.5566444e+00 3.4334259e+00 3.1839972e+00 2.8907702e+00 3.0462904e+00 3.4448498e+00 3.6256155e+00 3.0181476e+00 1.9946242e+00 2.2719663e+00 2.1665831e+00 2.3980102e+00 3.5922217e+00 2.9733478e+00 3.0355056e+00 3.3410264e+00 2.9673667e+00 2.5744164e+00 2.4820750e+00 2.8455141e+00 3.1100045e+00 2.4920874e+00 1.7903952e+00 2.6704164e+00 2.6637326e+00 2.6767607e+00 2.8425716e+00 1.5257596e+00 2.5839288e+00 4.6057175e+00 3.6244539e+00 4.5619285e+00 4.1170543e+00 4.3856360e+00 5.2953657e+00 3.0110441e+00 4.9290739e+00 4.3593510e+00 4.8266994e+00 3.7227460e+00 3.8675033e+00 4.1480696e+00 3.5505922e+00 3.7479505e+00 3.9489183e+00 4.0495222e+00 5.4213759e+00 5.6069704e+00 3.4988409e+00 4.3796539e+00 3.4519560e+00 5.3990720e+00 3.4751739e+00 4.3070102e+00 4.6372963e+00 3.3701472e+00 3.4467337e+00 4.1738909e+00 4.4422690e+00 4.7852959e+00 5.2004339e+00 4.1925494e+00 3.6148707e+00 4.0613681e+00 4.9222119e+00 4.2248103e+00 4.0355816e+00 3.3448336e+00 4.0832977e+00 4.2781022e+00 3.8793995e+00 3.6244539e+00 4.5369609e+00 4.3880177e+00 3.9147164e+00 3.5857963e+00 3.8105301e+00 4.0134966e+00 3.6122845e+00 7.1799256e-01 8.0358695e-01 5.5419992e-01 4.6472023e-01 2.5651975e-01 5.5419992e-01 1.0198386e+00 3.2352160e-01 4.1586001e-01 1.2565757e+00 1.0054037e+00 4.1586001e-01 5.2574978e-01 6.4806901e-01 4.5581864e-01 8.0619006e-01 3.2586371e-01 4.1586001e-01 3.3274681e+00 3.0643176e+00 3.5031390e+00 2.5831315e+00 3.1734856e+00 3.0256804e+00 3.2593968e+00 1.9049236e+00 3.1662394e+00 2.4587503e+00 2.1948123e+00 2.7522526e+00 2.5874301e+00 3.2341367e+00 2.1493214e+00 2.9966141e+00 3.0389019e+00 2.6214821e+00 3.0978571e+00 2.4463233e+00 3.3673968e+00 2.5500177e+00 3.4578354e+00 3.2240818e+00 2.8578051e+00 2.9828212e+00 3.3905763e+00 3.5904118e+00 3.0455175e+00 2.0467937e+00 2.3640301e+00 2.2638728e+00 2.4385827e+00 3.6424937e+00 3.0387448e+00 3.0543001e+00 3.2867025e+00 2.9810914e+00 2.6291524e+00 2.5579619e+00 2.9291061e+00 3.1351569e+00 2.5421955e+00 1.9274228e+00 2.7359835e+00 2.7196686e+00 2.7293044e+00 2.8418790e+00 1.6234861e+00 2.6349941e+00 4.6268918e+00 3.6735954e+00 4.5276539e+00 4.1519514e+00 4.3981255e+00 5.2510005e+00 3.0847983e+00 4.9049143e+00 4.3738893e+00 4.7804645e+00 3.7059360e+00 3.8806135e+00 4.1210200e+00 3.6007179e+00 3.7674167e+00 3.9401211e+00 4.0632074e+00 5.3683319e+00 5.5675337e+00 3.5650560e+00 4.3467079e+00 3.4964385e+00 5.3534247e+00 3.4817971e+00 4.3007421e+00 4.6057628e+00 3.3796707e+00 3.4684743e+00 4.1910954e+00 4.4033511e+00 4.7374076e+00 5.1107389e+00 4.2056854e+00 3.6417452e+00 4.1251907e+00 4.8290847e+00 4.2339606e+00 4.0576409e+00 3.3702841e+00 4.0376849e+00 4.2550416e+00 3.8015574e+00 3.6735954e+00 4.5249462e+00 4.3662620e+00 3.8699102e+00 3.5979726e+00 3.8007870e+00 4.0252278e+00 3.6568095e+00 3.0811765e-01 1.0065841e+00 9.1075311e-01 6.2538346e-01 1.0065841e+00 1.2125198e+00 7.0086313e-01 6.1623531e-01 1.8279039e+00 1.0613462e+00 6.9369532e-01 4.8135521e-01 1.1149070e+00 3.0811765e-01 9.7098574e-01 4.0293660e-01 8.0358695e-01 3.4154940e+00 3.1439160e+00 3.5872997e+00 2.8054691e+00 3.2839149e+00 3.1127876e+00 
3.3274872e+00 2.2159139e+00 3.2598167e+00 2.6167778e+00 2.5758644e+00 2.8523754e+00 2.8256291e+00 3.3122081e+00 2.3003824e+00 3.0947136e+00 3.1160876e+00 2.7384939e+00 3.2940867e+00 2.6281170e+00 3.4401593e+00 2.6851662e+00 3.5758474e+00 3.3024121e+00 2.9636544e+00 3.0850893e+00 3.4935692e+00 3.6787177e+00 3.1382691e+00 2.2643860e+00 2.5838312e+00 2.4957147e+00 2.5876691e+00 3.7272092e+00 3.1148696e+00 3.1192689e+00 3.3731473e+00 3.1641238e+00 2.7164367e+00 2.7371177e+00 3.0433470e+00 3.2094276e+00 2.6989752e+00 2.2729593e+00 2.8576425e+00 2.7954161e+00 2.8234677e+00 2.9407805e+00 1.9980146e+00 2.7511201e+00 4.6993349e+00 3.7716613e+00 4.6090409e+00 4.2170163e+00 4.4740971e+00 5.3233695e+00 3.2312218e+00 4.9715358e+00 4.4649192e+00 4.8634712e+00 3.7907269e+00 3.9777091e+00 4.2102908e+00 3.7293867e+00 3.8887827e+00 4.0321207e+00 4.1304736e+00 5.4336694e+00 5.6535520e+00 3.7088191e+00 4.4333012e+00 3.6017014e+00 5.4287623e+00 3.5939935e+00 4.3696884e+00 4.6684884e+00 3.4865359e+00 3.5518450e+00 4.2777882e+00 4.4716883e+00 4.8204359e+00 5.1830573e+00 4.2975164e+00 3.7189281e+00 4.1915116e+00 4.9282164e+00 4.3126073e+00 4.1182993e+00 3.4568393e+00 4.1297239e+00 4.3496867e+00 3.9207320e+00 3.7716613e+00 4.6018276e+00 4.4564872e+00 3.9827168e+00 3.7295388e+00 3.8905364e+00 4.1037903e+00 3.7281480e+00 1.1474460e+00 1.0344911e+00 7.0043186e-01 1.1474460e+00 1.4311891e+00 8.2654509e-01 7.6787403e-01 1.9730918e+00 1.3073038e+00 7.9878917e-01 6.2407309e-01 1.2632199e+00 5.0503591e-01 1.1833480e+00 5.0905001e-01 9.4080461e-01 3.4392518e+00 3.2008338e+00 3.6261197e+00 2.9076510e+00 3.3439089e+00 3.2073032e+00 3.3907558e+00 2.3329978e+00 3.3168472e+00 2.7081478e+00 2.6929215e+00 2.9283888e+00 2.9264154e+00 3.3942456e+00 2.3848677e+00 3.1312883e+00 3.2027420e+00 2.8385969e+00 3.3781905e+00 2.7324845e+00 3.5133135e+00 2.7602023e+00 3.6563535e+00 3.3905763e+00 3.0256209e+00 3.1312883e+00 3.5423793e+00 3.7299679e+00 3.2179031e+00 2.3661539e+00 2.6906231e+00 2.6054995e+00 2.6801752e+00 3.8138933e+00 3.2027420e+00 3.1888037e+00 3.4187265e+00 3.2459231e+00 2.8060305e+00 2.8359967e+00 3.1453916e+00 3.2886661e+00 2.7943622e+00 2.3874574e+00 2.9533314e+00 2.8877105e+00 2.9141136e+00 3.0147886e+00 2.0992326e+00 2.8425716e+00 4.7630756e+00 3.8535531e+00 4.6550014e+00 4.2947468e+00 4.5389196e+00 5.3632740e+00 3.3234239e+00 5.0232338e+00 4.5363757e+00 4.8985515e+00 3.8441304e+00 4.0476997e+00 4.2599433e+00 3.8106152e+00 3.9516603e+00 4.0850903e+00 4.1995425e+00 5.4671477e+00 5.6957748e+00 3.8037734e+00 4.4771379e+00 3.6783395e+00 5.4690202e+00 3.6632177e+00 4.4259826e+00 4.7160634e+00 3.5560806e+00 3.6239123e+00 4.3464973e+00 4.5187406e+00 4.8619146e+00 5.2001625e+00 4.3635746e+00 3.7979761e+00 4.2843668e+00 4.9451734e+00 4.3709882e+00 4.1899199e+00 3.5299194e+00 4.1707976e+00 4.3972344e+00 3.9457376e+00 3.8535531e+00 4.6546556e+00 4.5024055e+00 4.0227020e+00 3.8004969e+00 3.9484773e+00 4.1635108e+00 3.8079860e+00 3.0811765e-01 6.5223271e-01 0.0000000e+00 5.0991930e-01 3.2586371e-01 4.2667565e-01 8.3172002e-01 5.0991930e-01 5.6769031e-01 7.5082357e-01 2.1845981e-01 7.0479928e-01 3.0811765e-01 6.4755655e-01 2.1845981e-01 3.4865562e+00 3.1726595e+00 3.6377960e+00 2.5987470e+00 3.2814045e+00 3.0627375e+00 3.3515846e+00 1.8841865e+00 3.2769379e+00 2.5038079e+00 2.1311468e+00 2.8311678e+00 2.6104387e+00 3.2962520e+00 2.2214438e+00 3.1433122e+00 3.0878634e+00 2.6552472e+00 3.1570103e+00 2.4668912e+00 3.4394878e+00 2.6411293e+00 3.5233648e+00 3.2747247e+00 2.9659871e+00 3.1154783e+00 3.5134741e+00 3.7059620e+00 
3.1148696e+00 2.0851901e+00 2.3731428e+00 2.2655571e+00 2.4927109e+00 3.6920087e+00 3.0823446e+00 3.1337459e+00 3.4135200e+00 3.0481703e+00 2.6780487e+00 2.5874301e+00 2.9489507e+00 3.2027420e+00 2.5873149e+00 1.8973383e+00 2.7738355e+00 2.7632614e+00 2.7778954e+00 2.9269923e+00 1.6390769e+00 2.6848587e+00 4.7106706e+00 3.7313856e+00 4.6446321e+00 4.2142736e+00 4.4836580e+00 5.3716885e+00 3.1250284e+00 5.0074019e+00 4.4485220e+00 4.9128219e+00 3.8150636e+00 3.9624529e+00 4.2358121e+00 3.6602286e+00 3.8605980e+00 4.0488387e+00 4.1418643e+00 5.4970002e+00 5.6855224e+00 3.5964347e+00 4.4685630e+00 3.5634461e+00 5.4730406e+00 3.5693950e+00 4.3989089e+00 4.7150659e+00 3.4668130e+00 3.5464993e+00 4.2723380e+00 4.5155386e+00 4.8594290e+00 5.2647079e+00 4.2921213e+00 3.7064459e+00 4.1581964e+00 4.9913682e+00 4.3286007e+00 4.1303097e+00 3.4468286e+00 4.1669742e+00 4.3729308e+00 3.9624170e+00 3.7313856e+00 4.6297577e+00 4.4844827e+00 4.0056359e+00 3.6817961e+00 3.9035218e+00 4.1179678e+00 3.7164366e+00 5.2942799e-01 3.0811765e-01 6.0611244e-01 3.2586371e-01 3.0546431e-01 9.4125538e-01 6.0060595e-01 5.2574978e-01 8.1558458e-01 2.8507955e-01 6.4755655e-01 4.1312257e-01 5.5419992e-01 2.0656129e-01 3.7045940e+00 3.4142500e+00 3.8690719e+00 2.8688189e+00 3.5226542e+00 3.3385842e+00 3.6010215e+00 2.1580776e+00 3.5196916e+00 2.7652601e+00 2.4083873e+00 3.0820950e+00 2.8783309e+00 3.5617386e+00 2.4693940e+00 3.3658240e+00 3.3558214e+00 2.9322808e+00 3.4112518e+00 2.7424260e+00 3.6945405e+00 2.8867565e+00 3.7848217e+00 3.5471494e+00 3.2076743e+00 3.3449470e+00 3.7498842e+00 3.9450818e+00 3.3735875e+00 2.3490515e+00 2.6490839e+00 2.5444216e+00 2.7549369e+00 3.9621873e+00 3.3527310e+00 3.3865418e+00 3.6475099e+00 3.3024121e+00 2.9459653e+00 2.8566322e+00 3.2311957e+00 3.4653703e+00 2.8535197e+00 2.1708533e+00 3.0455175e+00 3.0364473e+00 3.0465086e+00 3.1799184e+00 1.8842354e+00 2.9509414e+00 4.9594922e+00 3.9924292e+00 4.8844442e+00 4.4804205e+00 4.7354764e+00 5.6130421e+00 3.3873806e+00 5.2583931e+00 4.7090394e+00 5.1413739e+00 4.0532097e+00 4.2156327e+00 4.4735121e+00 3.9153764e+00 4.0928954e+00 4.2826383e+00 4.4004808e+00 5.7341534e+00 5.9271927e+00 3.8700742e+00 4.7016733e+00 3.8156003e+00 5.7155096e+00 3.8175051e+00 4.6462020e+00 4.9620553e+00 3.7143727e+00 3.8000004e+00 4.5254820e+00 4.7613782e+00 5.0991856e+00 5.4888822e+00 4.5411167e+00 3.9718373e+00 4.4397363e+00 5.2074442e+00 4.5701358e+00 4.3916992e+00 3.6996802e+00 4.3970196e+00 4.6045465e+00 4.1691696e+00 3.9924292e+00 4.8725396e+00 4.7150659e+00 4.2256400e+00 3.9292780e+00 4.1454529e+00 4.3599285e+00 3.9798670e+00 6.5223271e-01 1.1268457e+00 4.1449626e-01 5.0090417e-01 1.3784393e+00 1.1053488e+00 5.8851328e-01 6.6827038e-01 7.6787403e-01 4.8036801e-01 9.0852141e-01 2.8192292e-01 5.0905001e-01 3.5117473e+00 3.2719724e+00 3.6961290e+00 2.8060101e+00 3.3799936e+00 3.2401159e+00 3.4709594e+00 2.1293854e+00 3.3643376e+00 2.6848587e+00 2.4115946e+00 2.9723937e+00 2.7984009e+00 3.4455106e+00 2.3749088e+00 3.1913397e+00 3.2574960e+00 2.8320532e+00 3.3150921e+00 2.6637326e+00 3.5883493e+00 2.7640668e+00 3.6695251e+00 3.4317240e+00 3.0617355e+00 3.1820648e+00 3.5855784e+00 3.7950050e+00 3.2620096e+00 2.2645766e+00 2.5831070e+00 2.4810276e+00 2.6563403e+00 3.8575846e+00 3.2574960e+00 3.2718360e+00 3.4856613e+00 3.1913594e+00 2.8466990e+00 2.7801709e+00 3.1437608e+00 3.3466419e+00 2.7595194e+00 2.1498672e+00 2.9543365e+00 2.9330570e+00 2.9459653e+00 3.0511838e+00 1.8555964e+00 2.8534301e+00 4.8490742e+00 3.8962785e+00 4.7308926e+00 4.3643842e+00 
4.6144072e+00 5.4442665e+00 3.3129652e+00 5.0997539e+00 4.5816539e+00 4.9883395e+00 3.9213002e+00 4.0961125e+00 4.3313403e+00 3.8279951e+00 4.0003537e+00 4.1625074e+00 4.2731561e+00 5.5604940e+00 5.7635900e+00 3.7812981e+00 4.5580745e+00 3.7236571e+00 5.5437155e+00 3.6992145e+00 4.5120086e+00 4.8012240e+00 3.5989213e+00 3.6872980e+00 4.4084575e+00 4.5949523e+00 4.9306472e+00 5.2918657e+00 4.4250143e+00 3.8510143e+00 4.3338058e+00 5.0198276e+00 4.4571663e+00 4.2687771e+00 3.5910375e+00 4.2454653e+00 4.4729144e+00 4.0139676e+00 3.8962785e+00 4.7377574e+00 4.5853438e+00 4.0877338e+00 3.8179781e+00 4.0161568e+00 4.2490407e+00 3.8755800e+00 5.0991930e-01 3.2586371e-01 4.2667565e-01 8.3172002e-01 5.0991930e-01 5.6769031e-01 7.5082357e-01 2.1845981e-01 7.0479928e-01 3.0811765e-01 6.4755655e-01 2.1845981e-01 3.4865562e+00 3.1726595e+00 3.6377960e+00 2.5987470e+00 3.2814045e+00 3.0627375e+00 3.3515846e+00 1.8841865e+00 3.2769379e+00 2.5038079e+00 2.1311468e+00 2.8311678e+00 2.6104387e+00 3.2962520e+00 2.2214438e+00 3.1433122e+00 3.0878634e+00 2.6552472e+00 3.1570103e+00 2.4668912e+00 3.4394878e+00 2.6411293e+00 3.5233648e+00 3.2747247e+00 2.9659871e+00 3.1154783e+00 3.5134741e+00 3.7059620e+00 3.1148696e+00 2.0851901e+00 2.3731428e+00 2.2655571e+00 2.4927109e+00 3.6920087e+00 3.0823446e+00 3.1337459e+00 3.4135200e+00 3.0481703e+00 2.6780487e+00 2.5874301e+00 2.9489507e+00 3.2027420e+00 2.5873149e+00 1.8973383e+00 2.7738355e+00 2.7632614e+00 2.7778954e+00 2.9269923e+00 1.6390769e+00 2.6848587e+00 4.7106706e+00 3.7313856e+00 4.6446321e+00 4.2142736e+00 4.4836580e+00 5.3716885e+00 3.1250284e+00 5.0074019e+00 4.4485220e+00 4.9128219e+00 3.8150636e+00 3.9624529e+00 4.2358121e+00 3.6602286e+00 3.8605980e+00 4.0488387e+00 4.1418643e+00 5.4970002e+00 5.6855224e+00 3.5964347e+00 4.4685630e+00 3.5634461e+00 5.4730406e+00 3.5693950e+00 4.3989089e+00 4.7150659e+00 3.4668130e+00 3.5464993e+00 4.2723380e+00 4.5155386e+00 4.8594290e+00 5.2647079e+00 4.2921213e+00 3.7064459e+00 4.1581964e+00 4.9913682e+00 4.3286007e+00 4.1303097e+00 3.4468286e+00 4.1669742e+00 4.3729308e+00 3.9624170e+00 3.7313856e+00 4.6297577e+00 4.4844827e+00 4.0056359e+00 3.6817961e+00 3.9035218e+00 4.1179678e+00 3.7164366e+00 7.3813096e-01 6.8961791e-01 7.0086313e-01 2.0000000e-01 7.3805807e-01 1.0030700e+00 4.0293660e-01 9.4352681e-01 2.5251796e-01 1.0120221e+00 6.2024833e-01 3.8265307e+00 3.4552560e+00 3.9537404e+00 2.8022534e+00 3.5701225e+00 3.2863508e+00 3.6126198e+00 2.0524386e+00 3.5845127e+00 2.6848587e+00 2.2888897e+00 3.0684384e+00 2.8791325e+00 3.5464993e+00 2.4469125e+00 3.4686755e+00 3.2961206e+00 2.9072057e+00 3.4107902e+00 2.6959009e+00 3.6545040e+00 2.9195876e+00 3.7806187e+00 3.5312474e+00 3.2669151e+00 3.4295849e+00 3.8277021e+00 3.9909030e+00 3.3562690e+00 2.3465937e+00 2.5906376e+00 2.4892531e+00 2.7423171e+00 3.9193693e+00 3.2780620e+00 3.3708389e+00 3.7190229e+00 3.3268984e+00 2.8983020e+00 2.7954161e+00 3.1611584e+00 3.4557351e+00 2.8328337e+00 2.0658700e+00 2.9919517e+00 2.9960210e+00 3.0059712e+00 3.2048547e+00 1.8038968e+00 2.9141136e+00 4.9189452e+00 3.9342203e+00 4.9208723e+00 4.4494735e+00 4.7160542e+00 5.6635213e+00 3.2909043e+00 5.2946188e+00 4.7062325e+00 5.1779277e+00 4.0657707e+00 4.2053647e+00 4.4986279e+00 3.8509694e+00 4.0384339e+00 4.2739171e+00 4.3927415e+00 5.7909894e+00 5.9706827e+00 3.8237409e+00 4.7267599e+00 3.7490739e+00 5.7704875e+00 3.8154018e+00 4.6503268e+00 5.0044958e+00 3.7060524e+00 3.7762575e+00 4.5037232e+00 4.8164539e+00 5.1574084e+00 5.5860081e+00 4.5195237e+00 3.9601358e+00 
4.3901702e+00 5.2992055e+00 4.5412859e+00 4.3739561e+00 3.6694918e+00 4.4403343e+00 4.6130415e+00 4.2323959e+00 3.9342203e+00 4.8773871e+00 4.7190595e+00 4.2559866e+00 3.9201797e+00 4.1523445e+00 4.3289315e+00 3.9302319e+00 2.1845981e-01 1.1486378e+00 7.0784540e-01 4.0438741e-01 5.0503591e-01 4.4651726e-01 4.0147421e-01 5.0905001e-01 3.2352160e-01 1.4096146e-01 3.4156574e+00 3.1235447e+00 3.5778307e+00 2.6097685e+00 3.2346686e+00 3.0478400e+00 3.3101102e+00 1.9193093e+00 3.2270948e+00 2.4922287e+00 2.2081369e+00 2.7965957e+00 2.6184141e+00 3.2685282e+00 2.1916898e+00 3.0773654e+00 3.0673749e+00 2.6423274e+00 3.1444983e+00 2.4678343e+00 3.4088888e+00 2.6016025e+00 3.4988409e+00 3.2521427e+00 2.9171710e+00 3.0559188e+00 3.4597125e+00 3.6553612e+00 3.0847983e+00 2.0765921e+00 2.3848740e+00 2.2817008e+00 2.4722095e+00 3.6724425e+00 3.0650478e+00 3.0981264e+00 3.3567173e+00 3.0288896e+00 2.6566259e+00 2.5851693e+00 2.9455446e+00 3.1719381e+00 2.5729378e+00 1.9450955e+00 2.7611864e+00 2.7431084e+00 2.7570191e+00 2.8884401e+00 1.6625998e+00 2.6648989e+00 4.6768871e+00 3.7101281e+00 4.5948688e+00 4.1876541e+00 4.4480565e+00 5.3202405e+00 3.1169790e+00 4.9630576e+00 4.4189653e+00 4.8572313e+00 3.7684708e+00 3.9292780e+00 4.1872611e+00 3.6418662e+00 3.8262353e+00 4.0041417e+00 4.1076175e+00 5.4411191e+00 5.6370076e+00 3.5929878e+00 4.4174698e+00 3.5389106e+00 5.4223305e+00 3.5340177e+00 4.3569684e+00 4.6672392e+00 3.4309563e+00 3.5133135e+00 4.2392500e+00 4.4661721e+00 4.8075242e+00 5.1977119e+00 4.2572858e+00 3.6784045e+00 4.1454521e+00 4.9233665e+00 4.2900309e+00 4.0984960e+00 3.4145942e+00 4.1120703e+00 4.3243538e+00 3.8957047e+00 3.7101281e+00 4.5857922e+00 4.4360042e+00 3.9497198e+00 3.6509512e+00 3.8600234e+00 4.0800357e+00 3.6915258e+00 1.2223099e+00 6.2024833e-01 3.7255734e-01 6.2081167e-01 5.0905001e-01 3.7598397e-01 4.4651726e-01 3.4583729e-01 2.1269358e-01 3.6091470e+00 3.3105724e+00 3.7708932e+00 2.7979838e+00 3.4251430e+00 3.2391031e+00 3.4951642e+00 2.1075335e+00 3.4235403e+00 2.6687055e+00 2.3989464e+00 2.9762945e+00 2.8216056e+00 3.4603628e+00 2.3673218e+00 3.2680041e+00 3.2498302e+00 2.8414525e+00 3.3361912e+00 2.6626548e+00 3.5849794e+00 2.7906520e+00 3.6926714e+00 3.4497714e+00 3.1105675e+00 3.2468925e+00 3.6557661e+00 3.8432801e+00 3.2705166e+00 2.2719663e+00 2.5786309e+00 2.4784234e+00 2.6629602e+00 3.8619321e+00 3.2465133e+00 3.2780765e+00 3.5475145e+00 3.2266676e+00 2.8416143e+00 2.7719788e+00 3.1392934e+00 3.3624692e+00 2.7656089e+00 2.1335398e+00 2.9496515e+00 2.9338590e+00 2.9447165e+00 3.0808399e+00 1.8330979e+00 2.8520904e+00 4.8482992e+00 3.8884996e+00 4.7814945e+00 4.3763964e+00 4.6280797e+00 5.5131379e+00 3.2922206e+00 5.1594703e+00 4.6125121e+00 5.0342965e+00 3.9453171e+00 4.1137002e+00 4.3683295e+00 3.8151836e+00 3.9816719e+00 4.1715493e+00 4.2963195e+00 5.6322635e+00 5.8289682e+00 3.7874513e+00 4.5945190e+00 3.7079546e+00 5.6180203e+00 3.7164611e+00 4.5394421e+00 4.8614468e+00 3.6107037e+00 3.6929889e+00 4.4201622e+00 4.6634723e+00 5.0015266e+00 5.3906681e+00 4.4348302e+00 3.8718490e+00 4.3427947e+00 5.1078309e+00 4.4583488e+00 4.2864208e+00 3.5920050e+00 4.2919639e+00 4.4953238e+00 4.0619633e+00 3.8884996e+00 4.7650352e+00 4.6046026e+00 4.1173867e+00 3.8320624e+00 4.0389546e+00 4.2480032e+00 3.8727359e+00 9.0049692e-01 1.2307737e+00 1.5483011e+00 7.1462831e-01 1.5272277e+00 9.0074515e-01 1.4700179e+00 1.0331736e+00 3.7896333e+00 3.4304205e+00 3.9179666e+00 2.7683644e+00 3.5321772e+00 3.2685282e+00 3.5962433e+00 2.0250421e+00 3.5485736e+00 2.6642702e+00 
2.2244833e+00 3.0435803e+00 2.8325748e+00 3.5230967e+00 2.4205131e+00 3.4307077e+00 3.2815626e+00 2.8843735e+00 3.3658240e+00 2.6687055e+00 3.6389628e+00 2.8830783e+00 3.7490739e+00 3.5087649e+00 3.2305801e+00 3.3908902e+00 3.7876239e+00 3.9561876e+00 3.3306114e+00 2.3112968e+00 2.5604155e+00 2.4585271e+00 2.7122813e+00 3.8971376e+00 3.2667813e+00 3.3674720e+00 3.6849783e+00 3.2837729e+00 2.8840079e+00 2.7685572e+00 3.1442943e+00 3.4335342e+00 2.8028287e+00 2.0286682e+00 2.9704704e+00 2.9822537e+00 2.9868163e+00 3.1741954e+00 1.7644184e+00 2.8908296e+00 4.8985114e+00 3.9102500e+00 4.8863496e+00 4.4272873e+00 4.6888407e+00 5.6294495e+00 3.2706756e+00 5.2636641e+00 4.6765452e+00 5.1547576e+00 4.0378383e+00 4.1743230e+00 4.4638518e+00 3.8230269e+00 4.0056625e+00 4.2450961e+00 4.3676124e+00 5.7745449e+00 5.9344852e+00 3.7932062e+00 4.6942925e+00 3.7245053e+00 5.7354045e+00 3.7814598e+00 4.6271849e+00 4.9763401e+00 3.6736483e+00 3.7511498e+00 4.4747039e+00 4.7841150e+00 5.1205765e+00 5.5660104e+00 4.4889834e+00 3.9348528e+00 4.3728356e+00 5.2548517e+00 4.5226167e+00 4.3526639e+00 3.6449787e+00 4.4041536e+00 4.5787890e+00 4.1879339e+00 3.9102500e+00 4.8490587e+00 4.6897155e+00 4.2150171e+00 3.8841455e+00 4.1203091e+00 4.3121165e+00 3.9109678e+00 6.7975091e-01 9.0056222e-01 4.1586001e-01 8.2275389e-01 2.0656129e-01 9.4287188e-01 6.0121055e-01 3.8264361e+00 3.4551376e+00 3.9537404e+00 2.8149627e+00 3.5710248e+00 3.2874334e+00 3.6122384e+00 2.0711789e+00 3.5849006e+00 2.6879715e+00 2.3281827e+00 3.0685922e+00 2.8946126e+00 3.5468964e+00 2.4478108e+00 3.4686755e+00 3.2962520e+00 2.9098194e+00 3.4214793e+00 2.7033034e+00 3.6543993e+00 2.9209919e+00 3.7841436e+00 3.5321717e+00 3.2673909e+00 3.4297053e+00 3.8284763e+00 3.9909892e+00 3.3567173e+00 2.3533545e+00 2.6019240e+00 2.5015675e+00 2.7452885e+00 3.9207248e+00 3.2781950e+00 3.3698144e+00 3.7190229e+00 3.3356294e+00 2.8984764e+00 2.8022534e+00 3.1646752e+00 3.4558535e+00 2.8373077e+00 2.0905239e+00 2.9944056e+00 2.9961831e+00 3.0065426e+00 3.2053509e+00 1.8216743e+00 2.9155237e+00 4.9187518e+00 3.9355645e+00 4.9209267e+00 4.4497146e+00 4.7161140e+00 5.6635613e+00 3.2956846e+00 5.2947833e+00 4.7084108e+00 5.1767384e+00 4.0656879e+00 4.2065257e+00 4.4986941e+00 3.8543544e+00 4.0391220e+00 4.2738429e+00 4.3928114e+00 5.7890564e+00 5.9715517e+00 3.8320624e+00 4.7267005e+00 3.7498842e+00 5.7708014e+00 3.8168398e+00 4.6501080e+00 5.0044433e+00 3.7068836e+00 3.7763550e+00 4.5042646e+00 4.8165110e+00 5.1578102e+00 5.5839155e+00 4.5200609e+00 3.9608542e+00 4.3918790e+00 5.2992517e+00 4.5407542e+00 4.3739561e+00 3.6695956e+00 4.4403343e+00 4.6130415e+00 4.2323959e+00 3.9355645e+00 4.8773316e+00 4.7188476e+00 4.2560614e+00 3.9234348e+00 4.1524235e+00 4.3283407e+00 3.9303212e+00 3.8934542e-01 5.4292906e-01 4.4535192e-01 5.3309112e-01 4.5581864e-01 4.2538717e-01 3.3319064e+00 3.0058998e+00 3.4822680e+00 2.4967542e+00 3.1249915e+00 2.9284660e+00 3.1836200e+00 1.8265014e+00 3.1331426e+00 2.3481462e+00 2.1458939e+00 2.6572703e+00 2.5437119e+00 3.1519721e+00 2.0470220e+00 2.9815576e+00 2.9302134e+00 2.5421955e+00 3.0374850e+00 2.3632684e+00 3.2598750e+00 2.4873223e+00 3.3884269e+00 3.1477087e+00 2.8156804e+00 2.9556616e+00 3.3682605e+00 3.5401725e+00 2.9561205e+00 1.9790422e+00 2.2832934e+00 2.1885968e+00 2.3571494e+00 3.5486864e+00 2.9260462e+00 2.9587612e+00 3.2530866e+00 2.9361879e+00 2.5256527e+00 2.4631898e+00 2.8325946e+00 3.0534395e+00 2.4619105e+00 1.8618589e+00 2.6377354e+00 2.6235630e+00 2.6314198e+00 2.7785210e+00 1.5475473e+00 2.5392051e+00 
4.5179415e+00 3.5641186e+00 4.4752183e+00 4.0626016e+00 4.3069105e+00 5.2164277e+00 2.9689697e+00 4.8634846e+00 4.3067423e+00 4.7194915e+00 3.6262912e+00 3.7979761e+00 4.0547736e+00 3.4875352e+00 3.6400497e+00 3.8418637e+00 3.9849753e+00 5.3352888e+00 5.5294519e+00 3.4830211e+00 4.2776053e+00 3.3760149e+00 5.3253207e+00 3.4004918e+00 4.2238184e+00 4.5645220e+00 3.2914954e+00 3.3718862e+00 4.0995928e+00 4.3725648e+00 4.7075069e+00 5.1063282e+00 4.1113292e+00 3.5651462e+00 4.0375056e+00 4.8132427e+00 4.1268905e+00 3.9732869e+00 3.2685282e+00 3.9810803e+00 4.1706050e+00 3.7449914e+00 3.5641186e+00 4.4464848e+00 4.2774083e+00 3.7941767e+00 3.5148434e+00 3.7206115e+00 3.9162695e+00 3.5510950e+00 8.6137722e-01 3.2352160e-01 7.6166891e-01 4.2667565e-01 6.2660376e-01 3.0634640e+00 2.7392828e+00 3.2125175e+00 2.3391296e+00 2.8734025e+00 2.6707502e+00 2.9145708e+00 1.7569738e+00 2.8671376e+00 2.1479276e+00 2.1308063e+00 2.4108292e+00 2.3854031e+00 2.8851564e+00 1.8368900e+00 2.7201546e+00 2.6724144e+00 2.2982662e+00 2.8483417e+00 2.1701312e+00 3.0045018e+00 2.2531409e+00 3.1444983e+00 2.8783309e+00 2.5586145e+00 2.6967931e+00 3.1073497e+00 3.2779401e+00 2.7018605e+00 1.8104298e+00 2.1219691e+00 2.0368741e+00 2.1367260e+00 3.2905600e+00 2.6692615e+00 2.6939411e+00 2.9863438e+00 2.7320931e+00 2.2698938e+00 2.2722516e+00 2.5933163e+00 2.7845473e+00 2.2472326e+00 1.8195937e+00 2.4037412e+00 2.3571494e+00 2.3781826e+00 2.5200525e+00 1.5411691e+00 2.3001580e+00 4.2694227e+00 3.3237039e+00 4.2115652e+00 3.7930080e+00 4.0498216e+00 4.9425633e+00 2.7652100e+00 4.5847429e+00 4.0464464e+00 4.4666326e+00 3.3738930e+00 3.5479683e+00 3.8004969e+00 3.2711669e+00 3.4346585e+00 3.6043418e+00 3.7146255e+00 5.0600559e+00 5.2639977e+00 3.2609948e+00 4.0260071e+00 3.1481222e+00 5.0506841e+00 3.1599876e+00 3.9594081e+00 4.2851208e+00 3.0501817e+00 3.1181310e+00 3.8476631e+00 4.0931004e+00 4.4378867e+00 4.8317916e+00 3.8652515e+00 3.2970551e+00 3.7653550e+00 4.5583070e+00 3.8836720e+00 3.7008091e+00 3.0188386e+00 3.7282629e+00 3.9300286e+00 3.5186510e+00 3.3237039e+00 4.1891573e+00 4.0376133e+00 3.5648733e+00 3.2885200e+00 3.4693398e+00 3.6732275e+00 3.2917360e+00 8.1385214e-01 2.5251796e-01 7.6787403e-01 3.2586371e-01 3.5846101e+00 3.2546607e+00 3.7315988e+00 2.6609901e+00 3.3659358e+00 3.1439065e+00 3.4298218e+00 1.9383545e+00 3.3723944e+00 2.5580763e+00 2.1777421e+00 2.8983020e+00 2.6954219e+00 3.3808052e+00 2.2790175e+00 3.2344170e+00 3.1579733e+00 2.7462372e+00 3.2292608e+00 2.5444216e+00 3.5028333e+00 2.7205595e+00 3.6067948e+00 3.3670797e+00 3.0557792e+00 3.2048395e+00 3.6088607e+00 3.7891577e+00 3.1900581e+00 2.1641873e+00 2.4447974e+00 2.3408917e+00 2.5700421e+00 3.7710363e+00 3.1506190e+00 3.2040124e+00 3.5027314e+00 3.1322650e+00 2.7512730e+00 2.6533250e+00 3.0299528e+00 3.2862185e+00 2.6656374e+00 1.9477421e+00 2.8481000e+00 2.8454952e+00 2.8544453e+00 3.0132514e+00 1.6658308e+00 2.7590026e+00 4.7688362e+00 3.7943375e+00 4.7263320e+00 4.2949506e+00 4.5532150e+00 5.4636648e+00 3.1768366e+00 5.1030228e+00 4.5342701e+00 4.9831843e+00 3.8820426e+00 4.0358897e+00 4.3088496e+00 3.7133362e+00 3.8949006e+00 4.1025163e+00 4.2237423e+00 5.5888590e+00 5.7742361e+00 3.6743314e+00 4.5370189e+00 3.6141649e+00 5.5687868e+00 3.6395551e+00 4.4736906e+00 4.8085584e+00 3.5338163e+00 3.6144782e+00 4.3417782e+00 4.6138035e+00 4.9524148e+00 5.3625968e+00 4.3570317e+00 3.7932922e+00 4.2488997e+00 5.0747397e+00 4.3832471e+00 4.2110583e+00 3.5113289e+00 4.2399031e+00 4.4320962e+00 4.0189525e+00 3.7943375e+00 4.7000551e+00 
4.5409269e+00 4.0612006e+00 3.7475562e+00 3.9722979e+00 4.1719819e+00 3.7859347e+00 6.9325418e-01 2.1269358e-01 5.0905001e-01 3.3337914e+00 3.0376046e+00 3.4948415e+00 2.6099670e+00 3.1641230e+00 2.9745020e+00 3.2202056e+00 1.9796375e+00 3.1511769e+00 2.4469125e+00 2.3235342e+00 2.7196435e+00 2.6337042e+00 3.1881331e+00 2.1375243e+00 2.9985516e+00 2.9847499e+00 2.5867906e+00 3.1255136e+00 2.4464131e+00 3.3203249e+00 2.5432298e+00 3.4386456e+00 3.1757436e+00 2.8453413e+00 2.9797458e+00 3.3872988e+00 3.5731577e+00 3.0079222e+00 2.0710306e+00 2.3862284e+00 2.2923690e+00 2.4260574e+00 3.5964347e+00 2.9822786e+00 3.0067893e+00 3.2740296e+00 3.0040546e+00 2.5786309e+00 2.5579141e+00 2.8891491e+00 3.0885642e+00 2.5333642e+00 2.0283816e+00 2.7031874e+00 2.6626548e+00 2.6835197e+00 2.8149627e+00 1.7472675e+00 2.6016025e+00 4.5864727e+00 3.6358644e+00 4.5092329e+00 4.1008711e+00 4.3608329e+00 5.2329560e+00 3.0685922e+00 4.8761643e+00 4.3448624e+00 4.7688607e+00 3.6817961e+00 3.8534030e+00 4.1033021e+00 3.5811154e+00 3.7530357e+00 3.9183591e+00 4.0199092e+00 5.3504583e+00 5.5556442e+00 3.5517562e+00 4.3306939e+00 3.4641481e+00 5.3377017e+00 3.4637450e+00 4.2663890e+00 4.5772917e+00 3.3571533e+00 3.4296698e+00 4.1575709e+00 4.3797497e+00 4.7253427e+00 5.1097682e+00 4.1763898e+00 3.5983434e+00 4.0666282e+00 4.8419079e+00 4.2005634e+00 4.0083071e+00 3.3318481e+00 4.0278210e+00 4.2396851e+00 3.8171357e+00 3.6358644e+00 4.4969460e+00 4.3490518e+00 3.8705614e+00 3.5905415e+00 3.7766172e+00 3.9906340e+00 3.6052686e+00 7.6752131e-01 4.0147421e-01 3.6660608e+00 3.3135273e+00 3.8017611e+00 2.7018605e+00 3.4275679e+00 3.1699903e+00 3.4789136e+00 1.9730403e+00 3.4358679e+00 2.5840983e+00 2.2331309e+00 2.9422991e+00 2.7581254e+00 3.4189217e+00 2.3233512e+00 3.3121677e+00 3.1836200e+00 2.7812918e+00 3.2896000e+00 2.5816639e+00 3.5379744e+00 2.7795823e+00 3.6532797e+00 3.4029480e+00 3.1195329e+00 3.2770643e+00 3.6772238e+00 3.8493740e+00 3.2305582e+00 2.2173292e+00 2.4840285e+00 2.3814525e+00 2.6148507e+00 3.8019334e+00 3.1710836e+00 3.2447990e+00 3.5700242e+00 3.1955870e+00 2.7803619e+00 2.6879415e+00 3.0521985e+00 3.3264150e+00 2.7089627e+00 1.9908167e+00 2.8775912e+00 2.8744403e+00 2.8857836e+00 3.0658390e+00 1.7171798e+00 2.7936066e+00 4.8056033e+00 3.8247106e+00 4.7834029e+00 4.3283723e+00 4.5943507e+00 5.5219241e+00 3.2001457e+00 5.1552806e+00 4.5786596e+00 5.0423887e+00 3.9353963e+00 4.0804944e+00 4.3648743e+00 3.7469906e+00 3.9346929e+00 4.1523445e+00 4.2650653e+00 5.6468786e+00 5.8321617e+00 3.7127205e+00 4.5943003e+00 3.6444925e+00 5.6275915e+00 3.6885686e+00 4.5212945e+00 4.8635596e+00 3.5807904e+00 3.6545040e+00 4.3827087e+00 4.6717461e+00 5.0136174e+00 5.4320857e+00 4.3994790e+00 3.8323332e+00 4.2736523e+00 5.1501184e+00 4.4249262e+00 4.2490074e+00 3.5500567e+00 4.3022895e+00 4.4864974e+00 4.0933482e+00 3.8247106e+00 4.7495571e+00 4.5943072e+00 4.1245629e+00 3.7976741e+00 4.0232438e+00 4.2129603e+00 3.8158144e+00 4.4535192e-01 3.3681634e+00 3.1015495e+00 3.5417515e+00 2.6663854e+00 3.2198557e+00 3.0579528e+00 3.2934163e+00 2.0153916e+00 3.2040320e+00 2.5204039e+00 2.3388377e+00 2.7956674e+00 2.6725726e+00 3.2655357e+00 2.2078644e+00 3.0402181e+00 3.0721050e+00 2.6595270e+00 3.1749624e+00 2.5094912e+00 3.4048516e+00 2.6019240e+00 3.5045178e+00 3.2523394e+00 2.8999277e+00 3.0267460e+00 3.4333346e+00 3.6317584e+00 3.0844423e+00 2.1209506e+00 2.4419061e+00 2.3443191e+00 2.4920874e+00 3.6775470e+00 3.0715602e+00 3.0884019e+00 3.3261336e+00 3.0501817e+00 2.6631094e+00 2.6243758e+00 2.9691171e+00 
3.1659032e+00 2.5983106e+00 2.0538718e+00 2.7808700e+00 2.7473221e+00 2.7650187e+00 2.8804789e+00 1.7660585e+00 2.6784604e+00 4.6691123e+00 3.7183181e+00 4.5689156e+00 4.1821417e+00 4.4377562e+00 5.2873851e+00 3.1455384e+00 4.9362269e+00 4.4131751e+00 4.8285697e+00 3.7508315e+00 3.9249912e+00 4.1665786e+00 3.6588207e+00 3.8312584e+00 3.9914600e+00 4.0953360e+00 5.4042844e+00 5.6094349e+00 3.6203759e+00 4.3940519e+00 3.5472449e+00 5.3896044e+00 3.5318477e+00 4.3383367e+00 4.6370771e+00 3.4283809e+00 3.5084967e+00 4.2335104e+00 4.4348302e+00 4.7765764e+00 5.1494118e+00 4.2515997e+00 3.6735311e+00 4.1503255e+00 4.8803856e+00 4.2802235e+00 4.0874500e+00 3.4119017e+00 4.0856134e+00 4.3069340e+00 3.8658667e+00 3.7183181e+00 4.5670800e+00 4.4180998e+00 3.9298460e+00 3.6561219e+00 3.8459316e+00 4.0712063e+00 3.6910219e+00 3.5300704e+00 3.2300705e+00 3.6894189e+00 2.6906230e+00 3.3402581e+00 3.1455455e+00 3.4142500e+00 1.9871921e+00 3.3364095e+00 2.5801041e+00 2.2579027e+00 2.8953397e+00 2.7040361e+00 3.3706887e+00 2.2848507e+00 3.1889632e+00 3.1641787e+00 2.7405950e+00 3.2351115e+00 2.5567836e+00 3.5066271e+00 2.7031874e+00 3.5985833e+00 3.3547156e+00 3.0245025e+00 3.1656773e+00 3.5695690e+00 3.7624530e+00 3.1847809e+00 2.1665831e+00 2.4678343e+00 2.3636654e+00 2.5680682e+00 3.7710578e+00 3.1606607e+00 3.1985717e+00 3.4665002e+00 3.1244487e+00 2.7540755e+00 2.6724144e+00 3.0392407e+00 3.2747247e+00 2.6671530e+00 2.0066796e+00 2.8554471e+00 2.8427684e+00 2.8549070e+00 2.9928504e+00 1.7230625e+00 2.7611864e+00 4.7742557e+00 3.8047268e+00 4.7018935e+00 4.2892152e+00 4.5488617e+00 5.4303909e+00 3.2037606e+00 5.0724791e+00 4.5217060e+00 4.9623634e+00 3.8707551e+00 4.0296740e+00 4.2915574e+00 3.7321091e+00 3.9154512e+00 4.1022778e+00 4.2113304e+00 5.5520135e+00 5.7453700e+00 3.6849669e+00 4.5212945e+00 3.6308222e+00 5.5330176e+00 3.6335075e+00 4.4607162e+00 4.7770551e+00 3.5299194e+00 3.6126659e+00 4.3390241e+00 4.5771358e+00 4.9175276e+00 5.3118739e+00 4.3561658e+00 3.7812981e+00 4.2455646e+00 5.0340960e+00 4.3872001e+00 4.2015182e+00 3.5126820e+00 4.2176412e+00 4.4249262e+00 3.9985367e+00 3.8047268e+00 4.6887893e+00 4.5358705e+00 4.0502685e+00 3.7475470e+00 3.9619683e+00 4.1767972e+00 3.7895730e+00 6.0611244e-01 2.1845981e-01 1.6212669e+00 5.6769031e-01 1.3103855e+00 7.0437330e-01 2.2923690e+00 4.4651726e-01 1.8497891e+00 2.2196852e+00 1.1283882e+00 1.3099706e+00 9.0827783e-01 1.5790055e+00 3.7427929e-01 1.4018200e+00 1.2701139e+00 1.1341579e+00 1.5133392e+00 1.1134787e+00 1.0264409e+00 8.7202528e-01 9.2264612e-01 6.6432544e-01 4.5470518e-01 4.1449626e-01 4.3456114e-01 1.0085601e+00 1.5838351e+00 1.6415861e+00 1.6742876e+00 1.3140585e+00 1.0496979e+00 1.6013574e+00 1.0054037e+00 3.0546431e-01 1.0168833e+00 1.4293465e+00 1.5774037e+00 1.5278635e+00 9.0252542e-01 1.2994764e+00 2.2231652e+00 1.4317371e+00 1.3207609e+00 1.3224963e+00 8.3649708e-01 2.2607507e+00 1.3421549e+00 1.5412452e+00 1.2539702e+00 1.2643026e+00 1.0324775e+00 1.2342162e+00 1.9387309e+00 2.1209313e+00 1.6105602e+00 1.1912106e+00 1.5832517e+00 7.2486328e-01 8.5585239e-01 9.4009473e-01 1.3873503e+00 1.3945703e+00 1.0313560e+00 8.7720955e-01 2.0658700e+00 2.2655571e+00 1.2460824e+00 1.1834841e+00 1.4368020e+00 2.0378171e+00 7.9878917e-01 1.0960883e+00 1.3102767e+00 8.5105559e-01 9.2480363e-01 1.0805899e+00 1.1043883e+00 1.4313279e+00 1.8006336e+00 1.1235486e+00 7.6625946e-01 1.1633029e+00 1.5390703e+00 1.2493717e+00 9.0965328e-01 1.0182895e+00 8.6983677e-01 1.1880428e+00 9.2095040e-01 1.2539702e+00 1.3335022e+00 1.3109705e+00 
9.5035453e-01 9.2112464e-01 7.6166891e-01 1.1418127e+00 1.1276971e+00 5.6700421e-01 1.1449732e+00 4.0293660e-01 7.3813096e-01 2.1845981e-01 1.7551534e+00 3.4583729e-01 1.2603076e+00 1.7354460e+00 5.3588338e-01 1.0777972e+00 3.8934542e-01 1.0669582e+00 3.0811765e-01 8.0294841e-01 7.8768770e-01 1.0018083e+00 1.0175773e+00 5.5419992e-01 5.9426792e-01 7.3496673e-01 4.8927739e-01 3.4378533e-01 2.5651975e-01 5.2655962e-01 5.4292906e-01 4.4417983e-01 1.1634384e+00 1.1527805e+00 1.2020363e+00 8.1521713e-01 7.2526325e-01 1.0018083e+00 4.1449626e-01 3.2586371e-01 9.0277242e-01 8.3172002e-01 1.0440187e+00 9.7779835e-01 3.2816937e-01 8.1521713e-01 1.7083888e+00 8.6361309e-01 7.3145860e-01 7.3145860e-01 3.6171588e-01 1.7816674e+00 7.6914805e-01 1.6177449e+00 8.3060013e-01 1.4732400e+00 1.1107977e+00 1.3546017e+00 2.2147080e+00 1.5404344e+00 1.8624350e+00 1.3603471e+00 1.7544191e+00 6.8961791e-01 8.7478495e-01 1.0733200e+00 9.5271386e-01 1.0466623e+00 9.9348625e-01 1.0085601e+00 2.3452277e+00 2.5288464e+00 1.0480665e+00 1.3130641e+00 8.9653332e-01 2.3282127e+00 5.8914551e-01 1.2436109e+00 1.5625142e+00 4.8927739e-01 4.8927739e-01 1.1593224e+00 1.3813076e+00 1.7138020e+00 2.1603815e+00 1.1866786e+00 6.4755655e-01 1.1521791e+00 1.8620175e+00 1.2565757e+00 1.0067784e+00 4.8927739e-01 1.0056742e+00 1.2594846e+00 9.3049742e-01 8.3060013e-01 1.4762619e+00 1.3817041e+00 9.4558103e-01 7.9613242e-01 7.7074935e-01 1.0627606e+00 7.0776547e-01 1.5593809e+00 4.8036801e-01 1.2165505e+00 6.1151102e-01 2.2871743e+00 4.0176783e-01 1.7963441e+00 2.1851225e+00 1.0906388e+00 1.2884575e+00 8.0619006e-01 1.6156775e+00 5.0905001e-01 1.3093850e+00 1.2434795e+00 1.0262547e+00 1.4875372e+00 1.0069726e+00 1.0669582e+00 7.4511469e-01 8.2384013e-01 6.9728513e-01 5.3022554e-01 3.0811765e-01 2.5651975e-01 9.2264612e-01 1.6478667e+00 1.6180636e+00 1.6694817e+00 1.3199714e+00 9.2288144e-01 1.5068702e+00 9.2859317e-01 2.4837156e-01 9.3238528e-01 1.3813076e+00 1.5238543e+00 1.4346522e+00 8.1130291e-01 1.2794849e+00 2.2234347e+00 1.3629833e+00 1.2671726e+00 1.2652657e+00 8.1810461e-01 2.3116343e+00 1.2988558e+00 1.3410314e+00 1.1276971e+00 1.0590298e+00 8.2552685e-01 1.0264409e+00 1.7485421e+00 2.0171203e+00 1.4118594e+00 9.7949166e-01 1.3980896e+00 5.7324170e-01 6.6317860e-01 7.4586719e-01 1.2603076e+00 1.2604558e+00 8.7504951e-01 6.6432544e-01 1.8915404e+00 2.0711789e+00 1.1178264e+00 9.9368623e-01 1.3223897e+00 1.8515012e+00 6.6384020e-01 8.9303452e-01 1.1107977e+00 7.2823007e-01 8.1099042e-01 8.7170815e-01 9.0876485e-01 1.2370832e+00 1.6626615e+00 9.2112464e-01 6.2482915e-01 9.7377870e-01 1.3752391e+00 1.0720678e+00 7.0386584e-01 9.0876485e-01 6.8917100e-01 1.0120221e+00 8.0294841e-01 1.1276971e+00 1.1329323e+00 1.1353806e+00 8.1385214e-01 7.7588000e-01 5.8914551e-01 9.8054887e-01 1.0085601e+00 1.0879524e+00 6.2605182e-01 1.2079117e+00 8.2305664e-01 1.1903922e+00 4.4651726e-01 6.5648056e-01 7.4164639e-01 5.2942799e-01 8.9852394e-01 6.4755655e-01 1.3035649e+00 7.7074935e-01 4.8135521e-01 7.7074935e-01 2.5651975e-01 1.1022599e+00 6.8917100e-01 1.0627606e+00 8.6290690e-01 9.7759114e-01 1.1868139e+00 1.3969297e+00 1.4333755e+00 7.6166891e-01 5.6075294e-01 2.5251796e-01 3.7427929e-01 4.4651726e-01 1.1444449e+00 7.7074935e-01 1.1571858e+00 1.3491011e+00 8.2624515e-01 7.0086313e-01 2.0000000e-01 4.4535192e-01 8.9852394e-01 3.7427929e-01 7.7885297e-01 4.1449626e-01 7.0826681e-01 6.1092863e-01 8.2275389e-01 1.0198386e+00 5.0905001e-01 2.2002582e+00 1.1640914e+00 2.2347161e+00 1.6833015e+00 1.9584639e+00 2.9773446e+00 7.2852070e-01 
2.5984158e+00 1.9494155e+00 2.5625921e+00 1.4644662e+00 1.4491244e+00 1.8190688e+00 1.0934620e+00 1.3861754e+00 1.6265426e+00 1.6626615e+00 3.1878246e+00 3.2549253e+00 1.0346741e+00 2.0606771e+00 1.0425476e+00 3.0882196e+00 1.1022599e+00 1.9692383e+00 2.3537589e+00 1.0082605e+00 1.0950112e+00 1.7332099e+00 2.1942739e+00 2.5032087e+00 3.0886055e+00 1.7538274e+00 1.2342162e+00 1.6237100e+00 2.7181432e+00 1.8926658e+00 1.6507294e+00 1.0082605e+00 1.8244836e+00 1.9254808e+00 1.7303440e+00 1.1640914e+00 2.1641182e+00 2.0565627e+00 1.6435752e+00 1.1782910e+00 1.4699978e+00 1.7142546e+00 1.2095267e+00 8.0326782e-01 5.0991930e-01 1.8350577e+00 2.1269358e-01 1.3537729e+00 1.7146525e+00 6.5172743e-01 8.5585239e-01 4.0438741e-01 1.1847335e+00 3.4583729e-01 9.0252542e-01 8.2372435e-01 6.2024833e-01 1.0324775e+00 6.6827038e-01 6.5172743e-01 3.8776762e-01 4.4535192e-01 3.2816937e-01 2.5651975e-01 3.2586371e-01 4.3691963e-01 5.0180477e-01 1.2342162e+00 1.1573546e+00 1.2172454e+00 8.7848692e-01 6.2205176e-01 1.1016264e+00 6.9006418e-01 3.2586371e-01 5.2371571e-01 9.4492923e-01 1.0646687e+00 1.0101422e+00 4.1449626e-01 8.2552685e-01 1.7679545e+00 9.2288144e-01 8.3888121e-01 8.2929029e-01 3.8934542e-01 1.8776878e+00 8.5434758e-01 1.5481649e+00 7.9613242e-01 1.3657247e+00 1.0085601e+00 1.2641849e+00 2.1002817e+00 1.6030661e+00 1.7482192e+00 1.2100024e+00 1.7005893e+00 6.6491075e-01 7.3535471e-01 9.7949166e-01 8.8358844e-01 1.0423677e+00 9.5498315e-01 9.1051084e-01 2.2737459e+00 2.4086493e+00 7.2486328e-01 1.2326306e+00 9.4832302e-01 2.2096958e+00 3.8934542e-01 1.1729895e+00 1.4561933e+00 3.8776762e-01 4.8927739e-01 1.0574300e+00 1.2643026e+00 1.5918956e+00 2.0914667e+00 1.0906388e+00 5.0817745e-01 1.0182895e+00 1.7457596e+00 1.2250414e+00 9.1663180e-01 5.4292906e-01 9.1750357e-01 1.1891470e+00 8.8358844e-01 7.9613242e-01 1.3916739e+00 1.3267389e+00 8.9303452e-01 5.3309112e-01 6.9325418e-01 1.0574013e+00 7.0776547e-01 7.0776547e-01 1.3071453e+00 9.0049692e-01 6.9006418e-01 1.2079117e+00 3.6171588e-01 7.1791510e-01 4.1586001e-01 9.0049692e-01 1.0069726e+00 2.5251796e-01 4.4651726e-01 6.9325418e-01 6.2538346e-01 5.9426792e-01 5.6631629e-01 6.6827038e-01 4.1449626e-01 7.0437330e-01 9.0277242e-01 1.1055069e+00 1.0496979e+00 3.2586371e-01 1.0083666e+00 7.4164639e-01 8.3888121e-01 6.0181382e-01 6.3861009e-01 3.4378533e-01 6.3808075e-01 1.0101422e+00 6.8961791e-01 4.1449626e-01 5.3588338e-01 2.5651975e-01 4.1586001e-01 5.0991930e-01 1.2869134e+00 3.0546431e-01 3.2586371e-01 3.0275928e-01 5.0905001e-01 1.5278635e+00 4.0000000e-01 1.7279861e+00 7.4586719e-01 1.7831878e+00 1.1718516e+00 1.4824233e+00 2.5111349e+00 8.3620494e-01 2.1256928e+00 1.4719311e+00 2.0814452e+00 1.0175773e+00 1.0014633e+00 1.3875139e+00 7.7885297e-01 1.1473003e+00 1.2144845e+00 1.1591754e+00 2.6773585e+00 2.7900071e+00 7.0776547e-01 1.6151673e+00 7.3496673e-01 2.6263773e+00 7.2526325e-01 1.4645804e+00 1.8755806e+00 6.3924842e-01 6.2407309e-01 1.2731262e+00 1.7507664e+00 2.0635966e+00 2.6116811e+00 1.3129189e+00 7.4855857e-01 1.1149070e+00 2.3160147e+00 1.4246028e+00 1.1229843e+00 5.6075294e-01 1.4120836e+00 1.5094575e+00 1.4108494e+00 7.4586719e-01 1.6884234e+00 1.6211869e+00 1.3017208e+00 8.1521713e-01 1.0401425e+00 1.2452704e+00 6.9728513e-01 1.8185955e+00 4.8135521e-01 1.2509218e+00 1.8049926e+00 5.8914551e-01 1.2205493e+00 4.2538717e-01 1.1912106e+00 4.6472023e-01 7.1840099e-01 8.9207714e-01 1.1017858e+00 1.1127329e+00 4.1586001e-01 7.8197925e-01 8.0326782e-01 5.7257017e-01 5.2655962e-01 4.3456114e-01 6.2660376e-01 4.8135521e-01 
4.5581864e-01 1.3318128e+00 1.2468939e+00 1.3144065e+00 9.4935318e-01 6.6384020e-01 9.1075311e-01 3.2586371e-01 4.1449626e-01 1.0130748e+00 8.3280511e-01 1.0906119e+00 9.6204649e-01 3.4583729e-01 9.3296062e-01 1.7901543e+00 8.7170815e-01 7.3805807e-01 7.3805807e-01 5.2655962e-01 1.9041928e+00 8.1521713e-01 1.4138821e+00 7.3805807e-01 1.3166957e+00 9.2264612e-01 1.1533602e+00 2.0690479e+00 1.4700179e+00 1.7092525e+00 1.2231847e+00 1.5870088e+00 5.0592043e-01 7.5791688e-01 9.0575661e-01 9.1750357e-01 9.1802948e-01 8.1304731e-01 8.1638392e-01 2.1978861e+00 2.3802944e+00 1.1107977e+00 1.1386292e+00 7.9878917e-01 2.1900222e+00 6.1092863e-01 1.0480665e+00 1.4148192e+00 5.0991930e-01 3.6171588e-01 9.7825559e-01 1.2593659e+00 1.5912764e+00 2.0615043e+00 1.0056742e+00 5.6700421e-01 1.0137836e+00 1.7695175e+00 1.0597541e+00 8.0619006e-01 3.8934542e-01 8.6513410e-01 1.0755693e+00 8.4050231e-01 7.3805807e-01 1.2832075e+00 1.1947245e+00 8.0660588e-01 8.2105460e-01 5.9426792e-01 8.6983677e-01 5.3309112e-01 1.9083318e+00 6.7975091e-01 4.1449626e-01 1.2452704e+00 1.1763980e+00 1.6420607e+00 7.9016429e-01 1.9365498e+00 1.3172979e+00 1.0653845e+00 1.5684812e+00 8.1304731e-01 1.7169601e+00 1.2768639e+00 1.8804140e+00 1.6311692e+00 1.6315809e+00 1.8424891e+00 2.1489929e+00 2.2038673e+00 1.4613032e+00 8.0587320e-01 6.8961791e-01 6.4704320e-01 9.7949166e-01 1.9250543e+00 1.2802798e+00 1.5824669e+00 2.0485534e+00 1.5790055e+00 1.0078327e+00 8.2305664e-01 1.1498269e+00 1.5838351e+00 1.0137836e+00 1.2418578e-01 1.0230441e+00 1.1119327e+00 1.0941064e+00 1.4719311e+00 3.2816937e-01 1.0165138e+00 2.9338155e+00 1.9158303e+00 3.0455280e+00 2.4635485e+00 2.7430309e+00 3.7921012e+00 1.2632199e+00 3.4105293e+00 2.7619926e+00 3.3261421e+00 2.2045198e+00 2.2598424e+00 2.6204307e+00 1.8330979e+00 2.0701646e+00 2.3622531e+00 2.4525409e+00 3.9619101e+00 4.0743074e+00 1.8315269e+00 2.8475224e+00 1.7388184e+00 3.9054939e+00 1.9111264e+00 2.7377517e+00 3.1510494e+00 1.7964653e+00 1.8350071e+00 2.5277506e+00 2.9970778e+00 3.3196868e+00 3.8532018e+00 2.5453122e+00 2.0312250e+00 2.3887539e+00 3.5269824e+00 2.5986705e+00 2.4210417e+00 1.7228354e+00 2.6125646e+00 2.7062349e+00 2.4839132e+00 1.9158303e+00 2.9502077e+00 2.8139128e+00 2.4180244e+00 1.9947426e+00 2.2550764e+00 2.3956104e+00 1.9332869e+00 1.4468211e+00 1.8027242e+00 7.3851529e-01 9.0658670e-01 5.0180477e-01 1.2418578e+00 2.5651975e-01 1.0022010e+00 8.6361309e-01 7.3851529e-01 1.1055064e+00 7.8197925e-01 6.8961791e-01 4.8927739e-01 5.0270183e-01 3.2352160e-01 2.1269358e-01 2.5651975e-01 4.9857388e-01 6.0611244e-01 1.2633451e+00 1.2342162e+00 1.2794849e+00 9.3824087e-01 7.0776547e-01 1.2014753e+00 7.0429250e-01 2.5651975e-01 6.2482915e-01 1.0329901e+00 1.1593224e+00 1.1069580e+00 5.0180477e-01 8.9712482e-01 1.8394959e+00 1.0181000e+00 9.2095040e-01 9.2047746e-01 4.4417983e-01 1.9314297e+00 9.4103005e-01 1.6328100e+00 9.3238528e-01 1.3969297e+00 1.0389435e+00 1.3327491e+00 2.0961718e+00 1.7103548e+00 1.7405652e+00 1.2330392e+00 1.7474965e+00 7.7919451e-01 8.1810461e-01 1.0613462e+00 1.0417249e+00 1.2331989e+00 1.0974061e+00 9.4125538e-01 2.2568188e+00 2.4127176e+00 8.4050231e-01 1.3145067e+00 1.0960883e+00 2.1973666e+00 5.6075294e-01 1.2221471e+00 1.4467170e+00 5.7324170e-01 6.3977563e-01 1.1341579e+00 1.2436109e+00 1.5826476e+00 2.0476065e+00 1.1847335e+00 5.3665999e-01 1.0391247e+00 1.7521201e+00 1.3293211e+00 9.4492923e-01 6.9369532e-01 1.0019724e+00 1.3083079e+00 1.0406064e+00 9.3238528e-01 1.4580335e+00 1.4387122e+00 1.0576043e+00 7.0233835e-01 8.1304731e-01 
1.1697902e+00 8.2372435e-01 7.6914805e-01 7.2823007e-01 8.7504951e-01 1.0611732e+00 4.5581864e-01 1.5204521e+00 6.6432544e-01 6.5172743e-01 1.0866092e+00 4.5470518e-01 1.0573285e+00 9.0074515e-01 1.3083079e+00 1.0613462e+00 1.2123540e+00 1.4190961e+00 1.6754036e+00 1.6596797e+00 8.9095811e-01 6.1947990e-01 4.2418962e-01 4.8927739e-01 6.0551856e-01 1.2951131e+00 6.2538346e-01 1.0030700e+00 1.5663312e+00 1.1396406e+00 4.5581864e-01 3.2816937e-01 5.3665999e-01 1.0166932e+00 6.0670504e-01 6.9167458e-01 4.4535192e-01 5.6075294e-01 5.3665999e-01 1.0182895e+00 9.1075311e-01 5.0991930e-01 2.2632657e+00 1.2601890e+00 2.4384530e+00 1.8269304e+00 2.0927845e+00 3.1870761e+00 6.4290921e-01 2.8096725e+00 2.1462316e+00 2.6900593e+00 1.5901181e+00 1.6364474e+00 2.0101738e+00 1.1729895e+00 1.4078246e+00 1.7083888e+00 1.8278268e+00 3.3435703e+00 3.4604677e+00 1.2331989e+00 2.2206574e+00 1.0719360e+00 3.3068858e+00 1.3163598e+00 2.0994872e+00 2.5539296e+00 1.1948578e+00 1.1991899e+00 1.8828324e+00 2.4245766e+00 2.7358293e+00 3.2702869e+00 1.8959565e+00 1.4312787e+00 1.7665622e+00 2.9527671e+00 1.9255490e+00 1.7860690e+00 1.0796583e+00 2.0205937e+00 2.0647798e+00 1.9168750e+00 1.2601890e+00 2.3069539e+00 2.1608869e+00 1.8128438e+00 1.3838212e+00 1.6376058e+00 1.7220696e+00 1.2768639e+00 1.2687651e+00 1.0344911e+00 1.5320003e+00 9.7779835e-01 1.8837258e+00 1.2979752e+00 1.0012667e+00 1.3957794e+00 7.2526325e-01 1.6850672e+00 1.2372418e+00 1.7004805e+00 1.4979666e+00 1.5611922e+00 1.7745022e+00 2.0153916e+00 2.0815027e+00 1.3830210e+00 8.1242502e-01 5.8914551e-01 5.7257017e-01 9.5676647e-01 1.7518264e+00 1.2724737e+00 1.6669115e+00 1.9675324e+00 1.4200435e+00 1.1136605e+00 7.1881659e-01 1.0072663e+00 1.5134954e+00 9.3238528e-01 3.2352160e-01 9.5222919e-01 1.1681971e+00 1.1043332e+00 1.4120836e+00 6.2205176e-01 1.0078327e+00 2.8028143e+00 1.7525933e+00 2.8815987e+00 2.2944257e+00 2.5825907e+00 3.6132031e+00 1.1179743e+00 3.2286633e+00 2.5673494e+00 3.2133201e+00 2.1127170e+00 2.0867931e+00 2.4721080e+00 1.6626615e+00 1.9456450e+00 2.2607446e+00 2.2983453e+00 3.8335668e+00 3.8818411e+00 1.6299374e+00 2.7127377e+00 1.6132118e+00 3.7182722e+00 1.7559391e+00 2.6123294e+00 2.9966900e+00 1.6625128e+00 1.7303440e+00 2.3560577e+00 2.8340159e+00 3.1407514e+00 3.7366777e+00 2.3765195e+00 1.8701780e+00 2.1933937e+00 3.3657099e+00 2.5066503e+00 2.2797241e+00 1.6331631e+00 2.4808010e+00 2.5698271e+00 2.3770285e+00 1.7525933e+00 2.8053367e+00 2.6963680e+00 2.2934334e+00 1.8202060e+00 2.1229819e+00 2.3231793e+00 1.8122257e+00 8.5462626e-01 5.0991930e-01 6.2538346e-01 8.0358695e-01 3.7255734e-01 5.3022554e-01 8.2105460e-01 6.0900723e-01 6.2482915e-01 3.0844217e-01 7.9580667e-01 5.4292906e-01 5.0991930e-01 7.0437330e-01 9.7270522e-01 9.9532071e-01 3.0546431e-01 7.9878917e-01 7.2343175e-01 7.8768770e-01 4.2418962e-01 9.0876485e-01 5.2862779e-01 4.4651726e-01 8.5205778e-01 7.4164639e-01 3.2586371e-01 5.7867728e-01 5.3309112e-01 4.1449626e-01 4.5581864e-01 1.2131545e+00 3.8776762e-01 3.2352160e-01 2.5251796e-01 3.2816937e-01 1.3221405e+00 2.8507955e-01 1.8873850e+00 9.2859317e-01 1.8730683e+00 1.4111029e+00 1.6550480e+00 2.6320302e+00 1.0406064e+00 2.2657813e+00 1.6658308e+00 2.1339968e+00 1.0072663e+00 1.1444449e+00 1.4417207e+00 8.9971984e-01 1.1192426e+00 1.2342162e+00 1.3367840e+00 2.7726042e+00 2.9277580e+00 9.9368623e-01 1.6694974e+00 7.8197925e-01 2.7493700e+00 7.5976039e-01 1.5849874e+00 1.9802196e+00 6.4290921e-01 7.1799256e-01 1.4445746e+00 1.8216794e+00 2.1462316e+00 2.6367554e+00 1.4616539e+00 9.2264612e-01 
1.4088394e+00 2.3235034e+00 1.5120955e+00 1.3224963e+00 6.2024833e-01 1.4078246e+00 1.5587730e+00 1.2801437e+00 9.2859317e-01 1.8096161e+00 1.6710566e+00 1.2378278e+00 8.9653332e-01 1.0864449e+00 1.3071453e+00 9.0827783e-01 8.9159388e-01 7.7763126e-01 1.0417249e+00 9.1802948e-01 5.0905001e-01 6.2605182e-01 4.4651726e-01 1.2379511e+00 6.2024833e-01 9.5571254e-01 8.1558458e-01 7.5976039e-01 9.2944046e-01 1.0661822e+00 1.2662457e+00 8.2342214e-01 5.8851328e-01 5.1691876e-01 5.3588338e-01 5.1691876e-01 1.1717125e+00 9.6838716e-01 1.2601890e+00 1.1258723e+00 4.8135521e-01 8.3649708e-01 5.5419992e-01 6.2407309e-01 9.0965328e-01 4.2538717e-01 1.0906388e+00 5.9426792e-01 8.1638392e-01 7.3145860e-01 7.3145860e-01 1.1880428e+00 6.3861009e-01 2.2927296e+00 1.2766755e+00 2.1156916e+00 1.6869465e+00 1.9835684e+00 2.8209672e+00 1.2028939e+00 2.4458200e+00 1.8683030e+00 2.5149752e+00 1.4746001e+00 1.4371043e+00 1.7501772e+00 1.2500343e+00 1.5991931e+00 1.7211928e+00 1.6271057e+00 3.0580852e+00 3.1168283e+00 1.0328064e+00 2.0203543e+00 1.2344562e+00 2.9146252e+00 1.0941064e+00 1.9515265e+00 2.2002582e+00 1.0531192e+00 1.1790011e+00 1.7600233e+00 1.9895190e+00 2.3106402e+00 2.8876509e+00 1.7983401e+00 1.1763719e+00 1.6118154e+00 2.5100676e+00 2.0039716e+00 1.6449456e+00 1.1276917e+00 1.7245185e+00 1.9495298e+00 1.6638124e+00 1.2766755e+00 2.1512175e+00 2.1034605e+00 1.6449189e+00 1.1924295e+00 1.4645804e+00 1.8408873e+00 1.3037063e+00 1.1269972e+00 6.2482915e-01 5.0991930e-01 6.6827038e-01 7.0479928e-01 8.8358844e-01 4.5581864e-01 7.0086313e-01 4.2667565e-01 2.0656129e-01 4.4535192e-01 5.2942799e-01 7.0086313e-01 6.3861009e-01 2.1269358e-01 1.2261087e+00 1.0119857e+00 1.1001291e+00 8.1638392e-01 4.2667565e-01 7.0479928e-01 5.1691876e-01 6.0611244e-01 6.2538346e-01 6.9006418e-01 8.3812833e-01 6.4290921e-01 1.2418578e-01 7.3145860e-01 1.6044563e+00 6.2660376e-01 5.7324170e-01 5.6700421e-01 4.0293660e-01 1.7981158e+00 6.4806901e-01 1.5090287e+00 5.9426792e-01 1.4258804e+00 9.2264612e-01 1.2221471e+00 2.1613095e+00 1.2165505e+00 1.7814077e+00 1.1712156e+00 1.7475837e+00 7.0233835e-01 7.0776547e-01 1.0386594e+00 7.0233835e-01 1.0228981e+00 9.8450810e-01 8.5105559e-01 2.3257048e+00 2.4547248e+00 7.1504098e-01 1.2838690e+00 6.9369532e-01 2.2753334e+00 4.3691963e-01 1.1508502e+00 1.5109753e+00 4.0438741e-01 4.1449626e-01 1.0168833e+00 1.3670543e+00 1.6898941e+00 2.2253038e+00 1.0655560e+00 4.1586001e-01 9.0827783e-01 1.9262937e+00 1.2075315e+00 8.3888121e-01 4.0438741e-01 1.0401425e+00 1.2250414e+00 1.0755693e+00 5.9426792e-01 1.3866792e+00 1.3487634e+00 1.0056742e+00 5.9426792e-01 7.2526325e-01 1.0425476e+00 5.0592043e-01 1.2125198e+00 9.0252542e-01 5.4292906e-01 1.0679144e+00 4.5470518e-01 1.2307737e+00 5.6700421e-01 1.3629833e+00 1.1271488e+00 9.3592296e-01 1.1329323e+00 1.4903933e+00 1.5826638e+00 9.2264612e-01 3.7598397e-01 5.1691876e-01 5.3022554e-01 3.4583729e-01 1.5102079e+00 9.0478973e-01 9.6664346e-01 1.3678655e+00 1.0012667e+00 5.0090417e-01 4.9766035e-01 8.1130291e-01 1.0331736e+00 4.5581864e-01 7.6955924e-01 6.0551856e-01 6.0181382e-01 6.0060595e-01 8.1242502e-01 7.2852070e-01 5.0180477e-01 2.4944334e+00 1.5259640e+00 2.4897635e+00 2.0286682e+00 2.2758462e+00 3.2467454e+00 1.0417249e+00 2.8819633e+00 2.2804674e+00 2.7472449e+00 1.6234861e+00 1.7644184e+00 2.0587064e+00 1.4533724e+00 1.6559784e+00 1.8347926e+00 1.9605308e+00 3.3855928e+00 3.5452222e+00 1.4540815e+00 2.2852232e+00 1.3536716e+00 3.3617477e+00 1.3717027e+00 2.2096171e+00 2.5931780e+00 1.2603076e+00 1.3371180e+00 2.0643410e+00 
2.4233813e+00 2.7521037e+00 3.2246201e+00 2.0784533e+00 1.5405106e+00 2.0088441e+00 2.9099860e+00 2.1140685e+00 1.9448322e+00 1.2330392e+00 2.0122105e+00 2.1687358e+00 1.8353933e+00 1.5259640e+00 2.4321505e+00 2.2783778e+00 1.8251179e+00 1.4795374e+00 1.7068208e+00 1.9049236e+00 1.5165339e+00 1.1004794e+00 9.4753140e-01 9.4125538e-01 1.1763719e+00 8.5105559e-01 6.6432544e-01 7.2526325e-01 6.4290921e-01 3.2816937e-01 1.2418578e-01 4.4535192e-01 6.2024833e-01 7.0479928e-01 1.2172454e+00 1.3021788e+00 1.3289150e+00 9.6141901e-01 8.9366705e-01 1.3003320e+00 7.1840099e-01 3.0275928e-01 8.2654509e-01 1.1056650e+00 1.2497790e+00 1.2234738e+00 6.0611244e-01 9.6141901e-01 1.8661545e+00 1.1149070e+00 1.0038051e+00 1.0038051e+00 5.0991930e-01 1.8886923e+00 1.0132664e+00 1.7427900e+00 1.0573285e+00 1.5462225e+00 1.2230220e+00 1.4700179e+00 2.2554582e+00 1.8183902e+00 1.9191337e+00 1.4359851e+00 1.8399871e+00 8.1558458e-01 9.6664346e-01 1.1754055e+00 1.1548215e+00 1.2523175e+00 1.1229906e+00 1.1149070e+00 2.3868096e+00 2.5734684e+00 1.0665149e+00 1.4148192e+00 1.1763719e+00 2.3591336e+00 6.6317860e-01 1.3545005e+00 1.6178623e+00 6.3735887e-01 7.2526325e-01 1.2709820e+00 1.4169523e+00 1.7425222e+00 2.1449779e+00 1.3015611e+00 7.4777660e-01 1.2601890e+00 1.8513630e+00 1.3897316e+00 1.1185330e+00 7.6625946e-01 1.0919712e+00 1.3783420e+00 1.0120221e+00 1.0573285e+00 1.5860263e+00 1.5022608e+00 1.0597541e+00 8.3060013e-01 8.9095811e-01 1.2107055e+00 9.5498315e-01 5.9426792e-01 8.8835966e-01 7.2486328e-01 4.3456114e-01 6.3108414e-01 7.9580667e-01 5.4292906e-01 8.0619006e-01 1.0003942e+00 1.2057554e+00 1.1282371e+00 4.0147421e-01 1.0482443e+00 8.3812833e-01 9.3049742e-01 6.4290921e-01 6.6432544e-01 2.0000000e-01 4.9766035e-01 1.1016264e+00 8.7202528e-01 4.1312257e-01 6.2660376e-01 4.4651726e-01 5.0180477e-01 5.9426792e-01 1.3172979e+00 3.8776762e-01 3.7427929e-01 3.2816937e-01 6.1151102e-01 1.5338492e+00 4.2667565e-01 1.6536633e+00 6.6827038e-01 1.8195408e+00 1.1798960e+00 1.4588731e+00 2.5552364e+00 7.7039952e-01 2.1764356e+00 1.5179392e+00 2.0659196e+00 1.0072663e+00 1.0165138e+00 1.4077317e+00 7.0523271e-01 9.7441804e-01 1.1290808e+00 1.1879078e+00 2.7003420e+00 2.8251568e+00 8.7478495e-01 1.6113870e+00 5.7257017e-01 2.6756977e+00 7.5976039e-01 1.4611141e+00 1.9290721e+00 6.4290921e-01 5.8851328e-01 1.2509218e+00 1.8216794e+00 2.1226924e+00 2.6556584e+00 1.2741904e+00 8.1527569e-01 1.1396406e+00 2.3658814e+00 1.3219975e+00 1.1377990e+00 4.8036801e-01 1.4418088e+00 1.4695582e+00 1.4097125e+00 6.6827038e-01 1.6762567e+00 1.5624022e+00 1.2731262e+00 8.4812820e-01 1.0423677e+00 1.1235486e+00 6.3808075e-01 7.0328431e-01 2.8507955e-01 9.7377870e-01 3.7598397e-01 8.9971984e-01 6.2538346e-01 6.2988288e-01 8.4591037e-01 1.1042097e+00 1.1949615e+00 5.7867728e-01 6.0121055e-01 4.2418962e-01 4.8036801e-01 2.4837156e-01 1.0588560e+00 6.3735887e-01 8.4050231e-01 1.0216438e+00 6.0900723e-01 3.8776762e-01 3.8934542e-01 3.8934542e-01 6.0900723e-01 2.1269358e-01 1.0100718e+00 3.2586371e-01 3.2816937e-01 3.2816937e-01 4.6472023e-01 1.1765359e+00 3.0546431e-01 2.1603815e+00 1.1833480e+00 2.0696037e+00 1.5733646e+00 1.8844302e+00 2.7913211e+00 1.0279631e+00 2.4069427e+00 1.8096161e+00 2.4013270e+00 1.3194762e+00 1.3641156e+00 1.6852518e+00 1.1847335e+00 1.5344133e+00 1.5901181e+00 1.5133392e+00 2.9601125e+00 3.0929882e+00 9.7994716e-01 1.9359434e+00 1.1341579e+00 2.8969791e+00 1.0267435e+00 1.8174459e+00 2.1353579e+00 9.5498315e-01 1.0067464e+00 1.6752254e+00 1.9600024e+00 2.3015655e+00 2.8161147e+00 1.7177705e+00 
1.0636401e+00 1.5095556e+00 2.5229584e+00 1.8388413e+00 1.5016471e+00 9.4558103e-01 1.6620056e+00 1.8661202e+00 1.6246433e+00 1.1833480e+00 2.0524784e+00 1.9917352e+00 1.5896248e+00 1.1449732e+00 1.3635198e+00 1.6539414e+00 1.1377990e+00 7.8695083e-01 1.0194752e+00 6.9369532e-01 4.4535192e-01 6.2538346e-01 7.1169738e-01 8.2684479e-01 7.5791688e-01 8.9971984e-01 7.0394675e-01 1.0777972e+00 8.9366705e-01 9.7548738e-01 7.3805807e-01 6.9369532e-01 9.9348625e-01 1.2013436e+00 9.4287188e-01 2.1845981e-01 9.1163729e-01 7.8197925e-01 7.4777660e-01 8.0096515e-01 6.3735887e-01 1.5043029e+00 7.0776547e-01 8.7021234e-01 7.8197925e-01 7.0784540e-01 1.6629594e+00 7.2852070e-01 1.7521201e+00 7.5705927e-01 1.5812904e+00 1.1798960e+00 1.4306494e+00 2.2994849e+00 1.3047221e+00 1.9342059e+00 1.3259654e+00 2.0165210e+00 1.0919404e+00 8.7720955e-01 1.2180145e+00 7.1881659e-01 1.0466623e+00 1.2389598e+00 1.1426203e+00 2.5872805e+00 2.5766735e+00 5.0817745e-01 1.4924169e+00 8.3060013e-01 2.3982377e+00 5.8914551e-01 1.4728952e+00 1.7209381e+00 6.3808075e-01 8.3649708e-01 1.1916257e+00 1.5183917e+00 1.7983401e+00 2.4620092e+00 1.2174316e+00 7.4549115e-01 1.1136343e+00 1.9965599e+00 1.5255331e+00 1.1891470e+00 8.2384013e-01 1.2304904e+00 1.3937115e+00 1.1842231e+00 7.5705927e-01 1.6132118e+00 1.5725854e+00 1.1127329e+00 5.8914551e-01 9.8054887e-01 1.4089719e+00 9.0521488e-01 1.1043332e+00 5.3665999e-01 1.1040512e+00 8.6137722e-01 8.5335130e-01 1.0692258e+00 1.3395518e+00 1.4121163e+00 7.2343175e-01 4.0438741e-01 1.4096146e-01 2.1845981e-01 2.5251796e-01 1.2340567e+00 7.2852070e-01 1.0216438e+00 1.2599182e+00 7.7360126e-01 5.1607523e-01 2.1269358e-01 5.0270183e-01 8.3345577e-01 2.1845981e-01 7.4893123e-01 3.4378533e-01 5.3022554e-01 4.5581864e-01 6.9167458e-01 9.4080461e-01 3.4583729e-01 2.3056888e+00 1.2961380e+00 2.2790932e+00 1.7645599e+00 2.0520955e+00 3.0186066e+00 8.9827435e-01 2.6386155e+00 2.0191749e+00 2.5992685e+00 1.4843324e+00 1.5325189e+00 1.8691652e+00 1.2554784e+00 1.5582387e+00 1.7070813e+00 1.7171798e+00 3.2008583e+00 3.3121677e+00 1.1313840e+00 2.1147926e+00 1.1879078e+00 3.1280700e+00 1.1681971e+00 2.0145868e+00 2.3728666e+00 1.0720678e+00 1.1437669e+00 1.8347926e+00 2.2027051e+00 2.5315934e+00 3.0688850e+00 1.8636112e+00 1.2768639e+00 1.7126039e+00 2.7381221e+00 1.9739212e+00 1.7039473e+00 1.0573285e+00 1.8507968e+00 2.0095044e+00 1.7591313e+00 1.2961380e+00 2.2339736e+00 2.1358764e+00 1.7106141e+00 1.2731262e+00 1.5241199e+00 1.7828037e+00 1.2869134e+00 8.7720955e-01 7.4777660e-01 6.5223271e-01 7.1881659e-01 7.6914805e-01 9.3999899e-01 8.0619006e-01 4.2418962e-01 1.4104707e+00 1.2144845e+00 1.3128167e+00 1.0056742e+00 5.3665999e-01 5.6075294e-01 3.4583729e-01 8.1130291e-01 9.7730901e-01 7.8197925e-01 9.9089002e-01 8.0353565e-01 4.3691963e-01 9.6095130e-01 1.7110336e+00 7.7033318e-01 7.5196795e-01 7.0776547e-01 6.5648056e-01 1.8915404e+00 7.9878917e-01 1.2730931e+00 5.3022554e-01 1.4349259e+00 8.3620494e-01 1.0733200e+00 2.1767273e+00 1.0960883e+00 1.8048569e+00 1.2035173e+00 1.6539414e+00 6.2482915e-01 7.0523271e-01 1.0184370e+00 7.1169738e-01 6.6432544e-01 7.0480730e-01 8.1527569e-01 2.3116343e+00 2.4505705e+00 1.0085601e+00 1.2063335e+00 4.5581864e-01 2.3022338e+00 5.6700421e-01 1.0655560e+00 1.5550492e+00 4.4417983e-01 2.5251796e-01 8.8358844e-01 1.4559276e+00 1.7532140e+00 2.2755980e+00 8.9653332e-01 5.5160819e-01 9.1163729e-01 1.9862884e+00 9.1163729e-01 7.6752131e-01 2.0656129e-01 1.0632598e+00 1.0516761e+00 1.0391247e+00 5.3022554e-01 1.2756158e+00 1.1406052e+00 8.7720955e-01 
7.3851529e-01 6.5633874e-01 7.0776547e-01 3.2352160e-01 9.1273187e-01 7.0043186e-01 3.7427929e-01 5.7324170e-01 9.3615100e-01 1.0733200e+00 5.0991930e-01 5.9426792e-01 6.5633874e-01 6.7975091e-01 3.0811765e-01 1.1056650e+00 7.7360126e-01 7.0429250e-01 8.2552685e-01 5.7257017e-01 5.0905001e-01 6.1968386e-01 6.5223271e-01 6.0611244e-01 3.2586371e-01 1.2028939e+00 5.0905001e-01 4.2667565e-01 4.1449626e-01 3.0546431e-01 1.2470767e+00 4.0147421e-01 2.1213832e+00 1.1521791e+00 2.0070710e+00 1.6126950e+00 1.8637576e+00 2.7490677e+00 1.2370832e+00 2.3910690e+00 1.8274132e+00 2.3004229e+00 1.1979861e+00 1.3368881e+00 1.5972416e+00 1.1093572e+00 1.3693737e+00 1.4644753e+00 1.5211725e+00 2.9013543e+00 3.0559175e+00 1.0590298e+00 1.8374244e+00 1.0423677e+00 2.8588399e+00 9.4309624e-01 1.7735968e+00 2.0979142e+00 8.5205778e-01 9.4287188e-01 1.6546836e+00 1.9109434e+00 2.2424413e+00 2.7118839e+00 1.6774684e+00 1.1029298e+00 1.6007141e+00 2.3898698e+00 1.7557336e+00 1.5191033e+00 8.5462626e-01 1.5344007e+00 1.7571295e+00 1.3898545e+00 1.1521791e+00 1.9987470e+00 1.8815752e+00 1.4085850e+00 1.0646687e+00 1.2740417e+00 1.5577803e+00 1.1296247e+00 4.0176783e-01 6.5223271e-01 6.3977563e-01 5.3022554e-01 5.7324170e-01 5.2574978e-01 1.4438552e+00 1.2221471e+00 1.3131724e+00 1.0406064e+00 3.4583729e-01 9.5943875e-01 9.2859317e-01 6.5172743e-01 5.1607523e-01 9.7548738e-01 1.0611732e+00 8.6137722e-01 5.3665999e-01 9.4854455e-01 1.8311457e+00 8.7420176e-01 8.7170815e-01 8.4050231e-01 6.5223271e-01 2.0303919e+00 8.9917007e-01 1.3866318e+00 5.7867728e-01 1.2002762e+00 7.4740267e-01 1.0440187e+00 1.9213397e+00 1.4087466e+00 1.5433565e+00 9.2836103e-01 1.6392533e+00 7.7360126e-01 5.0592043e-01 8.5585239e-01 6.8961791e-01 9.5035453e-01 9.5498315e-01 7.0776547e-01 2.1812146e+00 2.2081369e+00 3.7427929e-01 1.1335961e+00 7.7885297e-01 2.0291151e+00 3.2352160e-01 1.0661822e+00 1.3165513e+00 3.7598397e-01 5.3588338e-01 8.2305664e-01 1.1437730e+00 1.4415965e+00 2.0902718e+00 8.7848692e-01 3.2352160e-01 7.0479928e-01 1.6865203e+00 1.1904611e+00 7.5791688e-01 5.5492130e-01 8.9207714e-01 1.0805899e+00 9.6271042e-01 5.7867728e-01 1.2256881e+00 1.2481462e+00 8.8358844e-01 4.0147421e-01 6.4405773e-01 1.0887986e+00 5.9426792e-01 4.4651726e-01 5.4292906e-01 7.0437330e-01 7.0776547e-01 3.2816937e-01 1.2134101e+00 9.8820253e-01 1.0733200e+00 8.1099042e-01 4.9857388e-01 7.2172678e-01 6.5223271e-01 6.3808075e-01 5.3665999e-01 6.9369532e-01 8.2305664e-01 6.2482915e-01 2.5251796e-01 7.1799256e-01 1.5895397e+00 6.2205176e-01 5.7257017e-01 5.6769031e-01 4.0438741e-01 1.7935777e+00 6.4755655e-01 1.6267976e+00 7.4777660e-01 1.4807336e+00 9.7270522e-01 1.3173487e+00 2.1839601e+00 1.2277129e+00 1.7938033e+00 1.1948932e+00 1.8448199e+00 8.7383925e-01 8.2305664e-01 1.1418127e+00 8.4591037e-01 1.2153720e+00 1.1640914e+00 9.1163729e-01 2.3638833e+00 2.4815883e+00 6.3861009e-01 1.3946921e+00 8.5434758e-01 2.2902807e+00 6.1151102e-01 1.2452704e+00 1.5325394e+00 6.0121055e-01 6.1092863e-01 1.1228379e+00 1.3752705e+00 1.7103060e+00 2.2560685e+00 1.1879078e+00 4.5470518e-01 9.0454394e-01 1.9729066e+00 1.3650300e+00 9.0521488e-01 6.0670504e-01 1.1454006e+00 1.3674559e+00 1.2262704e+00 7.4777660e-01 1.4820085e+00 1.4947429e+00 1.1729895e+00 7.3145860e-01 8.7720955e-01 1.2163831e+00 6.5633874e-01 2.1845981e-01 5.6769031e-01 7.4777660e-01 4.2538717e-01 9.5099818e-01 9.7994716e-01 1.0119857e+00 6.5223271e-01 8.3888121e-01 1.0038051e+00 5.9426792e-01 4.6472023e-01 6.0121055e-01 8.0326782e-01 9.2836103e-01 9.0876485e-01 3.7598397e-01 6.3861009e-01 
1.5602029e+00 8.0326782e-01 7.0129382e-01 7.0043186e-01 2.0000000e-01 1.6208239e+00 7.0437330e-01 1.8619092e+00 9.6271042e-01 1.6849072e+00 1.3188999e+00 1.5860263e+00 2.4084158e+00 1.5142414e+00 2.0543079e+00 1.5230852e+00 1.9980352e+00 9.4375082e-01 1.0588560e+00 1.3035649e+00 1.0035600e+00 1.2499342e+00 1.2459608e+00 1.2225634e+00 2.5586145e+00 2.7210925e+00 8.9366705e-01 1.5500052e+00 1.0014633e+00 2.5139485e+00 6.9369532e-01 1.4790710e+00 1.7580510e+00 6.2660376e-01 7.0429250e-01 1.3804167e+00 1.5625881e+00 1.8966943e+00 2.3518757e+00 1.4139741e+00 8.0358695e-01 1.3075101e+00 2.0455018e+00 1.5153654e+00 1.2234738e+00 6.6539428e-01 1.2342162e+00 1.5049644e+00 1.1591754e+00 9.6271042e-01 1.7107332e+00 1.6328100e+00 1.1880428e+00 8.3812833e-01 1.0106392e+00 1.3267389e+00 8.9767734e-01 4.2538717e-01 6.2024833e-01 6.0181382e-01 1.1431021e+00 1.1948932e+00 1.2269747e+00 8.6361309e-01 8.2552685e-01 1.2002640e+00 6.5223271e-01 3.0811765e-01 7.1462831e-01 1.0067784e+00 1.1396406e+00 1.1147518e+00 5.0817745e-01 8.5335130e-01 1.7711504e+00 1.0085601e+00 9.0454394e-01 9.0277242e-01 4.0438741e-01 1.8140813e+00 9.1075311e-01 1.7412567e+00 9.8054887e-01 1.5527694e+00 1.2155004e+00 1.4692412e+00 2.2702600e+00 1.7126039e+00 1.9279661e+00 1.4238090e+00 1.8539569e+00 8.1558458e-01 9.5035453e-01 1.1763980e+00 1.0621081e+00 1.2047214e+00 1.1205013e+00 1.1134787e+00 2.4108292e+00 2.5851693e+00 9.6095130e-01 1.4178113e+00 1.0879524e+00 2.3751496e+00 6.0900723e-01 1.3570688e+00 1.6277043e+00 5.7015910e-01 6.6491075e-01 1.2652657e+00 1.4292566e+00 1.7566567e+00 2.1846001e+00 1.2961380e+00 7.1840099e-01 1.2329148e+00 1.8795815e+00 1.3897316e+00 1.1149070e+00 6.8757066e-01 1.0960883e+00 1.3785366e+00 1.0168833e+00 9.8054887e-01 1.5871961e+00 1.5043071e+00 1.0597541e+00 7.7033318e-01 8.8861541e-01 1.2058675e+00 8.9134001e-01 3.4583729e-01 8.1130291e-01 1.5090287e+00 1.4644753e+00 1.5150043e+00 1.1847335e+00 8.1385214e-01 1.4041085e+00 8.9917007e-01 3.0811765e-01 6.6539428e-01 1.2643026e+00 1.3836712e+00 1.3112758e+00 7.0784540e-01 1.1353806e+00 2.0777059e+00 1.2396136e+00 1.1498269e+00 1.1474460e+00 6.9006418e-01 2.1775976e+00 1.1752673e+00 1.4613032e+00 1.0391247e+00 1.1810170e+00 8.7504951e-01 1.1390131e+00 1.8670836e+00 1.9048338e+00 1.5205305e+00 1.0228981e+00 1.5676403e+00 6.7975091e-01 6.6539428e-01 8.7175869e-01 1.1533602e+00 1.2459608e+00 9.7730901e-01 7.5082357e-01 2.0536508e+00 2.1838261e+00 8.9095811e-01 1.1306949e+00 1.2394907e+00 1.9665910e+00 5.6769031e-01 1.0425476e+00 1.2324706e+00 6.4704320e-01 7.3851529e-01 9.5476489e-01 1.0198386e+00 1.3510699e+00 1.8411319e+00 1.0100718e+00 5.2942799e-01 9.3801395e-01 1.5112621e+00 1.2002762e+00 7.7763126e-01 8.2899253e-01 8.2305664e-01 1.1377990e+00 9.1663180e-01 1.0391247e+00 1.2653669e+00 1.2757312e+00 9.2288144e-01 6.4405773e-01 6.6827038e-01 1.0851476e+00 9.3048953e-01 7.7074935e-01 1.6569692e+00 1.5391185e+00 1.6134578e+00 1.2794849e+00 7.1504098e-01 1.3197776e+00 7.9613242e-01 3.2586371e-01 8.6165877e-01 1.2653669e+00 1.4028652e+00 1.2701139e+00 6.6384020e-01 1.2172454e+00 2.1489775e+00 1.2262704e+00 1.1578646e+00 1.1452867e+00 7.9613242e-01 2.2808589e+00 1.1959482e+00 1.1500393e+00 9.1075311e-01 9.3999899e-01 6.4806901e-01 8.5434758e-01 1.6806723e+00 1.8184542e+00 1.3334814e+00 8.5205778e-01 1.2702636e+00 3.4583729e-01 4.3456114e-01 5.6700421e-01 1.0389435e+00 1.0122141e+00 6.4290921e-01 5.0905001e-01 1.8419636e+00 1.9902374e+00 9.3801395e-01 8.1810461e-01 1.1069580e+00 1.7940242e+00 4.4651726e-01 7.4740267e-01 1.0346741e+00 5.1691876e-01 
6.0121055e-01 6.6827038e-01 8.5205778e-01 1.1776640e+00 1.6778021e+00 7.0776547e-01 4.2667565e-01 7.8695083e-01 1.3400806e+00 8.6165877e-01 5.3022554e-01 7.0437330e-01 5.0592043e-01 8.1273630e-01 6.0670504e-01 9.1075311e-01 9.7270522e-01 9.4352681e-01 6.0551856e-01 5.7257017e-01 3.4378533e-01 7.5705927e-01 8.0064372e-01 1.0450018e+00 8.4812820e-01 9.3847194e-01 6.2988288e-01 6.0611244e-01 6.0060595e-01 5.0090417e-01 7.0784540e-01 6.2538346e-01 5.0592043e-01 6.6932542e-01 5.5492130e-01 1.5422108e-01 5.6075294e-01 1.4235605e+00 4.6472023e-01 4.2418962e-01 3.8776762e-01 2.8192292e-01 1.5978583e+00 4.5581864e-01 1.6256459e+00 6.5633874e-01 1.5986180e+00 1.1106412e+00 1.3708966e+00 2.3519748e+00 1.1147518e+00 1.9798165e+00 1.3654173e+00 1.8856245e+00 7.7033318e-01 8.5335130e-01 1.1771643e+00 6.8076724e-01 9.7270522e-01 1.0165138e+00 1.0391247e+00 2.5081056e+00 2.6437138e+00 7.6716823e-01 1.4120836e+00 6.1947990e-01 2.4692682e+00 4.8927739e-01 1.3077572e+00 1.7030709e+00 3.8934542e-01 4.4651726e-01 1.1594648e+00 1.5551984e+00 1.8760773e+00 2.3977345e+00 1.1868139e+00 6.2024833e-01 1.1056650e+00 2.0821572e+00 1.2794849e+00 1.0244319e+00 3.7427929e-01 1.1646003e+00 1.3139135e+00 1.1119327e+00 6.5633874e-01 1.5344007e+00 1.4333755e+00 1.0386594e+00 6.3735887e-01 8.2372435e-01 1.0901359e+00 6.2081167e-01 3.4583729e-01 2.8192292e-01 4.1586001e-01 1.6237100e+00 1.0540105e+00 1.1816401e+00 1.4110536e+00 9.8450810e-01 6.6432544e-01 5.3665999e-01 9.0454394e-01 1.1389644e+00 5.0905001e-01 7.1799256e-01 7.1504098e-01 7.3813096e-01 7.2783368e-01 8.7021234e-01 6.9006418e-01 6.2482915e-01 2.6619364e+00 1.6754669e+00 2.5821869e+00 2.1421834e+00 2.4107926e+00 3.3209792e+00 1.2036484e+00 2.9531363e+00 2.3720162e+00 2.8824828e+00 1.7671769e+00 1.8842354e+00 2.1714345e+00 1.6176764e+00 1.8723516e+00 2.0134817e+00 2.0676751e+00 3.4808493e+00 3.6264553e+00 1.5230852e+00 2.4135751e+00 1.5351194e+00 3.4281547e+00 1.4948868e+00 2.3378347e+00 2.6680945e+00 1.3977032e+00 1.4832928e+00 2.1976523e+00 2.4795532e+00 2.8157449e+00 3.2956170e+00 2.2214438e+00 1.6336229e+00 2.1064881e+00 2.9775369e+00 2.2994849e+00 2.0603946e+00 1.3916739e+00 2.1189748e+00 2.3204945e+00 1.9672068e+00 1.6754669e+00 2.5635110e+00 2.4436082e+00 1.9753271e+00 1.6077195e+00 1.8374244e+00 2.0981613e+00 1.6585806e+00 1.2418578e-01 3.7598397e-01 1.3405045e+00 8.3812833e-01 1.1437669e+00 1.3915412e+00 8.9095811e-01 6.2538346e-01 2.5251796e-01 6.0611244e-01 9.6791960e-01 3.4583729e-01 6.2205176e-01 4.5581864e-01 6.5223271e-01 5.7867728e-01 8.2619017e-01 8.2654509e-01 4.6472023e-01 2.4061696e+00 1.3868130e+00 2.3985329e+00 1.8751958e+00 2.1591630e+00 3.1397173e+00 8.9852394e-01 2.7599154e+00 2.1335771e+00 2.7193241e+00 1.6021202e+00 1.6415861e+00 1.9851797e+00 1.3336069e+00 1.6224878e+00 1.8082080e+00 1.8350829e+00 3.3292261e+00 3.4297053e+00 1.2340567e+00 2.2298076e+00 1.2654843e+00 3.2489933e+00 1.2770118e+00 2.1337366e+00 2.4988032e+00 1.1786349e+00 1.2545301e+00 1.9393053e+00 2.3288114e+00 2.6536325e+00 3.2010862e+00 1.9648876e+00 1.3964978e+00 1.8188234e+00 2.8588023e+00 2.0766308e+00 1.8216743e+00 1.1634384e+00 1.9698860e+00 2.1147926e+00 1.8660999e+00 1.3868130e+00 2.3472906e+00 2.2417376e+00 1.8131734e+00 1.3752391e+00 1.6372749e+00 1.8856245e+00 1.3922071e+00 4.0176783e-01 1.4467170e+00 9.3049742e-01 1.2002762e+00 1.4411886e+00 9.4375082e-01 6.6432544e-01 3.7427929e-01 7.0784540e-01 1.0466623e+00 4.0176783e-01 5.6700421e-01 5.5492130e-01 6.9728513e-01 6.4405773e-01 8.7170815e-01 7.3535471e-01 5.3309112e-01 2.5193321e+00 1.5039793e+00 
2.4895662e+00 1.9791576e+00 2.2673478e+00 3.2268480e+00 1.0014633e+00 2.8466240e+00 2.2303173e+00 2.8133325e+00 1.6962564e+00 1.7458338e+00 2.0805039e+00 1.4552205e+00 1.7454671e+00 1.9165907e+00 1.9332869e+00 3.4131779e+00 3.5208177e+00 1.3379696e+00 2.3275025e+00 1.3866044e+00 3.3340853e+00 1.3784233e+00 2.2317775e+00 2.5822073e+00 1.2828332e+00 1.3595018e+00 2.0485193e+00 2.4057315e+00 2.7355566e+00 3.2722484e+00 2.0762069e+00 1.4905436e+00 1.9191337e+00 2.9375895e+00 2.1864773e+00 1.9213461e+00 1.2702636e+00 2.0582667e+00 2.2206505e+00 1.9521784e+00 1.5039793e+00 2.4497583e+00 2.3480580e+00 1.9124077e+00 1.4817438e+00 1.7372199e+00 1.9937367e+00 1.5016471e+00 1.2122249e+00 6.7975091e-01 8.4050231e-01 1.0796583e+00 6.6539428e-01 3.4583729e-01 3.2816937e-01 5.2942799e-01 7.3145860e-01 1.2418578e-01 9.1163729e-01 3.2586371e-01 3.7427929e-01 3.2816937e-01 5.0592043e-01 1.0122141e+00 2.1845981e-01 2.2481791e+00 1.2631020e+00 2.1874158e+00 1.7295419e+00 1.9965608e+00 2.9333950e+00 1.0072663e+00 2.5632712e+00 1.9670002e+00 2.4858980e+00 1.3655398e+00 1.4724669e+00 1.7716601e+00 1.2125198e+00 1.4903113e+00 1.6105641e+00 1.6572339e+00 3.0940465e+00 3.2344170e+00 1.1332978e+00 2.0129643e+00 1.1341579e+00 3.0445772e+00 1.0864449e+00 1.9287276e+00 2.2802541e+00 9.8820253e-01 1.0688498e+00 1.7838044e+00 2.1039999e+00 2.4365934e+00 2.9349538e+00 1.8084630e+00 1.2266837e+00 1.7026843e+00 2.6138892e+00 1.8911946e+00 1.6476803e+00 9.7949166e-01 1.7305789e+00 1.9168750e+00 1.6065247e+00 1.2631020e+00 2.1541966e+00 2.0395401e+00 1.5896248e+00 1.1996741e+00 1.4306494e+00 1.6928004e+00 1.2436109e+00 7.5791688e-01 8.1242502e-01 7.6625946e-01 7.5976039e-01 1.0289803e+00 1.1332978e+00 7.9613242e-01 5.3665999e-01 1.1149070e+00 1.9007091e+00 9.2836103e-01 9.3610001e-01 9.1858284e-01 8.1638392e-01 2.1493214e+00 1.0132664e+00 1.1680362e+00 3.2352160e-01 1.2372418e+00 5.4292906e-01 8.7170815e-01 1.9366254e+00 1.1486378e+00 1.5564198e+00 8.7420176e-01 1.5682049e+00 6.6491075e-01 4.5470518e-01 8.8358844e-01 4.5581864e-01 8.0326782e-01 7.9878917e-01 5.9426792e-01 2.1460410e+00 2.1931311e+00 5.0180477e-01 1.0950112e+00 5.0592043e-01 2.0545952e+00 3.4378533e-01 9.3923979e-01 1.3512935e+00 3.4583729e-01 3.4583729e-01 6.6539428e-01 1.2670555e+00 1.5369942e+00 2.1465859e+00 7.2526325e-01 3.0546431e-01 5.0991930e-01 1.8206746e+00 9.8054887e-01 5.7015910e-01 3.8776762e-01 9.6664346e-01 9.9089002e-01 1.0262547e+00 3.2352160e-01 1.1127329e+00 1.1166017e+00 8.7848692e-01 3.8934542e-01 5.8914551e-01 8.8062848e-01 3.2586371e-01 6.4755655e-01 1.3011270e+00 1.0122141e+00 4.2538717e-01 6.2660376e-01 4.4651726e-01 7.0086313e-01 6.3735887e-01 1.2926374e+00 4.0176783e-01 4.2288438e-01 3.8934542e-01 8.0619006e-01 1.5230852e+00 4.6472023e-01 1.6933635e+00 7.0233835e-01 1.9584922e+00 1.2594846e+00 1.5411691e+00 2.6785768e+00 6.2605182e-01 2.3003744e+00 1.6284481e+00 2.1882128e+00 1.1729895e+00 1.1500393e+00 1.5577059e+00 7.1881659e-01 9.8985697e-01 1.2389598e+00 1.3108618e+00 2.8217641e+00 2.9357847e+00 9.3026633e-01 1.7457596e+00 5.7867728e-01 2.7994225e+00 9.3610001e-01 1.5801828e+00 2.0692197e+00 8.2384013e-01 7.4740267e-01 1.3410314e+00 1.9783833e+00 2.2683159e+00 2.8062463e+00 1.3610783e+00 9.7249562e-01 1.1868139e+00 2.5244698e+00 1.3852951e+00 1.2460824e+00 6.3808075e-01 1.6077195e+00 1.5873000e+00 1.5826476e+00 7.0233835e-01 1.7831878e+00 1.6667819e+00 1.4276261e+00 9.9519977e-01 1.1984588e+00 1.1903343e+00 7.0386584e-01 7.1840099e-01 1.1107977e+00 5.8624446e-01 9.8495853e-01 8.7504951e-01 4.1586001e-01 8.7720955e-01 
1.5824669e+00 7.5976039e-01 5.5160819e-01 5.7741073e-01 5.4292906e-01 1.6736318e+00 6.7975091e-01 1.5883552e+00 8.2552685e-01 1.5948732e+00 1.1332978e+00 1.3595997e+00 2.3503762e+00 1.2554784e+00 1.9862884e+00 1.4596621e+00 1.8378727e+00 7.2852070e-01 9.6204649e-01 1.1697902e+00 9.6664346e-01 9.6271042e-01 9.5676647e-01 1.0496979e+00 2.4751922e+00 2.6546397e+00 1.2224367e+00 1.3843268e+00 7.2343175e-01 2.4751922e+00 7.5082357e-01 1.2832075e+00 1.7000773e+00 6.2988288e-01 5.0592043e-01 1.1833351e+00 1.5613251e+00 1.8886923e+00 2.3645560e+00 1.2016233e+00 7.5791688e-01 1.2125198e+00 2.0748074e+00 1.2155370e+00 1.0244319e+00 4.5470518e-01 1.1485394e+00 1.2770118e+00 1.0720678e+00 8.2552685e-01 1.5113992e+00 1.3835368e+00 1.0035600e+00 9.5571254e-01 8.2234151e-01 1.0120221e+00 6.5223271e-01 8.3888121e-01 1.1486378e+00 1.2994764e+00 1.2307737e+00 6.0181382e-01 1.0483827e+00 1.9867752e+00 1.1408504e+00 1.0391247e+00 1.0361698e+00 5.7867728e-01 2.0669733e+00 1.0646687e+00 1.4623898e+00 9.5866719e-01 1.2497790e+00 9.3048953e-01 1.1765359e+00 1.9666356e+00 1.8175297e+00 1.6242657e+00 1.1520347e+00 1.5603665e+00 5.7324170e-01 7.0233835e-01 8.8887100e-01 1.0919404e+00 1.1355826e+00 8.9712482e-01 8.1385214e-01 2.1052360e+00 2.2845234e+00 1.0166932e+00 1.1341579e+00 1.1332978e+00 2.0738150e+00 5.3309112e-01 1.0588560e+00 1.3224963e+00 5.5492130e-01 6.2538346e-01 9.8450810e-01 1.1271488e+00 1.4561933e+00 1.8911946e+00 1.0230441e+00 5.2574978e-01 1.0056742e+00 1.5916843e+00 1.1355826e+00 8.2105460e-01 7.1504098e-01 8.1527569e-01 1.1176720e+00 8.2899253e-01 9.5866719e-01 1.2943100e+00 1.2429818e+00 8.5205778e-01 7.0233835e-01 6.2660376e-01 9.8054887e-01 8.3649708e-01 8.7822463e-01 8.2899253e-01 8.1099042e-01 7.0826681e-01 5.8914551e-01 1.5042268e+00 7.3813096e-01 8.1558458e-01 7.4855857e-01 6.0121055e-01 1.6260946e+00 7.0386584e-01 1.8605327e+00 8.8503502e-01 1.6493191e+00 1.2601890e+00 1.5390703e+00 2.3592515e+00 1.4088394e+00 1.9940473e+00 1.4245508e+00 2.0716002e+00 1.1004436e+00 9.8820253e-01 1.2927814e+00 9.0056222e-01 1.2208301e+00 1.3194807e+00 1.1996741e+00 2.6153308e+00 2.6539963e+00 6.2538346e-01 1.5687169e+00 9.5271386e-01 2.4562038e+00 6.6491075e-01 1.5249255e+00 1.7538274e+00 6.6539428e-01 8.2619017e-01 1.3131724e+00 1.5409345e+00 1.8470010e+00 2.4533073e+00 1.3504603e+00 7.7039952e-01 1.2057554e+00 2.0352149e+00 1.6006330e+00 1.2331989e+00 8.0660588e-01 1.2747177e+00 1.5040391e+00 1.2426449e+00 8.8503502e-01 1.7019078e+00 1.6700310e+00 1.2144845e+00 7.4855857e-01 1.0401425e+00 1.4600567e+00 9.3296062e-01 5.0180477e-01 4.4651726e-01 6.2149089e-01 4.1586001e-01 1.0078327e+00 3.0275928e-01 1.4096146e-01 1.4096146e-01 6.0611244e-01 1.1536782e+00 2.0656129e-01 2.0491051e+00 1.0646687e+00 2.0979729e+00 1.5528443e+00 1.8279176e+00 2.8469870e+00 8.2234151e-01 2.4692682e+00 1.8399871e+00 2.3632803e+00 1.2493717e+00 1.3312249e+00 1.6756749e+00 1.0425476e+00 1.3092012e+00 1.4505265e+00 1.5123788e+00 2.9884772e+00 3.1361386e+00 1.0755693e+00 1.9017004e+00 9.3801395e-01 2.9635613e+00 9.8054887e-01 1.7833384e+00 2.1970231e+00 8.6513410e-01 8.9742724e-01 1.6159903e+00 2.0524973e+00 2.3760856e+00 2.8811560e+00 1.6399646e+00 1.0934620e+00 1.5205305e+00 2.5867433e+00 1.6928004e+00 1.4836711e+00 7.9580667e-01 1.6659943e+00 1.7839298e+00 1.5792930e+00 1.0646687e+00 2.0113485e+00 1.8901379e+00 1.5067717e+00 1.0950112e+00 1.3129189e+00 1.4875372e+00 1.0389435e+00 4.0293660e-01 8.0499049e-01 3.0546431e-01 7.8197925e-01 2.5251796e-01 5.1691876e-01 4.2538717e-01 7.4740267e-01 1.0181000e+00 3.2586371e-01 
2.1717162e+00 1.1533602e+00 2.2234347e+00 1.6690840e+00 1.9433381e+00 2.9713636e+00 7.2486328e-01 2.5929835e+00 1.9489982e+00 2.5241649e+00 1.4089364e+00 1.4425304e+00 1.8012344e+00 1.0919712e+00 1.3726860e+00 1.5830057e+00 1.6408468e+00 3.1546522e+00 3.2544456e+00 1.0406064e+00 2.0352149e+00 1.0168833e+00 3.0859269e+00 1.0901359e+00 1.9325796e+00 2.3348454e+00 9.8054887e-01 1.0379132e+00 1.7250039e+00 2.1825260e+00 2.4995667e+00 3.0529994e+00 1.7458338e+00 1.2167151e+00 1.6214915e+00 2.7108297e+00 1.8418195e+00 1.6195190e+00 9.3847194e-01 1.7995863e+00 1.9034198e+00 1.7022897e+00 1.1533602e+00 2.1413027e+00 2.0233319e+00 1.6211869e+00 1.1770266e+00 1.4411886e+00 1.6502968e+00 1.1644030e+00 6.5633874e-01 4.4417983e-01 1.1332978e+00 2.1845981e-01 4.2538717e-01 3.4583729e-01 7.1504098e-01 1.4080793e+00 3.4583729e-01 1.8866180e+00 8.7848692e-01 1.9819543e+00 1.3312249e+00 1.6523803e+00 2.6988671e+00 6.9006418e-01 2.3100474e+00 1.6455737e+00 2.2934334e+00 1.2426449e+00 1.1905954e+00 1.5932297e+00 8.9095811e-01 1.2681309e+00 1.4065584e+00 1.3487634e+00 2.8835141e+00 2.9705897e+00 7.3805807e-01 1.8205354e+00 8.5462626e-01 2.8117234e+00 9.3049742e-01 1.6679957e+00 2.0758969e+00 8.4050231e-01 8.3060013e-01 1.4419145e+00 1.9521697e+00 2.2599493e+00 2.8310619e+00 1.4807336e+00 9.4558103e-01 1.2404967e+00 2.5235709e+00 1.6070713e+00 1.3102444e+00 7.5705927e-01 1.6281130e+00 1.7021627e+00 1.6240596e+00 8.7848692e-01 1.8790831e+00 1.8155245e+00 1.5040391e+00 1.0014633e+00 1.2481462e+00 1.4334902e+00 8.6165877e-01 6.6827038e-01 1.5475692e+00 5.8914551e-01 5.0503591e-01 4.9857388e-01 3.0811765e-01 1.7160413e+00 5.7324170e-01 1.5795964e+00 6.5648056e-01 1.4967461e+00 1.0182895e+00 1.3034549e+00 2.2380042e+00 1.2266837e+00 1.8619092e+00 1.2701139e+00 1.8007564e+00 7.2852070e-01 7.9016429e-01 1.0989735e+00 7.5705927e-01 1.0406064e+00 1.0184370e+00 9.3999899e-01 2.3886514e+00 2.5357185e+00 8.2684479e-01 1.3424112e+00 7.0776547e-01 2.3522207e+00 4.8927739e-01 1.2205493e+00 1.5832517e+00 4.2667565e-01 4.4417983e-01 1.0974061e+00 1.4319225e+00 1.7587110e+00 2.2711652e+00 1.1390131e+00 5.1691876e-01 1.0163549e+00 1.9782498e+00 1.2532075e+00 9.2859317e-01 4.1449626e-01 1.0852663e+00 1.2786117e+00 1.0887986e+00 6.5648056e-01 1.4596621e+00 1.3991741e+00 1.0313560e+00 6.6932542e-01 7.7553525e-01 1.0741917e+00 5.7257017e-01 9.4558103e-01 2.5651975e-01 4.1449626e-01 3.2816937e-01 4.8135521e-01 1.0908017e+00 2.1845981e-01 2.1701312e+00 1.1752673e+00 2.1084262e+00 1.6352583e+00 1.9101184e+00 2.8518881e+00 9.7825559e-01 2.4781934e+00 1.8745369e+00 2.4235816e+00 1.3078976e+00 1.3842113e+00 1.6965018e+00 1.1329323e+00 1.4319225e+00 1.5496439e+00 1.5691346e+00 3.0266197e+00 3.1503439e+00 1.0244319e+00 1.9425540e+00 1.0627606e+00 2.9623467e+00 1.0056742e+00 1.8539828e+00 2.2026387e+00 9.1163729e-01 9.9475949e-01 1.6952454e+00 2.0275673e+00 2.3581240e+00 2.8793190e+00 1.7227544e+00 1.1332978e+00 1.6029963e+00 2.5482247e+00 1.8278913e+00 1.5611067e+00 9.1163729e-01 1.6653066e+00 1.8462692e+00 1.5634147e+00 1.1752673e+00 2.0757295e+00 1.9739212e+00 1.5320003e+00 1.1179743e+00 1.3567326e+00 1.6362950e+00 1.1594648e+00 9.9475949e-01 1.1004436e+00 1.0720678e+00 1.4108494e+00 3.2816937e-01 9.8054887e-01 2.9240179e+00 1.9013501e+00 3.0016960e+00 2.4403742e+00 2.7185134e+00 3.7481490e+00 1.2643026e+00 3.3676733e+00 2.7249658e+00 3.2951180e+00 2.1701459e+00 2.2231652e+00 2.5778372e+00 1.8193838e+00 2.0594742e+00 2.3383666e+00 2.4210417e+00 3.9277558e+00 4.0319248e+00 1.8011138e+00 2.8105150e+00 1.7324239e+00 3.8595400e+00 
1.8657887e+00 2.7098209e+00 3.1083486e+00 1.7551534e+00 1.8090259e+00 2.5002193e+00 2.9463843e+00 3.2688068e+00 3.8087204e+00 2.5182043e+00 1.9933741e+00 2.3692479e+00 3.4705738e+00 2.5885717e+00 2.3959721e+00 1.7005893e+00 2.5655126e+00 2.6734142e+00 2.4311441e+00 1.9013501e+00 2.9205331e+00 2.7876433e+00 2.3735872e+00 1.9516947e+00 2.2170194e+00 2.3879674e+00 1.9213461e+00 3.0546431e-01 2.0656129e-01 6.0611244e-01 1.2246352e+00 1.4096146e-01 1.9777274e+00 9.7249562e-01 2.0297383e+00 1.4616539e+00 1.7458338e+00 2.7737198e+00 7.5082357e-01 2.3931826e+00 1.7478866e+00 2.3213742e+00 1.2131545e+00 1.2498134e+00 1.6130724e+00 9.3824087e-01 1.2565757e+00 1.4029855e+00 1.4325768e+00 2.9414941e+00 3.0577671e+00 8.7720955e-01 1.8438146e+00 8.6956871e-01 2.8895427e+00 9.1310225e-01 1.7228354e+00 2.1321061e+00 8.0499049e-01 8.3345577e-01 1.5318874e+00 1.9898963e+00 2.3090270e+00 2.8491218e+00 1.5587730e+00 1.0122141e+00 1.4162017e+00 2.5326059e+00 1.6463627e+00 1.4047678e+00 7.3805807e-01 1.6165635e+00 1.7228488e+00 1.5520745e+00 9.7249562e-01 1.9420274e+00 1.8372522e+00 1.4628493e+00 1.0030700e+00 1.2523175e+00 1.4531349e+00 9.5571254e-01 1.2418578e-01 5.0270183e-01 1.2603076e+00 2.1269358e-01 1.9932786e+00 1.0168833e+00 1.9946994e+00 1.4557537e+00 1.7495699e+00 2.7337358e+00 9.0575661e-01 2.3519748e+00 1.7324239e+00 2.2796281e+00 1.1801240e+00 1.2450709e+00 1.5872286e+00 1.0267435e+00 1.3336069e+00 1.4152303e+00 1.4096199e+00 2.8778917e+00 3.0268604e+00 1.0067464e+00 1.8215944e+00 9.3824087e-01 2.8471683e+00 9.0658670e-01 1.6933635e+00 2.0801243e+00 8.0758367e-01 8.3783744e-01 1.5390703e+00 1.9310038e+00 2.2599493e+00 2.7651778e+00 1.5728839e+00 9.7949166e-01 1.4165336e+00 2.4822593e+00 1.6510537e+00 1.3842113e+00 7.5755387e-01 1.5773217e+00 1.7253276e+00 1.5255331e+00 1.0168833e+00 1.9287244e+00 1.8366596e+00 1.4599710e+00 1.0339865e+00 1.2378278e+00 1.4537266e+00 9.7249562e-01 5.0090417e-01 1.2507669e+00 1.2418578e-01 1.9589833e+00 9.7270522e-01 1.9792779e+00 1.4437673e+00 1.7230625e+00 2.7260686e+00 8.6012420e-01 2.3478326e+00 1.7190893e+00 2.2590861e+00 1.1454006e+00 1.2174316e+00 1.5614941e+00 9.5476489e-01 1.2555979e+00 1.3635198e+00 1.3969297e+00 2.8759951e+00 3.0161959e+00 9.4558103e-01 1.7925890e+00 8.6983677e-01 2.8416706e+00 8.6513410e-01 1.6742876e+00 2.0756986e+00 7.5871717e-01 7.9613242e-01 1.5107481e+00 1.9286915e+00 2.2531942e+00 2.7669732e+00 1.5384446e+00 9.7270522e-01 1.4111029e+00 2.4671050e+00 1.6105641e+00 1.3717027e+00 7.0429250e-01 1.5517600e+00 1.6837214e+00 1.4807336e+00 9.7270522e-01 1.9032219e+00 1.7953587e+00 1.4097072e+00 9.7855477e-01 1.2036484e+00 1.4110536e+00 9.4309624e-01 1.5090287e+00 5.0905001e-01 1.8619092e+00 9.1163729e-01 1.7230625e+00 1.3188999e+00 1.5883552e+00 2.4588872e+00 1.3193952e+00 2.0946464e+00 1.5338492e+00 2.0321740e+00 9.5099818e-01 1.0604511e+00 1.3277861e+00 9.3296062e-01 1.2221471e+00 1.2470767e+00 1.2266837e+00 2.6106370e+00 2.7667028e+00 8.7420176e-01 1.5746612e+00 8.9852394e-01 2.5679581e+00 6.9369532e-01 1.4905436e+00 1.8028753e+00 6.2149089e-01 6.9006418e-01 1.3813076e+00 1.6199747e+00 1.9552274e+00 2.4344864e+00 1.4148192e+00 8.0358695e-01 1.3039319e+00 2.1287551e+00 1.5153654e+00 1.2246352e+00 6.2660376e-01 1.2741904e+00 1.5160122e+00 1.2047214e+00 9.1163729e-01 1.7242097e+00 1.6420607e+00 1.2064640e+00 8.3812833e-01 1.0168833e+00 1.3257654e+00 8.6137722e-01 1.1533602e+00 3.1382691e+00 2.1485328e+00 3.1786790e+00 2.6799312e+00 2.9354140e+00 3.9353144e+00 1.5252485e+00 3.5658557e+00 2.9475994e+00 3.4455976e+00 2.3167786e+00 
2.4320363e+00 2.7459122e+00 2.0598189e+00 2.2501104e+00 2.5013206e+00 2.6321552e+00 4.0910078e+00 4.2293986e+00 2.0521052e+00 2.9731112e+00 1.9619929e+00 4.0491656e+00 2.0481101e+00 2.8946126e+00 3.2860707e+00 1.9342059e+00 2.0025214e+00 2.7210925e+00 3.1148548e+00 3.4424959e+00 3.9360563e+00 2.7333517e+00 2.2078200e+00 2.6383936e+00 3.6047462e+00 2.7713578e+00 2.6121617e+00 1.8925840e+00 2.7075477e+00 2.8419886e+00 2.5215069e+00 2.1485328e+00 3.1102248e+00 2.9517129e+00 2.5041493e+00 2.1435335e+00 2.3887866e+00 2.5633372e+00 2.1544995e+00 2.0467316e+00 1.0576043e+00 2.0528819e+00 1.5379283e+00 1.8096161e+00 2.8029161e+00 8.6012420e-01 2.4272793e+00 1.8028753e+00 2.3372930e+00 1.2144845e+00 1.2985682e+00 1.6312555e+00 1.0166932e+00 1.3073038e+00 1.4333755e+00 1.4843487e+00 2.9588514e+00 3.0950947e+00 9.7949166e-01 1.8647706e+00 9.3615100e-01 2.9181741e+00 9.3049742e-01 1.7594421e+00 2.1522124e+00 8.2342214e-01 8.7720955e-01 1.5965946e+00 1.9973159e+00 2.3235032e+00 2.8379387e+00 1.6211988e+00 1.0588560e+00 1.5076049e+00 2.5254157e+00 1.6945041e+00 1.4637418e+00 7.8197925e-01 1.6130724e+00 1.7539916e+00 1.5193574e+00 1.0576043e+00 1.9849009e+00 1.8691652e+00 1.4607586e+00 1.0375119e+00 1.2741904e+00 1.4947429e+00 1.0361698e+00 1.0621081e+00 8.3649708e-01 7.6590510e-01 4.0176783e-01 1.3455136e+00 1.8827665e+00 1.1093572e+00 9.5676647e-01 9.0852141e-01 9.4309624e-01 8.9852394e-01 6.8076724e-01 1.2002762e+00 9.7825559e-01 7.0479928e-01 7.8197925e-01 1.4637418e+00 1.5390703e+00 1.4628493e+00 6.2538346e-01 1.2208301e+00 1.4754770e+00 1.2162549e+00 5.2574978e-01 1.0104465e+00 1.2832075e+00 1.1810170e+00 6.1947990e-01 1.1242402e+00 1.1718516e+00 1.6295015e+00 5.8914551e-01 1.2063335e+00 1.1879206e+00 1.4041085e+00 4.0293660e-01 7.7074935e-01 1.2709820e+00 7.7869083e-01 5.0592043e-01 9.7441804e-01 1.0621081e+00 5.0991930e-01 4.4417983e-01 8.3888121e-01 1.1770266e+00 8.6361309e-01 6.0670504e-01 1.0324775e+00 1.3844611e+00 6.2660376e-01 8.8695363e-01 2.0692197e+00 9.7441804e-01 1.6995747e+00 1.0122141e+00 1.6372749e+00 7.6752131e-01 6.0551856e-01 1.0244319e+00 2.1845981e-01 5.0090417e-01 7.2852070e-01 7.4777660e-01 2.2645802e+00 2.3019759e+00 5.7324170e-01 1.1833351e+00 2.5651975e-01 2.1907335e+00 5.0905001e-01 1.0330459e+00 1.5124582e+00 4.4651726e-01 3.8934542e-01 6.9369532e-01 1.4517959e+00 1.7036156e+00 2.3021295e+00 7.0429250e-01 5.6700421e-01 6.3977563e-01 1.9782093e+00 8.7229670e-01 6.8801986e-01 3.8934542e-01 1.1199472e+00 9.9519977e-01 1.1263042e+00 0.0000000e+00 1.1697902e+00 1.0851476e+00 9.2859317e-01 5.0905001e-01 7.1504098e-01 7.7763126e-01 3.0546431e-01 8.2135873e-01 6.0121055e-01 7.6716823e-01 2.3579605e+00 4.5581864e-01 5.8914551e-01 6.5223271e-01 8.9095811e-01 8.2552685e-01 4.4417983e-01 1.5124582e+00 1.3844611e+00 8.1810461e-01 6.6384020e-01 1.0516761e+00 1.0733200e+00 1.3725949e+00 3.0844217e-01 1.6183051e+00 8.9095811e-01 1.1426203e+00 4.5470518e-01 3.2816937e-01 1.2604558e+00 1.2459608e+00 7.1799256e-01 5.0180477e-01 3.6171588e-01 1.0269295e+00 7.1840099e-01 1.0531192e+00 1.1093572e+00 6.1092863e-01 8.4591037e-01 7.4777660e-01 1.3693737e+00 5.0905001e-01 4.8135521e-01 8.0619006e-01 1.3844611e+00 3.4378533e-01 5.3309112e-01 7.3813096e-01 1.0901359e+00 8.1273630e-01 9.6141901e-01 1.2978356e+00 4.2667565e-01 1.4573287e+00 1.5826638e+00 1.0904758e+00 5.0503591e-01 1.1258723e+00 5.4292906e-01 3.2816937e-01 5.3022554e-01 7.7869083e-01 7.5871717e-01 5.5492130e-01 2.1269358e-01 1.6596342e+00 1.6919202e+00 8.3280511e-01 7.0429250e-01 8.7202528e-01 1.5772389e+00 7.0394675e-01 
5.2655962e-01 9.2836103e-01 8.0064372e-01 7.0437330e-01 3.0546431e-01 9.0478973e-01 1.1271488e+00 1.7235501e+00 4.0293660e-01 5.2942799e-01 4.5470518e-01 1.4317371e+00 6.8917100e-01 2.1269358e-01 8.1099042e-01 6.2988288e-01 6.5172743e-01 7.6166891e-01 6.2660376e-01 6.5648056e-01 7.6625946e-01 6.1947990e-01 6.4755655e-01 4.2667565e-01 6.2660376e-01 5.6700421e-01 1.2113327e+00 1.8396098e+00 8.7504951e-01 5.7257017e-01 8.3280511e-01 7.0784540e-01 5.5492130e-01 3.7427929e-01 1.0284501e+00 8.7420176e-01 5.0991930e-01 4.4417983e-01 1.4089719e+00 1.4387122e+00 1.1127329e+00 4.1586001e-01 1.1205013e+00 1.3344634e+00 9.3048953e-01 3.2816937e-01 7.4164639e-01 1.0244319e+00 9.3999899e-01 2.5651975e-01 8.1242502e-01 9.1858284e-01 1.4955532e+00 2.5251796e-01 8.7420176e-01 8.5335130e-01 1.2045536e+00 4.3691963e-01 4.4651726e-01 1.0480665e+00 4.9857388e-01 2.8507955e-01 7.3535471e-01 8.8695363e-01 3.2816937e-01 3.8934542e-01 6.0611244e-01 8.6361309e-01 6.0551856e-01 5.2655962e-01 8.3783744e-01 3.0351721e+00 4.2418962e-01 1.0941064e+00 7.5705927e-01 1.6559784e+00 1.5582387e+00 1.2112034e+00 2.1967372e+00 2.0692197e+00 1.5564198e+00 1.3693737e+00 8.0096515e-01 4.5581864e-01 2.0330276e+00 1.0137836e+00 2.3142399e+00 2.1845981e-01 1.9017011e+00 1.1228379e+00 6.6827038e-01 2.0223026e+00 1.9969203e+00 1.3792358e+00 8.7478495e-01 5.2371571e-01 8.1385214e-01 1.3793330e+00 1.7664528e+00 1.6569692e+00 5.0905001e-01 1.4644753e+00 1.4341959e+00 2.1204309e+00 1.2632199e+00 1.1880428e+00 1.5405106e+00 2.0692197e+00 9.4009473e-01 1.1355826e+00 1.4992973e+00 1.8311457e+00 1.5765737e+00 1.6311692e+00 1.9969203e+00 2.6670272e+00 1.9783833e+00 2.5784641e+00 1.6572339e+00 1.5613865e+00 1.9842916e+00 8.6110333e-01 1.0720678e+00 1.6180482e+00 1.7140774e+00 3.2123303e+00 3.2542669e+00 1.1332978e+00 2.1449779e+00 7.5976039e-01 3.1540626e+00 1.4088394e+00 1.9797139e+00 2.4825886e+00 1.3075101e+00 1.2330392e+00 1.6629594e+00 2.4148300e+00 2.6746409e+00 3.2573703e+00 1.6686069e+00 1.4322723e+00 1.4341959e+00 2.9471490e+00 1.6864366e+00 1.6385322e+00 1.1320702e+00 2.0632091e+00 1.9468380e+00 2.0389505e+00 9.7441804e-01 2.1303950e+00 2.0095672e+00 1.8517858e+00 1.4168607e+00 1.6483152e+00 1.5346983e+00 1.0866092e+00 7.2486328e-01 8.7202528e-01 1.2988558e+00 1.1847335e+00 8.6137722e-01 1.8265471e+00 1.7177705e+00 1.2107055e+00 9.9368623e-01 9.5866719e-01 7.3805807e-01 1.6506221e+00 7.3805807e-01 1.9449573e+00 5.0592043e-01 1.5350426e+00 7.8695083e-01 3.7427929e-01 1.6553809e+00 1.6249178e+00 1.0168833e+00 5.0991930e-01 2.1845981e-01 9.7270522e-01 1.0264409e+00 1.3817041e+00 1.2768639e+00 5.7324170e-01 1.1634384e+00 1.0611732e+00 1.7483574e+00 9.3048953e-01 9.0056222e-01 1.2340567e+00 1.6995747e+00 6.8076724e-01 9.1883539e-01 1.1718516e+00 1.4616896e+00 1.2125198e+00 1.2951888e+00 1.6249178e+00 1.2028939e+00 8.7420176e-01 5.3665999e-01 5.5492130e-01 1.1340084e+00 1.0720678e+00 8.3345577e-01 5.3588338e-01 1.5520745e+00 1.3258714e+00 9.5099818e-01 7.7074935e-01 1.2604558e+00 1.1891470e+00 9.2264612e-01 8.1099042e-01 7.7039952e-01 1.0389435e+00 1.0054794e+00 4.3456114e-01 6.2605182e-01 7.2823007e-01 1.5784191e+00 4.8927739e-01 7.5976039e-01 6.5223271e-01 1.0692258e+00 9.8985697e-01 6.3808075e-01 1.1178200e+00 6.6827038e-01 7.4855857e-01 8.6513410e-01 1.0122141e+00 7.6787403e-01 9.3615100e-01 7.5835500e-01 8.2654509e-01 6.9728513e-01 9.9519977e-01 9.7356960e-01 1.1306887e+00 1.2197188e+00 8.0353565e-01 1.7933375e+00 1.5916843e+00 1.0118409e+00 1.0089164e+00 7.0776547e-01 1.1591754e+00 1.8437762e+00 5.3309112e-01 1.8278913e+00 
9.6838716e-01 1.4843324e+00 6.3735887e-01 7.3496673e-01 1.5570415e+00 1.5003972e+00 1.0421979e+00 9.7759114e-01 8.9070384e-01 7.8197925e-01 1.0329598e+00 1.4388174e+00 1.5204340e+00 6.9325418e-01 9.4309624e-01 1.0339865e+00 1.6134578e+00 8.0660588e-01 7.0523271e-01 1.0406064e+00 1.6372749e+00 5.1303949e-01 5.8851328e-01 1.0072663e+00 1.4951106e+00 1.0950112e+00 1.0934620e+00 1.5213929e+00 5.0991930e-01 4.5581864e-01 9.3615100e-01 7.6590510e-01 3.2586371e-01 4.2538717e-01 1.7942496e+00 1.9566981e+00 1.0636401e+00 6.6384020e-01 9.2264612e-01 1.7814077e+00 5.2371571e-01 6.0670504e-01 1.0120221e+00 4.8927739e-01 4.3691963e-01 5.6769031e-01 8.9366705e-01 1.1948578e+00 1.6982795e+00 5.7324170e-01 5.7257017e-01 8.3060013e-01 1.3824965e+00 5.7867728e-01 4.1586001e-01 5.4292906e-01 4.4651726e-01 5.7324170e-01 4.4535192e-01 7.6752131e-01 8.2105460e-01 6.9369532e-01 3.4583729e-01 7.0479928e-01 2.0656129e-01 4.3456114e-01 6.1092863e-01 4.6472023e-01 7.1840099e-01 6.9369532e-01 5.6631629e-01 3.2816937e-01 1.8058693e+00 1.8261179e+00 6.3735887e-01 7.0328431e-01 8.2684479e-01 1.6791597e+00 4.0293660e-01 6.6827038e-01 9.7377870e-01 5.0991930e-01 4.8135521e-01 3.2586371e-01 8.7021234e-01 1.1327825e+00 1.7839298e+00 3.7427929e-01 4.1586001e-01 5.5492130e-01 1.3916739e+00 7.7919451e-01 4.1449626e-01 5.8914551e-01 5.7324170e-01 6.0900723e-01 6.2407309e-01 6.0551856e-01 7.5705927e-01 7.8695083e-01 4.8135521e-01 3.2586371e-01 3.0811765e-01 7.3851529e-01 5.3665999e-01 1.1524979e+00 1.0244319e+00 4.3691963e-01 3.7255734e-01 1.4090646e+00 1.5060944e+00 1.0810263e+00 2.8507955e-01 1.2406194e+00 1.3336069e+00 7.1791510e-01 3.2586371e-01 5.9426792e-01 8.2552685e-01 8.2275389e-01 4.1449626e-01 5.8851328e-01 7.5196795e-01 1.3415658e+00 4.1586001e-01 7.2852070e-01 8.9159388e-01 9.7249562e-01 5.8914551e-01 4.4535192e-01 9.4352681e-01 1.4096146e-01 3.0811765e-01 4.1586001e-01 1.0244319e+00 4.2538717e-01 4.5581864e-01 3.2586371e-01 7.0869559e-01 3.7427929e-01 6.5223271e-01 9.2836103e-01 4.4651726e-01 8.8695363e-01 8.9971984e-01 2.4227359e+00 2.4243464e+00 5.5419992e-01 1.3235313e+00 3.0546431e-01 2.3149695e+00 6.1151102e-01 1.2036484e+00 1.6520677e+00 5.4292906e-01 5.7324170e-01 8.2305664e-01 1.5788188e+00 1.8238348e+00 2.4554026e+00 8.2552685e-01 7.0429250e-01 7.7588000e-01 2.0959492e+00 1.0466623e+00 8.6513410e-01 5.4292906e-01 1.2497790e+00 1.1215059e+00 1.2436109e+00 2.1845981e-01 1.3165513e+00 1.2256881e+00 1.0406064e+00 6.0060595e-01 8.5434758e-01 9.6664346e-01 5.1691876e-01 6.5223271e-01 8.4050231e-01 2.2451458e+00 2.2996030e+00 9.7270522e-01 1.1594648e+00 4.2538717e-01 2.1936248e+00 6.9369532e-01 1.0119857e+00 1.5297036e+00 6.6384020e-01 6.2988288e-01 7.0386584e-01 1.5113992e+00 1.7140171e+00 2.2868482e+00 6.9325418e-01 9.4080461e-01 1.0406064e+00 1.9734538e+00 7.5835500e-01 7.8695083e-01 6.2988288e-01 1.1158787e+00 9.4832302e-01 1.1055069e+00 5.0090417e-01 1.1452867e+00 1.0056742e+00 9.0277242e-01 6.3977563e-01 7.3851529e-01 6.6432544e-01 6.0611244e-01 5.1691876e-01 1.6983410e+00 1.8377590e+00 1.1500393e+00 5.6631629e-01 8.6012420e-01 1.6864433e+00 6.6539428e-01 4.5581864e-01 9.7356960e-01 6.6932542e-01 5.9426792e-01 4.5470518e-01 9.7548738e-01 1.1573546e+00 1.6772907e+00 4.4535192e-01 8.2929029e-01 9.8450810e-01 1.3812107e+00 3.2816937e-01 5.0905001e-01 6.6932542e-01 5.0991930e-01 3.7598397e-01 5.0905001e-01 7.2852070e-01 6.4704320e-01 4.5581864e-01 3.2586371e-01 7.4777660e-01 3.2816937e-01 2.5251796e-01 6.3108414e-01 1.5573817e+00 1.6420607e+00 9.0575661e-01 5.7867728e-01 9.7441804e-01 1.4917344e+00 
6.2482915e-01 4.0176783e-01 7.7039952e-01 7.1799256e-01 6.4704320e-01 3.2816937e-01 7.1799256e-01 9.7270522e-01 1.5593809e+00 4.1586001e-01 4.6472023e-01 5.6454040e-01 1.2601890e+00 6.5223271e-01 1.2418578e-01 7.6716823e-01 4.4651726e-01 6.0670504e-01 6.1947990e-01 7.4777660e-01 5.9426792e-01 7.2172678e-01 5.3588338e-01 6.2660376e-01 3.2352160e-01 5.8914551e-01 6.4704320e-01 1.2013436e+00 2.3665136e+00 1.1771643e+00 2.4806944e+00 1.0018083e+00 2.1098467e+00 1.2627078e+00 8.8503502e-01 2.2024869e+00 2.1511385e+00 1.6189643e+00 1.1368070e+00 1.0688498e+00 3.4378533e-01 1.6188960e+00 1.9698860e+00 1.9254808e+00 8.8861541e-01 1.5832517e+00 1.5978297e+00 2.2712062e+00 1.4277162e+00 1.3610783e+00 1.6849072e+00 2.2645802e+00 1.1106525e+00 1.2665468e+00 1.6689743e+00 2.0995265e+00 1.7457596e+00 1.7532140e+00 2.1511385e+00 2.2712062e+00 1.3276804e+00 2.5485519e+00 3.4378533e-01 2.1870851e+00 1.4266198e+00 1.0379132e+00 2.3072128e+00 2.2736138e+00 1.6156775e+00 1.2095267e+00 8.3888121e-01 1.2277129e+00 1.6151153e+00 2.0528819e+00 1.8792214e+00 8.2624515e-01 1.7265353e+00 1.7004805e+00 2.3953564e+00 1.5733646e+00 1.4691764e+00 1.8497891e+00 2.3019759e+00 1.2238809e+00 1.4266198e+00 1.7962897e+00 2.1027465e+00 1.8635467e+00 1.9008621e+00 2.2439391e+00 1.3353353e+00 7.2526325e-01 2.1293320e+00 5.5492130e-01 1.2776560e+00 1.5193574e+00 6.2988288e-01 8.1130291e-01 8.6912228e-01 1.3752391e+00 1.6044563e+00 2.3474075e+00 9.1883539e-01 6.2024833e-01 6.4806901e-01 1.9000365e+00 1.3674559e+00 9.6664346e-01 8.1354181e-01 1.1751082e+00 1.2304904e+00 1.2256933e+00 5.7324170e-01 1.3628690e+00 1.4089364e+00 1.0866132e+00 4.8036801e-01 8.9971984e-01 1.3044654e+00 8.1130291e-01 1.3916739e+00 1.1500393e+00 9.6838716e-01 2.5251796e-01 5.5419992e-01 1.0573285e+00 1.0284501e+00 5.7324170e-01 7.1840099e-01 6.6317860e-01 1.1434428e+00 5.6769031e-01 9.7855477e-01 1.1106525e+00 8.2899253e-01 6.0670504e-01 6.2660376e-01 1.1449732e+00 3.2586371e-01 2.1845981e-01 6.0060595e-01 1.1833351e+00 2.0656129e-01 2.5251796e-01 5.1607523e-01 9.6324667e-01 5.9426792e-01 7.1799256e-01 1.0879524e+00 2.4372751e+00 7.0437330e-01 1.2331989e+00 1.7427900e+00 6.0611244e-01 5.1607523e-01 9.3615100e-01 1.6812503e+00 1.9411754e+00 2.5109747e+00 9.3801395e-01 7.7039952e-01 8.6513410e-01 2.2052183e+00 9.6324667e-01 8.9917007e-01 4.2667565e-01 1.3224963e+00 1.1912106e+00 1.3084046e+00 2.5651975e-01 1.3897316e+00 1.2541242e+00 1.1120775e+00 7.1504098e-01 9.1051084e-01 8.1521713e-01 3.6171588e-01 2.0209349e+00 1.2627078e+00 7.9878917e-01 2.1431239e+00 2.1198551e+00 1.5016009e+00 9.6141901e-01 6.2024833e-01 1.0083666e+00 1.5022608e+00 1.8803649e+00 1.7557336e+00 6.2482915e-01 1.6044563e+00 1.5582387e+00 2.2435182e+00 1.3836712e+00 1.3199714e+00 1.6568705e+00 2.1907335e+00 1.0796583e+00 1.2825987e+00 1.6205332e+00 1.9460721e+00 1.6995133e+00 1.7678302e+00 2.1198551e+00 9.1750357e-01 1.2756158e+00 1.4096146e-01 3.2352160e-01 7.1504098e-01 1.1242402e+00 1.4312787e+00 2.0223464e+00 7.3535471e-01 3.2586371e-01 7.3851529e-01 1.6386882e+00 9.4477932e-01 6.4755655e-01 3.7427929e-01 7.3805807e-01 8.6165877e-01 7.2852070e-01 5.0905001e-01 1.0922991e+00 1.0175773e+00 6.0900723e-01 2.1269358e-01 4.0176783e-01 8.2372435e-01 4.5470518e-01 5.5492130e-01 9.8495853e-01 9.0521488e-01 5.2942799e-01 6.3977563e-01 7.9878917e-01 1.2832075e+00 5.3022554e-01 8.3060013e-01 9.4500268e-01 1.0244319e+00 4.4651726e-01 4.0176783e-01 1.0230441e+00 3.4378533e-01 3.2586371e-01 6.1623531e-01 1.0330459e+00 2.5651975e-01 4.0000000e-01 5.3588338e-01 9.5676647e-01 5.3665999e-01 
5.3665999e-01 9.0521488e-01 1.3865084e+00 1.3669552e+00 8.6012420e-01 2.8192292e-01 4.1586001e-01 8.4050231e-01 8.7383925e-01 1.1355826e+00 1.1712156e+00 6.2660376e-01 9.8985697e-01 8.5205778e-01 1.4909823e+00 6.3861009e-01 7.2526325e-01 9.4854455e-01 1.5124582e+00 5.6700421e-01 7.7919451e-01 8.9971984e-01 1.2483814e+00 9.4009473e-01 1.0879524e+00 1.4147273e+00 2.1269358e-01 8.1354181e-01 1.2441035e+00 1.5551238e+00 2.1137172e+00 8.2899253e-01 3.7427929e-01 8.2929029e-01 1.7587110e+00 9.6095130e-01 7.1799256e-01 2.4837156e-01 8.3280511e-01 9.3797093e-01 7.9016429e-01 4.4651726e-01 1.1833351e+00 1.0724413e+00 6.6932542e-01 3.2816937e-01 4.6472023e-01 8.0467258e-01 3.8776762e-01 7.3145860e-01 1.2564564e+00 1.5558094e+00 2.0983278e+00 7.5082357e-01 3.6171588e-01 7.6590510e-01 1.7862655e+00 8.4050231e-01 6.2024833e-01 1.2418578e-01 8.6137722e-01 8.9852394e-01 8.5462626e-01 3.8934542e-01 1.1192362e+00 1.0078327e+00 7.0386584e-01 5.0991930e-01 4.5470518e-01 6.6539428e-01 2.4837156e-01 8.5690100e-01 1.0344911e+00 1.6689743e+00 1.0000000e-01 6.8961791e-01 7.1799256e-01 1.3207609e+00 6.2024833e-01 3.7427929e-01 8.3888121e-01 5.3588338e-01 4.2288438e-01 6.4405773e-01 6.9369532e-01 5.3309112e-01 5.8914551e-01 4.6472023e-01 6.2538346e-01 4.1586001e-01 6.1623531e-01 6.4405773e-01 4.0176783e-01 1.0175773e+00 8.9303452e-01 1.0122141e+00 1.1161766e+00 7.7885297e-01 1.0755693e+00 8.1385214e-01 1.3792358e+00 5.8914551e-01 8.5462626e-01 8.7848692e-01 1.4517959e+00 7.3851529e-01 9.4854455e-01 8.6263408e-01 1.0941064e+00 8.3783744e-01 1.1172689e+00 1.3545005e+00 1.0391247e+00 1.0389435e+00 1.3163598e+00 1.3379696e+00 4.5470518e-01 1.1951875e+00 1.0632598e+00 1.6796759e+00 7.8197925e-01 8.3345577e-01 1.0540105e+00 1.7036156e+00 6.9167458e-01 8.8503502e-01 1.0279631e+00 1.3693737e+00 1.1192426e+00 1.3077572e+00 1.6183051e+00 1.6694974e+00 1.9094934e+00 1.9895190e+00 8.2384013e-01 1.6634400e+00 1.6218244e+00 2.2178691e+00 1.3008161e+00 1.3567326e+00 1.4994715e+00 2.3021295e+00 1.1763719e+00 1.3024224e+00 1.5535909e+00 2.0373882e+00 1.6756749e+00 1.7981158e+00 2.1739455e+00 7.6752131e-01 8.1354181e-01 1.3198846e+00 6.0611244e-01 4.4535192e-01 8.5335130e-01 5.3665999e-01 3.8776762e-01 6.3977563e-01 7.0429250e-01 5.2655962e-01 5.5492130e-01 4.5581864e-01 6.3861009e-01 4.2667565e-01 6.1151102e-01 6.6932542e-01 5.1691876e-01 1.5922648e+00 1.0054794e+00 4.8135521e-01 4.3456114e-01 7.6955924e-01 9.6664346e-01 8.9687438e-01 5.6700421e-01 1.0421979e+00 1.1001291e+00 8.2929029e-01 4.4535192e-01 5.1691876e-01 8.9712482e-01 4.5470518e-01 1.6914476e+00 1.1340084e+00 5.8914551e-01 8.5105559e-01 9.7548738e-01 1.0864449e+00 1.1160770e+00 6.3977563e-01 1.0720678e+00 1.2163831e+00 1.0047836e+00 6.9369532e-01 7.2343175e-01 1.0613462e+00 6.2407309e-01 1.4238090e+00 1.3511716e+00 1.9067300e+00 9.3824087e-01 1.0331736e+00 1.1327825e+00 1.9782093e+00 9.0454394e-01 1.0244319e+00 1.1833480e+00 1.5948732e+00 1.3360558e+00 1.5461469e+00 1.8900319e+00 6.2081167e-01 9.1750357e-01 6.4290921e-01 4.4417983e-01 7.0429250e-01 8.7229670e-01 5.3665999e-01 4.0438741e-01 5.6454040e-01 1.0054794e+00 5.7015910e-01 2.1269358e-01 7.5705927e-01 7.3496673e-01 5.2942799e-01 6.2024833e-01 6.6491075e-01 6.8801986e-01 6.1947990e-01 7.2172678e-01 5.5492130e-01 6.9006418e-01 3.2816937e-01 5.3665999e-01 5.6700421e-01 9.7779835e-01 1.0014633e+00 9.4854455e-01 3.8934542e-01 1.2342162e+00 1.1043332e+00 7.9580667e-01 5.3665999e-01 5.7257017e-01 7.2852070e-01 3.0275928e-01 3.4378533e-01 3.2352160e-01 1.1199472e+00 5.0991930e-01 4.6472023e-01 2.8507955e-01 
7.7869083e-01 4.1586001e-01 7.1799256e-01 1.0132664e+00 5.0905001e-01 9.9519977e-01 3.0811765e-01 2.1269358e-01 4.0293660e-01 8.3060013e-01 5.0592043e-01 5.3665999e-01 9.3049742e-01 1.1263042e+00 8.0064372e-01 6.1623531e-01 2.1269358e-01 7.7588000e-01 4.4651726e-01 7.2783368e-01 1.0329901e+00 1.1697902e+00 1.0851476e+00 9.2859317e-01 5.0905001e-01 7.1504098e-01 7.7763126e-01 3.0546431e-01 2.5651975e-01 7.0437330e-01 1.0573285e+00 7.3145860e-01 6.9325418e-01 1.0901359e+00 5.3588338e-01 1.0175773e+00 6.4405773e-01 5.3665999e-01 1.0078327e+00 6.2407309e-01 3.2352160e-01 5.7257017e-01 8.5205778e-01 5.1691876e-01 9.4022486e-01 5.6769031e-01 4.8927739e-01 6.0611244e-01 6.0900723e-01
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt
new file mode 100644
index 0000000000000000000000000000000000000000..daa81110a2be1a670f6163a8b255b2c6ecd5ccaa
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-3.2-ml.txt
@@ -0,0 +1 @@
+ 2.0215050e+00 2.0988154e+00 1.8614681e+00 2.0510161e+00 1.9210911e+00 2.1323516e+00 1.9565454e+00 2.1029889e+00 1.9617871e+00 2.0544792e+00 2.0357408e+00 1.8811414e+00 2.0694693e+00 2.1245977e+00 2.0632165e+00 2.0452823e+00 2.0249330e+00 1.9635489e+00 2.0508580e+00 2.0838578e+00 1.9324052e+00 1.8224609e+00 1.9795343e+00 1.9536534e+00 1.9694910e+00 1.9075569e+00 1.9590397e+00 2.0022087e+00 1.8814000e+00 1.8884208e+00 1.9961121e+00 2.0215351e+00 1.7515769e+00 2.0756437e+00 2.0109476e+00 1.9234849e+00 1.9160076e+00 1.8550862e+00 1.7733640e+00 2.0071906e+00 2.0209542e+00 2.0616569e+00 2.0565503e+00 1.9083573e+00 2.2732431e+00 1.9975503e+00 1.9080072e+00 2.1437809e+00 2.1296295e+00 1.9739085e+00 1.9834166e+00 2.1078664e+00 2.2016840e+00 2.2080962e+00 1.7340579e+00 2.0549287e+00 1.7331748e+00 1.9559688e+00 2.0343364e+00 1.8736929e+00 1.9730416e+00 1.5308944e+00 1.8421831e+00 2.0174240e+00 2.0137378e+00 1.7956151e+00 1.9606596e+00 1.9074857e+00 2.0413879e+00 2.0070305e+00 1.9584677e+00 1.8977851e+00 1.9176239e+00 1.7067419e+00 1.9461927e+00 1.8431700e+00 1.8284576e+00 1.7778704e+00 1.8350329e+00 2.0175415e+00 1.7459063e+00 1.9242505e+00 1.8757370e+00 1.9312506e+00 2.0574808e+00 2.0894636e+00 1.9780203e+00 2.1374036e+00 1.8900436e+00 2.0273032e+00 2.0681953e+00 2.0234699e+00 2.0666449e+00 2.0663485e+00 1.9281402e+00 1.7846314e+00 2.0372479e+00 1.8831230e+00 2.0186015e+00 2.0193231e+00 2.2022665e+00 1.8145737e+00 2.0466545e+00 1.8092421e+00 1.9600687e+00 2.0322961e+00 1.9556364e+00 1.8266422e+00 1.9950345e+00 2.1038429e+00 2.1164145e+00 2.0188062e+00 1.8863331e+00 2.0006971e+00 1.9971068e+00 1.8771862e+00 2.1148855e+00 1.9570638e+00 1.9859615e+00 2.0030854e+00 2.0737344e+00 1.9739259e+00 1.9266524e+00 1.9200535e+00 2.1376689e+00 1.8944425e+00 1.9330553e+00 1.8561590e+00 1.9422954e+00 1.8874178e+00 1.8624808e+00 1.8265563e+00 1.8840519e+00 2.0515092e+00 2.0174226e+00 1.9771196e+00 2.0635988e+00 1.7334466e+00 1.9912604e+00 1.8915711e+00 1.8262636e+00 1.9369173e+00 1.9560446e+00 1.9549934e+00 1.9279230e+00 1.9021073e+00 2.0113391e+00 2.0305786e+00 1.8066806e+00 1.9656739e+00 2.1219217e+00 1.8820250e+00 1.8936826e+00 2.0565131e+00 1.9839441e+00 1.8553479e+00 1.9923760e+00 1.6393276e+00 1.9786440e+00 1.8274394e+00 1.9322611e+00 2.0404318e+00 1.9216532e+00 1.9361171e+00 1.8401373e+00 1.9908059e+00 1.9495117e+00 2.1975655e+00 1.8413913e+00 2.1528773e+00 1.8434374e+00 2.1668863e+00 2.0429273e+00 1.9980016e+00 1.9790129e+00 2.0264829e+00 2.1478843e+00 2.0899600e+00 2.0280670e+00 2.1210881e+00 1.9993891e+00 1.8646871e+00 1.9099983e+00 1.9263353e+00 2.0042495e+00 2.1365919e+00 2.1830279e+00 1.9631961e+00 2.0880004e+00 1.8348369e+00
diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aa26b0439f568f97c95bee1b04204f24c9a1f3e0
--- /dev/null
+++ b/voice_bridge/scipy/spatial/tests/data/pdist-minkowski-5.8-ml-iris.txt
@@ -0,0 +1 @@
+ 5.0042326e-01 4.1210927e-01 5.2133179e-01 1.1269424e-01 4.2362917e-01 5.0001522e-01 1.2085435e-01 7.4262850e-01 4.0127250e-01 3.0482299e-01 3.0482299e-01 5.0436965e-01 8.0923926e-01 7.1629168e-01 9.1424701e-01 4.1317535e-01 1.0000000e-01 6.0366256e-01 3.0017653e-01 3.3813251e-01 2.2573593e-01 5.2133179e-01 3.4080442e-01 5.0436965e-01 5.0043084e-01 2.2608083e-01 1.1269424e-01 1.1269424e-01 4.1315633e-01 4.1315633e-01 3.0490481e-01 6.0000952e-01 7.0462550e-01 4.0127250e-01 3.0482299e-01 4.0002221e-01 4.0127250e-01 7.1621748e-01 1.1269424e-01 1.2085435e-01 1.2036864e+00 7.0088477e-01 4.0125062e-01 5.0476836e-01 5.0436965e-01 3.0474106e-01 5.0436235e-01 2.2573593e-01 2.0061436e-01 3.3243227e+00 3.1068812e+00 3.5145413e+00 2.6080595e+00 3.2075731e+00 3.1014454e+00 3.3055260e+00 1.9156198e+00 3.2079238e+00 2.5066441e+00 2.1498493e+00 2.8059664e+00 2.6093989e+00 3.3021953e+00 2.2070266e+00 3.0158454e+00 3.1034764e+00 2.7009878e+00 3.1081779e+00 2.5032992e+00 3.4074959e+00 2.6050088e+00 3.5035589e+00 3.3011884e+00 2.9065890e+00 3.0117336e+00 3.4118782e+00 3.6094426e+00 3.1038958e+00 2.1042326e+00 2.4058620e+00 2.3063407e+00 2.5029614e+00 3.7025335e+00 3.1034636e+00 3.1057006e+00 3.3110189e+00 3.0065909e+00 2.7025941e+00 2.6047974e+00 3.0013665e+00 3.2025221e+00 2.6029242e+00 1.9242109e+00 2.8024935e+00 2.8013151e+00 2.8022622e+00 2.9036582e+00 1.6267693e+00 2.7028014e+00 4.6144526e+00 3.7071079e+00 4.5121787e+00 4.2031939e+00 4.4087839e+00 5.2153194e+00 3.1086291e+00 4.9093646e+00 4.4044245e+00 4.7202040e+00 3.7119486e+00 3.9066365e+00 4.1123628e+00 3.6114402e+00 3.7307413e+00 3.9194642e+00 4.1043951e+00 5.3177489e+00 5.5157728e+00 3.6035661e+00 4.3162097e+00 3.5127031e+00 5.3163123e+00 3.5077296e+00 4.3088507e+00 4.6100803e+00 3.4082578e+00 3.5068380e+00 4.2080636e+00 4.4113183e+00 4.7149608e+00 5.0316727e+00 4.2105572e+00 3.7024462e+00 4.2007769e+00 4.7331529e+00 4.2173557e+00 4.1039096e+00 3.4076329e+00 4.0157626e+00 4.2194897e+00 3.7329396e+00 3.7071079e+00 4.5119962e+00 4.3218071e+00 3.8249612e+00 3.6093673e+00 3.8105293e+00 4.0166459e+00 3.7050109e+00 2.2573593e-01 3.0017653e-01 6.0000317e-01 9.0534502e-01 4.1210927e-01 4.0004442e-01 5.0000761e-01 1.2085435e-01 7.1621748e-01 4.0125062e-01 1.1269424e-01 6.0184622e-01 1.0776294e+00 1.4092540e+00 9.0508756e-01 5.0043084e-01 9.0181717e-01 8.0004602e-01 5.2491131e-01 7.0017011e-01 6.1119267e-01 3.6452132e-01 5.2133179e-01 2.0061436e-01 4.0246123e-01 5.0436965e-01 4.1209001e-01 2.4170870e-01 2.0121983e-01 5.2167829e-01 1.1001015e+00 1.2036862e+00 1.2085435e-01 2.2573593e-01 6.3164977e-01 1.2085435e-01 5.0000761e-01 4.0125062e-01 5.0002283e-01 7.0462844e-01 5.0043084e-01 5.2167829e-01 8.0888055e-01 1.1269424e-01 8.0008884e-01 3.0474106e-01 7.0462697e-01 3.0008832e-01 3.3416860e+00 3.1112912e+00 3.5249966e+00 2.6033557e+00 3.2127499e+00 3.1015178e+00 3.3078313e+00 1.9025708e+00 3.2150318e+00 2.5060738e+00 2.1061951e+00 2.8068283e+00 2.6040016e+00 3.3032134e+00 2.2072454e+00 3.0286102e+00 3.1035443e+00 2.7011973e+00 3.1070853e+00
2.5014549e+00 3.4078435e+00 2.6080511e+00 3.5048916e+00 3.3021665e+00 2.9125999e+00 3.0213627e+00 3.4211337e+00 3.6148618e+00 3.1047537e+00 2.1027003e+00 2.4016639e+00 2.3011929e+00 2.5032633e+00 3.7028303e+00 3.1034629e+00 3.1065984e+00 3.3192072e+00 3.0078209e+00 2.7027260e+00 2.6031664e+00 3.0009332e+00 3.2037232e+00 2.6027120e+00 1.9031578e+00 2.8022915e+00 2.8015662e+00 2.8024715e+00 2.9065359e+00 1.6099792e+00 2.7029416e+00 4.6149181e+00 3.7071538e+00 4.5172866e+00 4.2039132e+00 4.4099272e+00 5.2224057e+00 3.1078968e+00 4.9146298e+00 4.4063795e+00 4.7253524e+00 3.7145622e+00 3.9080413e+00 4.1161770e+00 3.6111646e+00 3.7308314e+00 3.9209137e+00 4.1060063e+00 5.3254977e+00 5.5222404e+00 3.6024247e+00 4.3201293e+00 3.5126957e+00 5.3240486e+00 3.5093499e+00 4.3111749e+00 4.6158382e+00 3.4095576e+00 3.5076152e+00 4.2090727e+00 4.4184242e+00 4.7227808e+00 5.0458491e+00 4.2115634e+00 3.7037441e+00 4.2010125e+00 4.7466313e+00 4.2180733e+00 4.1050714e+00 3.4081972e+00 4.0212972e+00 4.2220584e+00 3.7407842e+00 3.7071538e+00 4.5144444e+00 4.3240980e+00 3.8290678e+00 3.6105228e+00 3.8128297e+00 4.0172657e+00 3.7052380e+00 2.0121983e-01 4.1210927e-01 7.9153339e-01 2.0181667e-01 3.0915245e-01 3.3813251e-01 2.2608083e-01 7.1629168e-01 3.0482299e-01 2.0181667e-01 4.0246123e-01 1.1281267e+00 1.2633045e+00 7.8890721e-01 4.1212852e-01 1.0095370e+00 6.0964891e-01 7.0470720e-01 5.2201750e-01 4.1210927e-01 4.5784410e-01 6.0017982e-01 3.4080442e-01 3.4342562e-01 5.0476836e-01 5.0043084e-01 3.0000000e-01 3.0017653e-01 7.0025283e-01 9.0508756e-01 1.0426513e+00 2.2608083e-01 3.0008832e-01 8.0046605e-01 2.2608083e-01 3.0474106e-01 4.0243965e-01 3.3813251e-01 9.0002570e-01 3.0000000e-01 4.3213914e-01 6.8170466e-01 2.0181667e-01 6.1119267e-01 1.1269424e-01 6.3178534e-01 3.0017653e-01 3.4595765e+00 3.2168311e+00 3.6364650e+00 2.7037323e+00 3.3192099e+00 3.2017763e+00 3.4107328e+00 2.0033798e+00 3.3237063e+00 2.6050967e+00 2.2121910e+00 2.9077087e+00 2.7085154e+00 3.4047917e+00 2.3071665e+00 3.1428042e+00 3.2033135e+00 2.8024935e+00 3.2103481e+00 2.6021247e+00 3.5076152e+00 2.7127272e+00 3.6073242e+00 3.4038884e+00 3.0203881e+00 3.1325879e+00 3.5317021e+00 3.7210979e+00 3.2059139e+00 2.2051638e+00 2.5023084e+00 2.4021168e+00 2.6048201e+00 3.8033004e+00 3.2030448e+00 3.2074921e+00 3.4286399e+00 3.1131211e+00 2.8028008e+00 2.7031257e+00 3.1010004e+00 3.3055260e+00 2.7040740e+00 2.0050309e+00 2.9023862e+00 2.9020767e+00 2.9028421e+00 3.0107283e+00 1.7089863e+00 2.8033666e+00 4.7142986e+00 3.8066401e+00 4.6226512e+00 4.3047830e+00 4.5107876e+00 5.3296471e+00 3.2068572e+00 5.0203871e+00 4.5089338e+00 4.8299744e+00 3.8170042e+00 4.0095939e+00 4.2200398e+00 3.7100654e+00 3.8275330e+00 4.0209836e+00 4.2079639e+00 5.4332277e+00 5.6287689e+00 3.7032748e+00 4.4237036e+00 3.6112573e+00 5.4319232e+00 3.6111754e+00 4.4135512e+00 4.7221364e+00 3.5107924e+00 3.6081749e+00 4.3098514e+00 4.5261773e+00 4.8309399e+00 5.1593152e+00 4.3120751e+00 3.8056232e+00 4.3015640e+00 4.8592534e+00 4.3174320e+00 4.2064763e+00 3.5083248e+00 4.1268500e+00 4.3236383e+00 3.8471097e+00 3.8066401e+00 4.6166518e+00 4.4251081e+00 3.9318948e+00 3.7118930e+00 3.9150333e+00 4.1165034e+00 3.8051417e+00 5.2133179e-01 9.0160400e-01 3.0017653e-01 4.1209001e-01 2.2573593e-01 3.0008832e-01 8.2418002e-01 3.0482299e-01 2.0181667e-01 4.1212852e-01 1.2363278e+00 1.3741498e+00 9.0160400e-01 5.2133802e-01 1.1133986e+00 7.1621748e-01 8.0051036e-01 6.3178534e-01 5.6347121e-01 5.0517282e-01 4.1315633e-01 4.0004442e-01 4.1317535e-01 6.0948212e-01 6.0184622e-01 
1.2085435e-01 2.0061436e-01 8.0051036e-01 1.0087250e+00 1.1527669e+00 3.0008832e-01 4.1210927e-01 9.0142636e-01 3.0008832e-01 2.2573593e-01 5.0436235e-01 4.5148429e-01 8.0004602e-01 2.2573593e-01 4.8342635e-01 7.2044167e-01 2.0181667e-01 7.1621748e-01 1.1269424e-01 7.4262850e-01 4.0125062e-01 3.2983364e+00 3.0300451e+00 3.4603347e+00 2.5053901e+00 3.1338090e+00 3.0030658e+00 3.2183845e+00 1.8040969e+00 3.1419971e+00 2.4075162e+00 2.0123013e+00 2.7132680e+00 2.5163999e+00 3.2086215e+00 2.1132077e+00 2.9750754e+00 3.0049127e+00 2.6055197e+00 3.0177719e+00 2.4040962e+00 3.3110162e+00 2.5253371e+00 3.4126529e+00 3.2074182e+00 2.8380954e+00 2.9580787e+00 3.3536443e+00 3.5347730e+00 3.0101869e+00 2.0123796e+00 2.3038195e+00 2.2036797e+00 2.4099203e+00 3.6051707e+00 3.0042758e+00 3.0123228e+00 3.2490712e+00 2.9241808e+00 2.6047889e+00 2.5049231e+00 2.9016211e+00 3.1100277e+00 2.5081992e+00 1.8056342e+00 2.7040060e+00 2.7039988e+00 2.7050721e+00 2.8205713e+00 1.5147271e+00 2.6060742e+00 4.5183778e+00 3.6090052e+00 4.4337691e+00 4.1072664e+00 4.3151164e+00 5.1425125e+00 3.0092613e+00 4.8303615e+00 4.3139066e+00 4.6422789e+00 3.6259317e+00 3.8146285e+00 4.0301568e+00 3.5133848e+00 3.6358680e+00 3.8290678e+00 4.0124919e+00 5.2471177e+00 5.4403962e+00 3.5051114e+00 4.2343452e+00 3.4149831e+00 5.2455706e+00 3.4177035e+00 4.2200398e+00 4.5335328e+00 3.3168776e+00 3.4123846e+00 4.1140176e+00 4.3402553e+00 4.6459028e+00 4.9843016e+00 4.1167964e+00 3.6096226e+00 4.1026403e+00 4.6849407e+00 4.1230798e+00 4.0100505e+00 3.3123688e+00 3.9407837e+00 4.1330547e+00 3.6700537e+00 3.6090052e+00 4.4237036e+00 4.2343452e+00 3.7463488e+00 3.5181052e+00 3.7227931e+00 3.9220791e+00 3.6072781e+00 4.2362917e-01 4.0125062e-01 2.0061436e-01 7.4262850e-01 5.0002283e-01 4.0004442e-01 2.4170870e-01 6.0017982e-01 7.4329527e-01 8.0250123e-01 8.5406674e-01 4.1317535e-01 1.2085435e-01 7.0096858e-01 2.0181667e-01 4.1315633e-01 2.0181667e-01 4.5077696e-01 3.6259865e-01 5.0084481e-01 6.0017665e-01 2.4170870e-01 2.0121983e-01 2.2538848e-01 4.1315633e-01 5.0084481e-01 4.0246123e-01 5.0043842e-01 6.3164729e-01 5.0002283e-01 4.0122873e-01 5.0001522e-01 5.0002283e-01 6.7616723e-01 2.0121983e-01 1.2085435e-01 1.3008771e+00 6.0948506e-01 4.0125062e-01 5.0085236e-01 6.0017982e-01 2.2573593e-01 4.5077696e-01 3.0017653e-01 3.0000000e-01 3.3320240e+00 3.1087192e+00 3.5191371e+00 2.6110181e+00 3.2098845e+00 3.1016129e+00 3.3064697e+00 1.9242109e+00 3.2110200e+00 2.5072065e+00 2.1702438e+00 2.8063347e+00 2.6144115e+00 3.3026483e+00 2.2074446e+00 3.0213781e+00 3.1035271e+00 2.7015967e+00 3.1108570e+00 2.5049231e+00 3.4076266e+00 2.6065485e+00 3.5045818e+00 3.3016829e+00 2.9091905e+00 3.0158857e+00 3.4160038e+00 3.6117923e+00 3.1042949e+00 2.1068047e+00 2.4087956e+00 2.3099309e+00 2.5038387e+00 3.7027671e+00 3.1034919e+00 3.1060428e+00 3.3145595e+00 3.0095593e+00 2.7026925e+00 2.6061038e+00 3.0017811e+00 3.2030205e+00 2.6039803e+00 1.9366876e+00 2.8028640e+00 2.8014482e+00 2.8024453e+00 2.9049136e+00 1.6388635e+00 2.7031257e+00 4.6146430e+00 3.7072412e+00 4.5144508e+00 4.2035048e+00 4.4092709e+00 5.2185448e+00 3.1091788e+00 4.9117351e+00 4.4054277e+00 4.7224997e+00 3.7130507e+00 3.9073151e+00 4.1140274e+00 3.6117351e+00 3.7308330e+00 3.9200674e+00 4.1050815e+00 5.3212796e+00 5.5187578e+00 3.6046347e+00 4.3179262e+00 3.5127783e+00 5.3198559e+00 3.5085510e+00 4.3098508e+00 4.6126513e+00 3.4088749e+00 3.5071604e+00 4.2085176e+00 4.4144980e+00 4.7185095e+00 5.0381903e+00 4.2110099e+00 3.7030413e+00 4.2009868e+00 4.7393218e+00 4.2176488e+00 
4.1043951e+00 3.4078683e+00 4.0181902e+00 4.2205976e+00 3.7363838e+00 3.7072412e+00 4.5130595e+00 4.3227928e+00 3.8267408e+00 3.6102542e+00 3.8115096e+00 4.0168944e+00 3.7051079e+00 8.0923926e-01 5.2201750e-01 1.1270411e+00 8.0928056e-01 2.4170870e-01 6.3178782e-01 9.1471442e-01 1.1573074e+00 5.2167829e-01 5.0476836e-01 4.0000000e-01 4.2270142e-01 3.0017653e-01 3.0490481e-01 5.0042326e-01 3.0915245e-01 8.5440680e-01 6.0184622e-01 6.3192325e-01 9.0142681e-01 5.2133179e-01 4.0363334e-01 5.0517282e-01 7.8890806e-01 8.2421923e-01 5.0042326e-01 3.1328089e-01 3.4085233e-01 8.0928056e-01 7.2044167e-01 4.5148429e-01 8.0928056e-01 1.0782211e+00 5.0517282e-01 4.8342635e-01 1.6097492e+00 1.0215068e+00 4.5148429e-01 3.0482299e-01 9.1446938e-01 3.0490481e-01 8.5440680e-01 2.4195741e-01 6.1135434e-01 3.0143288e+00 2.8035152e+00 3.2080663e+00 2.3476141e+00 2.9053991e+00 2.8028019e+00 3.0030626e+00 1.7519158e+00 2.9045816e+00 2.2149484e+00 2.0887699e+00 2.5048522e+00 2.3645147e+00 3.0018766e+00 1.9120303e+00 2.7085154e+00 2.8028008e+00 2.4075162e+00 2.8284908e+00 2.2272457e+00 3.1054022e+00 2.3075573e+00 3.2060163e+00 3.0018874e+00 2.6044486e+00 2.7064438e+00 3.1073418e+00 3.3054063e+00 2.8034238e+00 1.8447840e+00 2.1492024e+00 2.0607272e+00 2.2122063e+00 3.4028104e+00 2.8028007e+00 2.8036182e+00 3.0057998e+00 2.7234787e+00 2.4027927e+00 2.3234132e+00 2.7070699e+00 2.9017335e+00 2.3151346e+00 1.8036834e+00 2.5072065e+00 2.5017313e+00 2.5032633e+00 2.6031823e+00 1.5292174e+00 2.4058519e+00 4.3116266e+00 3.4064593e+00 4.2076930e+00 3.9021503e+00 4.1063936e+00 4.9099401e+00 2.8141516e+00 4.6055969e+00 4.1036742e+00 4.4145324e+00 3.4082578e+00 3.6052799e+00 3.8082804e+00 3.3123693e+00 3.4273179e+00 3.6154977e+00 3.8026444e+00 5.0117750e+00 5.2107474e+00 3.3130198e+00 4.0114753e+00 3.2109395e+00 5.0107787e+00 3.2067490e+00 4.0058313e+00 4.3058539e+00 3.1067996e+00 3.2049797e+00 3.9061098e+00 4.1066170e+00 4.4095056e+00 4.7221364e+00 3.9082316e+00 3.4019453e+00 3.9014304e+00 4.4232188e+00 3.9139973e+00 3.8023591e+00 3.1057392e+00 3.7104219e+00 3.9150553e+00 3.4248402e+00 3.4064593e+00 4.2084919e+00 4.0172759e+00 3.5193527e+00 3.3100431e+00 3.5073655e+00 3.7133435e+00 3.4036743e+00 4.0004442e-01 5.0043084e-01 3.4085233e-01 8.0046764e-01 2.2573593e-01 4.0243965e-01 4.2362917e-01 1.2036925e+00 1.1896595e+00 8.0879776e-01 5.0000761e-01 1.1006371e+00 5.2133179e-01 8.0046685e-01 5.0437695e-01 4.0125062e-01 5.0477564e-01 5.0043084e-01 4.5148429e-01 4.0125062e-01 6.0000952e-01 6.0000317e-01 2.2608083e-01 3.0922892e-01 8.0000160e-01 7.4269314e-01 9.6572569e-01 3.4085233e-01 4.0246123e-01 9.0000136e-01 3.4085233e-01 4.0127250e-01 5.0001522e-01 4.0004442e-01 1.1000003e+00 2.2608083e-01 4.1317535e-01 5.7609230e-01 4.0122873e-01 5.2167829e-01 2.0061436e-01 7.0088627e-01 4.0004442e-01 3.3852404e+00 3.1245391e+00 3.5521657e+00 2.6057331e+00 3.2281303e+00 3.1021033e+00 3.3145497e+00 1.9088256e+00 3.2358110e+00 2.5040476e+00 2.1337832e+00 2.8091158e+00 2.6173653e+00 3.3068237e+00 2.2078368e+00 3.0635687e+00 3.1029264e+00 2.7045714e+00 3.1156892e+00 2.5038387e+00 3.4072735e+00 2.6199287e+00 3.5105217e+00 3.3061800e+00 2.9316687e+00 3.0488379e+00 3.4462681e+00 3.6292576e+00 3.1074604e+00 2.1103491e+00 2.4046650e+00 2.3052527e+00 2.5074705e+00 3.7037846e+00 3.1023805e+00 3.1087156e+00 3.3416864e+00 3.0212423e+00 2.7029308e+00 2.6036513e+00 3.0012006e+00 3.2078939e+00 2.6064541e+00 1.9145304e+00 2.8026114e+00 2.8028068e+00 2.8033825e+00 2.9167099e+00 1.6147493e+00 2.7040740e+00 4.6133719e+00 3.7058811e+00 4.5290217e+00 
4.2056470e+00 4.4115634e+00 5.2381327e+00 3.1057013e+00 4.9271590e+00 4.4118721e+00 4.7354168e+00 3.7201124e+00 3.9113698e+00 4.1247181e+00 3.6087856e+00 3.7244383e+00 3.9212835e+00 4.1101783e+00 5.3422962e+00 5.5362181e+00 3.6046999e+00 4.3279835e+00 3.5095358e+00 5.3412086e+00 3.5135120e+00 4.3162096e+00 4.6297141e+00 3.4124092e+00 3.5088081e+00 4.2105763e+00 4.4358170e+00 4.7408876e+00 5.0762364e+00 4.2125085e+00 3.7079173e+00 4.2021973e+00 4.7752666e+00 4.2166536e+00 4.1080028e+00 3.4084548e+00 4.0338654e+00 4.2256165e+00 3.7563734e+00 3.7058811e+00 4.5190617e+00 4.3264209e+00 3.8360186e+00 3.6136974e+00 3.8177300e+00 4.0156240e+00 3.7048582e+00 6.3164977e-01 3.0017653e-01 4.1209001e-01 2.0061436e-01 4.0127250e-01 7.0911112e-01 8.2458409e-01 1.0207396e+00 5.2201750e-01 1.2699992e-01 7.0470867e-01 4.0004442e-01 4.0122873e-01 3.0482299e-01 5.2167208e-01 3.0490481e-01 4.0122873e-01 4.0002221e-01 2.0061436e-01 2.0061436e-01 2.0061436e-01 3.0482299e-01 3.0482299e-01 4.0122873e-01 7.0008584e-01 8.0879701e-01 3.0017653e-01 3.0474106e-01 5.0043084e-01 3.0017653e-01 6.0964597e-01 1.0000000e-01 2.0121983e-01 1.1019599e+00 6.0035305e-01 4.0004442e-01 4.5148429e-01 4.0127250e-01 4.0004442e-01 4.0125062e-01 3.3808272e-01 1.1269424e-01 3.2369541e+00 3.0101869e+00 3.4219340e+00 2.5073576e+00 3.1113295e+00 3.0016913e+00 3.2074921e+00 1.8128536e+00 3.1127326e+00 2.4076937e+00 2.0429861e+00 2.7074657e+00 2.5087337e+00 3.2029987e+00 2.1087640e+00 2.9250474e+00 3.0040848e+00 2.6011837e+00 3.0090716e+00 2.4029250e+00 3.3087901e+00 2.5074281e+00 3.4046875e+00 3.2018065e+00 2.8107271e+00 2.9185950e+00 3.3183094e+00 3.5134617e+00 3.0049285e+00 2.0041542e+00 2.3049133e+00 2.2050331e+00 2.4035997e+00 3.6030023e+00 3.0040438e+00 3.0070658e+00 3.2168317e+00 2.9083216e+00 2.6031436e+00 2.5048522e+00 2.9013423e+00 3.1034810e+00 2.5032729e+00 1.8201043e+00 2.7028014e+00 2.7016556e+00 2.7027522e+00 2.8056775e+00 1.5256523e+00 2.6033557e+00 4.5162553e+00 3.6081006e+00 4.4160732e+00 4.1039121e+00 4.3103378e+00 5.1203327e+00 3.0096880e+00 4.8129366e+00 4.3058720e+00 4.6249088e+00 3.6148619e+00 3.8081633e+00 4.0157626e+00 3.5129206e+00 3.6349703e+00 3.8226858e+00 4.0057080e+00 5.2232912e+00 5.4204287e+00 3.5035589e+00 4.2200398e+00 3.4145570e+00 5.2217206e+00 3.4096180e+00 4.2110197e+00 4.5140458e+00 3.3101076e+00 3.4081996e+00 4.1095117e+00 4.3161641e+00 4.6204721e+00 4.9419857e+00 4.1123051e+00 3.6033860e+00 4.1009647e+00 4.6434791e+00 4.1197833e+00 4.0049425e+00 3.3090452e+00 3.9205015e+00 4.1230798e+00 3.6413278e+00 3.6081006e+00 4.4145323e+00 4.2254713e+00 3.7302938e+00 3.5112285e+00 3.7130507e+00 3.9190472e+00 3.6058055e+00 5.0043842e-01 1.0426513e+00 5.2167208e-01 4.0004442e-01 3.0026460e-01 1.4542931e+00 1.5965783e+00 1.1269511e+00 7.4262964e-01 1.3253871e+00 9.3306807e-01 1.0032293e+00 8.5406674e-01 7.0470720e-01 7.0633229e-01 5.7608844e-01 6.0017982e-01 6.3192325e-01 8.2418071e-01 8.0879625e-01 3.4080442e-01 4.0243965e-01 1.0030871e+00 1.2189645e+00 1.3741465e+00 5.0043842e-01 6.0201716e-01 1.1055705e+00 5.0043842e-01 1.1269424e-01 7.1621748e-01 6.7616902e-01 6.0000952e-01 3.0008832e-01 6.8170466e-01 9.3735629e-01 4.0004442e-01 9.3308853e-01 3.0474106e-01 9.6572569e-01 6.0948212e-01 3.4311880e+00 3.1440065e+00 3.5828092e+00 2.6061623e+00 3.2490712e+00 3.1047537e+00 3.3265679e+00 1.9024467e+00 3.2610547e+00 2.5066443e+00 2.1042326e+00 2.8182771e+00 2.6268573e+00 3.3136174e+00 2.2177383e+00 3.1042292e+00 3.1056084e+00 2.7106185e+00 3.1258664e+00 2.5072166e+00 3.4123850e+00 2.6397031e+00 3.5191318e+00 
3.3125861e+00 2.9569988e+00 3.0825000e+00 3.4750557e+00 3.6484459e+00 3.1148203e+00 2.1231535e+00 2.4058952e+00 2.3063814e+00 2.5167763e+00 3.7071732e+00 3.1042001e+00 3.1166462e+00 3.3690976e+00 3.0370401e+00 2.7067267e+00 2.6060811e+00 3.0024163e+00 3.2157547e+00 2.6139440e+00 1.9029771e+00 2.8056558e+00 2.8068283e+00 2.8077255e+00 2.9323793e+00 1.6119586e+00 2.7091848e+00 4.6187512e+00 3.7092298e+00 4.5442576e+00 4.2099019e+00 4.4180513e+00 5.2548586e+00 3.1079005e+00 4.9407795e+00 4.4195588e+00 4.7516511e+00 3.7329384e+00 3.9191954e+00 4.1389224e+00 3.6127211e+00 3.7328453e+00 3.9318950e+00 4.1174259e+00 5.3601143e+00 5.5513974e+00 3.6073242e+00 4.3425018e+00 3.5138357e+00 5.3586950e+00 3.5235095e+00 4.3257995e+00 4.6452056e+00 3.4217238e+00 3.5154314e+00 4.2169032e+00 4.4544908e+00 4.7602896e+00 5.1057502e+00 4.2193718e+00 3.7147036e+00 4.2043114e+00 4.8058872e+00 4.2239687e+00 4.1138950e+00 3.4146523e+00 4.0526593e+00 4.2382079e+00 3.7847403e+00 3.7092298e+00 4.5291248e+00 4.3385161e+00 3.8547029e+00 3.6228903e+00 3.8290678e+00 4.0228342e+00 3.7082809e+00 6.3164977e-01 3.0026460e-01 1.2085435e-01 6.0948506e-01 1.0143978e+00 1.3131369e+00 8.0928056e-01 4.0246123e-01 8.5409862e-01 7.0016860e-01 5.0477564e-01 6.0201716e-01 5.6595908e-01 4.0363334e-01 4.1212852e-01 1.2699992e-01 3.3818226e-01 4.1210927e-01 3.3818226e-01 2.0181667e-01 1.2085435e-01 5.0855077e-01 1.0001598e+00 1.1055707e+00 0.0000000e+00 3.0026460e-01 6.0964891e-01 0.0000000e+00 5.0043842e-01 3.0482299e-01 4.0246123e-01 8.0254500e-01 5.0043842e-01 5.2133802e-01 7.0556260e-01 2.0181667e-01 7.0008735e-01 3.0026460e-01 6.0948506e-01 2.0181667e-01 3.2490712e+00 3.0153168e+00 3.4297841e+00 2.5067523e+00 3.1166337e+00 3.0027816e+00 3.2112793e+00 1.8068048e+00 3.1183051e+00 2.4116924e+00 2.0138832e+00 2.7116615e+00 2.5059537e+00 3.2048192e+00 2.1144760e+00 2.9351753e+00 3.0063019e+00 2.6019122e+00 3.0106587e+00 2.4030297e+00 3.3125861e+00 2.5120719e+00 3.4068163e+00 3.2029877e+00 2.8162444e+00 2.9267417e+00 3.3252407e+00 3.5189464e+00 3.0077107e+00 2.0051350e+00 2.3037132e+00 2.2028146e+00 2.4058620e+00 3.6044981e+00 3.0062070e+00 3.0107283e+00 3.2237456e+00 2.9105093e+00 2.6052541e+00 2.5062865e+00 2.9018772e+00 3.1056084e+00 2.5048522e+00 1.8082911e+00 2.7043948e+00 2.7029415e+00 2.7046027e+00 2.8091099e+00 1.5248852e+00 2.6055127e+00 4.5209020e+00 3.6112573e+00 4.4212031e+00 4.1056541e+00 4.3138986e+00 5.1255338e+00 3.0133997e+00 4.8167235e+00 4.3081273e+00 4.6319211e+00 3.6205854e+00 3.8114965e+00 4.0212972e+00 3.5173798e+00 3.6449970e+00 3.8299342e+00 4.0081754e+00 5.2290121e+00 5.4254411e+00 3.5039202e+00 4.2264145e+00 3.4198378e+00 5.2270034e+00 3.4138008e+00 4.2149806e+00 4.5183778e+00 3.3145502e+00 3.4118179e+00 4.1129687e+00 4.3210760e+00 4.6261633e+00 4.9512603e+00 4.1165035e+00 3.6051692e+00 4.1014742e+00 4.6540056e+00 4.1257291e+00 4.0071257e+00 3.3129914e+00 3.9274863e+00 4.1301604e+00 3.6542046e+00 3.6112573e+00 4.4192311e+00 4.2328883e+00 3.7399948e+00 3.5155767e+00 3.7180846e+00 3.9250546e+00 3.6083191e+00 6.0184622e-01 7.4263078e-01 1.1138955e+00 4.2268438e-01 7.0096708e-01 2.4170870e-01 3.0490481e-01 3.0490481e-01 3.0017653e-01 3.0474106e-01 3.0474106e-01 8.0879701e-01 4.2362917e-01 6.1119267e-01 7.0462697e-01 4.1317535e-01 2.2538848e-01 3.0482299e-01 7.1621748e-01 6.7616723e-01 3.0474106e-01 4.0125062e-01 5.0001522e-01 6.3164977e-01 5.2491131e-01 2.2573593e-01 6.3164977e-01 1.0207396e+00 3.3808272e-01 4.0246123e-01 1.4180463e+00 1.0030868e+00 4.5148429e-01 4.1317535e-01 7.4263078e-01 3.0017653e-01 
8.0879701e-01 1.0000000e-01 4.5078948e-01 3.2116783e+00 3.0049285e+00 3.4072983e+00 2.5182898e+00 3.1051604e+00 3.0020136e+00 3.2049016e+00 1.8469618e+00 3.1036832e+00 2.4099081e+00 2.1180493e+00 2.7068820e+00 2.5224740e+00 3.2021231e+00 2.1097449e+00 2.9077617e+00 3.0041462e+00 2.6022422e+00 3.0134290e+00 2.4087504e+00 3.3085101e+00 2.5050799e+00 3.4038679e+00 3.2010814e+00 2.8036959e+00 2.9060895e+00 3.3058271e+00 3.5063866e+00 3.0043212e+00 2.0122773e+00 2.3159426e+00 2.2186306e+00 2.4051454e+00 3.6029749e+00 3.0041461e+00 3.0062373e+00 3.2059465e+00 2.9096170e+00 2.6032656e+00 2.5097004e+00 2.9028411e+00 3.1023606e+00 2.5057847e+00 1.8685354e+00 2.7039990e+00 2.7016498e+00 2.7029428e+00 2.8028074e+00 1.5747520e+00 2.6039937e+00 4.5157550e+00 3.6083209e+00 4.4088451e+00 4.1031691e+00 4.3089952e+00 5.1095334e+00 3.0117336e+00 4.8052574e+00 4.3035619e+00 4.6175091e+00 3.6116958e+00 3.8067089e+00 4.0107159e+00 3.5138361e+00 3.6350483e+00 3.8210210e+00 4.0037985e+00 5.2113565e+00 5.4105254e+00 3.5063553e+00 4.2147222e+00 3.4147657e+00 5.2097995e+00 3.4081036e+00 4.2080425e+00 4.5057296e+00 3.3089414e+00 3.4074852e+00 4.1084282e+00 4.3058539e+00 4.6088153e+00 4.9193995e+00 4.1112251e+00 3.6020843e+00 4.1009356e+00 4.6223848e+00 4.1190046e+00 4.0036188e+00 3.3085886e+00 3.9129256e+00 4.1197933e+00 3.6305006e+00 3.6083209e+00 4.4113183e+00 4.2225427e+00 3.7249938e+00 3.5105217e+00 3.7103007e+00 3.9184088e+00 3.6056580e+00 4.0125062e-01 5.7609230e-01 1.0095367e+00 1.0776296e+00 6.3322667e-01 3.0490481e-01 9.0140221e-01 4.1212852e-01 6.0000317e-01 3.4085233e-01 6.0035305e-01 3.3818226e-01 3.0000000e-01 4.0122873e-01 2.2538848e-01 4.0004442e-01 4.0122873e-01 2.0061436e-01 3.0000000e-01 6.0017982e-01 7.0462844e-01 8.5406616e-01 3.0026460e-01 4.0243965e-01 7.0088477e-01 3.0026460e-01 4.5783248e-01 3.0008832e-01 3.0490481e-01 1.1002025e+00 4.1315633e-01 4.0125062e-01 4.2362917e-01 4.0125062e-01 4.1209001e-01 2.4170870e-01 5.0436965e-01 2.2573593e-01 3.1712557e+00 2.9203034e+00 3.3425817e+00 2.4092081e+00 3.0228582e+00 2.9024211e+00 3.1131137e+00 1.7168003e+00 3.0276611e+00 2.3094323e+00 1.9540727e+00 2.6109956e+00 2.4153242e+00 3.1056218e+00 2.0123796e+00 2.8520945e+00 2.9050328e+00 2.5029614e+00 2.9148948e+00 2.3042831e+00 3.2109395e+00 2.4161682e+00 3.3086859e+00 3.1042389e+00 2.7244207e+00 2.8394157e+00 3.2369857e+00 3.4247142e+00 2.9077271e+00 1.9085444e+00 2.2064916e+00 2.1068047e+00 2.3066817e+00 3.5042241e+00 2.9048033e+00 2.9102290e+00 3.1338090e+00 2.8169587e+00 2.5042601e+00 2.4061715e+00 2.8017212e+00 3.0065627e+00 2.4058322e+00 1.7261843e+00 2.6037439e+00 2.6027120e+00 2.6040234e+00 2.7127458e+00 1.4350761e+00 2.5049231e+00 4.4189015e+00 3.5095669e+00 4.3257995e+00 4.0057109e+00 4.2135057e+00 5.0324952e+00 2.9113810e+00 4.7221382e+00 4.2099962e+00 4.5353918e+00 3.5215862e+00 3.7118930e+00 3.9240025e+00 3.4150232e+00 3.5401623e+00 3.7282910e+00 3.9092259e+00 5.1365012e+00 5.3314853e+00 3.4049933e+00 4.1286955e+00 3.3168890e+00 5.1347989e+00 3.3143385e+00 4.1161770e+00 4.4243750e+00 3.2143454e+00 3.3110189e+00 4.0125032e+00 4.2289520e+00 4.5343227e+00 4.8661173e+00 4.0156353e+00 3.5063553e+00 4.0017163e+00 4.5675364e+00 4.0235140e+00 3.9076272e+00 3.2116700e+00 3.8321139e+00 4.0301570e+00 3.5598557e+00 3.5095669e+00 4.3201293e+00 4.1322798e+00 3.6413292e+00 3.4157005e+00 3.6188994e+00 3.8226858e+00 3.5071409e+00 5.0436235e-01 1.1269511e+00 1.4180734e+00 9.1446938e-01 5.0476836e-01 9.6593231e-01 8.0051115e-01 6.1119558e-01 7.0176271e-01 6.0964891e-01 4.3213914e-01 5.2133179e-01 
2.2573593e-01 4.1420960e-01 5.2133802e-01 4.5078948e-01 2.2608083e-01 2.0121983e-01 6.1119558e-01 1.1005364e+00 1.2089192e+00 1.2085435e-01 2.4195741e-01 7.1621884e-01 1.2085435e-01 4.0004442e-01 4.1212852e-01 5.0085236e-01 7.0096858e-01 4.0127250e-01 5.6394820e-01 8.0967961e-01 2.0000000e-01 8.0051115e-01 2.2573593e-01 7.1621884e-01 3.0482299e-01 3.3545239e+00 3.1166331e+00 3.5333785e+00 2.6054739e+00 3.2183845e+00 3.1025789e+00 3.3116521e+00 1.9046783e+00 3.2211369e+00 2.5096353e+00 2.1074907e+00 2.8107054e+00 2.6064541e+00 3.3051050e+00 2.2121875e+00 3.0393610e+00 3.1054994e+00 2.7022579e+00 3.1107490e+00 2.5027328e+00 3.4112739e+00 2.6129479e+00 3.5073688e+00 3.3035252e+00 2.9185900e+00 3.0300451e+00 3.4286400e+00 3.6205854e+00 3.1074470e+00 2.1053074e+00 2.4030297e+00 2.3022754e+00 2.5057763e+00 3.7043108e+00 3.1053329e+00 3.1100313e+00 3.3265652e+00 3.0118276e+00 2.7046025e+00 2.6052853e+00 3.0016501e+00 3.2059133e+00 2.6047974e+00 1.9052628e+00 2.8038694e+00 2.8028007e+00 2.8041967e+00 2.9102290e+00 1.6179159e+00 2.7049931e+00 4.6192199e+00 3.7100254e+00 4.5225779e+00 4.2056438e+00 4.4133506e+00 5.2278849e+00 3.1114444e+00 4.9186970e+00 4.4088300e+00 4.7323336e+00 3.7201124e+00 3.9113387e+00 4.1217116e+00 3.6152935e+00 3.7397620e+00 3.9276515e+00 4.1085246e+00 5.3314853e+00 5.5274937e+00 3.6037456e+00 4.3264210e+00 3.5173586e+00 5.3296471e+00 3.5134601e+00 4.3151165e+00 4.6204664e+00 3.4137985e+00 3.5110031e+00 4.2123903e+00 4.4237218e+00 4.7288133e+00 5.0555470e+00 4.2155430e+00 3.7056457e+00 4.2016096e+00 4.7574592e+00 4.2235569e+00 4.1072664e+00 3.4118179e+00 4.0283196e+00 4.2288238e+00 3.7532858e+00 3.7100254e+00 4.5190617e+00 4.3311362e+00 3.8383398e+00 3.6148683e+00 3.8177286e+00 4.0227665e+00 3.7075359e+00 1.5237054e+00 1.5778323e+00 1.1528553e+00 8.0928056e-01 1.4109657e+00 9.0296858e-01 1.1060939e+00 8.5617086e-01 6.0184934e-01 8.2671175e-01 8.1112984e-01 7.1621748e-01 7.2113820e-01 9.0642722e-01 9.0166476e-01 5.2167829e-01 5.6347978e-01 1.1011719e+00 1.1531951e+00 1.3523685e+00 6.0948506e-01 7.0008735e-01 1.2012929e+00 6.0948506e-01 2.0121983e-01 8.0488008e-01 7.1636719e-01 7.0025283e-01 2.2608083e-01 7.4418186e-01 9.6702272e-01 5.0476836e-01 9.0657583e-01 3.4085233e-01 1.0214933e+00 7.0176271e-01 3.7102713e+00 3.4382051e+00 3.8712380e+00 2.9060895e+00 3.5425492e+00 3.4047913e+00 3.6240078e+00 2.2025238e+00 3.5521653e+00 2.8062729e+00 2.4042873e+00 3.1166330e+00 2.9229849e+00 3.6127194e+00 2.5155829e+00 3.3866455e+00 3.4056098e+00 3.0096888e+00 3.4232171e+00 2.8068501e+00 3.7118528e+00 2.9335034e+00 3.8176417e+00 3.6116893e+00 3.2480452e+00 3.3690976e+00 3.7643838e+00 3.9429243e+00 3.4137984e+00 2.4191998e+00 2.7057317e+00 2.6060678e+00 2.8148779e+00 4.0071259e+00 3.4042418e+00 3.4154455e+00 3.6592943e+00 3.3320889e+00 3.0065584e+00 2.9059779e+00 3.3025806e+00 3.5145393e+00 2.9126049e+00 2.2031052e+00 3.1056091e+00 3.1065947e+00 3.1074470e+00 3.2280982e+00 1.9100994e+00 3.0087060e+00 4.9179684e+00 4.0090033e+00 4.8406432e+00 4.5097222e+00 4.7173415e+00 5.5505575e+00 3.4073974e+00 5.2376127e+00 4.7184838e+00 5.0477042e+00 4.0301568e+00 4.2181242e+00 4.4357103e+00 3.9120615e+00 4.0296437e+00 4.2295175e+00 4.4165186e+00 5.6553854e+00 5.8478137e+00 3.9072483e+00 4.6391758e+00 3.8129545e+00 5.6540008e+00 3.8217034e+00 4.6242406e+00 4.9412981e+00 3.7201124e+00 3.8146271e+00 4.5162248e+00 4.7490801e+00 5.0547228e+00 5.3951639e+00 4.5184830e+00 4.0138270e+00 4.5043856e+00 5.0949990e+00 4.5225786e+00 4.4133506e+00 3.7138970e+00 4.3475342e+00 4.5353918e+00 4.0747727e+00 
4.0090033e+00 4.8274110e+00 4.6357670e+00 4.1493188e+00 3.9212879e+00 4.1268500e+00 4.3214438e+00 4.0081754e+00 4.1317535e-01 4.0127250e-01 7.1629303e-01 5.0043842e-01 7.0096858e-01 6.3912709e-01 7.0184453e-01 1.2003596e+00 7.9871893e-01 1.0286506e+00 1.0433442e+00 8.2635069e-01 6.3309012e-01 6.7626502e-01 1.1286016e+00 1.0782105e+00 6.1135434e-01 6.0184934e-01 3.0915245e-01 1.0143978e+00 9.0155393e-01 5.0436965e-01 1.0143978e+00 1.4324323e+00 7.4329414e-01 8.0879776e-01 1.7570482e+00 1.4092511e+00 8.1343016e-01 7.8895472e-01 1.1269511e+00 7.0470720e-01 1.2189701e+00 5.0855077e-01 8.5406616e-01 3.5025396e+00 3.3027388e+00 3.7022129e+00 2.8281704e+00 3.4036672e+00 3.3025779e+00 3.5030234e+00 2.1725076e+00 3.4018155e+00 2.7109019e+00 2.4518621e+00 3.0049127e+00 2.8364042e+00 3.5019450e+00 2.4088882e+00 3.2025657e+00 3.3031143e+00 2.9050277e+00 3.3192099e+00 2.7159616e+00 3.6057054e+00 2.8056568e+00 3.7048624e+00 3.5016345e+00 3.1026586e+00 3.2026875e+00 3.6024856e+00 3.8034160e+00 3.3035252e+00 2.3226028e+00 2.6270968e+00 2.5319718e+00 2.7081195e+00 3.9029099e+00 3.3031170e+00 3.3039553e+00 3.5023855e+00 3.2150432e+00 2.9028412e+00 2.8148779e+00 3.2051933e+00 3.4018781e+00 2.8098127e+00 2.1974660e+00 3.0055598e+00 3.0017653e+00 3.0030650e+00 3.1026235e+00 1.9002712e+00 2.9047832e+00 4.8115512e+00 3.9065683e+00 4.7047990e+00 4.4023911e+00 4.6064349e+00 5.4038141e+00 3.3119500e+00 5.1019028e+00 4.6029848e+00 4.9110422e+00 3.9076509e+00 4.1051819e+00 4.3067850e+00 3.8114951e+00 3.9246405e+00 4.1145310e+00 4.3025710e+00 5.5046681e+00 5.7049556e+00 3.8098343e+00 4.5095384e+00 3.7106240e+00 5.5035834e+00 3.7063915e+00 4.5052925e+00 4.8020893e+00 3.6066590e+00 3.7052383e+00 4.4062090e+00 4.6017113e+00 4.9033376e+00 5.2065497e+00 4.4082090e+00 3.9018737e+00 4.4013937e+00 4.9097097e+00 4.4135256e+00 4.3024877e+00 3.6059708e+00 4.2076410e+00 4.4136664e+00 3.9189006e+00 3.9065683e+00 4.7076752e+00 4.5157700e+00 4.0166034e+00 3.8091065e+00 4.0069397e+00 4.2129114e+00 3.9040721e+00 5.0476836e-01 9.1422402e-01 6.0017982e-01 6.7616723e-01 1.0001903e+00 7.4262850e-01 1.1298636e+00 1.1055799e+00 1.0782211e+00 1.4043036e+00 1.0207260e+00 9.0508712e-01 1.0030871e+00 1.2632996e+00 1.3253497e+00 1.0001598e+00 5.0855077e-01 2.4195741e-01 1.3131369e+00 1.2089895e+00 9.0007572e-01 1.3131369e+00 1.5263518e+00 1.0087393e+00 9.3308891e-01 2.1138769e+00 1.4140515e+00 9.3308891e-01 6.8160885e-01 1.4180436e+00 6.7626681e-01 1.3018145e+00 7.0470720e-01 1.1133986e+00 3.2054626e+00 3.0041787e+00 3.4044439e+00 2.6382589e+00 3.1129223e+00 3.0138245e+00 3.2030205e+00 2.1566438e+00 3.1086927e+00 2.4554557e+00 2.5269479e+00 2.7127202e+00 2.6737930e+00 3.2074207e+00 2.1510232e+00 2.9068053e+00 3.0077106e+00 2.6369414e+00 3.0816280e+00 2.4971338e+00 3.3055260e+00 2.5325155e+00 3.4206210e+00 3.2100081e+00 2.8135926e+00 2.9088609e+00 3.3100014e+00 3.5053029e+00 3.0107283e+00 2.1554609e+00 2.4508792e+00 2.3794578e+00 2.4537333e+00 3.6090037e+00 3.0077114e+00 3.0034200e+00 3.2047284e+00 2.9729700e+00 2.6131590e+00 2.5821442e+00 2.9309340e+00 3.1060464e+00 2.5610212e+00 2.2285414e+00 2.7317035e+00 2.7106184e+00 2.7159615e+00 2.8134622e+00 1.9767345e+00 2.6270952e+00 4.5095106e+00 3.6117736e+00 4.4050179e+00 4.1034650e+00 4.3058769e+00 5.1048430e+00 3.0395885e+00 4.8030341e+00 4.3077256e+00 4.6095748e+00 3.6067573e+00 3.8091370e+00 4.0067454e+00 3.5235094e+00 3.6257071e+00 3.8125141e+00 4.0031827e+00 5.2054101e+00 5.4066794e+00 3.5404367e+00 4.2082468e+00 3.4146523e+00 5.2054259e+00 3.4138230e+00 4.2042865e+00 4.5025743e+00 
3.3123786e+00 3.4067938e+00 4.1072911e+00 4.3032007e+00 4.6053784e+00 4.9093646e+00 4.1089590e+00 3.6062594e+00 4.1061436e+00 4.6117560e+00 4.1111295e+00 4.0026055e+00 3.3078313e+00 3.9072844e+00 4.1120111e+00 3.6177796e+00 3.6117736e+00 4.4064445e+00 4.2133758e+00 3.7157992e+00 3.5215805e+00 3.7072609e+00 3.9105673e+00 3.6051689e+00 4.1212852e-01 4.1212852e-01 3.0490481e-01 5.2167208e-01 3.0915245e-01 8.0097499e-01 6.1119558e-01 6.9518117e-01 9.0168933e-01 5.2491131e-01 4.0363334e-01 5.0085236e-01 7.8940551e-01 8.2462252e-01 5.0042326e-01 3.1328089e-01 3.0490481e-01 8.0928056e-01 7.0470867e-01 4.0125062e-01 8.0928056e-01 1.0776294e+00 5.0517282e-01 4.5078948e-01 1.6096629e+00 1.0207396e+00 4.5847767e-01 6.0184622e-01 9.1422402e-01 3.4085233e-01 8.5406674e-01 2.4195741e-01 6.0964891e-01 3.4079041e+00 3.2018548e+00 3.6045966e+00 2.7227151e+00 3.3029106e+00 3.2014779e+00 3.4016816e+00 2.0608234e+00 3.3024690e+00 2.6067721e+00 2.3393392e+00 2.9023862e+00 2.7310942e+00 3.4010299e+00 2.3048574e+00 3.1044057e+00 3.2014774e+00 2.8036023e+00 3.2152055e+00 2.6124447e+00 3.5030234e+00 2.7035171e+00 3.6034258e+00 3.4010358e+00 3.0022434e+00 3.1033306e+00 3.5041120e+00 3.7031277e+00 3.2018065e+00 2.2177966e+00 2.5220687e+00 2.4265152e+00 2.6055197e+00 3.8016494e+00 3.2014773e+00 3.2019092e+00 3.4031881e+00 3.1122360e+00 2.8013347e+00 2.7110046e+00 3.1036553e+00 3.3009330e+00 2.7070770e+00 2.0855888e+00 2.9035486e+00 2.9008500e+00 2.9016034e+00 3.0016038e+00 1.7857124e+00 2.8028019e+00 4.7076061e+00 3.8037955e+00 4.6049801e+00 4.3013465e+00 4.5040960e+00 5.3068325e+00 3.2075036e+00 5.0037550e+00 4.5023523e+00 4.8096009e+00 3.8048551e+00 4.0031892e+00 4.2051335e+00 3.7071735e+00 3.8161632e+00 4.0093901e+00 4.2016368e+00 5.4081547e+00 5.6075434e+00 3.7075525e+00 4.4072835e+00 3.6062405e+00 5.4074634e+00 3.6038441e+00 4.4036959e+00 4.7038247e+00 3.5038075e+00 3.6028345e+00 4.3038299e+00 4.5042394e+00 4.8062731e+00 5.1150203e+00 4.3051629e+00 3.8011413e+00 4.3008955e+00 4.8153681e+00 4.3087925e+00 4.2014602e+00 3.5032125e+00 4.1063865e+00 4.3094598e+00 3.8146853e+00 3.8037955e+00 4.6054982e+00 4.4109814e+00 3.9115832e+00 3.7058197e+00 3.9043917e+00 4.1081838e+00 3.8021570e+00 6.0365948e-01 3.0008832e-01 3.3818226e-01 2.0121983e-01 5.2133802e-01 3.0915245e-01 5.0437695e-01 5.0043842e-01 2.0181667e-01 1.2085435e-01 1.2085435e-01 4.1317535e-01 4.1317535e-01 3.0026460e-01 6.0018299e-01 7.0462697e-01 4.0246123e-01 3.0490481e-01 4.0004442e-01 4.0246123e-01 7.1621884e-01 1.2085435e-01 1.1269424e-01 1.2036863e+00 7.0088627e-01 3.0482299e-01 5.0436965e-01 5.0436235e-01 3.0482299e-01 5.0436965e-01 2.2608083e-01 2.0121983e-01 3.3237063e+00 3.1056091e+00 3.5138377e+00 2.6067805e+00 3.2064817e+00 3.1008890e+00 3.3041599e+00 1.9144935e+00 3.2074507e+00 2.5042498e+00 2.1492024e+00 2.8038903e+00 2.6091437e+00 3.3015588e+00 2.2041706e+00 3.0148613e+00 3.1021975e+00 2.7007716e+00 3.1069083e+00 2.5027328e+00 3.4052051e+00 2.6037226e+00 3.5028446e+00 3.3009330e+00 2.9058292e+00 3.0107430e+00 3.4113341e+00 3.6081814e+00 3.1026177e+00 2.1035154e+00 2.4051766e+00 2.3058791e+00 2.5019964e+00 3.7017412e+00 3.1021847e+00 3.1038570e+00 3.3100818e+00 3.0059450e+00 2.7015162e+00 2.6035107e+00 3.0009631e+00 3.2017847e+00 2.6021247e+00 1.9231085e+00 2.8015882e+00 2.8007533e+00 2.8013565e+00 2.9028946e+00 1.6222582e+00 2.7017239e+00 4.6112605e+00 3.7050457e+00 4.5107898e+00 4.2023583e+00 4.4067851e+00 5.2146266e+00 3.1060428e+00 4.9089682e+00 4.4037570e+00 4.7173415e+00 3.7092301e+00 3.9050336e+00 4.1101936e+00 3.6083379e+00 
3.7235997e+00 3.9149881e+00 4.1034584e+00 5.3169366e+00 5.5149065e+00 3.6029421e+00 4.3133953e+00 3.5091580e+00 5.3158283e+00 3.5057369e+00 4.3071174e+00 4.6095441e+00 3.4059695e+00 3.5048429e+00 4.2061215e+00 4.4109767e+00 4.7143113e+00 5.0310424e+00 4.2080636e+00 3.7018984e+00 4.2005765e+00 4.7313463e+00 4.2134002e+00 4.1029723e+00 3.4053426e+00 4.0133308e+00 4.2155438e+00 3.7272780e+00 3.7050457e+00 4.5097224e+00 4.3174320e+00 3.8199266e+00 3.6070223e+00 3.8081328e+00 4.0126677e+00 3.7034791e+00 6.0017665e-01 4.1210927e-01 6.0018299e-01 1.1133986e+00 6.3178534e-01 9.0142681e-01 8.5403486e-01 7.0462844e-01 5.0477564e-01 5.2491734e-01 1.0087252e+00 9.3306807e-01 4.1317535e-01 5.0517282e-01 4.1317535e-01 8.5409862e-01 7.5503094e-01 4.1317535e-01 8.5409862e-01 1.3133231e+00 6.0964891e-01 7.0548138e-01 1.5640758e+00 1.3027556e+00 7.0176271e-01 6.0017982e-01 9.6591433e-01 6.0000635e-01 1.1056693e+00 4.0127250e-01 7.1700909e-01 3.0056049e+00 2.8037508e+00 3.2038047e+00 2.3350905e+00 2.9043042e+00 2.8024566e+00 3.0040963e+00 1.7133143e+00 2.9021652e+00 2.2134864e+00 2.0299551e+00 2.5066443e+00 2.3464211e+00 3.0020171e+00 1.9120296e+00 2.7041827e+00 2.8038683e+00 2.4047867e+00 2.8219468e+00 2.2186306e+00 3.1079220e+00 2.3063026e+00 3.2048524e+00 3.0013665e+00 2.6029242e+00 2.7037323e+00 3.1033714e+00 3.3046348e+00 2.8041978e+00 1.8296471e+00 2.1344310e+00 2.0421601e+00 2.2088481e+00 3.4030563e+00 2.8038694e+00 2.8056176e+00 3.0035303e+00 2.7166861e+00 2.4032760e+00 2.3173405e+00 2.7049931e+00 2.9020943e+00 2.3107074e+00 1.7540491e+00 2.5057744e+00 2.5017294e+00 2.5032614e+00 2.6027348e+00 1.4738792e+00 2.4051328e+00 4.3150879e+00 3.4081972e+00 4.2065765e+00 3.9027794e+00 4.1082339e+00 4.9060122e+00 2.8144592e+00 4.6029848e+00 4.1031680e+00 4.4149701e+00 3.4105998e+00 3.6062867e+00 3.8091132e+00 3.3145497e+00 3.4354252e+00 3.6203140e+00 3.8031346e+00 5.0073650e+00 5.2071845e+00 3.3100796e+00 4.0129286e+00 3.2145637e+00 5.0059527e+00 3.2079238e+00 4.0069158e+00 4.3033003e+00 3.1086424e+00 3.2069551e+00 3.9078313e+00 4.1030253e+00 4.4053246e+00 4.7120702e+00 3.9105941e+00 3.4019027e+00 3.9011588e+00 4.4155733e+00 3.9183552e+00 3.8030509e+00 3.1080886e+00 3.7106642e+00 3.9186175e+00 3.4279064e+00 3.4081972e+00 4.2100505e+00 4.0214626e+00 3.5236480e+00 3.3110446e+00 3.5093262e+00 3.7177968e+00 3.4052047e+00 4.1317535e-01 1.1269424e-01 5.6371422e-01 5.0084481e-01 4.5784410e-01 8.0000239e-01 4.0006662e-01 3.0017653e-01 4.0006662e-01 6.0948800e-01 7.0088627e-01 4.1210927e-01 3.0482299e-01 4.5080200e-01 7.0016860e-01 6.0184934e-01 4.1317535e-01 7.0016860e-01 8.5406674e-01 4.0002221e-01 3.0482299e-01 1.5012719e+00 7.4269314e-01 3.3818226e-01 4.0002221e-01 8.0046685e-01 1.1269424e-01 6.3165225e-01 2.0121983e-01 5.0002283e-01 3.2274206e+00 3.0066036e+00 3.4159337e+00 2.5238518e+00 3.1081931e+00 3.0018108e+00 3.2048307e+00 1.8672243e+00 3.1090333e+00 2.4088880e+00 2.1557835e+00 2.7050009e+00 2.5327574e+00 3.2021240e+00 2.1075767e+00 2.9175658e+00 3.0027966e+00 2.6034860e+00 3.0173373e+00 2.4124132e+00 3.3060311e+00 2.5063233e+00 3.4049933e+00 3.2016467e+00 2.8074882e+00 2.9128790e+00 3.3135401e+00 3.5094636e+00 3.0034944e+00 2.0185049e+00 2.3226176e+00 2.2272636e+00 2.4061715e+00 3.6025233e+00 3.0027816e+00 3.0045159e+00 3.2117473e+00 2.9147768e+00 2.6022650e+00 2.5117111e+00 2.9035536e+00 3.1022707e+00 2.5074705e+00 1.8959538e+00 2.7040250e+00 2.7012715e+00 2.7023320e+00 2.8040244e+00 1.6015419e+00 2.6035923e+00 4.5125057e+00 3.6062867e+00 4.4120448e+00 4.1027436e+00 4.3076126e+00 5.1160601e+00 
3.0101869e+00 4.8099407e+00 4.3047534e+00 4.6192050e+00 3.6105351e+00 3.8061121e+00 4.0115220e+00 3.5110245e+00 3.6271576e+00 3.8169672e+00 4.0039490e+00 5.2185416e+00 5.4163883e+00 3.5078417e+00 4.2149895e+00 3.4109302e+00 5.2173835e+00 3.4072913e+00 4.2079670e+00 4.5106052e+00 3.3073674e+00 3.4056857e+00 4.1070396e+00 4.3122866e+00 4.6159499e+00 4.9341410e+00 4.1092167e+00 3.6024856e+00 4.1011078e+00 4.6347105e+00 4.1150272e+00 4.0033723e+00 3.3063033e+00 3.9150644e+00 4.1174503e+00 3.6310621e+00 3.6062867e+00 4.4108290e+00 4.2194920e+00 3.7226812e+00 3.5095242e+00 3.7093173e+00 3.9142888e+00 3.6040604e+00 3.4342562e-01 8.5406616e-01 3.3813251e-01 6.0017665e-01 4.5078948e-01 4.0125062e-01 2.2573593e-01 3.0474106e-01 7.0008584e-01 6.0184622e-01 2.2538848e-01 7.0017011e-01 8.0046685e-01 5.0477564e-01 5.2167208e-01 4.0004442e-01 5.0477564e-01 1.0016896e+00 3.0474106e-01 4.5080200e-01 1.1531953e+00 1.0008617e+00 4.5080200e-01 4.1420960e-01 6.1119558e-01 4.1210927e-01 8.0051036e-01 3.0482299e-01 4.1210927e-01 3.0158412e+00 2.8068284e+00 3.2097099e+00 2.3108757e+00 2.9065890e+00 2.8022000e+00 3.0066661e+00 1.6225614e+00 2.9048017e+00 2.2116239e+00 1.8685354e+00 2.5096708e+00 2.3100354e+00 3.0026666e+00 1.9136652e+00 2.7108290e+00 2.8056165e+00 2.4010446e+00 2.8094419e+00 2.2042326e+00 3.1114445e+00 2.3060252e+00 3.2036636e+00 3.0010404e+00 2.6048201e+00 2.7083838e+00 3.1074856e+00 3.3083888e+00 2.8056953e+00 1.8055938e+00 2.1074903e+00 2.0078120e+00 2.2044098e+00 3.4034897e+00 2.8056164e+00 2.8086638e+00 3.0080453e+00 2.7058598e+00 2.4044765e+00 2.3071636e+00 2.7018643e+00 2.9031229e+00 2.3040302e+00 1.6345914e+00 2.5039390e+00 2.5021287e+00 2.5037126e+00 2.6035547e+00 1.3485963e+00 2.4045982e+00 4.3195471e+00 3.4105069e+00 4.2110205e+00 3.9039624e+00 4.1112641e+00 4.9115281e+00 2.8134622e+00 4.6064153e+00 4.1040152e+00 4.4216160e+00 3.4153329e+00 3.6083651e+00 3.8136432e+00 3.3170079e+00 3.4454804e+00 3.6271127e+00 3.8048208e+00 5.0136911e+00 5.2125102e+00 3.3041886e+00 4.0185527e+00 3.2193583e+00 5.0117789e+00 3.2102572e+00 4.0101485e+00 4.3071174e+00 3.1116727e+00 3.2099137e+00 3.9105756e+00 4.1073261e+00 4.4108290e+00 4.7236350e+00 3.9141183e+00 3.4025037e+00 3.9008223e+00 4.4275986e+00 3.9240716e+00 3.8046112e+00 3.1114731e+00 3.7165788e+00 3.9250546e+00 3.4397950e+00 3.4105069e+00 4.2141201e+00 4.0283725e+00 3.5323888e+00 3.3126331e+00 3.5133654e+00 3.7236064e+00 3.4073749e+00 5.6371422e-01 4.0125062e-01 4.2362917e-01 7.0008735e-01 3.0017653e-01 2.2573593e-01 3.0490481e-01 5.2167829e-01 6.0202028e-01 3.3808272e-01 4.1210927e-01 5.2167829e-01 6.0201716e-01 5.0477564e-01 4.0363334e-01 6.0201716e-01 7.8895472e-01 3.0474106e-01 2.2608083e-01 1.4017696e+00 7.1636719e-01 2.2608083e-01 4.0002221e-01 7.0088627e-01 2.0121983e-01 5.6371422e-01 2.2538848e-01 4.0127250e-01 3.2269400e+00 3.0055754e+00 3.4153574e+00 2.5158464e+00 3.1070003e+00 3.0010043e+00 3.2037264e+00 1.8447764e+00 3.1084925e+00 2.4051329e+00 2.1169795e+00 2.7031257e+00 2.5230194e+00 3.2014729e+00 2.1040690e+00 2.9167440e+00 3.0016616e+00 2.6020655e+00 3.0122358e+00 2.4077390e+00 3.3040889e+00 2.5044039e+00 3.4036241e+00 3.2011770e+00 2.8066054e+00 2.9119764e+00 3.3128842e+00 3.5083774e+00 3.0022542e+00 2.0112442e+00 2.3146813e+00 2.2178117e+00 2.4035997e+00 3.6016272e+00 3.0016466e+00 3.0030180e+00 3.2109724e+00 2.9107838e+00 2.6012056e+00 2.5072166e+00 2.9020943e+00 3.1016037e+00 2.5045154e+00 1.8665804e+00 2.7022828e+00 2.7006623e+00 2.7012715e+00 2.8031364e+00 1.5665127e+00 2.6019940e+00 4.5096473e+00 3.6042722e+00 
4.4108394e+00 4.1020090e+00 4.3058539e+00 5.1154681e+00 3.0065585e+00 4.8095984e+00 4.3039464e+00 4.6166518e+00 3.6081814e+00 3.8045576e+00 4.0096177e+00 3.5076367e+00 3.6204901e+00 3.8129595e+00 4.0031500e+00 5.2178509e+00 5.4155855e+00 3.5053718e+00 4.2125027e+00 3.4076329e+00 5.2169557e+00 3.4052726e+00 4.2064771e+00 4.5101686e+00 3.3051940e+00 3.4039470e+00 4.1052782e+00 4.3120002e+00 4.6153660e+00 4.9336188e+00 4.1069501e+00 3.6018985e+00 4.1007374e+00 4.6331222e+00 4.1114855e+00 4.0025890e+00 3.3042986e+00 3.9129418e+00 4.1139050e+00 3.6259501e+00 3.6042722e+00 4.4088300e+00 4.2155438e+00 3.7181244e+00 3.5068261e+00 3.7072136e+00 3.9107458e+00 3.6027359e+00 7.1779518e-01 9.0005048e-01 6.8160885e-01 6.0980961e-01 6.3164977e-01 6.0964597e-01 6.0948506e-01 6.3178534e-01 8.0888055e-01 6.5712813e-01 9.1552331e-01 5.6595908e-01 4.5147187e-01 9.0026543e-01 5.6595908e-01 6.0201716e-01 5.6370994e-01 4.1212852e-01 1.3000455e+00 4.1315633e-01 6.1830764e-01 9.0511169e-01 6.0964891e-01 6.3178534e-01 4.5077696e-01 7.1621748e-01 4.5783248e-01 3.7510248e+00 3.5145413e+00 3.9319585e+00 3.0060350e+00 3.6168418e+00 3.5015800e+00 3.7092301e+00 2.3098782e+00 3.6209185e+00 2.9036019e+00 2.5319798e+00 3.2059465e+00 3.0125595e+00 3.7043511e+00 2.6050088e+00 3.4362995e+00 3.5023713e+00 3.1027850e+00 3.5112541e+00 2.9034027e+00 3.8056232e+00 3.0109656e+00 3.9070007e+00 3.7037948e+00 3.3177283e+00 3.4278716e+00 3.8279200e+00 4.0185639e+00 3.5049370e+00 2.5063489e+00 2.8048596e+00 2.7053910e+00 2.9045816e+00 4.1028806e+00 3.5020661e+00 3.5059137e+00 3.7249599e+00 3.4134555e+00 3.1021033e+00 3.0035422e+00 3.4012314e+00 3.6049329e+00 3.0042977e+00 2.3151346e+00 3.2021240e+00 3.2018065e+00 3.2023319e+00 3.3095196e+00 2.0139907e+00 3.1028259e+00 5.0111326e+00 4.1049446e+00 4.9203200e+00 4.6042055e+00 4.8089555e+00 5.6273643e+00 3.5051449e+00 5.3190012e+00 4.8083867e+00 5.1260178e+00 4.1140184e+00 4.3082037e+00 4.5172930e+00 4.0074572e+00 4.1195081e+00 4.3162103e+00 4.5071301e+00 5.7305902e+00 5.9266306e+00 4.0041352e+00 4.7202045e+00 3.9078680e+00 5.7295930e+00 3.9093593e+00 4.7117434e+00 5.0203874e+00 3.8087102e+00 3.9064537e+00 4.6081315e+00 4.8239967e+00 5.1282911e+00 5.4538016e+00 4.6097450e+00 4.1052255e+00 4.6016323e+00 5.1527860e+00 4.6133719e+00 4.5057296e+00 3.8063301e+00 4.4231469e+00 4.6192070e+00 4.1384491e+00 4.1049446e+00 4.9142249e+00 4.7202041e+00 4.2256252e+00 4.0099717e+00 4.2125085e+00 4.4124585e+00 4.1039188e+00 3.4085233e-01 3.3818226e-01 1.2699992e-01 3.0922892e-01 3.3818226e-01 4.1212852e-01 3.4085233e-01 3.0490481e-01 8.0250202e-01 9.0192695e-01 4.0363334e-01 5.0437695e-01 4.5847767e-01 4.0363334e-01 7.0633229e-01 3.0482299e-01 4.0246123e-01 1.0095513e+00 7.0548283e-01 2.0181667e-01 5.0043084e-01 3.6452132e-01 5.0436965e-01 5.0855778e-01 4.1420960e-01 3.3813251e-01 3.0360001e+00 2.8068283e+00 3.2199554e+00 2.3040302e+00 2.9083228e+00 2.8004229e+00 3.0040677e+00 1.6099792e+00 2.9111118e+00 2.2023225e+00 1.8444678e+00 2.5026969e+00 2.3072196e+00 3.0013665e+00 1.9023442e+00 2.7227152e+00 2.8012528e+00 2.4005053e+00 2.8054838e+00 2.2013444e+00 3.1036553e+00 2.3040711e+00 3.2026875e+00 3.0010106e+00 2.6084695e+00 2.7159628e+00 3.1166009e+00 3.3100796e+00 2.8019019e+00 1.8020058e+00 2.1029252e+00 2.0034861e+00 2.2011906e+00 3.4011297e+00 2.8012319e+00 2.8028007e+00 3.0142197e+00 2.7060544e+00 2.4007552e+00 2.3017471e+00 2.7003774e+00 2.9016034e+00 2.3011979e+00 1.6179000e+00 2.5007284e+00 2.5003793e+00 2.5007008e+00 2.6035320e+00 1.3154932e+00 2.4008859e+00 4.3091529e+00 3.4034897e+00 
4.2123904e+00 3.9018704e+00 4.1056542e+00 4.9181670e+00 2.8038684e+00 4.6114520e+00 4.1039624e+00 4.4180514e+00 3.4084525e+00 3.6042874e+00 3.8104403e+00 3.3060311e+00 3.4198458e+00 3.6127194e+00 3.8032956e+00 5.0208687e+00 5.2178587e+00 3.3018328e+00 4.0133297e+00 3.2067991e+00 5.0200323e+00 3.2048524e+00 4.0067585e+00 4.3122441e+00 3.1047671e+00 3.2036090e+00 3.9049697e+00 4.1148052e+00 4.4184267e+00 4.7404155e+00 3.9065727e+00 3.4018864e+00 3.9004186e+00 4.4392802e+00 3.9110280e+00 3.8025990e+00 3.1038577e+00 3.7145622e+00 3.9140895e+00 3.4287259e+00 3.4034897e+00 4.2090841e+00 4.0156240e+00 3.5189468e+00 3.3056781e+00 3.5073617e+00 3.7102604e+00 3.4023494e+00 4.1315633e-01 3.0915245e-01 4.5078948e-01 5.2132556e-01 3.0482299e-01 3.3808272e-01 6.0964597e-01 7.0911112e-01 8.6051414e-01 4.1212852e-01 7.0016860e-01 7.4262964e-01 4.1212852e-01 6.1830489e-01 4.1209001e-01 6.0018299e-01 1.1056693e+00 6.0964597e-01 4.1317535e-01 4.1315633e-01 5.2133179e-01 4.2268438e-01 5.0084481e-01 5.2491131e-01 5.0043084e-01 2.9115264e+00 2.6338022e+00 3.0658361e+00 2.1172959e+00 2.7373419e+00 2.6040822e+00 2.8212040e+00 1.4407364e+00 2.7450517e+00 2.0182279e+00 1.7116683e+00 2.3196029e+00 2.1285888e+00 2.8091316e+00 1.7264084e+00 2.5863714e+00 2.6084695e+00 2.2054529e+00 2.6248910e+00 2.0083311e+00 2.9174382e+00 2.1301354e+00 3.0136614e+00 2.8068911e+00 2.4421128e+00 2.5659281e+00 2.9581275e+00 3.1380442e+00 2.6129786e+00 1.6191482e+00 1.9129988e+00 1.8141046e+00 2.0129571e+00 3.2064817e+00 2.6080847e+00 2.6171503e+00 2.8540078e+00 2.5288353e+00 2.2078337e+00 2.1116321e+00 2.5029614e+00 2.7108347e+00 2.1109969e+00 1.4620239e+00 2.3067198e+00 2.3048725e+00 2.3072196e+00 2.4221911e+00 1.1960519e+00 2.2090466e+00 4.1263920e+00 3.2146438e+00 4.0362393e+00 3.7082866e+00 3.9191966e+00 4.7434452e+00 2.6190662e+00 4.4302264e+00 3.9142233e+00 4.2488425e+00 3.2328627e+00 3.4177609e+00 3.6349468e+00 3.1232336e+00 3.2606481e+00 3.4419771e+00 3.6135028e+00 4.8484858e+00 5.0414126e+00 3.1077602e+00 3.8409517e+00 3.0264502e+00 4.8462395e+00 3.0224855e+00 3.8231767e+00 4.1339850e+00 2.9228238e+00 3.0173096e+00 3.7181009e+00 3.9409635e+00 4.2473796e+00 4.5888422e+00 3.7226114e+00 3.2097424e+00 3.7024938e+00 4.2924779e+00 3.7339169e+00 3.6111693e+00 2.9185950e+00 3.5470876e+00 3.7434043e+00 3.2896416e+00 3.2146438e+00 4.0283196e+00 3.8460162e+00 3.3616756e+00 3.1242731e+00 3.3284650e+00 3.5333785e+00 3.2109426e+00 4.0122873e-01 5.0043084e-01 4.0243965e-01 3.0474106e-01 2.0061436e-01 4.5148429e-01 1.1000100e+00 1.2012928e+00 1.2699992e-01 4.0122873e-01 5.6595488e-01 1.2699992e-01 6.0184309e-01 4.0004442e-01 5.0436965e-01 7.1700909e-01 6.0201716e-01 5.2132556e-01 8.0051115e-01 2.2573593e-01 8.0000080e-01 4.0243965e-01 7.0088477e-01 3.0474106e-01 3.1428043e+00 2.9119661e+00 3.3252402e+00 2.4048325e+00 3.0131919e+00 2.9019362e+00 3.1087162e+00 1.7043719e+00 3.0148571e+00 2.3090280e+00 1.9099608e+00 2.6089255e+00 2.4039780e+00 3.1034773e+00 2.0109328e+00 2.8295063e+00 2.9047982e+00 2.5011632e+00 2.9079901e+00 2.3019339e+00 3.2101766e+00 2.4088882e+00 3.3051149e+00 3.1020645e+00 2.7127201e+00 2.8219255e+00 3.2211370e+00 3.4154432e+00 2.9057760e+00 1.9031964e+00 2.2023924e+00 2.1016800e+00 2.3040177e+00 3.5033831e+00 2.9047500e+00 2.9083094e+00 3.1195526e+00 2.8078790e+00 2.5037817e+00 2.4045555e+00 2.8012577e+00 3.0040677e+00 2.4032886e+00 1.7053664e+00 2.6031367e+00 2.6019751e+00 2.6032655e+00 2.7067267e+00 1.4186217e+00 2.5039390e+00 4.4180854e+00 3.5092124e+00 4.3179254e+00 4.0043989e+00 4.2115634e+00 5.0223341e+00 
2.9108451e+00 4.7142986e+00 4.2064794e+00 4.5276385e+00 3.5169841e+00 3.7092301e+00 3.9177719e+00 3.4145764e+00 3.5398518e+00 3.7257231e+00 3.9064409e+00 5.1255523e+00 5.3223081e+00 3.4028319e+00 4.1224590e+00 3.3167394e+00 5.1238108e+00 3.3110167e+00 4.1123595e+00 4.4156295e+00 3.2116669e+00 3.3094499e+00 4.0106882e+00 4.2180724e+00 4.5227110e+00 4.8462592e+00 4.0138270e+00 3.5038530e+00 4.0010263e+00 4.5481710e+00 4.0222346e+00 3.9055783e+00 3.2104685e+00 3.8231767e+00 4.0259306e+00 3.5470873e+00 3.5092124e+00 4.3162096e+00 4.1285334e+00 3.6344358e+00 3.4126369e+00 3.6148618e+00 3.8215373e+00 3.5066394e+00 2.2608083e-01 2.4170870e-01 3.0915245e-01 3.0915245e-01 4.0002221e-01 7.0096858e-01 8.0888055e-01 3.3818226e-01 4.0243965e-01 5.0477564e-01 3.3818226e-01 6.1135434e-01 2.0121983e-01 3.0017653e-01 1.1020506e+00 6.0219099e-01 2.0061436e-01 4.1210927e-01 4.0246123e-01 4.0125062e-01 4.0363334e-01 3.4085233e-01 2.2573593e-01 3.1414744e+00 2.9090612e+00 3.3237063e+00 2.4058952e+00 3.0107723e+00 2.9007492e+00 3.1056084e+00 1.7139238e+00 3.0138400e+00 2.3035512e+00 1.9525301e+00 2.6040007e+00 2.4100386e+00 3.1020779e+00 2.0037730e+00 2.8273039e+00 2.9018637e+00 2.5009579e+00 2.9077468e+00 2.3022754e+00 3.2048985e+00 2.4059812e+00 3.3038272e+00 3.1015567e+00 2.7110304e+00 2.8196992e+00 3.2199879e+00 3.4126307e+00 2.9028597e+00 1.9035634e+00 2.2044603e+00 2.1052067e+00 2.3021298e+00 3.5016873e+00 2.9018153e+00 2.9040211e+00 3.1174676e+00 2.8083845e+00 2.5012694e+00 2.4028385e+00 2.8006965e+00 3.0024198e+00 2.4021168e+00 1.7233814e+00 2.6012647e+00 2.6007117e+00 2.6012056e+00 2.7050190e+00 1.4220898e+00 2.5015263e+00 4.4109742e+00 3.5045842e+00 4.3148937e+00 4.0025815e+00 4.2071337e+00 5.0208677e+00 2.9053047e+00 4.7134688e+00 4.2051335e+00 4.5213131e+00 3.5107904e+00 3.7056862e+00 3.9129303e+00 3.4076849e+00 3.5232606e+00 3.7154827e+00 3.9043905e+00 5.1238111e+00 5.3204854e+00 3.4027174e+00 4.1161770e+00 3.3085473e+00 5.1228006e+00 3.3065392e+00 4.1085246e+00 4.4144908e+00 3.2064337e+00 3.3048953e+00 4.0063737e+00 4.2173565e+00 4.5213359e+00 4.8449108e+00 4.0082529e+00 3.5026815e+00 4.0006690e+00 4.5442582e+00 4.0132894e+00 3.9035247e+00 3.2051958e+00 3.8177289e+00 4.0170262e+00 3.5340950e+00 3.5045842e+00 4.3111747e+00 4.1186693e+00 3.6228903e+00 3.4075338e+00 3.6094379e+00 3.8124787e+00 3.5031928e+00 1.1269424e-01 5.0436965e-01 4.5078948e-01 2.2573593e-01 6.0000317e-01 7.0088477e-01 4.1210927e-01 3.4080442e-01 3.0474106e-01 4.1210927e-01 8.0883841e-01 1.1269424e-01 2.2573593e-01 1.2089253e+00 8.0051036e-01 4.0125062e-01 4.1317535e-01 5.2133802e-01 3.0017653e-01 6.0184622e-01 2.0061436e-01 2.2573593e-01 3.2211375e+00 3.0065592e+00 3.4126307e+00 2.5097024e+00 3.1069750e+00 3.0016616e+00 3.2056674e+00 1.8201133e+00 3.1066333e+00 2.4080686e+00 2.0619186e+00 2.7068820e+00 2.5107632e+00 3.2022485e+00 2.1087015e+00 2.9137660e+00 3.0040552e+00 2.6010528e+00 3.0089164e+00 2.4039766e+00 3.3085605e+00 2.5050799e+00 3.4035383e+00 3.2010814e+00 2.8057187e+00 2.9102474e+00 3.3101488e+00 3.5088135e+00 3.0043212e+00 2.0051350e+00 2.3071665e+00 2.2078183e+00 2.4034084e+00 3.6027901e+00 3.0040510e+00 3.0064311e+00 3.2097125e+00 2.9065741e+00 2.6030847e+00 2.5057763e+00 2.9016034e+00 3.1025924e+00 2.5033702e+00 1.8310475e+00 2.7029487e+00 2.7015162e+00 2.7026433e+00 2.8034238e+00 1.5358653e+00 2.6032968e+00 4.5159027e+00 3.6080734e+00 4.4115652e+00 4.1033603e+00 4.3094200e+00 5.1138780e+00 3.0100866e+00 4.8082318e+00 4.3041941e+00 4.6203464e+00 3.6127197e+00 3.8070338e+00 4.0124958e+00 3.5130651e+00 
3.6349183e+00 3.8215362e+00 4.0044029e+00 5.2162094e+00 5.4145184e+00 3.5039680e+00 4.2166537e+00 3.4145702e+00 5.2146334e+00 3.4083285e+00 4.2090727e+00 4.5089181e+00 3.3091123e+00 3.4076329e+00 4.1087140e+00 4.3098021e+00 4.6133860e+00 4.9287684e+00 4.1115100e+00 3.6023704e+00 4.1007820e+00 4.6309835e+00 4.1192351e+00 4.0040240e+00 3.3086516e+00 3.9156759e+00 4.1209257e+00 3.6344375e+00 3.6080734e+00 4.4124586e+00 4.2235561e+00 3.7268101e+00 3.5102374e+00 3.7111714e+00 3.9185866e+00 3.6056580e+00 5.0084481e-01 4.1315633e-01 2.2573593e-01 7.0000303e-01 8.0046605e-01 3.3818226e-01 2.4170870e-01 3.0017653e-01 3.3818226e-01 8.0245824e-01 1.1269424e-01 2.0181667e-01 1.1133897e+00 8.0004523e-01 4.0246123e-01 5.2167829e-01 4.5078948e-01 4.0125062e-01 6.0017665e-01 3.0017653e-01 2.0061436e-01 3.3182813e+00 3.1056085e+00 3.5110034e+00 2.6060742e+00 3.2059465e+00 3.1013637e+00 3.3048926e+00 1.9099680e+00 3.2056789e+00 2.5063346e+00 2.1344310e+00 2.8057704e+00 2.6059875e+00 3.3019214e+00 2.2068463e+00 3.0117188e+00 3.1034568e+00 2.7006623e+00 3.1063566e+00 2.5023073e+00 3.4074246e+00 2.6040822e+00 3.5028878e+00 3.3008913e+00 2.9048017e+00 3.0087102e+00 3.4087684e+00 3.6077011e+00 3.1036688e+00 2.1027637e+00 2.4039664e+00 2.3040177e+00 2.5024922e+00 3.7023994e+00 3.1034532e+00 3.1054994e+00 3.3083865e+00 3.0045919e+00 2.7025560e+00 2.6039937e+00 3.0011260e+00 3.2022183e+00 2.6023240e+00 1.9156198e+00 2.8022964e+00 2.8012577e+00 2.8021795e+00 2.9028597e+00 1.6190551e+00 2.7026433e+00 4.6143249e+00 3.7070367e+00 4.5103890e+00 4.2029881e+00 4.4084395e+00 5.2126509e+00 3.1082889e+00 4.9074567e+00 4.4036930e+00 4.7183734e+00 3.7111657e+00 3.9061763e+00 4.1111077e+00 3.6112621e+00 3.7306950e+00 3.9190472e+00 4.1039096e+00 5.3148048e+00 5.5132902e+00 3.6028435e+00 4.3148929e+00 3.5126667e+00 5.3133599e+00 3.5071916e+00 4.3081091e+00 4.6080296e+00 3.4078683e+00 3.5066415e+00 4.2077543e+00 4.4087821e+00 4.7120754e+00 5.0261502e+00 4.2102487e+00 3.7020547e+00 4.2006494e+00 4.7279956e+00 4.2171591e+00 4.1035746e+00 3.4074978e+00 4.0138994e+00 4.2186688e+00 3.7302926e+00 3.7070367e+00 4.5111938e+00 4.3210760e+00 3.8236433e+00 3.6087856e+00 3.8098356e+00 4.0164852e+00 3.7049593e+00 1.1269424e-01 7.0017011e-01 9.0506343e-01 1.0426636e+00 2.0181667e-01 4.1209001e-01 8.0093081e-01 2.0181667e-01 3.4080442e-01 4.0125062e-01 3.6259865e-01 9.0029064e-01 3.3808272e-01 4.2268438e-01 6.1135434e-01 2.2608083e-01 6.0948212e-01 2.0061436e-01 6.3164977e-01 3.0482299e-01 3.1902650e+00 2.9267417e+00 3.3545239e+00 2.4065479e+00 3.0300492e+00 2.9028462e+00 3.1166330e+00 1.7073273e+00 3.0369978e+00 2.3091363e+00 1.9242178e+00 2.6129479e+00 2.4148785e+00 3.1074477e+00 2.0138888e+00 2.8680310e+00 2.9053048e+00 2.5042875e+00 2.9165009e+00 2.3038195e+00 3.2116668e+00 2.4221589e+00 3.3110857e+00 3.1060464e+00 2.7333589e+00 2.8520935e+00 3.2480481e+00 3.4313924e+00 2.9094538e+00 1.9103596e+00 2.2042535e+00 2.1040084e+00 2.3086427e+00 3.5048916e+00 2.9048754e+00 2.9119661e+00 3.1440058e+00 2.8212158e+00 2.5048147e+00 2.4054865e+00 2.8016296e+00 3.0087060e+00 2.4071454e+00 1.7108740e+00 2.6040234e+00 2.6035022e+00 2.6047905e+00 2.7176633e+00 1.4222462e+00 2.5057847e+00 4.4195581e+00 3.5098289e+00 4.3311360e+00 4.0067587e+00 4.2149806e+00 5.0390074e+00 2.9109559e+00 4.7273232e+00 4.2124122e+00 4.5405876e+00 3.5250718e+00 3.7139027e+00 3.9284285e+00 3.4150426e+00 3.5404383e+00 3.7302923e+00 3.9113385e+00 5.1434691e+00 5.3373009e+00 3.4049076e+00 4.1330547e+00 3.3170101e+00 5.1417717e+00 3.3168869e+00 4.1189487e+00 4.4302240e+00 
3.2165105e+00 3.3123688e+00 4.0139003e+00 4.2362095e+00 4.5418862e+00 4.8784553e+00 4.0170271e+00 3.5083268e+00 4.0022121e+00 4.5797332e+00 4.0245435e+00 3.9092247e+00 3.2127499e+00 3.8383398e+00 4.0332236e+00 3.5687055e+00 3.5098289e+00 4.3229228e+00 4.1350002e+00 3.6463113e+00 3.4177609e+00 3.6219569e+00 3.8236419e+00 3.5076152e+00 6.0202028e-01 1.0008471e+00 1.1133984e+00 1.2085435e-01 4.0125062e-01 7.0548138e-01 1.2085435e-01 4.1210927e-01 3.3813251e-01 4.1317535e-01 8.0093160e-01 4.1210927e-01 4.5147187e-01 7.0184453e-01 2.0121983e-01 7.0088326e-01 2.2573593e-01 6.3164977e-01 2.4170870e-01 3.1712556e+00 2.9203033e+00 3.3425812e+00 2.4054865e+00 3.0228150e+00 2.9023686e+00 3.1131138e+00 1.7053664e+00 3.0276460e+00 2.3090554e+00 1.9156198e+00 2.6109872e+00 2.4094445e+00 3.1056085e+00 2.0122722e+00 2.8520934e+00 2.9050277e+00 2.5027053e+00 2.9125192e+00 2.3027405e+00 3.2109394e+00 2.4160415e+00 3.3084146e+00 3.1042008e+00 2.7243956e+00 2.8394100e+00 3.2369546e+00 3.4247119e+00 2.9077087e+00 1.9065546e+00 2.2031052e+00 2.1025721e+00 2.3063026e+00 3.5041732e+00 2.9047982e+00 2.9102300e+00 3.1338083e+00 2.8152056e+00 2.5042498e+00 2.4049187e+00 2.8014068e+00 3.0065584e+00 2.4051787e+00 1.7073273e+00 2.6035320e+00 2.6027034e+00 2.6039922e+00 2.7127202e+00 1.4197348e+00 2.5048165e+00 4.4189015e+00 3.5095163e+00 4.3257988e+00 4.0057069e+00 4.2135049e+00 5.0324949e+00 2.9108796e+00 4.7221364e+00 4.2099109e+00 4.5353940e+00 3.5215862e+00 3.7118544e+00 3.9240013e+00 3.4147901e+00 3.5401421e+00 3.7282909e+00 3.9092247e+00 5.1365094e+00 5.3314710e+00 3.4038679e+00 4.1286955e+00 3.3168613e+00 5.1347955e+00 3.3142720e+00 4.1161770e+00 4.4243750e+00 3.2143132e+00 3.3110162e+00 4.0124921e+00 4.2289511e+00 4.5343165e+00 4.8661278e+00 4.0156242e+00 3.5063341e+00 4.0016595e+00 4.5675358e+00 4.0235142e+00 3.9076269e+00 3.2116668e+00 3.8321137e+00 4.0301568e+00 3.5598554e+00 3.5095163e+00 4.3201293e+00 4.1322798e+00 3.6413274e+00 3.4154677e+00 3.6188977e+00 3.8226861e+00 3.5071388e+00 7.0096708e-01 8.0004602e-01 5.0855077e-01 4.1420960e-01 2.2608083e-01 5.0855077e-01 1.0008768e+00 3.0474106e-01 4.0127250e-01 1.1527746e+00 1.0000457e+00 4.0127250e-01 4.5783248e-01 6.0948800e-01 4.1317535e-01 8.0008964e-01 3.0482299e-01 4.0127250e-01 3.2104685e+00 3.0024155e+00 3.4059092e+00 2.5048145e+00 3.1026586e+00 3.0005260e+00 3.2022151e+00 1.8108200e+00 3.1025924e+00 2.4028974e+00 2.0417688e+00 2.7025751e+00 2.5062865e+00 3.2007416e+00 2.1027376e+00 2.9057769e+00 3.0015388e+00 2.6003213e+00 3.0043084e+00 2.4017233e+00 3.3039365e+00 2.5015263e+00 3.4013673e+00 3.2002931e+00 2.8019179e+00 2.9040262e+00 3.3045112e+00 3.5038551e+00 3.0015958e+00 2.0020172e+00 2.3035509e+00 2.2041009e+00 2.4010446e+00 3.6011254e+00 3.0015387e+00 3.0025850e+00 3.2040849e+00 2.9029297e+00 2.6009614e+00 2.5022969e+00 2.9005699e+00 3.1008537e+00 2.5011717e+00 1.8179820e+00 2.7009799e+00 2.7004102e+00 2.7008225e+00 2.8010266e+00 1.5160787e+00 2.6010449e+00 4.5093548e+00 3.6039076e+00 4.4060847e+00 4.1014985e+00 4.3050076e+00 5.1081747e+00 3.0045274e+00 4.8044751e+00 4.3019076e+00 4.6117611e+00 3.6062405e+00 3.8032982e+00 4.0063634e+00 3.5066398e+00 3.6202705e+00 3.8119529e+00 4.0019489e+00 5.2097659e+00 5.4087506e+00 3.5019664e+00 4.2090727e+00 3.4073888e+00 5.2088330e+00 3.4037266e+00 4.2046087e+00 4.5046939e+00 3.3041077e+00 3.4034672e+00 4.1044794e+00 4.3051857e+00 4.6074982e+00 4.9181673e+00 4.1061528e+00 3.6008588e+00 4.1002763e+00 4.6187512e+00 4.1110303e+00 4.0017844e+00 3.3039580e+00 3.9080413e+00 4.1118167e+00 3.6188744e+00 
[Raw numeric payload from a bundled voice_bridge data file: several thousand floating-point values in scientific notation (apparently a large distance/feature matrix), with the surrounding diff headers and `+` markers lost in extraction. The values are not reviewable as text; see the file itself in the patch.]
1.4621584e+00 1.1060939e+00 9.1576742e-01 9.6133119e-01 9.4532171e-01 1.2662318e+00 3.0490481e-01 8.6084272e-01 2.7230933e+00 1.8083405e+00 2.7192500e+00 2.3152780e+00 2.5278895e+00 3.4307167e+00 1.2089253e+00 3.1023107e+00 2.5446557e+00 2.9238544e+00 1.9071235e+00 2.0445123e+00 2.3113306e+00 1.7148932e+00 1.8686471e+00 2.0692591e+00 2.2408459e+00 3.5448121e+00 3.7102716e+00 1.7134856e+00 2.5076804e+00 1.6187837e+00 3.5398901e+00 1.6780493e+00 2.4594169e+00 2.8277283e+00 1.5698091e+00 1.6365650e+00 2.3270565e+00 2.6739856e+00 2.9710335e+00 3.3954286e+00 2.3304578e+00 1.8446316e+00 2.3054869e+00 3.1050823e+00 2.3405162e+00 2.2288004e+00 1.5327217e+00 2.2740189e+00 2.3840998e+00 2.1122339e+00 1.8083405e+00 2.6588338e+00 2.4791402e+00 2.0688078e+00 1.7632513e+00 1.9828965e+00 2.1428811e+00 1.8095574e+00 3.0017653e-01 2.0061436e-01 6.0017982e-01 1.2012991e+00 1.2085435e-01 1.8301371e+00 9.1424659e-01 1.8223693e+00 1.4049093e+00 1.6190709e+00 2.5271432e+00 7.0556260e-01 2.1953731e+00 1.6303102e+00 2.0261311e+00 1.0363096e+00 1.1330692e+00 1.4229011e+00 8.5406674e-01 1.1527746e+00 1.2106302e+00 1.3261864e+00 2.6410980e+00 2.8004332e+00 8.1117067e-01 1.6146557e+00 7.8886054e-01 2.6375763e+00 7.9824795e-01 1.5471213e+00 1.9317133e+00 6.9509552e-01 7.3155911e-01 1.4182194e+00 1.8031267e+00 2.0887452e+00 2.5422790e+00 1.4267527e+00 9.3308891e-01 1.4006149e+00 2.2706274e+00 1.4614246e+00 1.3141581e+00 6.4049114e-01 1.4230277e+00 1.5004249e+00 1.3669148e+00 9.1424659e-01 1.7490906e+00 1.5981930e+00 1.2553121e+00 8.7212232e-01 1.0924484e+00 1.2731059e+00 9.0557807e-01 1.1269424e-01 5.0002283e-01 1.2049541e+00 2.0121983e-01 1.8447840e+00 9.3329055e-01 1.7901165e+00 1.4035225e+00 1.6222582e+00 2.4980210e+00 8.1757693e-01 2.1693127e+00 1.6187837e+00 2.0049100e+00 1.0151397e+00 1.1261381e+00 1.3938113e+00 9.0657539e-01 1.2362756e+00 1.2472959e+00 1.3154932e+00 2.6088344e+00 2.7785257e+00 9.0207914e-01 1.5972548e+00 8.5406674e-01 2.6071034e+00 7.7652636e-01 1.5358856e+00 1.8953567e+00 6.9518117e-01 7.4612718e-01 1.4220925e+00 1.7511588e+00 2.0440071e+00 2.4804818e+00 1.4362913e+00 9.1449234e-01 1.4003402e+00 2.2077377e+00 1.4867147e+00 1.3085752e+00 6.7720780e-01 1.3734975e+00 1.5100598e+00 1.3269962e+00 9.3329055e-01 1.7441015e+00 1.6143613e+00 1.2552584e+00 8.7796615e-01 1.0782751e+00 1.3029195e+00 9.1424659e-01 5.0000761e-01 1.2040406e+00 1.1269424e-01 1.8289847e+00 9.1424701e-01 1.7872748e+00 1.4023776e+00 1.6144390e+00 2.4974488e+00 8.0533198e-01 2.1691714e+00 1.6179842e+00 1.9948417e+00 9.9013884e-01 1.1186586e+00 1.3842454e+00 8.5583357e-01 1.1527671e+00 1.1990152e+00 1.3139296e+00 2.6085083e+00 2.7775771e+00 8.5440680e-01 1.5835478e+00 7.8886139e-01 2.6068470e+00 7.5508853e-01 1.5300146e+00 1.8950932e+00 6.5712813e-01 7.2036951e-01 1.4134189e+00 1.7511120e+00 2.0435901e+00 2.4807480e+00 1.4220898e+00 9.1424701e-01 1.4002005e+00 2.2048860e+00 1.4562730e+00 1.3069754e+00 6.3309258e-01 1.3632211e+00 1.4815986e+00 1.2921474e+00 9.1424701e-01 1.7351894e+00 1.5836236e+00 1.2085436e+00 8.4725834e-01 1.0597879e+00 1.2653025e+00 9.0508756e-01 1.3743342e+00 5.0043084e-01 1.7369589e+00 8.2635069e-01 1.6144390e+00 1.3008770e+00 1.5131090e+00 2.3226028e+00 1.3004854e+00 2.0107294e+00 1.5010034e+00 1.8390199e+00 8.5471446e-01 1.0087539e+00 1.2223855e+00 8.0073117e-01 1.1286018e+00 1.1270411e+00 1.2013529e+00 2.4290388e+00 2.6198428e+00 7.8895472e-01 1.4363167e+00 7.7598796e-01 2.4266979e+00 6.3178782e-01 1.4100098e+00 1.7134977e+00 5.6347549e-01 6.3165225e-01 1.3130978e+00 1.5237265e+00 1.8289379e+00 
2.1979419e+00 1.3253497e+00 8.0004602e-01 1.3000455e+00 1.9028805e+00 1.3748188e+00 1.2012991e+00 5.6371422e-01 1.1400420e+00 1.3748220e+00 1.0597992e+00 8.2635069e-01 1.6184929e+00 1.4858469e+00 1.0797702e+00 7.4612830e-01 9.3329055e-01 1.1910002e+00 8.0923926e-01 1.1056785e+00 3.0089448e+00 2.1019570e+00 2.9563162e+00 2.6052626e+00 2.8107271e+00 3.6717374e+00 1.5012719e+00 3.3522198e+00 2.8186527e+00 3.1596417e+00 2.1362161e+00 2.3151198e+00 2.5461024e+00 2.0036629e+00 2.1224665e+00 2.3234228e+00 2.5150159e+00 3.7801779e+00 3.9623034e+00 2.0033816e+00 2.7467405e+00 1.9044216e+00 3.7784933e+00 1.9231092e+00 2.7237438e+00 3.0623796e+00 1.8186729e+00 1.9089573e+00 2.6097166e+00 2.8846684e+00 3.1885494e+00 3.5706709e+00 2.6109888e+00 2.1139043e+00 2.6017555e+00 3.2706949e+00 2.6138780e+00 2.5099937e+00 1.8069857e+00 2.4748863e+00 2.6338874e+00 2.2385678e+00 2.1019570e+00 2.9251726e+00 2.7321685e+00 2.2661994e+00 2.0190735e+00 2.2288464e+00 2.4131370e+00 2.1020441e+00 1.9226845e+00 1.0087252e+00 1.8684939e+00 1.5017097e+00 1.7108631e+00 2.5816562e+00 8.0533198e-01 2.2563154e+00 1.7134977e+00 2.0770423e+00 1.0604287e+00 1.2124777e+00 1.4620239e+00 9.3329017e-01 1.1896594e+00 1.2705641e+00 1.4098496e+00 2.6923501e+00 2.8659663e+00 9.1449234e-01 1.6637458e+00 8.5403428e-01 2.6902417e+00 8.3183672e-01 1.6225614e+00 1.9757175e+00 7.3084048e-01 8.1117067e-01 1.5097082e+00 1.8197097e+00 2.1169795e+00 2.5408329e+00 1.5160570e+00 1.0087393e+00 1.5001233e+00 2.2573596e+00 1.5423651e+00 1.4049376e+00 7.1708289e-01 1.4229011e+00 1.5611429e+00 1.3142952e+00 1.0087252e+00 1.8271493e+00 1.6639408e+00 1.2552635e+00 9.2768675e-01 1.1400420e+00 1.3479052e+00 1.0031018e+00 9.3184922e-01 8.0291749e-01 7.0910969e-01 3.4342562e-01 1.3028005e+00 1.6474201e+00 1.0216374e+00 8.5586571e-01 9.0026543e-01 9.0508756e-01 7.7598796e-01 5.7832449e-01 1.0522594e+00 9.0999313e-01 7.0008735e-01 7.1708289e-01 1.4049376e+00 1.4220925e+00 1.2553121e+00 6.0202028e-01 1.1170561e+00 1.4055109e+00 1.1186497e+00 4.5783248e-01 9.3306769e-01 1.2101609e+00 1.1134939e+00 5.3914287e-01 1.0144117e+00 1.1074742e+00 1.6007365e+00 5.2491734e-01 1.0797700e+00 1.1139044e+00 1.4000349e+00 4.0004442e-01 7.1629303e-01 1.2090477e+00 6.8170466e-01 4.5148429e-01 9.1427000e-01 9.3184922e-01 5.0043842e-01 4.1209001e-01 8.0296037e-01 1.0498226e+00 8.0928056e-01 6.0018299e-01 9.3446811e-01 1.3131410e+00 5.6371422e-01 7.8985507e-01 1.8949500e+00 9.1427000e-01 1.5639785e+00 9.3308891e-01 1.4501583e+00 7.1621748e-01 6.0017665e-01 1.0010209e+00 2.0181667e-01 5.0000761e-01 6.3925756e-01 7.0548283e-01 2.0162299e+00 2.0885102e+00 5.2167829e-01 1.1079931e+00 2.2608083e-01 2.0057464e+00 5.0043084e-01 9.2747919e-01 1.4186217e+00 4.1212852e-01 3.4085233e-01 6.3178782e-01 1.4043632e+00 1.6175925e+00 2.1298991e+00 6.3309258e-01 5.2133179e-01 5.6595908e-01 1.9078843e+00 7.4418186e-01 6.1830764e-01 3.4085233e-01 1.1006468e+00 9.1132198e-01 1.1010711e+00 0.0000000e+00 1.0458540e+00 9.3984267e-01 9.0166476e-01 5.0043084e-01 7.0088627e-01 7.0993998e-01 3.0017653e-01 8.0093160e-01 6.0000635e-01 7.1621613e-01 2.2268632e+00 4.1317535e-01 5.2491734e-01 6.0964891e-01 8.2421923e-01 7.4335736e-01 4.1209001e-01 1.4186217e+00 1.3131410e+00 7.4275547e-01 6.1119267e-01 9.1566538e-01 1.0095513e+00 1.1796101e+00 2.5399984e-01 1.5237074e+00 8.2421923e-01 1.0429127e+00 4.1315633e-01 3.0490481e-01 1.1528553e+00 1.1270325e+00 7.0096708e-01 5.0001522e-01 3.1328089e-01 9.0657583e-01 7.0096858e-01 9.1568820e-01 1.0216374e+00 6.0035305e-01 8.0337471e-01 7.0548283e-01 1.2396937e+00 
5.0043084e-01 4.2270142e-01 8.0008964e-01 1.3131410e+00 3.0915245e-01 4.5847767e-01 7.0470720e-01 9.6936870e-01 7.4262964e-01 9.0645118e-01 1.2190260e+00 4.0246123e-01 1.3450652e+00 1.4544312e+00 1.0207258e+00 4.5147187e-01 9.6501813e-01 5.0517282e-01 3.0490481e-01 5.0437695e-01 6.8170466e-01 6.5712813e-01 5.0855778e-01 2.0121983e-01 1.4695463e+00 1.5267750e+00 7.4395693e-01 6.3309258e-01 7.8890806e-01 1.4542932e+00 7.0008432e-01 4.5784410e-01 9.0166431e-01 8.0000160e-01 7.0008584e-01 3.0017653e-01 9.0005094e-01 1.1019505e+00 1.6144405e+00 4.0004442e-01 5.0436965e-01 4.1315633e-01 1.4012283e+00 6.3164729e-01 2.0121983e-01 8.0046685e-01 6.0219099e-01 6.0964597e-01 6.5724028e-01 5.6371422e-01 5.6838732e-01 7.0911112e-01 5.3914287e-01 6.0948506e-01 4.0246123e-01 5.6371422e-01 5.2133179e-01 1.1281267e+00 1.6745375e+00 8.1112984e-01 5.2167208e-01 7.4395693e-01 7.0016860e-01 5.0855778e-01 3.3813251e-01 9.0659977e-01 7.8895472e-01 5.0043842e-01 4.1209001e-01 1.2528048e+00 1.3020492e+00 9.3861512e-01 4.0127250e-01 1.0142766e+00 1.2362810e+00 9.0168933e-01 3.0490481e-01 7.0478886e-01 1.0010209e+00 9.0279223e-01 2.2608083e-01 7.4262850e-01 9.0055475e-01 1.4109657e+00 2.2573593e-01 7.8895472e-01 8.0492246e-01 1.2000668e+00 4.0363334e-01 4.1212852e-01 1.0039060e+00 4.5080200e-01 2.4195741e-01 7.0462844e-01 7.8985507e-01 3.0490481e-01 3.4085233e-01 6.0017982e-01 8.0928056e-01 6.0017665e-01 4.5784410e-01 7.4612718e-01 2.7992301e+00 3.6259865e-01 9.6953662e-01 6.4620889e-01 1.5401330e+00 1.4140789e+00 1.1281265e+00 2.0058560e+00 1.8949500e+00 1.4140515e+00 1.2396937e+00 8.0000239e-01 4.1317535e-01 1.8064092e+00 9.3310976e-01 2.1167364e+00 2.0181667e-01 1.7570696e+00 1.0143975e+00 6.1135434e-01 1.8661434e+00 1.8197089e+00 1.2632995e+00 8.1112909e-01 5.0126466e-01 8.0051115e-01 1.2632996e+00 1.5975200e+00 1.5266891e+00 5.0043084e-01 1.3452695e+00 1.3018553e+00 1.9314575e+00 1.2089192e+00 1.0777307e+00 1.5030978e+00 1.8949500e+00 8.5409862e-01 1.0151880e+00 1.4180463e+00 1.6742781e+00 1.4542907e+00 1.4853863e+00 1.8197089e+00 2.4725511e+00 1.8443040e+00 2.3518103e+00 1.6032169e+00 1.5066818e+00 1.9080163e+00 8.0923851e-01 9.4532171e-01 1.5109394e+00 1.6179000e+00 2.9132779e+00 2.9705619e+00 1.1020600e+00 2.0185049e+00 7.0633229e-01 2.9085831e+00 1.4001717e+00 1.8310931e+00 2.3325127e+00 1.3000908e+00 1.2016381e+00 1.5402579e+00 2.3143334e+00 2.5314248e+00 3.0393618e+00 1.5405402e+00 1.4018011e+00 1.3018553e+00 2.8185850e+00 1.4728279e+00 1.5248833e+00 1.1020506e+00 2.0036931e+00 1.8191683e+00 2.0009584e+00 9.1427000e-01 1.9534067e+00 1.8336293e+00 1.8020058e+00 1.4006178e+00 1.6026130e+00 1.3506710e+00 1.0116724e+00 6.3912709e-01 7.8890806e-01 1.2190319e+00 1.0776296e+00 8.0923926e-01 1.6740888e+00 1.5650163e+00 1.0798806e+00 9.0155438e-01 9.0417295e-01 6.4049114e-01 1.4685147e+00 6.4049114e-01 1.7843537e+00 4.5148429e-01 1.4324350e+00 6.8261201e-01 3.3813251e-01 1.5401311e+00 1.4852570e+00 9.3329055e-01 5.0043842e-01 2.0181667e-01 9.1424701e-01 9.3424697e-01 1.2633467e+00 1.2093243e+00 5.2167829e-01 1.0313359e+00 9.6574336e-01 1.5965767e+00 9.0168933e-01 7.7603846e-01 1.2016443e+00 1.5639785e+00 5.7832449e-01 7.7882758e-01 1.1074742e+00 1.3452311e+00 1.1281352e+00 1.1558746e+00 1.4852570e+00 1.1153247e+00 7.8895472e-01 5.0477564e-01 5.0855778e-01 1.0426636e+00 9.4532171e-01 7.3155911e-01 5.0476836e-01 1.3669148e+00 1.1910003e+00 8.5471446e-01 7.1629303e-01 1.1528553e+00 1.0777411e+00 9.0142636e-01 8.0046685e-01 7.1629168e-01 1.0032293e+00 9.1892413e-01 3.6452132e-01 5.6370994e-01 7.0176271e-01 
1.4157327e+00 4.2362917e-01 7.0633229e-01 6.0964891e-01 1.0062544e+00 9.1554656e-01 6.0365948e-01 1.0235118e+00 6.1135434e-01 6.7626502e-01 7.5508853e-01 9.3308891e-01 7.1621884e-01 8.5403428e-01 6.5712608e-01 8.0245824e-01 6.3192325e-01 9.1132198e-01 8.6051414e-01 1.0242692e+00 1.0232576e+00 6.8685125e-01 1.5761415e+00 1.4407364e+00 9.0296858e-01 8.3689956e-01 6.3322667e-01 1.0451812e+00 1.5490134e+00 4.5847767e-01 1.6529028e+00 8.3916809e-01 1.2749306e+00 5.4219811e-01 7.0462697e-01 1.3611955e+00 1.3103292e+00 9.0792879e-01 9.1446896e-01 8.2421853e-01 7.1708289e-01 9.0683128e-01 1.1954899e+00 1.2957636e+00 6.3178534e-01 9.0508756e-01 8.7796615e-01 1.4198077e+00 7.2113820e-01 6.0427481e-01 1.0032443e+00 1.4501583e+00 4.5216167e-01 5.2491131e-01 9.1894698e-01 1.2738390e+00 9.4912864e-01 1.0207533e+00 1.3523292e+00 5.0043842e-01 4.1317535e-01 8.5403428e-01 7.0910969e-01 3.0482299e-01 4.0243965e-01 1.6491696e+00 1.8289467e+00 1.0060994e+00 6.1119267e-01 9.0142636e-01 1.6484371e+00 5.0126466e-01 6.0018299e-01 9.3308853e-01 4.2362917e-01 4.0363334e-01 5.2133802e-01 7.9153339e-01 1.0782107e+00 1.5275160e+00 5.2167829e-01 5.2167208e-01 6.9987517e-01 1.2633516e+00 5.2201750e-01 4.0127250e-01 5.0517282e-01 4.1212852e-01 5.2167829e-01 4.1210927e-01 7.1621748e-01 8.0093081e-01 6.3178782e-01 3.0922892e-01 7.0008735e-01 2.0061436e-01 3.6452132e-01 6.0035305e-01 4.1420960e-01 7.0096858e-01 6.3178782e-01 5.2132556e-01 3.0490481e-01 1.5634961e+00 1.6740875e+00 5.4219811e-01 5.8750389e-01 8.0245903e-01 1.5263478e+00 4.0004442e-01 6.1135434e-01 8.6051471e-01 5.0043842e-01 4.2270142e-01 3.0482299e-01 8.0967961e-01 1.0426513e+00 1.5757929e+00 3.3813251e-01 4.0127250e-01 5.0855778e-01 1.3133662e+00 7.1700909e-01 4.0125062e-01 5.2491734e-01 5.2167829e-01 5.2838320e-01 5.3943256e-01 6.0017665e-01 6.4620889e-01 6.8261201e-01 4.2270142e-01 3.0482299e-01 3.0026460e-01 7.0470867e-01 5.0477564e-01 1.1038840e+00 1.0010209e+00 4.0363334e-01 3.3808272e-01 1.2528049e+00 1.4182049e+00 9.2033101e-01 2.4195741e-01 1.2036925e+00 1.2362756e+00 6.3451734e-01 3.0482299e-01 5.2524663e-01 7.4335736e-01 7.4329414e-01 4.0125062e-01 5.2491131e-01 6.7636452e-01 1.1755449e+00 4.0127250e-01 6.3925756e-01 7.9148746e-01 9.1424659e-01 5.2491734e-01 4.1210927e-01 8.5437440e-01 1.2085435e-01 3.0026460e-01 4.0127250e-01 1.0010209e+00 4.0243965e-01 4.1317535e-01 3.0482299e-01 6.0444249e-01 3.3813251e-01 6.0964891e-01 9.0166431e-01 4.1212852e-01 7.8985507e-01 8.1719606e-01 2.1378157e+00 2.2009978e+00 5.0855077e-01 1.2175952e+00 3.0017653e-01 2.1167404e+00 6.0035621e-01 1.0597879e+00 1.5265797e+00 5.0517282e-01 5.2167829e-01 7.4329527e-01 1.5072282e+00 1.7227391e+00 2.2434054e+00 7.4335736e-01 6.3309258e-01 6.8161057e-01 2.0107350e+00 9.2867113e-01 7.5508853e-01 5.0517282e-01 1.2040344e+00 1.0178820e+00 1.2037520e+00 2.0181667e-01 1.1636098e+00 1.0621172e+00 1.0032443e+00 6.0000317e-01 8.0883841e-01 9.0668287e-01 5.0085236e-01 6.0964891e-01 7.4618926e-01 2.0118073e+00 2.0884859e+00 9.1424701e-01 1.1060939e+00 4.0243965e-01 2.0057764e+00 6.3178782e-01 9.1916394e-01 1.4198627e+00 6.1119267e-01 6.0219099e-01 6.3309012e-01 1.4134218e+00 1.6179000e+00 2.1265315e+00 6.3178534e-01 9.0506254e-01 1.0032443e+00 1.9078396e+00 6.5712608e-01 6.8261201e-01 6.0219099e-01 1.1003034e+00 9.0532049e-01 1.1001014e+00 5.0000761e-01 1.0433444e+00 9.1892454e-01 9.0002615e-01 5.6595908e-01 7.0470867e-01 6.1119558e-01 6.0017982e-01 5.0085236e-01 1.5275160e+00 1.6747664e+00 1.0434746e+00 5.2132556e-01 8.0533198e-01 1.5264802e+00 5.7609230e-01 4.1317535e-01 
8.6051414e-01 5.7630313e-01 5.2524663e-01 4.1315633e-01 8.6054545e-01 1.0440350e+00 1.5412701e+00 4.1210927e-01 8.0250202e-01 9.1471442e-01 1.3130978e+00 3.0490481e-01 5.0043084e-01 5.7630313e-01 5.0043842e-01 3.3818226e-01 5.0043084e-01 6.3925756e-01 6.0948212e-01 4.1317535e-01 3.0482299e-01 7.0548283e-01 3.0490481e-01 2.2573593e-01 5.6394820e-01 1.3634093e+00 1.4858469e+00 8.1757693e-01 5.2201750e-01 9.1427000e-01 1.3523380e+00 6.0201716e-01 3.4342562e-01 7.1629168e-01 7.0096708e-01 6.0948212e-01 3.0490481e-01 7.0096708e-01 9.1424701e-01 1.4267554e+00 4.0127250e-01 4.1420960e-01 4.8342635e-01 1.2049539e+00 6.0964891e-01 1.1269424e-01 7.1621613e-01 4.1212852e-01 6.0018299e-01 5.3914287e-01 7.0548283e-01 5.2524663e-01 7.0105084e-01 5.0476836e-01 5.6371422e-01 3.0474106e-01 5.2491734e-01 6.0948212e-01 1.2000065e+00 2.0187441e+00 1.0498228e+00 2.2315584e+00 1.0000152e+00 1.8808952e+00 1.1286798e+00 7.5826453e-01 1.9821126e+00 1.9334872e+00 1.4094023e+00 9.7944085e-01 1.0090312e+00 3.0915245e-01 1.4094022e+00 1.7226330e+00 1.6785116e+00 8.2418071e-01 1.4544336e+00 1.4183053e+00 2.0448570e+00 1.3189240e+00 1.1989547e+00 1.6071563e+00 2.0162299e+00 9.7599312e-01 1.1287691e+00 1.5299044e+00 1.8304280e+00 1.5694554e+00 1.5966664e+00 1.9334872e+00 2.0448570e+00 1.2223853e+00 2.3135239e+00 3.0915245e-01 2.0415522e+00 1.2703001e+00 9.2351241e-01 2.1487275e+00 2.0854181e+00 1.4650300e+00 1.1157320e+00 8.0296037e-01 1.2013591e+00 1.4650276e+00 1.8684939e+00 1.6818191e+00 8.0245746e-01 1.5324961e+00 1.5271597e+00 2.1953922e+00 1.5071120e+00 1.3457716e+00 1.8029854e+00 2.0885102e+00 1.0837575e+00 1.2703001e+00 1.7133162e+00 1.9522053e+00 1.7369702e+00 1.6941963e+00 2.0286286e+00 1.1213597e+00 6.3912943e-01 1.9163315e+00 5.0855778e-01 1.1310337e+00 1.3142952e+00 6.0219099e-01 8.0046764e-01 7.2904264e-01 1.2366099e+00 1.4559030e+00 2.0467625e+00 7.7882758e-01 6.0184622e-01 6.0948800e-01 1.7296063e+00 1.2395260e+00 9.0668287e-01 8.0051036e-01 1.0231745e+00 1.0411548e+00 1.0543951e+00 5.2167829e-01 1.1356187e+00 1.2079042e+00 9.3439622e-01 4.2268438e-01 8.1719606e-01 1.2192978e+00 8.0046764e-01 1.3133662e+00 1.0434746e+00 8.3916809e-01 2.2573593e-01 5.0855077e-01 9.3848935e-01 9.0659977e-01 5.2167829e-01 7.0096858e-01 5.5450500e-01 1.0287902e+00 5.2133802e-01 8.4725834e-01 9.7599312e-01 8.0250123e-01 6.0018299e-01 5.6371422e-01 1.0171340e+00 3.0482299e-01 2.0181667e-01 6.0000317e-01 1.1079931e+00 2.0061436e-01 2.2573593e-01 5.0084481e-01 8.1683095e-01 5.2524663e-01 7.0096708e-01 1.0116865e+00 2.2278855e+00 7.0008584e-01 1.1298552e+00 1.6300950e+00 6.0017982e-01 5.0084481e-01 8.5403428e-01 1.6097507e+00 1.8284464e+00 2.3350903e+00 8.5406616e-01 7.1629168e-01 7.5508853e-01 2.1138813e+00 8.1683095e-01 8.2462252e-01 4.0246123e-01 1.3009222e+00 1.1139906e+00 1.3000951e+00 2.2608083e-01 1.2636227e+00 1.1315710e+00 1.1002119e+00 7.0088627e-01 9.0029018e-01 6.9600743e-01 3.1328089e-01 1.8661354e+00 1.1286798e+00 7.2044167e-01 1.9755679e+00 1.9314520e+00 1.3741466e+00 9.0645118e-01 6.0184622e-01 1.0001751e+00 1.3741498e+00 1.7083042e+00 1.6308665e+00 6.0201716e-01 1.4559030e+00 1.4140789e+00 2.0433026e+00 1.3131370e+00 1.1900969e+00 1.6049479e+00 2.0057464e+00 9.6691372e-01 1.1304042e+00 1.5237285e+00 1.7843627e+00 1.5639785e+00 1.5975352e+00 1.9314520e+00 8.2671175e-01 1.1543257e+00 1.2085435e-01 3.0474106e-01 7.0088627e-01 1.0144117e+00 1.3018103e+00 1.7709153e+00 7.0462844e-01 3.0482299e-01 7.0470867e-01 1.4857440e+00 8.1457587e-01 6.0948506e-01 3.3813251e-01 6.4049114e-01 7.4954884e-01 6.3925756e-01 
5.0043084e-01 1.0090834e+00 8.7372177e-01 5.2838320e-01 2.0121983e-01 3.4342562e-01 7.3084171e-01 4.1315633e-01 5.0855778e-01 9.1024401e-01 8.2498722e-01 5.0436965e-01 5.6595908e-01 7.2044167e-01 1.2101609e+00 5.0437695e-01 6.9987517e-01 8.1457660e-01 1.0010209e+00 4.1212852e-01 3.4342562e-01 9.3351278e-01 3.0915245e-01 3.0482299e-01 6.0052920e-01 9.2747919e-01 2.2608083e-01 4.0000000e-01 5.0476836e-01 8.5586571e-01 5.0477564e-01 5.0477564e-01 8.2498722e-01 1.2635707e+00 1.2396421e+00 8.0533198e-01 2.4170870e-01 4.0127250e-01 7.4618926e-01 8.0726668e-01 1.0151880e+00 1.1066159e+00 5.6371422e-01 9.1554656e-01 8.0879701e-01 1.3523345e+00 6.0366256e-01 6.3912943e-01 9.0532093e-01 1.4186217e+00 5.2133179e-01 7.1700909e-01 8.1719606e-01 1.0923439e+00 8.5409862e-01 1.0116865e+00 1.3253496e+00 2.0121983e-01 8.0051036e-01 1.1269596e+00 1.4140457e+00 1.8721285e+00 8.0250123e-01 3.3813251e-01 8.0250202e-01 1.5969056e+00 8.4536936e-01 7.0096708e-01 2.2538848e-01 7.4395693e-01 8.3222261e-01 7.1779518e-01 4.1212852e-01 1.1079931e+00 9.4151244e-01 5.7630313e-01 3.0490481e-01 4.1420960e-01 6.9509395e-01 3.4080442e-01 7.0184453e-01 1.1527745e+00 1.4140486e+00 1.8971345e+00 7.0556260e-01 3.1328089e-01 7.0910969e-01 1.6486410e+00 7.4618926e-01 6.0184622e-01 1.1269424e-01 8.0923926e-01 7.7598796e-01 8.0883916e-01 3.4085233e-01 1.0235254e+00 8.7240114e-01 6.3309012e-01 5.0043842e-01 4.1315633e-01 5.7609230e-01 2.2538848e-01 8.0888055e-01 1.0030868e+00 1.5299044e+00 1.0000000e-01 6.3164977e-01 7.0096708e-01 1.3008855e+00 6.0184622e-01 3.3813251e-01 8.0296037e-01 5.0476836e-01 3.6256305e-01 5.6618864e-01 6.3178782e-01 4.5847767e-01 5.2491734e-01 4.1420960e-01 6.0202028e-01 4.0127250e-01 6.0052920e-01 5.6618864e-01 3.4342562e-01 8.7372177e-01 8.2425704e-01 9.3308891e-01 1.1005554e+00 7.1700774e-01 9.6674360e-01 8.0051115e-01 1.2632995e+00 5.2491734e-01 8.0883916e-01 7.8935898e-01 1.4043632e+00 7.0470867e-01 9.0532093e-01 7.5502989e-01 9.6953662e-01 7.4612718e-01 1.0222576e+00 1.3061180e+00 1.0032296e+00 1.0032293e+00 1.1900276e+00 1.3017553e+00 4.1315633e-01 1.1093621e+00 1.0088783e+00 1.5263498e+00 7.1708289e-01 7.3155911e-01 1.0040629e+00 1.6175925e+00 6.1845783e-01 7.5826453e-01 9.3426769e-01 1.2396937e+00 1.0142626e+00 1.2128138e+00 1.5237074e+00 1.5299064e+00 1.6885111e+00 1.8315348e+00 8.0097499e-01 1.6050900e+00 1.5160591e+00 2.0074169e+00 1.1389082e+00 1.2275665e+00 1.3502668e+00 2.1298991e+00 1.1075720e+00 1.2113964e+00 1.3632538e+00 1.7639471e+00 1.4922544e+00 1.7133283e+00 2.0290146e+00 7.1621748e-01 8.0051036e-01 1.3008813e+00 6.0017982e-01 4.1210927e-01 8.0492246e-01 5.0477564e-01 3.4080442e-01 5.6595908e-01 6.3309258e-01 4.5784410e-01 5.0855778e-01 4.1317535e-01 6.0366256e-01 4.0246123e-01 6.0035621e-01 5.7630313e-01 5.0085236e-01 1.4407390e+00 9.1892413e-01 4.2270142e-01 3.6452132e-01 6.7824250e-01 9.0668287e-01 8.2458409e-01 5.2133179e-01 9.0792879e-01 1.0124729e+00 8.0250202e-01 4.1210927e-01 5.0085236e-01 8.2458478e-01 4.1315633e-01 1.6100639e+00 1.0426636e+00 5.2491734e-01 8.0488008e-01 8.6054545e-01 1.0116721e+00 9.7291273e-01 5.6595908e-01 9.4532171e-01 1.1186499e+00 9.1681464e-01 6.3178782e-01 6.2656178e-01 9.6574369e-01 5.3943256e-01 1.4007831e+00 1.3033860e+00 1.7572550e+00 8.5406674e-01 1.0030724e+00 1.0426513e+00 1.9078843e+00 9.0005048e-01 1.0010209e+00 1.0776188e+00 1.4549432e+00 1.2363278e+00 1.5032156e+00 1.8103044e+00 6.0184934e-01 8.2671175e-01 6.0383105e-01 4.1209001e-01 6.3309258e-01 7.4418186e-01 5.0477564e-01 4.0006662e-01 4.8342635e-01 9.1892413e-01 4.8391482e-01 
2.0121983e-01 6.4620889e-01 7.0462697e-01 5.0436965e-01 6.0184622e-01 5.7608844e-01 6.1830764e-01 5.3914287e-01 7.0105084e-01 5.0855778e-01 6.3165225e-01 3.0490481e-01 5.0477564e-01 5.2133179e-01 9.1446938e-01 8.7209348e-01 9.0532093e-01 3.4085233e-01 1.1298636e+00 9.6150595e-01 7.2036819e-01 5.0477564e-01 5.2167208e-01 6.3925756e-01 3.0008832e-01 3.0915245e-01 3.0474106e-01 1.1006468e+00 5.0043842e-01 4.1420960e-01 2.4195741e-01 6.8170466e-01 4.0127250e-01 7.0096708e-01 1.0003198e+00 5.0043084e-01 9.1132198e-01 3.0026460e-01 2.0121983e-01 4.0004442e-01 6.9987517e-01 4.5148429e-01 5.0477564e-01 8.3183672e-01 1.1010711e+00 8.0000160e-01 6.0052920e-01 2.0121983e-01 6.8161057e-01 4.1212852e-01 7.0176121e-01 1.0030721e+00 1.0458540e+00 9.3984267e-01 9.0166476e-01 5.0043084e-01 7.0088627e-01 7.0993998e-01 3.0017653e-01 2.2608083e-01 7.0008584e-01 9.3848935e-01 7.0184453e-01 6.3178534e-01 9.6936870e-01 5.0476836e-01 8.7372177e-01 5.6618864e-01 5.0477564e-01 8.7240114e-01 5.3943256e-01 3.0474106e-01 5.2167208e-01 8.0879701e-01 5.0085236e-01 9.0279268e-01 5.2133802e-01 4.2362917e-01 6.0017982e-01 5.2838320e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt b/voice_bridge/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt new file mode 100644 index 0000000000000000000000000000000000000000..3e2759df30c14c1503818cc6a400a370e8fd8e89 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-seuclidean-ml-iris.txt @@ -0,0 +1 @@ + 1.1781739e+00 8.4573383e-01 1.1040164e+00 2.6033464e-01 1.0391769e+00 6.5951091e-01 2.6643250e-01 1.6215602e+00 9.6424206e-01 5.8926015e-01 4.4417668e-01 1.2158053e+00 1.5196051e+00 1.4342986e+00 2.2147971e+00 1.0267382e+00 1.3103399e-01 1.0246015e+00 7.0646671e-01 4.6190224e-01 5.3352899e-01 6.8496652e-01 6.2944414e-01 5.1453676e-01 1.1649855e+00 3.8639663e-01 1.3340137e-01 2.6033464e-01 8.5141186e-01 9.9757114e-01 5.0629646e-01 1.3963590e+00 1.6851313e+00 9.6424206e-01 7.1143905e-01 4.8636669e-01 9.6424206e-01 1.4309353e+00 2.3749211e-01 1.8699153e-01 2.8644037e+00 1.0938603e+00 5.4968254e-01 7.9227302e-01 1.2158053e+00 7.0111465e-01 9.1831777e-01 5.2374483e-01 4.7680727e-01 3.4225655e+00 2.9886388e+00 3.5231786e+00 3.4844976e+00 3.4140450e+00 2.8802393e+00 3.0292175e+00 2.9585178e+00 3.2500773e+00 2.8104851e+00 3.8076036e+00 2.7718515e+00 3.6661618e+00 3.0567513e+00 2.4313960e+00 3.1540282e+00 2.7718124e+00 2.7494216e+00 4.0917474e+00 3.0136333e+00 3.0855821e+00 2.8833448e+00 3.7756717e+00 3.0462644e+00 3.0262994e+00 3.1582448e+00 3.6064873e+00 3.6179250e+00 3.0140884e+00 2.7108793e+00 3.1480683e+00 3.0769261e+00 2.8006021e+00 3.5140011e+00 2.7293962e+00 2.7724816e+00 3.3142477e+00 3.8377035e+00 2.4725633e+00 3.1307104e+00 3.0248446e+00 2.9240118e+00 2.9852014e+00 3.1515797e+00 2.8921724e+00 2.4678110e+00 2.6524994e+00 2.9083443e+00 2.7444686e+00 2.7478281e+00 4.2652803e+00 3.6712837e+00 4.4571526e+00 3.7518856e+00 4.1563042e+00 5.0327536e+00 3.5110505e+00 4.5914343e+00 4.4347154e+00 4.7605836e+00 3.6465897e+00 3.9644198e+00 4.1403419e+00 3.9458900e+00 4.0035735e+00 3.9244083e+00 3.7394250e+00 5.1213512e+00 5.6085412e+00 4.1515197e+00 4.3260896e+00 3.5311267e+00 5.2010517e+00 3.7194928e+00 4.0104626e+00 4.2547108e+00 3.5326627e+00 3.3344440e+00 4.1152831e+00 4.1647603e+00 4.7306329e+00 5.0503286e+00 4.1958529e+00 3.4649010e+00 3.7290073e+00 5.0848756e+00 4.0161825e+00 3.6208873e+00 3.2588014e+00 4.1126592e+00 4.3082433e+00 4.1887411e+00 3.6712837e+00 4.3324309e+00 4.3552675e+00 4.1561376e+00 4.0674499e+00 3.7933592e+00 3.8117183e+00 
3.3250640e+00 5.2374483e-01 4.3319335e-01 1.3890415e+00 2.1841707e+00 9.9973471e-01 9.3211669e-01 6.4636269e-01 2.7124234e-01 1.7245677e+00 9.3727156e-01 1.7819563e-01 7.5570839e-01 2.5520912e+00 3.3809114e+00 2.1782802e+00 1.1854382e+00 2.0937104e+00 1.8662528e+00 1.1155937e+00 1.6542533e+00 1.4482752e+00 8.4881492e-01 9.7259094e-01 1.6562722e-01 9.7322023e-01 1.2100516e+00 9.9111027e-01 5.3286499e-01 2.8394141e-01 1.1346946e+00 2.5666454e+00 2.8608436e+00 2.7124234e-01 4.9009568e-01 1.3630799e+00 2.7124234e-01 6.0647055e-01 9.5529726e-01 1.1682143e+00 1.6911681e+00 7.6195008e-01 1.2774622e+00 1.9003949e+00 1.7819563e-01 1.8642334e+00 5.8652824e-01 1.6860841e+00 7.0235100e-01 3.5517185e+00 3.0793995e+00 3.5669733e+00 2.7166736e+00 3.1838913e+00 2.5120824e+00 3.1938169e+00 2.0428688e+00 3.1039816e+00 2.2561090e+00 2.8016158e+00 2.6226738e+00 2.9050141e+00 2.8502197e+00 2.0976260e+00 3.1846091e+00 2.5890531e+00 2.2584354e+00 3.4434621e+00 2.3329638e+00 3.1272801e+00 2.5616006e+00 3.3203580e+00 2.7436923e+00 2.8484236e+00 3.0948526e+00 3.4151452e+00 3.5708989e+00 2.7939968e+00 2.0736054e+00 2.3834491e+00 2.2886612e+00 2.3204706e+00 3.1632397e+00 2.5205525e+00 3.0112889e+00 3.3433635e+00 3.2300528e+00 2.2657938e+00 2.4705763e+00 2.4462190e+00 2.8038852e+00 2.4332562e+00 2.2089299e+00 2.4060762e+00 2.2734727e+00 2.3627180e+00 2.7012637e+00 1.8976741e+00 2.3590971e+00 4.3837112e+00 3.3195686e+00 4.4453895e+00 3.6018522e+00 4.1012355e+00 5.0512932e+00 2.8774753e+00 4.5344585e+00 4.0827835e+00 5.0801762e+00 3.7291677e+00 3.6888814e+00 4.1064223e+00 3.4625304e+00 3.7552252e+00 3.9939605e+00 3.6781201e+00 5.5433523e+00 5.4381439e+00 3.4976392e+00 4.4223824e+00 3.2288234e+00 5.1217596e+00 3.4157742e+00 4.1643077e+00 4.3726405e+00 3.2842292e+00 3.2296198e+00 3.9190152e+00 4.1591876e+00 4.6244312e+00 5.4884429e+00 4.0035368e+00 3.2202996e+00 3.3301365e+00 5.1089381e+00 4.2054648e+00 3.6234874e+00 3.1421933e+00 4.1502382e+00 4.3306814e+00 4.2256435e+00 3.3195686e+00 4.4219948e+00 4.4973329e+00 4.1152664e+00 3.6487297e+00 3.7329401e+00 4.0033827e+00 3.2017663e+00 2.8394141e-01 9.9272943e-01 1.8549949e+00 4.9772204e-01 5.9738093e-01 7.8305765e-01 3.7622328e-01 1.4342986e+00 5.0621589e-01 4.9772204e-01 6.9001472e-01 2.2742100e+00 3.0330374e+00 1.8410898e+00 8.5582452e-01 1.8552074e+00 1.4758765e+00 9.8932340e-01 1.2824303e+00 9.4580058e-01 7.0175090e-01 5.8564715e-01 6.1067563e-01 6.6453319e-01 9.2528705e-01 7.6195008e-01 1.7002750e-01 3.1093967e-01 1.0044375e+00 2.1686473e+00 2.5011215e+00 3.7622328e-01 3.6669623e-01 1.1883075e+00 3.7622328e-01 5.8652824e-01 6.7745878e-01 7.9191984e-01 2.0937821e+00 3.6228991e-01 9.5582164e-01 1.5272557e+00 4.9772204e-01 1.4755007e+00 1.3340137e-01 1.3666101e+00 4.3319335e-01 3.7283414e+00 3.2257816e+00 3.7651559e+00 3.1082143e+00 3.4606263e+00 2.7705999e+00 3.2962379e+00 2.4179023e+00 3.3643791e+00 2.5175848e+00 3.2317517e+00 2.8135312e+00 3.3502574e+00 3.0859108e+00 2.3316915e+00 3.3832002e+00 2.7540885e+00 2.5906747e+00 3.8459512e+00 2.7110494e+00 3.2296198e+00 2.8510843e+00 3.6612067e+00 3.0231940e+00 3.1083626e+00 3.3221751e+00 3.6999785e+00 3.7824508e+00 3.0223050e+00 2.4549511e+00 2.7813487e+00 2.6993734e+00 2.6424988e+00 3.4348309e+00 2.6680185e+00 3.0548262e+00 3.5357687e+00 3.6340473e+00 2.4474337e+00 2.8211532e+00 2.7662396e+00 3.0069386e+00 2.7817508e+00 2.6121651e+00 2.7000040e+00 2.4677010e+00 2.5915377e+00 2.9544131e+00 2.2712863e+00 2.6277952e+00 4.4682388e+00 3.5629824e+00 4.6484688e+00 3.8140427e+00 4.2790737e+00 5.2629823e+00 3.1332305e+00 
4.7710813e+00 4.3977196e+00 5.1429155e+00 3.8634878e+00 3.9555042e+00 4.3021827e+00 3.7450218e+00 3.9451569e+00 4.1141318e+00 3.8729361e+00 5.5923916e+00 5.7171219e+00 3.8836634e+00 4.5660923e+00 3.4290419e+00 5.3764415e+00 3.6907517e+00 4.2782894e+00 4.5393827e+00 3.5302656e+00 3.4102235e+00 4.1476932e+00 4.3814983e+00 4.8831870e+00 5.5467550e+00 4.2276454e+00 3.4820326e+00 3.6311162e+00 5.3207972e+00 4.2656428e+00 3.7854499e+00 3.3178004e+00 4.3254709e+00 4.4873379e+00 4.3956810e+00 3.5629824e+00 4.5607328e+00 4.6030755e+00 4.3016138e+00 3.9622355e+00 3.9225802e+00 4.0577906e+00 3.3684813e+00 1.2515236e+00 2.1021589e+00 7.0646671e-01 8.4383266e-01 5.2374483e-01 3.8525820e-01 1.6876653e+00 7.3502408e-01 3.6319073e-01 5.0299964e-01 2.5372015e+00 3.2897546e+00 2.1021589e+00 1.1117653e+00 2.0978519e+00 1.7286097e+00 1.1937015e+00 1.5323598e+00 1.1874605e+00 8.6297946e-01 7.6710016e-01 5.3827772e-01 8.8540687e-01 1.1730565e+00 1.0034646e+00 2.6643250e-01 2.4808718e-01 1.2168625e+00 2.4209958e+00 2.7605308e+00 3.8525820e-01 5.6164055e-01 1.4300979e+00 3.8525820e-01 3.5266705e-01 9.1831777e-01 1.0556536e+00 1.8570904e+00 3.5266705e-01 1.1671832e+00 1.7581227e+00 3.6319073e-01 1.7245677e+00 2.3749211e-01 1.6215602e+00 6.7030885e-01 3.7702988e+00 3.2513048e+00 3.7854692e+00 2.9445918e+00 3.4252075e+00 2.6854877e+00 3.3289662e+00 2.2084365e+00 3.3482393e+00 2.3872006e+00 3.0088381e+00 2.7858967e+00 3.2051999e+00 3.0423378e+00 2.2727200e+00 3.4066596e+00 2.7026316e+00 2.4942728e+00 3.7194777e+00 2.5718070e+00 3.2266664e+00 2.8009311e+00 3.5699473e+00 2.9607930e+00 3.0876835e+00 3.3257458e+00 3.6752903e+00 3.7792524e+00 2.9772187e+00 2.3405405e+00 2.6225184e+00 2.5379456e+00 2.5530959e+00 3.3522700e+00 2.6036900e+00 3.0973166e+00 3.5528019e+00 3.5210610e+00 2.4001125e+00 2.6797931e+00 2.6323855e+00 2.9822614e+00 2.6747723e+00 2.4035668e+00 2.5939621e+00 2.4241443e+00 2.5291529e+00 2.9226858e+00 2.0959349e+00 2.5480036e+00 4.4738080e+00 3.4750769e+00 4.6459778e+00 3.7712854e+00 4.2573656e+00 5.2660922e+00 2.9665251e+00 4.7582165e+00 4.3221591e+00 5.2027091e+00 3.8786505e+00 3.8957203e+00 4.2952898e+00 3.6300721e+00 3.8796854e+00 4.1217239e+00 3.8539421e+00 5.6722969e+00 5.6818418e+00 3.7421166e+00 4.5832488e+00 3.3486395e+00 5.3611963e+00 3.6296692e+00 4.3021827e+00 4.5620086e+00 3.4793229e+00 3.3827920e+00 4.0990020e+00 4.3836503e+00 4.8653269e+00 5.6359100e+00 4.1798856e+00 3.4290065e+00 3.5331571e+00 5.3326389e+00 4.2899049e+00 3.7762521e+00 3.2871170e+00 4.3357622e+00 4.4879065e+00 4.4101806e+00 3.4750769e+00 4.5719131e+00 4.6252914e+00 4.2958117e+00 3.8764095e+00 3.9087616e+00 4.0828629e+00 3.3281063e+00 8.9980139e-01 6.8064066e-01 4.6472955e-01 1.7695601e+00 1.1682143e+00 5.3827772e-01 5.3286499e-01 1.4108003e+00 1.6357068e+00 1.3406177e+00 2.0471148e+00 8.8540687e-01 2.9145160e-01 9.8663349e-01 4.9772204e-01 6.8921053e-01 3.7371902e-01 5.3360548e-01 8.2263932e-01 5.9279023e-01 1.3884168e+00 5.4248468e-01 3.3872939e-01 5.2066928e-01 9.9757114e-01 1.1836141e+00 7.1971771e-01 1.1867923e+00 1.5097838e+00 1.1682143e+00 9.2945909e-01 6.4884272e-01 1.1682143e+00 1.5630357e+00 4.8016385e-01 2.7124234e-01 3.0617227e+00 1.1744248e+00 5.8374436e-01 6.1345624e-01 1.4108003e+00 4.9009568e-01 1.0413386e+00 4.3319335e-01 6.9189100e-01 3.5573943e+00 3.1141702e+00 3.6648465e+00 3.6881887e+00 3.5883824e+00 3.0468381e+00 3.1315658e+00 3.1515797e+00 3.4214871e+00 2.9743594e+00 4.0164863e+00 2.9182492e+00 3.8928105e+00 3.2158144e+00 2.6006889e+00 3.3027067e+00 2.9031809e+00 2.9465762e+00 4.3027855e+00 
3.2186029e+00 3.1845052e+00 3.0688421e+00 3.9670252e+00 3.2223969e+00 3.2005819e+00 3.3183883e+00 3.7835219e+00 3.7624114e+00 3.1706932e+00 2.9238797e+00 3.3563322e+00 3.2896970e+00 2.9943889e+00 3.6782511e+00 2.8525048e+00 2.8501434e+00 3.4560405e+00 4.0524463e+00 2.6189855e+00 3.3240937e+00 3.2080454e+00 3.0726532e+00 3.1844624e+00 3.3537486e+00 3.0707196e+00 2.6200714e+00 2.8136838e+00 3.0798325e+00 2.9434145e+00 2.9219863e+00 4.3385668e+00 3.8211670e+00 4.5879449e+00 3.8900790e+00 4.2758494e+00 5.1630878e+00 3.6606996e+00 4.7359279e+00 4.6113949e+00 4.8204842e+00 3.7540483e+00 4.1248784e+00 4.2705921e+00 4.1081181e+00 4.1285849e+00 4.0208302e+00 3.8718630e+00 5.1706118e+00 5.7653526e+00 4.3529699e+00 4.4302351e+00 3.6643052e+00 5.3499285e+00 3.8863166e+00 4.1025634e+00 4.3705834e+00 3.6895783e+00 3.4655259e+00 4.2576017e+00 4.3078329e+00 4.8848930e+00 5.1059908e+00 4.3355275e+00 3.6287723e+00 3.9015858e+00 5.2167033e+00 4.0809175e+00 3.7394250e+00 3.3885059e+00 4.2346521e+00 4.4182506e+00 4.3085803e+00 3.8211670e+00 4.4331391e+00 4.4402220e+00 4.2825037e+00 4.2456731e+00 3.9239772e+00 3.8761056e+00 3.4480528e+00 1.5196051e+00 1.2824303e+00 2.6220224e+00 1.9839744e+00 5.4248468e-01 1.3880441e+00 2.2398377e+00 2.5185753e+00 6.5993495e-01 1.2140269e+00 2.2670334e-01 1.0140902e+00 4.4901474e-01 4.6310132e-01 1.1825559e+00 5.9738093e-01 1.2799022e+00 1.4364110e+00 1.3915110e+00 2.1479410e+00 1.2515236e+00 9.9544409e-01 1.2188859e+00 1.8419620e+00 2.0002725e+00 1.1587093e+00 6.6217390e-01 7.6869104e-01 1.9839744e+00 1.7287715e+00 9.9282597e-01 1.9839744e+00 2.4262873e+00 1.2419907e+00 1.0737552e+00 3.8557204e+00 2.0456732e+00 1.0753036e+00 4.4417668e-01 2.2089621e+00 5.0629646e-01 1.9071648e+00 5.5576380e-01 1.4985933e+00 3.3087308e+00 2.9428879e+00 3.4716469e+00 4.0891690e+00 3.6027276e+00 3.2367228e+00 2.9085289e+00 3.7111791e+00 3.3928277e+00 3.3150290e+00 4.5928107e+00 2.9594198e+00 4.2678298e+00 3.2621413e+00 2.8156205e+00 3.1507918e+00 2.9937665e+00 3.2188610e+00 4.5717893e+00 3.5888229e+00 3.0697068e+00 3.2000980e+00 4.1198780e+00 3.3377308e+00 3.2155231e+00 3.2352945e+00 3.7547729e+00 3.6294383e+00 3.2310889e+00 3.2831808e+00 3.7736317e+00 3.7263104e+00 3.2475076e+00 3.7907966e+00 2.9840079e+00 2.6164035e+00 3.2920107e+00 4.3046992e+00 2.7582086e+00 3.6782988e+00 3.5276458e+00 3.0726914e+00 3.4670753e+00 3.9103065e+00 3.3340819e+00 2.7470239e+00 2.9746673e+00 3.1328218e+00 3.4555378e+00 3.1318122e+00 4.0752096e+00 3.9330937e+00 4.3762388e+00 3.8407425e+00 4.1274362e+00 4.9032073e+00 4.0261574e+00 4.5547766e+00 4.6534818e+00 4.3582690e+00 3.5326627e+00 4.1405270e+00 4.0947877e+00 4.2953569e+00 4.1533819e+00 3.7981559e+00 3.7518931e+00 4.6218430e+00 5.6203182e+00 4.6338556e+00 4.1503558e+00 3.7655153e+00 5.1552615e+00 3.9363994e+00 3.8053980e+00 4.0787299e+00 3.7177375e+00 3.4172455e+00 4.2121483e+00 4.1116484e+00 4.7277367e+00 4.5452377e+00 4.2828893e+00 3.6617103e+00 4.0381240e+00 4.9437127e+00 3.7768622e+00 3.5869495e+00 3.3594066e+00 4.0056296e+00 4.1979142e+00 4.0739556e+00 3.9330937e+00 4.1628496e+00 4.1341118e+00 4.1117268e+00 4.3552101e+00 3.7951858e+00 3.5859295e+00 3.4280549e+00 5.0370871e-01 1.1854382e+00 8.2574748e-01 1.1968529e+00 2.9724335e-01 9.8896933e-01 1.0391769e+00 2.0112023e+00 2.6653431e+00 1.5111262e+00 6.4636269e-01 1.6262201e+00 1.1040164e+00 9.8966705e-01 9.2934901e-01 5.3040146e-01 7.1789533e-01 3.9472619e-01 1.0556536e+00 5.1318506e-01 7.7368489e-01 7.3633268e-01 5.0731024e-01 7.5303835e-01 9.7659801e-01 1.7897583e+00 2.1453760e+00 8.2574748e-01 
6.9001472e-01 1.1202045e+00 8.2574748e-01 9.6424206e-01 6.2046469e-01 5.3827772e-01 2.5404386e+00 5.3988754e-01 6.7372733e-01 1.1459117e+00 9.5361455e-01 1.1160907e+00 4.7951153e-01 1.1016806e+00 5.5109043e-01 3.7667766e+00 3.2399456e+00 3.8211099e+00 3.3920086e+00 3.5974024e+00 2.9126202e+00 3.2661365e+00 2.7296888e+00 3.4884812e+00 2.6863536e+00 3.5939578e+00 2.8820992e+00 3.6783922e+00 3.1916608e+00 2.4616677e+00 3.4465420e+00 2.8051321e+00 2.8088029e+00 4.1173049e+00 2.9788024e+00 3.2021702e+00 3.0140681e+00 3.8639979e+00 3.1756883e+00 3.2362495e+00 3.4136564e+00 3.8424217e+00 3.8484723e+00 3.1221021e+00 2.7251978e+00 3.0739866e+00 3.0068046e+00 2.8468838e+00 3.5726594e+00 2.7099355e+00 2.9743925e+00 3.5889631e+00 3.9062347e+00 2.5235038e+00 3.0623697e+00 2.9777787e+00 3.0820765e+00 3.0110500e+00 2.9445347e+00 2.8809757e+00 2.5543631e+00 2.7073441e+00 3.0792230e+00 2.5679169e+00 2.7817508e+00 4.4017095e+00 3.6741422e+00 4.6939918e+00 3.8825162e+00 4.3049794e+00 5.3127345e+00 3.3002804e+00 4.8514877e+00 4.5630927e+00 5.0475012e+00 3.8518880e+00 4.0758615e+00 4.3442223e+00 3.8984747e+00 3.9980344e+00 4.0855291e+00 3.9215612e+00 5.4851910e+00 5.8312870e+00 4.1416475e+00 4.5535489e+00 3.5028870e+00 5.4694510e+00 3.8234999e+00 4.2411026e+00 4.5531893e+00 3.6365882e+00 3.4540568e+00 4.2272070e+00 4.4531017e+00 4.9839411e+00 5.4520873e+00 4.3016933e+00 3.6054769e+00 3.7985950e+00 5.3693255e+00 4.1776636e+00 3.8035129e+00 3.3594554e+00 4.3469553e+00 4.4886880e+00 4.4112274e+00 3.6741422e+00 4.5435532e+00 4.5534752e+00 4.3346048e+00 4.1329859e+00 3.9643709e+00 3.9674740e+00 3.4024060e+00 1.3630799e+00 7.1446962e-01 8.4383266e-01 2.4808718e-01 9.6424206e-01 1.2783641e+00 1.6962086e+00 2.4702874e+00 1.2824303e+00 2.9691107e-01 1.2631980e+00 9.3957399e-01 4.9617437e-01 7.4965096e-01 7.2553812e-01 4.8492463e-01 3.3125444e-01 9.2426065e-01 2.6812643e-01 3.3395426e-01 2.4808718e-01 5.8926015e-01 7.3502408e-01 5.4956349e-01 1.6376300e+00 1.9421609e+00 7.1446962e-01 4.9160020e-01 6.5622658e-01 7.1446962e-01 1.1785203e+00 1.2076330e-01 2.8845946e-01 2.6135503e+00 8.6638670e-01 5.7543116e-01 9.9282597e-01 9.6424206e-01 9.3211669e-01 6.7030885e-01 7.8100392e-01 2.3749211e-01 3.4362741e+00 2.9772187e+00 3.5154539e+00 3.2993605e+00 3.3443673e+00 2.7564382e+00 3.0285957e+00 2.7337208e+00 3.1980682e+00 2.6433553e+00 3.5789723e+00 2.6973512e+00 3.4963203e+00 2.9759207e+00 2.3127671e+00 3.1412273e+00 2.6774449e+00 2.6095931e+00 3.9436181e+00 2.8415480e+00 3.0475523e+00 2.7865107e+00 3.6589666e+00 2.9471549e+00 2.9637920e+00 3.1238401e+00 3.5511257e+00 3.5866237e+00 2.9292978e+00 2.5500042e+00 2.9620296e+00 2.8874183e+00 2.6658729e+00 3.4048426e+00 2.6224103e+00 2.7775195e+00 3.2991484e+00 3.6986035e+00 2.3717154e+00 2.9594198e+00 2.8613259e+00 2.8592001e+00 2.8393888e+00 2.9284199e+00 2.7478281e+00 2.3715604e+00 2.5423572e+00 2.8329679e+00 2.5370255e+00 2.6226761e+00 4.2550363e+00 3.5587552e+00 4.4384175e+00 3.6863990e+00 4.1157774e+00 5.0262130e+00 3.3282378e+00 4.5651798e+00 4.3425674e+00 4.8115590e+00 3.6359466e+00 3.8813909e+00 4.1126592e+00 3.8106374e+00 3.9142555e+00 3.9091502e+00 3.6969354e+00 5.1996380e+00 5.5654572e+00 3.9942942e+00 4.3261607e+00 3.4228883e+00 5.1764012e+00 3.6303894e+00 4.0165247e+00 4.2627936e+00 3.4508614e+00 3.2748167e+00 4.0461381e+00 4.1489951e+00 4.6983104e+00 5.1372583e+00 4.1280577e+00 3.3829235e+00 3.6112201e+00 5.0844328e+00 4.0217556e+00 3.5877667e+00 3.1942059e+00 4.1021303e+00 4.2899049e+00 4.1807095e+00 3.5587552e+00 4.3276502e+00 4.3608503e+00 4.1273623e+00 
3.9585431e+00 3.7540483e+00 3.8154519e+00 3.2543470e+00 7.7313507e-01 2.2058495e+00 1.2553676e+00 5.5109043e-01 3.3742167e-01 3.0507869e+00 3.8084614e+00 2.6171176e+00 1.6268459e+00 2.6113513e+00 2.2457527e+00 1.6784057e+00 2.0471148e+00 1.6480463e+00 1.3225313e+00 1.2819528e+00 7.6880092e-01 1.3915110e+00 1.6886167e+00 1.5043671e+00 7.8918675e-01 6.7745878e-01 1.6911617e+00 2.9348176e+00 3.2792996e+00 7.7313507e-01 1.0082548e+00 1.9190366e+00 7.7313507e-01 2.3749211e-01 1.4309353e+00 1.5685186e+00 1.3963590e+00 6.9420840e-01 1.6514950e+00 2.2742046e+00 5.5109043e-01 2.2440749e+00 7.3283576e-01 2.1421205e+00 1.1730565e+00 4.0382971e+00 3.5072516e+00 4.0204750e+00 2.8157524e+00 3.5602797e+00 2.7716933e+00 3.6026548e+00 1.9881684e+00 3.5249607e+00 2.3719578e+00 2.7108793e+00 2.9588139e+00 3.1000099e+00 3.1914275e+00 2.3942229e+00 3.6456797e+00 2.8533918e+00 2.5518058e+00 3.6496659e+00 2.5198146e+00 3.4451131e+00 2.9183683e+00 3.5989425e+00 3.0794356e+00 3.2576813e+00 3.5320163e+00 3.8261152e+00 3.9741896e+00 3.1180182e+00 2.3364082e+00 2.5170134e+00 2.4274466e+00 2.6068690e+00 3.4218333e+00 2.7386417e+00 3.3934324e+00 3.7851452e+00 3.4854109e+00 2.5636830e+00 2.6200486e+00 2.6174942e+00 3.1669559e+00 2.6880360e+00 2.1675629e+00 2.6284424e+00 2.5986852e+00 2.6571682e+00 3.0828753e+00 1.9438939e+00 2.6338308e+00 4.6899445e+00 3.5257224e+00 4.8360835e+00 3.9149039e+00 4.4239486e+00 5.4654338e+00 2.8575769e+00 4.9368840e+00 4.3795070e+00 5.4971381e+00 4.1073887e+00 3.9867349e+00 4.4778796e+00 3.6113294e+00 3.9521234e+00 4.3324165e+00 4.0348180e+00 6.0067497e+00 5.8007868e+00 3.6612067e+00 4.8067419e+00 3.4133837e+00 5.5245725e+00 3.7158962e+00 4.5501061e+00 4.8067452e+00 3.5898573e+00 3.5494058e+00 4.2132256e+00 4.5903044e+00 5.0235775e+00 5.9805492e+00 4.2919572e+00 3.5520531e+00 3.5821954e+00 5.5319518e+00 4.5355231e+00 3.9801247e+00 3.4489678e+00 4.5459210e+00 4.6801759e+00 4.6148655e+00 3.5257224e+00 4.7911899e+00 4.8567489e+00 4.4697071e+00 3.9039516e+00 4.0848538e+00 4.3320055e+00 3.4824517e+00 1.5154593e+00 7.1671402e-01 2.6643250e-01 7.9347379e-01 2.3528246e+00 3.1744385e+00 1.9839744e+00 9.9059199e-01 1.9029496e+00 1.6532822e+00 9.3451915e-01 1.4586696e+00 1.2483935e+00 7.4743804e-01 7.4957404e-01 2.9691107e-01 8.0686941e-01 9.9973471e-01 7.9394533e-01 3.6319073e-01 1.8699153e-01 9.9891776e-01 2.3345854e+00 2.6422396e+00 0.0000000e+00 3.3742167e-01 1.1857824e+00 0.0000000e+00 6.6918102e-01 7.4445830e-01 9.7322023e-01 1.9284841e+00 6.6918102e-01 1.1393372e+00 1.6942803e+00 3.7371902e-01 1.6386105e+00 4.5257749e-01 1.4715172e+00 4.9772204e-01 3.5602797e+00 3.0968979e+00 3.5933352e+00 2.8998722e+00 3.2656298e+00 2.6029746e+00 3.1974447e+00 2.2445103e+00 3.1601923e+00 2.3946216e+00 3.0209665e+00 2.6867317e+00 3.0775658e+00 2.9161244e+00 2.1946278e+00 3.2137635e+00 2.6502891e+00 2.3652711e+00 3.6096140e+00 2.4893065e+00 3.1578003e+00 2.6568473e+00 3.4426472e+00 2.8187901e+00 2.9128857e+00 3.1418202e+00 3.4847097e+00 3.6205958e+00 2.8694312e+00 2.2223283e+00 2.5588203e+00 2.4651138e+00 2.4413293e+00 3.2621861e+00 2.5834128e+00 2.9995857e+00 3.3733791e+00 3.3817876e+00 2.3263008e+00 2.6305758e+00 2.5756071e+00 2.8533918e+00 2.5683063e+00 2.4187322e+00 2.5258216e+00 2.3250308e+00 2.4413618e+00 2.7691536e+00 2.1006933e+00 2.4608850e+00 4.4119896e+00 3.4290419e+00 4.4942656e+00 3.6650933e+00 4.1590662e+00 5.0899438e+00 3.0333619e+00 4.5799469e+00 4.1882413e+00 5.0726081e+00 3.7613721e+00 3.7859992e+00 4.1623715e+00 3.6029758e+00 3.8608065e+00 4.0352348e+00 3.7266849e+00 5.5043247e+00 
5.5172732e+00 3.6569442e+00 4.4568115e+00 3.3324016e+00 5.1765224e+00 3.5192065e+00 4.1799642e+00 4.3857400e+00 3.3769078e+00 3.2906843e+00 4.0034548e+00 4.1917183e+00 4.6854597e+00 5.4444866e+00 4.0904298e+00 3.2962678e+00 3.4250783e+00 5.1569386e+00 4.2213314e+00 3.6582637e+00 3.2059261e+00 4.1937040e+00 4.3826532e+00 4.2786320e+00 3.4290419e+00 4.4549850e+00 4.5270304e+00 4.1816268e+00 3.7777251e+00 3.7924144e+00 4.0173731e+00 3.2613828e+00 1.0034646e+00 1.7753099e+00 2.1070188e+00 8.6079202e-01 1.6751898e+00 5.4248468e-01 6.0365341e-01 4.6310132e-01 4.4901474e-01 7.0111465e-01 4.4713936e-01 1.0328871e+00 1.0722301e+00 1.0271920e+00 1.6860841e+00 8.8540687e-01 5.2066928e-01 7.3502408e-01 1.4309353e+00 1.5630357e+00 7.3985997e-01 9.6257499e-01 1.1608422e+00 1.5154593e+00 1.2617482e+00 4.9009568e-01 1.5154593e+00 2.0192952e+00 7.8100392e-01 6.9001472e-01 3.4112480e+00 1.6736143e+00 8.5090098e-01 5.5183182e-01 1.7753099e+00 4.3319335e-01 1.5054343e+00 1.2076330e-01 1.0428797e+00 3.2901237e+00 2.9292978e+00 3.4367371e+00 3.8111737e+00 3.4729880e+00 3.0672734e+00 2.9473505e+00 3.3901879e+00 3.2662947e+00 3.1144880e+00 4.2413542e+00 2.8660589e+00 3.9495965e+00 3.1433256e+00 2.6375433e+00 3.0908568e+00 2.9081458e+00 2.9702968e+00 4.3236374e+00 3.3103937e+00 3.0964304e+00 3.0179755e+00 3.9313682e+00 3.1669001e+00 3.0754584e+00 3.1432906e+00 3.6245464e+00 3.5873526e+00 3.1179878e+00 2.9918256e+00 3.4776059e+00 3.4142800e+00 3.0198617e+00 3.6568155e+00 2.8980988e+00 2.6944324e+00 3.2512254e+00 4.0479095e+00 2.6293791e+00 3.4291613e+00 3.2968205e+00 2.9799791e+00 3.2239661e+00 3.5774656e+00 3.1299499e+00 2.6069579e+00 2.8203994e+00 2.9888842e+00 3.1470877e+00 2.9476507e+00 4.1975965e+00 3.8311128e+00 4.3861256e+00 3.7921747e+00 4.1446583e+00 4.9211801e+00 3.8442384e+00 4.5238497e+00 4.5231439e+00 4.5451210e+00 3.5805575e+00 4.0469570e+00 4.0990882e+00 4.1579560e+00 4.1249171e+00 3.8727781e+00 3.7290616e+00 4.8292468e+00 5.5757877e+00 4.3965263e+00 4.2248397e+00 3.6936498e+00 5.1256160e+00 3.8221804e+00 3.8961870e+00 4.1176453e+00 3.6242665e+00 3.3807801e+00 4.1671042e+00 4.0787299e+00 4.6798442e+00 4.7374542e+00 4.2466909e+00 3.5432140e+00 3.8759165e+00 4.9689016e+00 3.9204411e+00 3.5927937e+00 3.3203721e+00 4.0348754e+00 4.2531600e+00 4.1147391e+00 3.8311128e+00 4.2401451e+00 4.2502727e+00 4.1279956e+00 4.2116129e+00 3.7856899e+00 3.7242025e+00 3.3954918e+00 9.3865015e-01 1.1459117e+00 1.8505741e+00 2.5636327e+00 1.3972701e+00 4.6310132e-01 1.4327294e+00 1.0013399e+00 7.2679299e-01 8.2574748e-01 6.2187934e-01 5.8496636e-01 1.7002750e-01 9.5361455e-01 3.5639126e-01 5.3827772e-01 4.9617437e-01 4.7680727e-01 6.9189100e-01 7.7259801e-01 1.6911681e+00 2.0326426e+00 7.1671402e-01 5.6788283e-01 8.9258315e-01 7.1671402e-01 1.0551281e+00 3.6669623e-01 3.9699460e-01 2.5716465e+00 6.8921053e-01 6.2148529e-01 1.0391769e+00 9.3865015e-01 9.9111027e-01 5.3286499e-01 9.2006504e-01 3.5266705e-01 3.5819899e+00 3.0902007e+00 3.6482741e+00 3.3284223e+00 3.4528558e+00 2.8062636e+00 3.1283731e+00 2.7130802e+00 3.3201500e+00 2.6478976e+00 3.5696084e+00 2.7728704e+00 3.5649049e+00 3.0583917e+00 2.3718219e+00 3.2763163e+00 2.7180029e+00 2.6779045e+00 4.0150919e+00 2.8864805e+00 3.1083977e+00 2.8822332e+00 3.7402558e+00 3.0304088e+00 3.0793541e+00 3.2506893e+00 3.6756049e+00 3.7003058e+00 3.0054875e+00 2.6160903e+00 2.9965118e+00 2.9238797e+00 2.7351276e+00 3.4650507e+00 2.6418165e+00 2.8577585e+00 3.4252075e+00 3.7832878e+00 2.4227175e+00 2.9917855e+00 2.8903467e+00 2.9460322e+00 2.9034030e+00 2.9191699e+00 
[data payload omitted: several thousand whitespace-separated floating-point values in %.7e notation, the raw contents of a numeric data file added by this diff]
3.5207953e+00 3.5188476e+00 2.0227452e+00 3.7070275e+00 3.8401809e+00 3.2712836e+00 2.2870689e+00 2.9197390e+00 3.4787880e+00 2.3479440e+00 1.8015956e+00 2.9300280e+00 9.4226820e-01 1.8443182e+00 6.2046469e-01 1.3340137e+00 5.0731024e-01 1.2583559e+00 1.1751408e+00 1.7063292e+00 1.5923247e+00 1.2788332e+00 7.3035754e-01 1.0391769e+00 6.6194168e-01 2.9537172e-01 2.8845946e-01 3.7622328e-01 6.2760421e-01 7.7259801e-01 1.4843174e+00 1.8353890e+00 1.8732616e+00 1.1492120e+00 9.8621003e-01 1.4916922e+00 1.4186317e+00 5.4702555e-01 1.4349060e+00 1.2616939e+00 1.6526705e+00 1.5077694e+00 6.5951091e-01 1.2429329e+00 2.5190636e+00 1.3124532e+00 1.1415080e+00 1.1102613e+00 5.1210327e-01 2.2412909e+00 1.1466385e+00 2.0209769e+00 1.3581397e+00 1.4351001e+00 9.3899770e-01 1.3860326e+00 1.9736520e+00 2.3116417e+00 1.4395013e+00 1.3256797e+00 2.5152621e+00 1.1895067e+00 1.0230389e+00 1.2126763e+00 1.7102781e+00 1.7732497e+00 1.5528794e+00 8.7017583e-01 2.9799960e+00 2.3789847e+00 1.8031686e+00 1.6479163e+00 1.5433090e+00 2.0188390e+00 8.9564079e-01 1.5340057e+00 1.4361609e+00 8.5359653e-01 9.3591761e-01 1.2375842e+00 1.0932909e+00 1.5255827e+00 2.9419617e+00 1.3503714e+00 5.7743200e-01 1.0870568e+00 2.0633836e+00 1.9646340e+00 9.8006549e-01 1.0101003e+00 1.2839264e+00 1.6205305e+00 1.4633215e+00 1.3581397e+00 1.6723912e+00 1.9304834e+00 1.3785508e+00 1.2852279e+00 1.0122929e+00 1.8669942e+00 1.1301977e+00 1.7293858e+00 1.1132823e+00 1.5940674e+00 1.2647627e+00 7.0155617e-01 2.0524860e+00 9.1916314e-01 9.0143387e-01 1.7090768e+00 7.7500385e-01 1.6060095e+00 1.1202045e+00 1.5217697e+00 1.2283051e+00 1.5431751e+00 1.8486311e+00 2.0116712e+00 2.0744305e+00 1.1308980e+00 8.6249502e-01 8.7618973e-01 9.4738284e-01 7.7051641e-01 1.2102028e+00 8.1844705e-01 1.9297683e+00 2.0868978e+00 1.6471661e+00 8.6143605e-01 6.0365341e-01 5.7743200e-01 1.3481077e+00 8.0628657e-01 1.1400599e+00 5.2860161e-01 9.6999820e-01 7.8957903e-01 1.3189781e+00 8.0128554e-01 6.6918102e-01 2.6783589e+00 1.1902996e+00 2.8052881e+00 1.7833755e+00 2.2807524e+00 3.4730319e+00 7.8369762e-01 2.9612706e+00 2.2200035e+00 3.7113566e+00 2.2079590e+00 1.7773274e+00 2.4240040e+00 1.2586273e+00 1.6606465e+00 2.3345591e+00 2.0100747e+00 4.3781379e+00 3.6673897e+00 1.6336953e+00 2.8241758e+00 1.1071867e+00 3.5077760e+00 1.5364148e+00 2.6605007e+00 2.9756588e+00 1.4305490e+00 1.5019762e+00 1.9806289e+00 2.7459951e+00 3.0159022e+00 4.4377151e+00 2.0446123e+00 1.5157660e+00 1.4706419e+00 3.5410473e+00 2.6488235e+00 2.0119986e+00 1.3953413e+00 2.5748429e+00 2.6034011e+00 2.6304123e+00 1.1902996e+00 2.7818749e+00 2.8834870e+00 2.3861430e+00 1.6719199e+00 2.0259174e+00 2.4855987e+00 1.3894554e+00 2.6621352e+00 1.3234208e+00 2.6096596e+00 2.2340939e+00 3.3444949e+00 2.5679785e+00 1.9118907e+00 1.7502251e+00 1.3868450e+00 3.2376571e+00 2.3245757e+00 2.2030084e+00 2.3874774e+00 2.7435279e+00 3.0963501e+00 2.9911365e+00 3.3313369e+00 2.5528922e+00 1.6215602e+00 1.1232628e+00 1.1083720e+00 1.9130507e+00 2.3463017e+00 2.5105454e+00 3.5809242e+00 3.3974315e+00 1.8325077e+00 2.4726944e+00 1.3889514e+00 1.6150266e+00 2.7833542e+00 1.7312416e+00 7.0111465e-01 1.8556044e+00 2.5019422e+00 2.3097506e+00 2.6016512e+00 1.2007565e+00 2.0949830e+00 4.1622891e+00 2.3984916e+00 3.9595754e+00 3.0477055e+00 3.5738045e+00 4.5102220e+00 1.5833139e+00 3.9547990e+00 2.8883510e+00 5.1681640e+00 3.6715203e+00 2.8100260e+00 3.6615020e+00 2.1175666e+00 2.9197870e+00 3.8026677e+00 3.3142297e+00 5.7988752e+00 4.3773719e+00 1.6802144e+00 4.1689985e+00 2.5051488e+00 4.3637125e+00 
2.6075737e+00 4.1031841e+00 4.2218983e+00 2.6731954e+00 2.9685228e+00 3.1235748e+00 3.8333962e+00 3.9200272e+00 5.8238336e+00 3.1861618e+00 2.6684075e+00 2.3174914e+00 4.5851647e+00 4.2037872e+00 3.4173362e+00 2.9015754e+00 3.8649606e+00 3.9284352e+00 3.9274419e+00 2.3984916e+00 4.1396215e+00 4.3152972e+00 3.6556493e+00 2.4306199e+00 3.3534589e+00 4.0726739e+00 2.9019830e+00 1.9649078e+00 4.5716421e-01 6.0725725e-01 1.0082512e+00 4.0020411e-01 9.6216255e-01 1.8879475e+00 1.3283978e+00 6.9493020e-01 5.9382214e-01 1.3116762e+00 7.1128716e-01 6.9976890e-01 8.6291569e-01 1.2356595e+00 1.0989171e+00 3.1093967e-01 1.2231205e+00 1.5729927e+00 1.6302590e+00 8.2263932e-01 8.7786730e-01 6.2729876e-01 9.5483435e-01 1.0328871e+00 1.7091506e+00 4.5071694e-01 1.2824303e+00 1.1188225e+00 3.5622944e-01 1.0163696e+00 2.1159028e+00 8.2380019e-01 4.6137216e-01 4.2450569e-01 5.0629646e-01 1.7321630e+00 5.8565201e-01 1.8627348e+00 1.0140018e+00 1.9095789e+00 1.0347180e+00 1.4794093e+00 2.5851550e+00 1.6987423e+00 2.1172382e+00 1.7999889e+00 2.6937126e+00 1.1946586e+00 1.2274756e+00 1.5304430e+00 1.4222936e+00 1.3705079e+00 1.4369760e+00 1.1056213e+00 3.3133435e+00 3.0027854e+00 1.9037709e+00 1.8688892e+00 9.6470639e-01 2.7156484e+00 1.0119180e+00 1.6591942e+00 1.9679139e+00 7.8369762e-01 6.0848963e-01 1.3509456e+00 1.8177289e+00 2.2200035e+00 3.3498688e+00 1.4311753e+00 8.4040822e-01 1.2474502e+00 2.6426508e+00 1.7620244e+00 1.0560148e+00 5.3362004e-01 1.6100417e+00 1.7340403e+00 1.6942922e+00 1.0140018e+00 1.8496575e+00 1.9626001e+00 1.5340961e+00 1.4294738e+00 1.1293709e+00 1.5949054e+00 6.4398240e-01 1.7472907e+00 1.7451622e+00 2.3128200e+00 2.0364367e+00 1.1795364e+00 7.5358247e-01 8.5582452e-01 2.5764453e+00 1.4435947e+00 1.1399118e+00 1.4681660e+00 1.7387082e+00 2.0628406e+00 1.8244206e+00 2.2981140e+00 1.7651851e+00 1.0308265e+00 7.7934221e-01 7.7863029e-01 1.2082987e+00 1.5285763e+00 2.1068341e+00 2.8909913e+00 2.3684734e+00 6.2479428e-01 1.9481438e+00 9.9891776e-01 1.1557309e+00 1.9516972e+00 9.8896933e-01 1.2918836e+00 1.3154759e+00 1.9018319e+00 1.7043940e+00 1.6876317e+00 1.4136421e+00 1.4845363e+00 3.4227731e+00 1.7797546e+00 2.8993041e+00 2.1584174e+00 2.6985144e+00 3.3744038e+00 1.7790388e+00 2.8051892e+00 1.8256311e+00 4.2196161e+00 2.7909300e+00 1.8699505e+00 2.6716730e+00 1.6273208e+00 2.3931485e+00 2.9994905e+00 2.3643994e+00 4.7587356e+00 3.2663266e+00 8.6629251e-01 3.2140857e+00 2.0311002e+00 3.1918979e+00 1.6793067e+00 3.1869276e+00 3.1309476e+00 1.8104252e+00 2.1858235e+00 2.2467893e+00 2.6763967e+00 2.7532877e+00 4.7380019e+00 2.3330174e+00 1.6923429e+00 1.4009491e+00 3.4550204e+00 3.4609647e+00 2.5225714e+00 2.1699387e+00 2.8630132e+00 3.0349029e+00 2.9631214e+00 1.7797546e+00 3.2114945e+00 3.4557456e+00 2.7355167e+00 1.5237929e+00 2.4389172e+00 3.3539591e+00 2.2150193e+00 8.7774396e-01 8.7560645e-01 6.6918102e-01 8.5695467e-01 1.6281675e+00 1.2552875e+00 9.0276183e-01 4.7723749e-01 9.6922609e-01 3.4909881e-01 4.4701039e-01 6.6835176e-01 8.7807032e-01 8.7272262e-01 2.1119253e-01 1.2038778e+00 1.5064820e+00 1.5654738e+00 7.8630314e-01 5.8942278e-01 8.9320425e-01 1.1940983e+00 8.6887698e-01 1.4210091e+00 7.4201890e-01 1.2452417e+00 1.0494370e+00 2.3749211e-01 9.1435339e-01 2.1409786e+00 8.2148003e-01 6.5993495e-01 5.7516438e-01 2.8835410e-01 1.8418099e+00 6.4756318e-01 1.8787743e+00 9.0810653e-01 1.6779282e+00 7.7021931e-01 1.3319441e+00 2.3098596e+00 1.7659238e+00 1.7880416e+00 1.4280932e+00 2.6604707e+00 1.1753998e+00 9.4281519e-01 1.3471074e+00 1.3158313e+00 1.3974368e+00 
1.4547739e+00 8.7568652e-01 3.2288696e+00 2.6753697e+00 1.6330922e+00 1.7674989e+00 1.0240850e+00 2.3852911e+00 7.4743804e-01 1.5932991e+00 1.7495490e+00 5.8796666e-01 5.8374436e-01 1.1339991e+00 1.5083690e+00 1.8912106e+00 3.2526896e+00 1.2423778e+00 4.2436984e-01 8.5959137e-01 2.4097678e+00 1.8344669e+00 9.0791603e-01 5.8796666e-01 1.4645285e+00 1.6477112e+00 1.6088132e+00 9.0810653e-01 1.7454599e+00 1.9428935e+00 1.4315280e+00 1.1694177e+00 9.9244707e-01 1.7007353e+00 6.6154242e-01 1.4832888e+00 6.1810529e-01 7.1128716e-01 1.8601631e+00 9.7397874e-01 1.2254650e+00 6.8496652e-01 1.4755282e+00 9.0753778e-01 1.0443931e+00 1.3169341e+00 1.6226440e+00 1.6498870e+00 7.4980278e-01 8.0686941e-01 1.1940983e+00 1.2255953e+00 5.6318359e-01 1.1503759e+00 6.6361830e-01 1.4063472e+00 1.5603679e+00 1.6837563e+00 3.6536845e-01 9.5761359e-01 8.4619410e-01 8.6958016e-01 7.7821113e-01 1.6196626e+00 5.7306091e-01 4.4786319e-01 3.6086172e-01 8.2608188e-01 1.1831978e+00 3.8480889e-01 2.4265852e+00 1.2696247e+00 2.4764170e+00 1.5584328e+00 2.0444851e+00 3.1426915e+00 1.4493286e+00 2.6430316e+00 2.1446703e+00 3.2893516e+00 1.7955663e+00 1.6408995e+00 2.1004081e+00 1.5285733e+00 1.7064050e+00 2.0142932e+00 1.6802708e+00 3.9009617e+00 3.4821230e+00 1.8809382e+00 2.4651410e+00 1.1989033e+00 3.2268928e+00 1.3782117e+00 2.2651964e+00 2.5478630e+00 1.2124370e+00 1.1789342e+00 1.8358339e+00 2.3443222e+00 2.7210373e+00 3.9221023e+00 1.9136809e+00 1.2486828e+00 1.4646971e+00 3.1951870e+00 2.3252489e+00 1.6537705e+00 1.0855082e+00 2.1947734e+00 2.3108044e+00 2.2621105e+00 1.2696247e+00 2.4484679e+00 2.5504328e+00 2.0873736e+00 1.6773040e+00 1.7023842e+00 2.1476736e+00 1.1560388e+00 1.3558057e+00 1.5283845e+00 2.1664244e+00 1.9784646e+00 1.1457159e+00 1.0355160e+00 1.4985549e+00 1.0494370e+00 6.0365341e-01 2.6033464e-01 7.3803207e-01 5.6864482e-01 9.7352372e-01 1.8229204e+00 2.2308199e+00 2.2668270e+00 1.4769275e+00 1.3385535e+00 1.5931825e+00 1.1248155e+00 2.1466080e-01 1.9117251e+00 1.3652496e+00 2.0207624e+00 1.8704283e+00 7.6880092e-01 1.6220723e+00 2.8778954e+00 1.6265611e+00 1.2621791e+00 1.3042843e+00 7.7313507e-01 2.5362196e+00 1.4082507e+00 1.8291996e+00 1.6183246e+00 1.3603639e+00 1.0878281e+00 1.3564590e+00 1.9053825e+00 2.6072470e+00 1.4737985e+00 1.6790332e+00 2.1679999e+00 9.4182667e-01 1.2929545e+00 1.1391970e+00 2.0265696e+00 1.8799960e+00 1.3547660e+00 8.8029208e-01 2.6196958e+00 2.4872661e+00 2.2706454e+00 1.4300844e+00 1.7151590e+00 2.0626281e+00 1.1997534e+00 1.2637010e+00 1.2307777e+00 1.0813975e+00 9.6603754e-01 1.3834169e+00 1.0564307e+00 1.5971466e+00 2.5708690e+00 1.4735640e+00 9.4160439e-01 1.5222760e+00 1.9572025e+00 1.7004687e+00 8.9142733e-01 1.0459007e+00 1.1049324e+00 1.4763267e+00 1.2674750e+00 1.6183246e+00 1.4769125e+00 1.6832034e+00 1.2843405e+00 1.6410601e+00 9.6706760e-01 1.5985259e+00 1.1910776e+00 1.0088064e+00 1.9822205e+00 1.3115314e+00 7.2626021e-01 8.5225534e-01 1.4476734e+00 8.6297946e-01 1.0334797e+00 1.2160426e+00 1.5358725e+00 1.3833366e+00 5.3528567e-01 1.2712561e+00 1.5367336e+00 1.6013312e+00 8.9845135e-01 9.1916314e-01 2.4152660e-01 1.0495503e+00 1.3530247e+00 1.8419620e+00 3.4651700e-01 1.2220171e+00 1.0116179e+00 6.2046469e-01 1.0696792e+00 2.0057768e+00 7.5914566e-01 4.4499696e-01 4.0664863e-01 8.1224041e-01 1.6406723e+00 5.8942278e-01 1.9060542e+00 9.6301827e-01 2.1281559e+00 1.1449868e+00 1.6017068e+00 2.8050285e+00 1.4536311e+00 2.3373419e+00 1.9472489e+00 2.8613983e+00 1.3924555e+00 1.3756347e+00 1.7433862e+00 1.3615778e+00 1.3332278e+00 1.5654312e+00 
1.2872568e+00 3.4973752e+00 3.1986815e+00 1.9281666e+00 2.0588451e+00 8.3270853e-01 2.9373687e+00 1.1828955e+00 1.8231885e+00 2.1962402e+00 9.5979989e-01 7.5532639e-01 1.4672797e+00 2.0720690e+00 2.4566102e+00 3.5648048e+00 1.5414664e+00 1.0212756e+00 1.2733735e+00 2.8900916e+00 1.8289569e+00 1.2092544e+00 6.4558417e-01 1.8428644e+00 1.8966444e+00 1.9319316e+00 9.6301827e-01 2.0102936e+00 2.1030669e+00 1.7380754e+00 1.5489952e+00 1.3296349e+00 1.6538197e+00 6.3357758e-01 1.4295948e+00 5.4873947e-01 1.6126413e+00 5.8496636e-01 1.1009910e+00 6.0725725e-01 9.5139638e-01 1.3098483e+00 1.3941599e+00 1.6617787e+00 8.6702860e-01 4.2826573e-01 8.0996690e-01 8.1324137e-01 2.8553149e-01 9.9883272e-01 1.0921061e+00 1.8259719e+00 1.6053711e+00 1.1828265e+00 8.3161134e-01 7.0834786e-01 5.3106808e-01 9.8233874e-01 3.5366952e-01 1.4106682e+00 4.6484021e-01 7.5179033e-01 6.2055338e-01 7.8324937e-01 1.1546456e+00 4.7149050e-01 2.7022699e+00 1.3084256e+00 2.4620452e+00 1.5488588e+00 2.1433843e+00 3.0477876e+00 1.5122060e+00 2.4794486e+00 1.8496575e+00 3.5092631e+00 2.0205369e+00 1.5421829e+00 2.1550478e+00 1.4847626e+00 1.9338323e+00 2.2845215e+00 1.7093196e+00 4.0428524e+00 3.2768847e+00 1.4413624e+00 2.6112105e+00 1.4262166e+00 3.0341947e+00 1.2919157e+00 2.4486747e+00 2.5390232e+00 1.2420951e+00 1.3836252e+00 1.8380693e+00 2.2098781e+00 2.5420974e+00 4.0353061e+00 1.9425259e+00 1.0808550e+00 1.0871507e+00 3.1511951e+00 2.6568698e+00 1.7619640e+00 1.3391481e+00 2.2882514e+00 2.4739376e+00 2.4163220e+00 1.3084256e+00 2.5943375e+00 2.7895659e+00 2.2249457e+00 1.4927500e+00 1.8163092e+00 2.5068377e+00 1.3832520e+00 1.1807138e+00 2.3735475e+00 1.4416726e+00 7.3803207e-01 1.4480380e+00 1.6571634e+00 1.9125650e+00 1.5766889e+00 1.9793333e+00 1.6323793e+00 1.4021778e+00 1.1659675e+00 1.2498767e+00 1.3539814e+00 1.2332482e+00 2.0826771e+00 2.7811716e+00 2.1646850e+00 3.7371902e-01 2.0122804e+00 1.1585774e+00 1.3127802e+00 1.8544941e+00 1.1485726e+00 1.7450075e+00 1.3972701e+00 1.9880179e+00 1.7517164e+00 1.6394680e+00 1.8002228e+00 1.5490387e+00 2.9816674e+00 1.3976606e+00 2.4151949e+00 1.7787946e+00 2.2180206e+00 2.8804995e+00 1.7355261e+00 2.3592859e+00 1.2412454e+00 3.7977608e+00 2.4485045e+00 1.3668906e+00 2.2064746e+00 1.1631247e+00 1.9116990e+00 2.5849220e+00 2.0027932e+00 4.3925033e+00 2.6611027e+00 3.7234239e-01 2.7559143e+00 1.7089501e+00 2.6795765e+00 1.2450968e+00 2.8073641e+00 2.7667084e+00 1.4485479e+00 1.9038618e+00 1.7262603e+00 2.3286441e+00 2.2609611e+00 4.4068441e+00 1.7897439e+00 1.4300608e+00 1.1275945e+00 2.9337206e+00 3.0746426e+00 2.2005676e+00 1.9094387e+00 2.4292641e+00 2.5401664e+00 2.4975057e+00 1.3976606e+00 2.7518188e+00 2.9966927e+00 2.2416615e+00 9.2104245e-01 2.0302905e+00 3.0030765e+00 1.9507955e+00 1.9593598e+00 9.5666050e-01 1.1447875e+00 1.0324994e+00 1.3800294e+00 1.7386688e+00 1.7301705e+00 2.0251376e+00 1.2143895e+00 3.6924035e-01 2.6643250e-01 3.1271814e-01 5.3690447e-01 1.1566759e+00 1.3335853e+00 2.2553589e+00 2.0395552e+00 1.0374728e+00 1.1879760e+00 2.9406726e-01 4.0650681e-01 1.4164313e+00 3.6319073e-01 9.3305124e-01 5.5709100e-01 1.1791615e+00 9.8143688e-01 1.2231662e+00 7.9042934e-01 7.5817225e-01 2.9833953e+00 1.3537061e+00 2.7591591e+00 1.8262769e+00 2.3975382e+00 3.3499130e+00 1.2034779e+00 2.7851895e+00 1.9405021e+00 3.8845156e+00 2.3750632e+00 1.6954582e+00 2.4431790e+00 1.3394090e+00 1.9751740e+00 2.5771567e+00 2.0432035e+00 4.4739802e+00 3.4420978e+00 1.1727925e+00 2.9298786e+00 1.4800982e+00 3.2892623e+00 1.4456510e+00 2.8154123e+00 2.9321762e+00 
1.4509441e+00 1.6902348e+00 2.0142932e+00 2.5791547e+00 2.8031074e+00 4.4835636e+00 2.1018908e+00 1.3894554e+00 1.2250001e+00 3.4334168e+00 2.9754074e+00 2.1241116e+00 1.6323629e+00 2.6113665e+00 2.7403495e+00 2.7045382e+00 1.3537061e+00 2.9092469e+00 3.0943267e+00 2.4717839e+00 1.4839640e+00 2.1082363e+00 2.8334846e+00 1.6627952e+00 1.2426609e+00 1.7313027e+00 1.2372185e+00 1.1631247e+00 1.1195889e+00 1.5188976e+00 1.0845006e+00 8.2263932e-01 1.9012930e+00 2.1909047e+00 2.2638611e+00 1.4908532e+00 1.2008045e+00 8.7223523e-01 5.7002996e-01 1.0697164e+00 2.2410714e+00 9.6470639e-01 1.8639992e+00 1.6786018e+00 7.4743804e-01 1.6592561e+00 2.7039438e+00 1.4162972e+00 1.0024224e+00 1.0401603e+00 1.0580730e+00 2.3284654e+00 1.2231205e+00 1.2611129e+00 1.1791615e+00 1.6899776e+00 9.5793067e-01 1.1548640e+00 2.3712314e+00 2.0275068e+00 2.0149111e+00 1.9649183e+00 2.1679212e+00 7.8905316e-01 1.3385913e+00 1.3061285e+00 1.6571634e+00 1.2299006e+00 9.3495766e-01 9.4613565e-01 2.8415314e+00 2.9130399e+00 2.3454203e+00 1.4655406e+00 1.0267382e+00 2.6085350e+00 1.2515236e+00 1.1837505e+00 1.7109084e+00 9.9111027e-01 5.2374483e-01 1.2552875e+00 1.7513749e+00 2.1661990e+00 2.9392776e+00 1.3022811e+00 1.1259771e+00 1.5663601e+00 2.4310503e+00 1.1268523e+00 7.5840628e-01 4.7680727e-01 1.3348163e+00 1.3454539e+00 1.4034689e+00 1.1791615e+00 1.4139320e+00 1.4450127e+00 1.2754470e+00 1.6940148e+00 9.2620264e-01 9.4281519e-01 4.9160020e-01 9.3054395e-01 4.1781009e-01 4.6190224e-01 8.0369154e-01 9.6816967e-01 1.1548640e+00 4.6557224e-01 8.2518769e-01 1.2073068e+00 1.2487994e+00 4.5257749e-01 7.8164792e-01 1.0374728e+00 1.4711456e+00 1.1089653e+00 1.1997868e+00 7.6195008e-01 1.0018628e+00 8.9796526e-01 5.8785093e-01 6.0098693e-01 1.8456219e+00 6.5622658e-01 6.9001472e-01 5.4715569e-01 3.1093967e-01 1.5254459e+00 4.8636669e-01 2.2683520e+00 1.0914354e+00 1.9823217e+00 1.1675117e+00 1.6963493e+00 2.6008457e+00 1.7128336e+00 2.0692339e+00 1.5728043e+00 3.0096252e+00 1.5213092e+00 1.1599200e+00 1.6580031e+00 1.3691582e+00 1.6116709e+00 1.8005954e+00 1.2641532e+00 3.5755981e+00 2.8921647e+00 1.5229350e+00 2.1046875e+00 1.2108278e+00 2.6299105e+00 8.9496218e-01 1.9702691e+00 2.0808148e+00 8.0585922e-01 9.4983854e-01 1.4326334e+00 1.7811974e+00 2.1211631e+00 3.5687116e+00 1.5311195e+00 7.1811205e-01 1.0257884e+00 2.6607812e+00 2.2075002e+00 1.3273841e+00 9.2853136e-01 1.7721541e+00 1.9757526e+00 1.8755628e+00 1.0914354e+00 2.1076593e+00 2.2924992e+00 1.7080157e+00 1.2150638e+00 1.3228669e+00 2.0678512e+00 1.0435585e+00 8.3930091e-01 1.0246689e+00 1.2483935e+00 9.2934901e-01 1.2786676e+00 1.0167074e+00 1.2794668e+00 1.2845002e+00 1.3705288e+00 1.0262066e+00 6.1158310e-01 1.6007620e+00 2.1232609e+00 1.4700482e+00 6.0145223e-01 1.5227019e+00 1.1234881e+00 1.1051628e+00 1.1975697e+00 9.1241332e-01 1.9821649e+00 1.0739839e+00 1.4719712e+00 1.2657553e+00 1.0246689e+00 1.8799916e+00 1.1304806e+00 2.3473055e+00 9.3001231e-01 1.7895396e+00 1.0784109e+00 1.5778477e+00 2.3110268e+00 1.7258314e+00 1.7588444e+00 8.0501785e-01 3.1299934e+00 1.7625999e+00 7.4394765e-01 1.5582385e+00 9.7850690e-01 1.4989725e+00 1.9419525e+00 1.2877346e+00 3.7053544e+00 2.3011615e+00 7.8305765e-01 2.1061327e+00 1.2737998e+00 2.1925140e+00 6.0604502e-01 2.1121593e+00 2.0810604e+00 8.0686941e-01 1.2420238e+00 1.1264142e+00 1.6698499e+00 1.7264467e+00 3.7248620e+00 1.2214818e+00 7.0111465e-01 5.3487449e-01 2.3978329e+00 2.4200364e+00 1.4831058e+00 1.2723027e+00 1.7715216e+00 1.9225896e+00 1.8845666e+00 9.3001231e-01 2.0954738e+00 2.3579846e+00 
1.6403910e+00 5.2719130e-01 1.3587682e+00 2.3456726e+00 1.3154759e+00 5.0299964e-01 8.2155022e-01 8.8684653e-01 1.0935878e+00 4.8492463e-01 9.8860056e-01 1.2858521e+00 1.3288928e+00 6.2451737e-01 6.2760421e-01 1.0463002e+00 1.4889605e+00 1.0762241e+00 1.1975697e+00 8.4271175e-01 1.0854927e+00 8.7560645e-01 5.3352899e-01 7.0810362e-01 1.9474744e+00 7.1781501e-01 7.2553812e-01 6.1968090e-01 3.6924035e-01 1.6978139e+00 6.0510141e-01 2.1983316e+00 1.0378652e+00 1.8773521e+00 9.9490014e-01 1.5974238e+00 2.4585483e+00 1.7380659e+00 1.8957007e+00 1.4179266e+00 2.9495957e+00 1.4948761e+00 1.0683666e+00 1.5886178e+00 1.3564059e+00 1.6294524e+00 1.7819921e+00 1.1268523e+00 3.4719349e+00 2.7528980e+00 1.4535731e+00 2.0452826e+00 1.2150379e+00 2.4732935e+00 8.6167901e-01 1.8885847e+00 1.9433611e+00 7.9744128e-01 9.1854596e-01 1.3349909e+00 1.6250498e+00 1.9838258e+00 3.4743868e+00 1.4520430e+00 5.1406096e-01 7.3595190e-01 2.5794085e+00 2.1692945e+00 1.1973560e+00 9.2123504e-01 1.7205327e+00 1.9329718e+00 1.8817619e+00 1.0378652e+00 2.0262673e+00 2.2533761e+00 1.7016580e+00 1.1862896e+00 1.2748646e+00 2.0406838e+00 9.6984925e-01 3.6319073e-01 6.1968090e-01 7.8521221e-01 5.6113157e-01 1.2463647e+00 1.6309592e+00 1.6676963e+00 8.9796526e-01 8.9789119e-01 1.2621791e+00 1.3154759e+00 6.8124108e-01 1.3901973e+00 9.9970024e-01 1.4357022e+00 1.2962951e+00 4.8012872e-01 1.0246015e+00 2.2910733e+00 1.0720705e+00 8.8779355e-01 8.4724089e-01 2.4152660e-01 1.9817257e+00 8.8354057e-01 2.0655284e+00 1.2495886e+00 1.6398109e+00 9.9332012e-01 1.4769125e+00 2.2251642e+00 2.1023706e+00 1.7015856e+00 1.4609179e+00 2.6557282e+00 1.2410479e+00 1.0733560e+00 1.3593949e+00 1.6013655e+00 1.6915504e+00 1.5864800e+00 9.7957718e-01 3.1644964e+00 2.6137665e+00 1.7509262e+00 1.7860234e+00 1.3941000e+00 2.2824049e+00 8.7876634e-01 1.6464371e+00 1.6642217e+00 7.8808432e-01 8.5400786e-01 1.3018901e+00 1.3652161e+00 1.7805677e+00 3.1380968e+00 1.4095411e+00 5.8483448e-01 1.0816610e+00 2.2968622e+00 1.9911692e+00 1.0509799e+00 8.9223438e-01 1.4369760e+00 1.7217513e+00 1.5811148e+00 1.2495886e+00 1.8031513e+00 2.0209769e+00 1.4702446e+00 1.2810704e+00 1.0813343e+00 1.8691588e+00 1.0259679e+00 5.6788283e-01 5.3362004e-01 7.7368489e-01 1.6022591e+00 1.9873741e+00 2.0277089e+00 1.2494231e+00 1.1089653e+00 1.4561750e+00 1.2033093e+00 3.3742167e-01 1.6597443e+00 1.2265630e+00 1.7784712e+00 1.6384023e+00 6.1436388e-01 1.3800294e+00 2.6463489e+00 1.4025367e+00 1.1237500e+00 1.1244975e+00 5.5399712e-01 2.3227610e+00 1.2000527e+00 1.8734557e+00 1.4137602e+00 1.3887598e+00 9.6005858e-01 1.3202421e+00 1.9632584e+00 2.3879303e+00 1.4839475e+00 1.4995474e+00 2.3336107e+00 1.0014276e+00 1.1074656e+00 1.1350466e+00 1.8013325e+00 1.7379612e+00 1.3863777e+00 8.2339084e-01 2.8225734e+00 2.4523537e+00 2.0154418e+00 1.5091823e+00 1.5393373e+00 2.0723760e+00 9.8233874e-01 1.3702101e+00 1.3545502e+00 8.7875749e-01 8.4830222e-01 1.2549787e+00 1.1060185e+00 1.5823028e+00 2.7877979e+00 1.3537061e+00 7.2012544e-01 1.2954496e+00 2.0208193e+00 1.7781563e+00 8.8029208e-01 9.2256644e-01 1.1605968e+00 1.4991046e+00 1.3162835e+00 1.4137602e+00 1.5442127e+00 1.7645705e+00 1.2692218e+00 1.4162972e+00 9.1557526e-01 1.6722331e+00 1.0708496e+00 6.2826980e-01 1.0161846e+00 1.6718164e+00 1.9471640e+00 1.9947662e+00 1.3566251e+00 1.0412209e+00 1.7655766e+00 1.7163342e+00 7.1671402e-01 1.3277490e+00 1.5771462e+00 1.7793591e+00 1.6725709e+00 9.6964683e-01 1.3947746e+00 2.6556269e+00 1.5119726e+00 1.4702773e+00 1.3966512e+00 8.2199752e-01 2.4266623e+00 1.3925524e+00 
2.0577807e+00 1.4034689e+00 1.2545960e+00 9.4767129e-01 1.3281960e+00 1.7401681e+00 2.4345214e+00 1.1896374e+00 1.0436620e+00 2.5015865e+00 1.2764504e+00 8.9223438e-01 1.1006735e+00 1.6953806e+00 1.7900496e+00 1.5985782e+00 8.8098199e-01 2.9595238e+00 2.0497239e+00 1.6965355e+00 1.5863720e+00 1.6496643e+00 1.7201711e+00 8.3409556e-01 1.5639220e+00 1.3496867e+00 8.9427872e-01 1.0978602e+00 1.1314785e+00 9.1432842e-01 1.2235673e+00 2.9196060e+00 1.2400775e+00 6.4083823e-01 1.0643984e+00 1.8241883e+00 2.0498818e+00 1.0696576e+00 1.1919906e+00 1.2042673e+00 1.5543054e+00 1.3831011e+00 1.4034689e+00 1.6218749e+00 1.9188761e+00 1.2920921e+00 1.1337565e+00 1.0067405e+00 1.9865217e+00 1.3029486e+00 9.5748562e-01 1.9681165e+00 2.2573397e+00 2.3235953e+00 1.5741400e+00 1.1016806e+00 1.6166760e+00 1.2896217e+00 3.8830315e-01 1.7972266e+00 1.5164233e+00 2.0064286e+00 1.8697578e+00 8.5494999e-01 1.6681709e+00 2.9309853e+00 1.6503473e+00 1.4467905e+00 1.4113341e+00 9.2189946e-01 2.6393526e+00 1.4852749e+00 1.4601858e+00 1.3160132e+00 8.7649478e-01 6.4756318e-01 8.3256255e-01 1.5094087e+00 2.4769347e+00 1.0668784e+00 1.2459961e+00 1.9408738e+00 6.5485710e-01 8.4116354e-01 6.0795234e-01 1.7154199e+00 1.4961901e+00 9.9551062e-01 3.9472619e-01 2.4940166e+00 2.0216642e+00 2.0463301e+00 1.0230389e+00 1.4612117e+00 1.6595118e+00 8.5582452e-01 9.5437259e-01 9.5694342e-01 7.7934221e-01 7.3851064e-01 8.5695467e-01 7.6638235e-01 1.1767396e+00 2.5076596e+00 9.4281519e-01 7.1971771e-01 1.2830542e+00 1.5700842e+00 1.4287578e+00 5.3095950e-01 8.6291569e-01 6.6154242e-01 1.0050638e+00 8.5606908e-01 1.3160132e+00 1.0514970e+00 1.3171874e+00 7.9433323e-01 1.2774110e+00 4.7509249e-01 1.3730080e+00 9.7659801e-01 1.1663747e+00 1.4582411e+00 1.5261646e+00 7.3570584e-01 5.8785093e-01 7.6039875e-01 1.1605726e+00 9.6964683e-01 1.4553344e+00 6.3765570e-01 1.1681709e+00 1.0005243e+00 2.9691107e-01 8.7856768e-01 2.0651943e+00 7.3735391e-01 6.0653347e-01 4.7837533e-01 3.7398306e-01 1.7406274e+00 5.5183182e-01 1.8498714e+00 8.1329726e-01 1.7508641e+00 8.2125107e-01 1.3423724e+00 2.4127395e+00 1.6384023e+00 1.9130927e+00 1.5043381e+00 2.6917823e+00 1.1782159e+00 9.6249568e-01 1.3877621e+00 1.2214135e+00 1.2719770e+00 1.4200371e+00 9.4526659e-01 3.3044115e+00 2.7645066e+00 1.6390945e+00 1.7948322e+00 8.7588404e-01 2.5003659e+00 7.4157869e-01 1.6267504e+00 1.8590427e+00 5.4310586e-01 5.2316125e-01 1.1372412e+00 1.6472029e+00 2.0021586e+00 3.3409572e+00 1.2314733e+00 5.4779717e-01 9.4822835e-01 2.4877874e+00 1.8001237e+00 9.6012811e-01 4.8644514e-01 1.5074309e+00 1.6452353e+00 1.6151033e+00 8.1329726e-01 1.7721541e+00 1.9352496e+00 1.4226963e+00 1.1564263e+00 1.0022114e+00 1.6574535e+00 5.8132667e-01 5.6318359e-01 5.3286499e-01 4.3341454e-01 1.2747045e+00 1.3163443e+00 2.1153648e+00 1.9183153e+00 1.1909838e+00 1.0657373e+00 5.8852220e-01 6.2225308e-01 1.3220343e+00 4.0443437e-01 1.0982562e+00 6.1619693e-01 1.0378442e+00 8.8917809e-01 1.0970024e+00 8.2199752e-01 6.9493020e-01 3.0003610e+00 1.5102474e+00 2.7635526e+00 1.8759428e+00 2.4405125e+00 3.3586044e+00 1.4659783e+00 2.7980993e+00 2.0759743e+00 3.8255754e+00 2.3211022e+00 1.7886572e+00 2.4450156e+00 1.5788967e+00 2.1011800e+00 2.5635734e+00 2.0416027e+00 4.3880097e+00 3.5282390e+00 1.4609179e+00 2.9105517e+00 1.6043433e+00 3.3245307e+00 1.5187698e+00 2.7743367e+00 2.8814354e+00 1.4896588e+00 1.6771528e+00 2.1027324e+00 2.5396337e+00 2.8265966e+00 4.3745133e+00 2.1946278e+00 1.4104380e+00 1.3873057e+00 3.4289479e+00 2.9514502e+00 2.1043046e+00 1.6198849e+00 2.5820471e+00 
2.7513626e+00 2.6746677e+00 1.5102474e+00 2.9036877e+00 3.0793854e+00 2.4777911e+00 1.6406409e+00 2.1046875e+00 2.7982275e+00 1.6824284e+00 1.4276574e-01 7.9394533e-01 1.3473710e+00 1.5367336e+00 2.5040512e+00 2.2893871e+00 1.0820670e+00 1.4237364e+00 3.6704030e-01 5.8785093e-01 1.6733127e+00 6.1158310e-01 7.1781501e-01 7.8318003e-01 1.4288989e+00 1.2280749e+00 1.4809953e+00 7.0150436e-01 1.0034788e+00 3.1877520e+00 1.5005648e+00 2.9634183e+00 2.0359721e+00 2.5953129e+00 3.5470571e+00 1.1634940e+00 2.9839272e+00 2.0686806e+00 4.1156592e+00 2.6069476e+00 1.8659064e+00 2.6504363e+00 1.4017266e+00 2.1040122e+00 2.7893850e+00 2.2677890e+00 4.7183507e+00 3.5819899e+00 1.1465705e+00 3.1455771e+00 1.6263647e+00 3.4643576e+00 1.6254447e+00 3.0471395e+00 3.1646326e+00 1.6517237e+00 1.9156890e+00 2.1886203e+00 2.8006555e+00 2.9856138e+00 4.7315684e+00 2.2694993e+00 1.6130651e+00 1.3903392e+00 3.6256164e+00 3.1929372e+00 2.3573820e+00 1.8552594e+00 2.8291427e+00 2.9408913e+00 2.9120555e+00 1.5005648e+00 3.1237582e+00 3.3065647e+00 2.6677639e+00 1.5962380e+00 2.3224070e+00 3.0542458e+00 1.8794605e+00 8.3156200e-01 1.4460310e+00 1.6013312e+00 2.5509456e+00 2.3359909e+00 1.1395071e+00 1.4612871e+00 4.8644514e-01 6.6244727e-01 1.7247525e+00 6.6453319e-01 6.8496652e-01 8.5330525e-01 1.4567673e+00 1.2739414e+00 1.5213580e+00 6.7904052e-01 1.0560797e+00 3.2869799e+00 1.6218234e+00 3.0463976e+00 2.1264009e+00 2.6948572e+00 3.6228821e+00 1.2747978e+00 3.0537173e+00 2.1607143e+00 4.1937512e+00 2.6849827e+00 1.9680122e+00 2.7382122e+00 1.5399252e+00 2.2309601e+00 2.8826191e+00 2.3479440e+00 4.7798805e+00 3.6690959e+00 1.2447718e+00 3.2325186e+00 1.7450415e+00 3.5380106e+00 1.7243835e+00 3.1258440e+00 3.2275370e+00 1.7473394e+00 2.0003230e+00 2.2955340e+00 2.8573132e+00 3.0588804e+00 4.7837445e+00 2.3799967e+00 1.6861899e+00 1.4737985e+00 3.7047689e+00 3.2828775e+00 2.4345890e+00 1.9408738e+00 2.9104325e+00 3.0383020e+00 2.9993404e+00 1.6218234e+00 3.2132904e+00 3.3994957e+00 2.7639400e+00 1.7088499e+00 2.4110070e+00 3.1406474e+00 1.9689207e+00 8.9196595e-01 9.9107019e-01 1.7478610e+00 1.5467508e+00 1.1459117e+00 7.5303835e-01 6.0365341e-01 5.1453676e-01 9.1435339e-01 2.3749211e-01 1.4031123e+00 3.2313211e-01 7.2263841e-01 5.2290002e-01 7.1740234e-01 1.0975976e+00 3.1271814e-01 2.5686027e+00 1.1418735e+00 2.3704417e+00 1.4573208e+00 2.0173981e+00 2.9893605e+00 1.3924555e+00 2.4418079e+00 1.7809408e+00 3.4092863e+00 1.8988911e+00 1.4127713e+00 2.0371950e+00 1.3095379e+00 1.7286430e+00 2.1358640e+00 1.6228818e+00 3.9920035e+00 3.2072460e+00 1.3897098e+00 2.4925270e+00 1.2375842e+00 2.9891693e+00 1.1418958e+00 2.3510928e+00 2.4945648e+00 1.0792736e+00 1.2447083e+00 1.7021399e+00 2.1843621e+00 2.4864921e+00 3.9967418e+00 1.7954132e+00 1.0172824e+00 1.0869385e+00 3.0619758e+00 2.5242226e+00 1.6782397e+00 1.1896845e+00 2.1746675e+00 2.3309032e+00 2.2706367e+00 1.1418735e+00 2.4800313e+00 2.6530340e+00 2.0689155e+00 1.3443777e+00 1.6837594e+00 2.3748600e+00 1.2545769e+00 1.0660846e+00 1.6498377e+00 1.2783641e+00 1.1376397e+00 1.0898613e+00 1.0585628e+00 9.2189946e-01 8.0142393e-01 8.8029208e-01 1.9920533e+00 8.0501785e-01 1.0699859e+00 8.7105035e-01 7.9448303e-01 1.7999592e+00 8.1251986e-01 1.9227723e+00 4.6137216e-01 1.6965186e+00 7.0213871e-01 1.2723284e+00 2.3160615e+00 1.4526546e+00 1.7912701e+00 1.0739839e+00 2.8496420e+00 1.4032361e+00 6.3302304e-01 1.3757605e+00 7.8863556e-01 1.1001816e+00 1.5547583e+00 9.8152003e-01 3.4772360e+00 2.4799120e+00 1.1619556e+00 1.8622587e+00 7.5769247e-01 2.3162319e+00 
4.6128322e-01 1.7816685e+00 1.9387331e+00 4.5729032e-01 7.5817225e-01 8.9223438e-01 1.6541379e+00 1.8404767e+00 3.5381276e+00 9.9244707e-01 4.4901474e-01 4.6557224e-01 2.4199101e+00 1.9790803e+00 1.0974789e+00 7.5914566e-01 1.5781281e+00 1.6567524e+00 1.6951864e+00 4.6137216e-01 1.8193470e+00 2.0336808e+00 1.4275349e+00 7.0834786e-01 1.0588854e+00 1.8801322e+00 7.4965096e-01 1.1803522e+00 1.5908164e+00 1.9645622e+00 4.2238505e-01 1.2220171e+00 1.0116179e+00 8.5731385e-01 1.1485726e+00 1.9317000e+00 7.9664122e-01 5.6097460e-01 5.3106808e-01 1.0334797e+00 1.5679493e+00 6.8124108e-01 2.0247774e+00 1.0499569e+00 2.3371797e+00 1.3332950e+00 1.7744903e+00 3.0154970e+00 1.3277924e+00 2.5520970e+00 2.1193865e+00 3.0297355e+00 1.5881698e+00 1.5547948e+00 1.9487821e+00 1.4037678e+00 1.3973195e+00 1.7249900e+00 1.4967902e+00 3.6762761e+00 3.3933664e+00 2.0023741e+00 2.2484516e+00 8.6702860e-01 3.1482546e+00 1.3659877e+00 2.0060004e+00 2.4114658e+00 1.1530661e+00 9.5944179e-01 1.6364369e+00 2.2989490e+00 2.6726954e+00 3.7560452e+00 1.7032717e+00 1.2286922e+00 1.4040978e+00 3.1041910e+00 1.9523740e+00 1.4097206e+00 8.4169735e-01 2.0525205e+00 2.0729884e+00 2.1328505e+00 1.0499569e+00 2.1908074e+00 2.2633850e+00 1.9289706e+00 1.6929462e+00 1.5333884e+00 1.7729821e+00 7.9671887e-01 1.1060455e+00 2.5932658e+00 1.1359179e+00 2.2153658e+00 2.0116430e+00 9.6825676e-01 1.9538525e+00 2.9958431e+00 1.7387082e+00 1.1339874e+00 1.2823616e+00 1.2471855e+00 2.5771468e+00 1.5006766e+00 1.5158960e+00 1.7131342e+00 1.9169015e+00 1.3850497e+00 1.5416258e+00 2.5358032e+00 2.4678381e+00 2.2144600e+00 2.3737220e+00 2.1274158e+00 9.8372339e-01 1.7887913e+00 1.5921275e+00 2.1896791e+00 1.7854129e+00 1.2218858e+00 1.2670969e+00 2.6904561e+00 3.2109839e+00 2.7851183e+00 1.6425353e+00 1.5729927e+00 2.8211634e+00 1.6904600e+00 1.2882518e+00 1.7618849e+00 1.4390193e+00 9.9282597e-01 1.7222401e+00 1.8692144e+00 2.3979391e+00 2.7477433e+00 1.7762263e+00 1.4761145e+00 1.9687854e+00 2.5941073e+00 1.2723200e+00 1.0497372e+00 9.7397874e-01 1.5327853e+00 1.6373339e+00 1.6177026e+00 1.7131342e+00 1.6177237e+00 1.6189835e+00 1.6013655e+00 2.1620604e+00 1.2836487e+00 1.0769609e+00 1.0246689e+00 1.9326437e+00 1.4149714e+00 2.0593667e+00 1.9008579e+00 7.7368489e-01 1.6801694e+00 2.9457974e+00 1.6627284e+00 1.3215146e+00 1.3491191e+00 8.3512263e-01 2.6175043e+00 1.4565053e+00 1.6449760e+00 1.5357227e+00 1.1692720e+00 9.2780124e-01 1.1582405e+00 1.7355630e+00 2.5925902e+00 1.3094338e+00 1.5678176e+00 2.0102053e+00 7.6952423e-01 1.1716038e+00 9.4417605e-01 1.9573929e+00 1.7612938e+00 1.1827746e+00 6.8675486e-01 2.4881500e+00 2.3327432e+00 2.2476505e+00 1.2375842e+00 1.6387331e+00 1.9108109e+00 1.1188225e+00 1.0733560e+00 1.0560148e+00 1.0005243e+00 8.6347207e-01 1.2199459e+00 9.0753778e-01 1.4483156e+00 2.4625089e+00 1.3082342e+00 8.7375509e-01 1.4601811e+00 1.8000065e+00 1.5372053e+00 7.0097130e-01 9.6204815e-01 9.1315231e-01 1.2848917e+00 1.0993651e+00 1.5357227e+00 1.2764003e+00 1.5003224e+00 1.1101208e+00 1.5658291e+00 7.8808432e-01 1.4489917e+00 1.0920053e+00 1.8302572e+00 1.0943114e+00 1.1955102e+00 1.6415483e+00 9.5491981e-01 1.7343175e+00 1.2563834e+00 1.7780218e+00 1.5661153e+00 1.3901973e+00 1.7352481e+00 1.3724737e+00 2.9349297e+00 1.4110819e+00 2.3154474e+00 1.6753059e+00 2.1644875e+00 2.7793057e+00 1.8300579e+00 2.2275691e+00 1.2267563e+00 3.6839155e+00 2.3163493e+00 1.3205795e+00 2.1115079e+00 1.3018219e+00 1.9822480e+00 2.5100153e+00 1.8661672e+00 4.2327578e+00 2.6573893e+00 6.0725725e-01 2.6633209e+00 1.7222058e+00 
2.5939799e+00 1.1664463e+00 2.6821825e+00 2.5963941e+00 1.3509199e+00 1.7816323e+00 1.7046308e+00 2.1381589e+00 2.1542571e+00 4.2222578e+00 1.7881987e+00 1.2473306e+00 1.0083490e+00 2.8478148e+00 2.9960208e+00 2.0583207e+00 1.7939407e+00 2.3128527e+00 2.4854841e+00 2.4090630e+00 1.4110819e+00 2.6669716e+00 2.9270626e+00 2.1822548e+00 9.7289027e-01 1.9265423e+00 2.9135583e+00 1.8510296e+00 1.1608422e+00 9.5483435e-01 6.7975587e-01 9.6424206e-01 1.8685422e+00 6.9420840e-01 1.8699153e-01 2.6643250e-01 7.6880092e-01 1.4668684e+00 4.7680727e-01 2.1966727e+00 1.2150638e+00 2.3282955e+00 1.3855601e+00 1.8709248e+00 2.9899796e+00 1.5396352e+00 2.5003659e+00 2.1099656e+00 3.0668593e+00 1.5989333e+00 1.5788417e+00 1.9566602e+00 1.5639220e+00 1.6339738e+00 1.8236402e+00 1.4967013e+00 3.6603010e+00 3.3937895e+00 1.9915789e+00 2.2840141e+00 1.1223478e+00 3.1075626e+00 1.3520885e+00 2.0407191e+00 2.3526669e+00 1.1508346e+00 9.9970980e-01 1.7227103e+00 2.1946041e+00 2.6155078e+00 3.6958832e+00 1.8054416e+00 1.1477199e+00 1.3984076e+00 3.0713671e+00 2.0894840e+00 1.4301682e+00 9.0552938e-01 2.0395038e+00 2.1489811e+00 2.1344915e+00 1.2150638e+00 2.2517887e+00 2.3533226e+00 1.9673072e+00 1.7095802e+00 1.5528301e+00 1.9068051e+00 9.3899770e-01 3.4893361e-01 1.4098163e+00 4.4901474e-01 9.4301660e-01 4.9009568e-01 1.1908452e+00 9.6032771e-01 1.2627589e+00 7.8945238e-01 7.3502408e-01 2.8451486e+00 1.1622402e+00 2.7058576e+00 1.7424022e+00 2.2846522e+00 3.3213689e+00 9.3810350e-01 2.7757276e+00 1.8894571e+00 3.8131048e+00 2.3010216e+00 1.5984421e+00 2.3698153e+00 1.1049324e+00 1.7539088e+00 2.4591578e+00 1.9849730e+00 4.4474865e+00 3.3956070e+00 1.1104964e+00 2.8478148e+00 1.2628565e+00 3.2741783e+00 1.3548265e+00 2.7443454e+00 2.9214972e+00 1.3520885e+00 1.5950569e+00 1.8924015e+00 2.5961001e+00 2.7889301e+00 4.4811770e+00 1.9680122e+00 1.3672690e+00 1.1906665e+00 3.3943858e+00 2.8533575e+00 2.0610968e+00 1.5261646e+00 2.5498486e+00 2.6295980e+00 2.6227721e+00 1.1622402e+00 2.8191421e+00 2.9841287e+00 2.3684093e+00 1.3684638e+00 2.0228721e+00 2.7146999e+00 1.5430544e+00 1.2073068e+00 4.2737382e-01 1.1404637e+00 3.1271814e-01 9.6032771e-01 7.5303835e-01 1.1016806e+00 9.6606527e-01 5.6318359e-01 2.6951278e+00 1.0877340e+00 2.5880472e+00 1.5788417e+00 2.1577755e+00 3.1981141e+00 1.0053189e+00 2.6422640e+00 1.8441670e+00 3.6556493e+00 2.1516272e+00 1.5283932e+00 2.2572409e+00 1.1515368e+00 1.7244934e+00 2.3310687e+00 1.8210464e+00 4.2584241e+00 3.3382181e+00 1.2189366e+00 2.7191331e+00 1.1859692e+00 3.1732334e+00 1.2980649e+00 2.5768210e+00 2.7513616e+00 1.2636762e+00 1.4403062e+00 1.8020431e+00 2.4433699e+00 2.6920515e+00 4.2945779e+00 1.8903935e+00 1.2074964e+00 1.0277379e+00 3.3038558e+00 2.6967685e+00 1.8755883e+00 1.3730080e+00 2.4290237e+00 2.5228632e+00 2.5343900e+00 1.0877340e+00 2.6795155e+00 2.8549884e+00 2.2878474e+00 1.3941000e+00 1.9010194e+00 2.5529515e+00 1.3637808e+00 1.0801003e+00 2.2778358e+00 9.5491981e-01 5.9448670e-01 5.9589853e-01 3.3742167e-01 1.9403546e+00 7.3727571e-01 1.8011631e+00 1.0580730e+00 1.6859882e+00 8.4110582e-01 1.3396881e+00 2.3254107e+00 1.8940865e+00 1.8320164e+00 1.6099822e+00 2.5455416e+00 1.0698235e+00 1.0938968e+00 1.3476330e+00 1.4941922e+00 1.4633215e+00 1.3755629e+00 8.7649478e-01 3.1069376e+00 2.7702855e+00 1.8674396e+00 1.7104256e+00 1.1065179e+00 2.4455843e+00 9.1688392e-01 1.4945651e+00 1.6975565e+00 7.1757390e-01 5.5102439e-01 1.2274184e+00 1.5152116e+00 1.9568855e+00 3.1286065e+00 1.3281960e+00 6.0709980e-01 1.0827099e+00 2.4180452e+00 1.7168537e+00 
8.4814328e-01 5.4968254e-01 1.4259927e+00 1.6175327e+00 1.5676792e+00 1.0580730e+00 1.6914438e+00 1.8627823e+00 1.4252775e+00 1.3670172e+00 9.8340962e-01 1.5690664e+00 6.4292875e-01 1.2799022e+00 3.7622328e-01 9.3727156e-01 7.2340544e-01 8.7070822e-01 1.0517510e+00 4.9772204e-01 2.6753497e+00 1.1327780e+00 2.4219935e+00 1.5112031e+00 2.0792734e+00 3.0229728e+00 1.3206164e+00 2.4652397e+00 1.7009790e+00 3.5349296e+00 2.0290396e+00 1.4008516e+00 2.1030738e+00 1.2197800e+00 1.7532536e+00 2.2495068e+00 1.7048463e+00 4.1210195e+00 3.1691829e+00 1.1769133e+00 2.5856062e+00 1.2767751e+00 2.9863081e+00 1.1384575e+00 2.4711712e+00 2.5838439e+00 1.1268523e+00 1.3640383e+00 1.7178039e+00 2.2416335e+00 2.4908014e+00 4.1279448e+00 1.8102703e+00 1.0585628e+00 1.0110609e+00 3.0999848e+00 2.6577347e+00 1.7876312e+00 1.3164631e+00 2.2615790e+00 2.4095273e+00 2.3580977e+00 1.1327780e+00 2.5710650e+00 2.7600070e+00 2.1383264e+00 1.2571099e+00 1.7683536e+00 2.5188615e+00 1.3683626e+00 1.3381984e+00 1.9104439e+00 1.7447553e+00 2.1191178e+00 5.2290002e-01 1.5506355e+00 3.7401310e+00 2.0532672e+00 3.6450987e+00 2.6791066e+00 3.2198971e+00 4.2474168e+00 1.2374249e+00 3.6904578e+00 2.7448024e+00 4.7359552e+00 3.2167523e+00 2.5268732e+00 3.3111493e+00 1.8901504e+00 2.5824715e+00 3.3694826e+00 2.9225385e+00 5.3651760e+00 4.2632085e+00 1.6938497e+00 3.7848480e+00 2.0962051e+00 4.1703198e+00 2.2884248e+00 3.6689921e+00 3.8480511e+00 2.2915999e+00 2.5084155e+00 2.8222270e+00 3.5057930e+00 3.6931154e+00 5.3885667e+00 2.8913445e+00 2.2944283e+00 2.0536056e+00 4.3194837e+00 3.7370068e+00 2.9859935e+00 2.4261724e+00 3.4875553e+00 3.5613793e+00 3.5512500e+00 2.0532672e+00 3.7558873e+00 3.9047630e+00 3.2988390e+00 2.2352837e+00 2.9604482e+00 3.5852990e+00 2.4345890e+00 7.1446962e-01 4.7680727e-01 8.6080744e-01 1.0528937e+00 2.6643250e-01 2.4784392e+00 9.6779954e-01 2.4056700e+00 1.4093245e+00 1.9680122e+00 3.0432377e+00 1.1095018e+00 2.5046513e+00 1.7969297e+00 3.4167115e+00 1.9006720e+00 1.3928921e+00 2.0543866e+00 1.1288261e+00 1.5650139e+00 2.0901630e+00 1.6223748e+00 4.0330923e+00 3.2470423e+00 1.3554912e+00 2.4968262e+00 1.0256272e+00 3.0550867e+00 1.1407226e+00 2.3454418e+00 2.5560105e+00 1.0597600e+00 1.1958054e+00 1.6477280e+00 2.2779375e+00 2.5604758e+00 4.0677826e+00 1.7340403e+00 1.0472149e+00 1.0317636e+00 3.1283758e+00 2.4552133e+00 1.6602736e+00 1.1211328e+00 2.2084219e+00 2.3071243e+00 2.3006257e+00 9.6779954e-01 2.4647768e+00 2.6219630e+00 2.0691920e+00 1.3232765e+00 1.6800415e+00 2.3045354e+00 1.1399118e+00 2.6525508e-01 6.6194168e-01 1.5279052e+00 4.8284931e-01 2.2240009e+00 1.2628565e+00 2.2754107e+00 1.3512603e+00 1.8635082e+00 2.9164540e+00 1.6496295e+00 2.4127395e+00 2.0563471e+00 3.0426144e+00 1.5827764e+00 1.5566996e+00 1.9230842e+00 1.6230251e+00 1.7204639e+00 1.8421714e+00 1.4471806e+00 3.6003163e+00 3.3322323e+00 1.9737130e+00 2.2612204e+00 1.2180372e+00 3.0253495e+00 1.3338820e+00 2.0126070e+00 2.2700141e+00 1.1450371e+00 1.0044165e+00 1.7168897e+00 2.0924575e+00 2.5354258e+00 3.6216411e+00 1.8094028e+00 1.0735412e+00 1.3351581e+00 3.0117528e+00 2.1161544e+00 1.3888000e+00 9.3005809e-01 2.0016409e+00 2.1479330e+00 2.1191972e+00 1.2628565e+00 2.2323235e+00 2.3582915e+00 1.9639632e+00 1.7034312e+00 1.5340961e+00 1.9379753e+00 9.6779954e-01 6.0647055e-01 1.3810470e+00 2.3749211e-01 2.2111682e+00 1.0514970e+00 2.2223402e+00 1.2585091e+00 1.7887495e+00 2.8752401e+00 1.4450035e+00 2.3620441e+00 1.8870562e+00 3.0854059e+00 1.5854288e+00 1.3907384e+00 1.8599878e+00 1.3776606e+00 1.5509730e+00 
1.8163092e+00 1.3995189e+00 3.6797126e+00 3.2203837e+00 1.7354649e+00 2.2402012e+00 1.0327124e+00 2.9556082e+00 1.1508346e+00 2.0324938e+00 2.2869296e+00 9.8115739e-01 9.3443769e-01 1.5799528e+00 2.0763861e+00 2.4587811e+00 3.7098479e+00 1.6697722e+00 9.5240225e-01 1.1656779e+00 2.9602833e+00 2.1358640e+00 1.3782117e+00 8.5400786e-01 1.9683111e+00 2.0924338e+00 2.0712315e+00 1.0514970e+00 2.2110297e+00 2.3461934e+00 1.8840843e+00 1.4831574e+00 1.4659783e+00 1.9682209e+00 8.9496218e-01 1.7964452e+00 6.5622658e-01 2.0655284e+00 1.1268523e+00 1.7764179e+00 9.9332012e-01 1.5158960e+00 2.3895003e+00 1.8982283e+00 1.8651393e+00 1.5387077e+00 2.7528000e+00 1.2871947e+00 1.1001946e+00 1.4627474e+00 1.4880729e+00 1.6030181e+00 1.6047598e+00 1.0374207e+00 3.2910073e+00 2.7655862e+00 1.7002168e+00 1.8814596e+00 1.2390194e+00 2.4548042e+00 8.7876634e-01 1.7158367e+00 1.8151170e+00 7.5016118e-01 7.8272551e-01 1.3241046e+00 1.5455843e+00 1.9524619e+00 3.2834453e+00 1.4300844e+00 5.8483448e-01 1.0263139e+00 2.4682518e+00 1.9911692e+00 1.0783755e+00 7.8808432e-01 1.5539983e+00 1.7882304e+00 1.6881750e+00 1.1268523e+00 1.8822939e+00 2.0779047e+00 1.5475657e+00 1.2810704e+00 1.1339991e+00 1.8534885e+00 9.0513514e-01 1.2087510e+00 3.4293561e+00 1.8554780e+00 3.4031864e+00 2.4420991e+00 2.9637920e+00 4.0403658e+00 1.1828717e+00 3.4998500e+00 2.6632870e+00 4.3954130e+00 2.8761203e+00 2.3399831e+00 3.0445122e+00 1.7890328e+00 2.3476777e+00 3.0401775e+00 2.6527529e+00 5.0321758e+00 4.1557153e+00 1.7943181e+00 3.4850530e+00 1.8421879e+00 4.0156620e+00 2.0769537e+00 3.3460838e+00 3.5735182e+00 2.0311110e+00 2.1883894e+00 2.6137665e+00 3.2724268e+00 3.5184202e+00 5.0524110e+00 2.6818545e+00 2.0664108e+00 1.9589565e+00 4.0923995e+00 3.3884524e+00 2.6885740e+00 2.0959739e+00 3.1948685e+00 3.2743590e+00 3.2448452e+00 1.8554780e+00 3.4633688e+00 3.5839347e+00 3.0150212e+00 2.1174980e+00 2.6708841e+00 3.2242395e+00 2.1262653e+00 2.3423978e+00 1.0035466e+00 2.2827159e+00 1.3153660e+00 1.8615039e+00 2.9298417e+00 1.3184035e+00 2.4022001e+00 1.8151170e+00 3.2315412e+00 1.7166676e+00 1.3595814e+00 1.9250594e+00 1.2570691e+00 1.5534985e+00 1.9352496e+00 1.4849081e+00 3.8359772e+00 3.2064915e+00 1.5410939e+00 2.3431625e+00 1.0302848e+00 2.9742645e+00 1.1013771e+00 2.1700970e+00 2.3919091e+00 9.7531402e-01 1.0396764e+00 1.5925492e+00 2.1393811e+00 2.4733960e+00 3.8624002e+00 1.6816960e+00 9.5650957e-01 1.0890388e+00 3.0080097e+00 2.2891398e+00 1.5007156e+00 9.6470639e-01 2.0543866e+00 2.1765531e+00 2.1487165e+00 1.0035466e+00 2.3180617e+00 2.4663563e+00 1.9433990e+00 1.3718709e+00 1.5414664e+00 2.1305612e+00 1.0107221e+00 1.7770053e+00 1.3000021e+00 1.3205171e+00 8.3930091e-01 1.8258497e+00 2.8432746e+00 1.7831595e+00 2.1193624e+00 1.2896554e+00 8.9496218e-01 1.6446727e+00 1.0946825e+00 2.1632524e+00 1.4041749e+00 5.4207852e-01 1.2077572e+00 2.1213447e+00 2.4069921e+00 2.9335119e+00 8.2206766e-01 1.6918279e+00 2.1851365e+00 1.7733720e+00 7.3278119e-01 1.4407642e+00 1.6273345e+00 1.3293020e+00 1.2924610e+00 1.7503171e+00 1.9276214e+00 2.3545376e+00 1.2450968e+00 1.8184976e+00 2.1894327e+00 1.8463545e+00 3.4893361e-01 1.0719022e+00 1.3834169e+00 1.0621362e+00 7.1740234e-01 1.0327832e+00 1.7770053e+00 6.9976890e-01 5.1210327e-01 9.9313181e-01 2.0841099e+00 1.0825311e+00 5.0208681e-01 1.3466860e+00 1.7937749e+00 8.2148003e-01 1.2268833e+00 2.4485240e+00 1.2563297e+00 1.9934469e+00 1.2524426e+00 2.8471336e+00 1.4358042e+00 7.3339246e-01 1.4342819e+00 4.9772204e-01 6.9457760e-01 1.4636741e+00 1.1233354e+00 3.5605638e+00 
2.5755365e+00 1.2907457e+00 1.8667489e+00 3.7622328e-01 2.4814136e+00 6.2818221e-01 1.8112028e+00 2.1131807e+00 5.7672351e-01 7.9999102e-01 8.5275415e-01 1.9102507e+00 2.0267836e+00 3.6643554e+00 9.0168685e-01 8.3216780e-01 8.3306409e-01 2.5178144e+00 1.8656026e+00 1.2019259e+00 7.6362786e-01 1.6472011e+00 1.5943283e+00 1.7001179e+00 0.0000000e+00 1.8078806e+00 1.9570111e+00 1.3920954e+00 7.6195008e-01 1.1016806e+00 1.7729341e+00 7.1446962e-01 1.0816610e+00 7.3851064e-01 7.2248857e-01 3.0483776e+00 5.6342615e-01 1.3118081e+00 1.4889605e+00 9.8006369e-01 1.1737270e+00 4.2737382e-01 2.1131807e+00 1.7428500e+00 1.0543640e+00 8.5494999e-01 2.0376324e+00 1.3288928e+00 2.4590893e+00 5.9382214e-01 1.9576761e+00 9.8006369e-01 1.3739792e+00 8.5141186e-01 6.2055338e-01 1.3918500e+00 1.3907270e+00 9.7789352e-01 6.6861320e-01 6.5233704e-01 2.1059482e+00 9.8663349e-01 1.4035018e+00 1.7831595e+00 7.7880944e-01 1.4027992e+00 9.8677196e-01 1.5191564e+00 4.3798311e-01 6.8554305e-01 6.2111408e-01 1.7937749e+00 6.4241342e-01 9.9981032e-01 6.7780188e-01 1.6099640e+00 8.3640969e-01 1.4769275e+00 1.5684930e+00 6.3173774e-01 1.7302001e+00 2.0286216e+00 1.2711306e+00 1.0474897e+00 2.1700788e+00 8.2827027e-01 5.2290002e-01 7.5863433e-01 1.2491511e+00 1.0565061e+00 9.7542502e-01 3.3872939e-01 2.7982543e+00 2.0758695e+00 1.7342859e+00 1.1984110e+00 9.9693045e-01 1.8354727e+00 6.0840510e-01 1.1145077e+00 1.3082023e+00 5.2283051e-01 5.1857575e-01 4.7149050e-01 1.1471723e+00 1.3839439e+00 2.8837687e+00 5.8522871e-01 5.3667800e-01 9.0098101e-01 1.8496383e+00 1.3956631e+00 4.8016385e-01 6.2451737e-01 9.5139638e-01 1.0316097e+00 1.1168387e+00 8.2148003e-01 1.1408175e+00 1.3888569e+00 8.7588404e-01 9.9189360e-01 4.8124784e-01 1.3365773e+00 6.0566865e-01 1.4097462e+00 2.4566860e+00 1.1582635e+00 1.2895008e+00 1.6771691e+00 6.6244727e-01 8.5330525e-01 4.2110953e-01 1.5929148e+00 1.0739839e+00 5.6992880e-01 5.5102439e-01 2.4009228e+00 1.8321979e+00 2.1944657e+00 6.8299624e-01 1.3125970e+00 1.6253273e+00 1.0353506e+00 7.4661256e-01 1.1022402e+00 9.6950963e-01 8.7649478e-01 5.0731024e-01 1.1544356e+00 1.2559800e+00 2.5390784e+00 4.9009568e-01 1.1268617e+00 1.4819274e+00 1.4649720e+00 9.9544409e-01 6.0942760e-01 9.8006526e-01 5.9589853e-01 4.3937875e-01 6.7904052e-01 1.2268833e+00 6.0365341e-01 8.3354038e-01 4.3719837e-01 1.3221954e+00 4.2932160e-01 1.0251165e+00 9.7833010e-01 3.6949435e+00 6.0653347e-01 1.6944472e+00 1.5821553e+00 1.6484241e+00 1.7861438e+00 1.1497964e+00 2.7265330e+00 2.4114658e+00 1.7100754e+00 1.5191564e+00 1.8544941e+00 9.8143688e-01 2.9288318e+00 1.1208167e+00 2.6441923e+00 4.9772204e-01 2.0065422e+00 1.3857067e+00 8.4632640e-01 2.0655380e+00 2.0890644e+00 1.6229726e+00 9.3175410e-01 6.4813570e-01 1.8882412e+00 1.6282537e+00 2.0045623e+00 2.3010727e+00 4.0443437e-01 1.9471640e+00 1.6420881e+00 2.2200703e+00 1.1092092e+00 1.3077539e+00 1.2486828e+00 2.4485240e+00 1.1714086e+00 1.4815200e+00 1.3709657e+00 2.1645801e+00 1.5528645e+00 2.0592947e+00 2.2565403e+00 3.2107953e+00 2.2989490e+00 4.0089941e+00 2.5709804e+00 1.9412285e+00 2.6814987e+00 1.0808305e+00 1.6177026e+00 2.5906314e+00 2.3241321e+00 4.7335798e+00 3.7356640e+00 1.5467170e+00 3.0855313e+00 1.1828955e+00 3.6907456e+00 1.7719327e+00 2.9776826e+00 3.3258154e+00 1.7290027e+00 1.8703975e+00 2.1032002e+00 3.0991288e+00 3.2379874e+00 4.8403184e+00 2.1396216e+00 1.8765527e+00 1.6420881e+00 3.7688016e+00 2.8977291e+00 2.3525703e+00 1.7721385e+00 2.8780663e+00 2.8053505e+00 2.9124074e+00 1.2563297e+00 3.0197298e+00 3.1129968e+00 2.6135061e+00 
1.7341866e+00 2.3184326e+00 2.7661123e+00 1.7090768e+00 1.2067996e+00 1.8641580e+00 1.3940244e+00 1.3162189e+00 8.8198158e-01 2.2794791e+00 2.1012392e+00 1.5525661e+00 1.0918469e+00 2.2063254e+00 1.1211328e+00 2.4017426e+00 1.1211328e+00 2.2284888e+00 6.3765570e-01 1.5168126e+00 1.2830542e+00 7.2263841e-01 1.5939137e+00 1.6681833e+00 1.2435436e+00 4.6557224e-01 3.1271814e-01 2.2147971e+00 1.2909648e+00 1.4589882e+00 1.7351918e+00 8.5359653e-01 1.8877628e+00 1.2647627e+00 1.8001617e+00 9.2780124e-01 1.2301583e+00 1.1566759e+00 1.9934469e+00 1.1506301e+00 1.5274241e+00 1.1815770e+00 1.6939440e+00 1.2016246e+00 1.9452063e+00 1.8368886e+00 2.7696320e+00 1.7002168e+00 6.6444642e-01 1.2360344e+00 1.3162958e+00 1.5606124e+00 1.8019802e+00 1.1903794e+00 3.3139779e+00 1.5262653e+00 1.2463647e+00 1.7598642e+00 1.6038123e+00 1.5053088e+00 8.4040822e-01 1.8873059e+00 1.7273593e+00 1.0791305e+00 1.4542898e+00 8.8167165e-01 1.3277924e+00 1.1132823e+00 3.3576107e+00 9.4738284e-01 1.0119180e+00 9.3046944e-01 1.8017473e+00 2.2743623e+00 1.4404916e+00 1.5380438e+00 1.4761813e+00 1.5955619e+00 1.5999471e+00 1.2524426e+00 1.7473897e+00 2.0612423e+00 1.3691763e+00 6.7534282e-01 1.2539581e+00 2.2701663e+00 1.5558007e+00 1.5218782e+00 2.4628184e+00 1.5932824e+00 3.2458126e+00 2.5692387e+00 1.4348047e+00 1.8937847e+00 9.2060977e-01 2.4408782e+00 3.8250534e+00 1.0499398e+00 2.8336242e+00 2.0769357e+00 2.6064495e+00 1.0813975e+00 1.3021456e+00 2.4993477e+00 2.2323451e+00 2.1662332e+00 1.8260680e+00 2.0200580e+00 1.1770826e+00 2.1383117e+00 2.5736499e+00 3.0399892e+00 1.5323598e+00 1.2212784e+00 1.7944590e+00 2.3235953e+00 1.3759094e+00 1.3385913e+00 1.3604806e+00 2.8471336e+00 1.0797751e+00 9.4588685e-01 1.6150266e+00 2.9366827e+00 1.8217819e+00 1.3773939e+00 2.3541561e+00 1.1723315e+00 6.4232366e-01 1.8822595e+00 1.3566020e+00 4.2656951e-01 5.7691891e-01 2.2149281e+00 2.2825823e+00 2.4730728e+00 7.0958226e-01 1.4300979e+00 1.9425292e+00 1.2122797e+00 4.9430028e-01 1.0215032e+00 1.0391769e+00 7.2638147e-01 9.8137813e-01 1.1659675e+00 1.5397131e+00 2.3056726e+00 1.0072799e+00 1.1569911e+00 1.6871910e+00 1.6699010e+00 7.9127668e-01 4.3341454e-01 8.2155022e-01 5.7672351e-01 6.8304299e-01 6.6412342e-01 1.4358042e+00 7.0097130e-01 8.1019167e-01 6.5485710e-01 1.6386105e+00 4.6472955e-01 7.2626021e-01 8.9802947e-01 8.9083207e-01 9.8663349e-01 1.0101003e+00 1.2666796e+00 7.2340544e-01 3.1120413e+00 1.9012831e+00 1.3662822e+00 1.4214310e+00 1.0271885e+00 1.7789322e+00 2.8835410e-01 1.4717950e+00 1.5613089e+00 4.5716421e-01 8.2373020e-01 3.8830315e-01 1.2833190e+00 1.3103990e+00 3.1817011e+00 4.8644514e-01 5.9610506e-01 8.0162421e-01 1.8503155e+00 1.7547273e+00 9.3865015e-01 8.9973730e-01 1.1346946e+00 1.2001902e+00 1.2260535e+00 7.3339246e-01 1.3976606e+00 1.6479131e+00 9.4228329e-01 5.0621589e-01 7.1671402e-01 1.7153988e+00 9.3451915e-01 1.7865803e+00 1.3700593e+00 7.2638147e-01 5.3458689e-01 2.2505972e+00 1.6524504e+00 2.2440955e+00 5.5576380e-01 1.5638518e+00 1.3688560e+00 1.0552128e+00 7.1143905e-01 8.2518769e-01 1.0245496e+00 9.9235657e-01 6.7030885e-01 8.3156325e-01 9.6025744e-01 2.3337038e+00 6.8299624e-01 1.1166319e+00 1.5524781e+00 1.1685901e+00 1.1719135e+00 6.6412342e-01 1.1159239e+00 2.6643250e-01 4.7488466e-01 4.3341454e-01 1.4342819e+00 5.7691891e-01 8.8366512e-01 3.3492202e-01 1.3576953e+00 4.2110953e-01 1.2033093e+00 1.1777985e+00 8.7819565e-01 1.8719964e+00 1.5530949e+00 3.9773949e+00 2.6834336e+00 1.0194189e+00 2.2401597e+00 7.0463400e-01 2.6908242e+00 8.9981614e-01 2.2443541e+00 2.5055082e+00 
9.6168382e-01 1.2786676e+00 1.1515752e+00 2.2564140e+00 2.2581551e+00 4.0837847e+00 1.1737270e+00 1.1984110e+00 1.0100915e+00 2.7760526e+00 2.2855612e+00 1.6668656e+00 1.2419907e+00 2.0207624e+00 1.9399964e+00 2.0427084e+00 4.9772204e-01 2.1876191e+00 2.3343528e+00 1.7191609e+00 7.3633268e-01 1.5086315e+00 2.2088314e+00 1.2082987e+00 1.1857824e+00 1.2636762e+00 3.3874427e+00 2.5564450e+00 1.8349829e+00 1.6578570e+00 5.8813453e-01 2.5222553e+00 1.0240850e+00 1.6676963e+00 2.1419072e+00 9.3827844e-01 9.8741108e-01 8.7169308e-01 2.0802526e+00 2.1175243e+00 3.5451448e+00 8.2097460e-01 1.3248988e+00 1.4633215e+00 2.4116155e+00 1.5361480e+00 1.2935378e+00 9.5818710e-01 1.5578153e+00 1.3192053e+00 1.5035025e+00 6.9457760e-01 1.5912796e+00 1.6259926e+00 1.1892978e+00 1.1294987e+00 1.0978602e+00 1.4813076e+00 9.1948999e-01 8.1819403e-01 2.2419326e+00 2.2807501e+00 2.5846003e+00 6.4497192e-01 1.4107908e+00 2.0247998e+00 1.3509199e+00 5.5183182e-01 1.2328847e+00 1.1911894e+00 9.0810653e-01 9.7397874e-01 1.4379681e+00 1.6702453e+00 2.3957048e+00 9.4716675e-01 1.4061835e+00 1.8616601e+00 1.6979390e+00 5.2290002e-01 7.0376604e-01 9.7757519e-01 6.9976890e-01 4.8012872e-01 6.5622658e-01 1.4636741e+00 5.9074344e-01 5.5183182e-01 5.8926015e-01 1.7101283e+00 6.2055338e-01 5.2374483e-01 1.0096792e+00 2.4983023e+00 2.0024830e+00 2.0009022e+00 9.4244262e-01 1.2563297e+00 1.6864324e+00 8.0788963e-01 8.3930091e-01 1.0038277e+00 7.0810362e-01 5.9074344e-01 6.2055338e-01 9.0121804e-01 1.2356595e+00 2.5673851e+00 7.1082758e-01 6.9066640e-01 1.1671832e+00 1.6263298e+00 1.2372185e+00 2.6033464e-01 7.2248857e-01 6.6653737e-01 8.5606908e-01 8.7588404e-01 1.1233354e+00 9.0810653e-01 1.1795009e+00 7.1867388e-01 1.2188386e+00 3.1239235e-01 1.1894366e+00 7.5921691e-01 2.7729820e+00 4.4273104e+00 1.7851048e+00 3.5860697e+00 2.3211451e+00 3.2572853e+00 1.7681972e+00 1.6466818e+00 3.1677578e+00 2.9074188e+00 2.8617360e+00 2.1557081e+00 2.3917474e+00 3.9487224e-01 2.8587345e+00 3.1370513e+00 3.5889276e+00 1.8806886e+00 2.0412779e+00 2.4100318e+00 3.0088533e+00 2.0247746e+00 2.1265123e+00 2.0926464e+00 3.5605638e+00 1.8217810e+00 1.8066213e+00 2.3669524e+00 3.5958907e+00 2.5091153e+00 2.1661990e+00 3.0374915e+00 2.7063291e+00 1.8195505e+00 2.8431665e+00 6.1655427e-01 2.1507483e+00 2.1438129e+00 1.7230435e+00 2.3108260e+00 2.5097011e+00 1.8135469e+00 1.5638528e+00 9.0791603e-01 2.8200316e+00 1.7992895e+00 2.2827159e+00 2.3805592e+00 1.0279218e+00 2.6120156e+00 2.2030084e+00 2.6289851e+00 1.7477225e+00 1.8297977e+00 1.8176515e+00 2.5755365e+00 1.8486085e+00 2.1438129e+00 1.7993706e+00 2.0846858e+00 2.0084695e+00 2.7218129e+00 2.6544629e+00 2.7850656e+00 1.6064410e+00 2.7362607e+00 1.2723027e+00 2.8153418e+00 2.8097763e+00 1.4630671e+00 1.8911656e+00 1.6976298e+00 2.3931138e+00 2.3316649e+00 4.4654565e+00 1.7621453e+00 1.4315442e+00 9.9921804e-01 3.0176875e+00 3.0491088e+00 2.1855415e+00 1.8898572e+00 2.4817766e+00 2.5552736e+00 2.5674482e+00 1.2907457e+00 2.7588865e+00 3.0041677e+00 2.2870308e+00 9.4057729e-01 2.0520412e+00 2.9779211e+00 1.8911656e+00 1.9172403e+00 1.5033800e+00 1.5778466e+00 4.2450569e-01 7.6773108e-01 1.5016932e+00 1.3345218e+00 1.1346946e+00 1.0902078e+00 1.2416734e+00 1.9196757e+00 1.1117653e+00 1.6095257e+00 2.0596575e+00 1.0943114e+00 8.7072347e-01 9.2729770e-01 1.4434261e+00 3.8830315e-01 3.6319073e-01 4.1088655e-01 1.8667489e+00 1.6562722e-01 4.2450569e-01 5.9279023e-01 1.8877121e+00 8.2518769e-01 9.7789352e-01 1.4886316e+00 2.7335291e+00 9.1459005e-01 1.8213026e+00 2.2454046e+00 7.7259801e-01 
8.0376328e-01 1.0525811e+00 2.1168634e+00 2.2814168e+00 3.7089872e+00 1.0767714e+00 1.0755005e+00 1.1631285e+00 2.6946772e+00 1.7497347e+00 1.2634840e+00 7.1971771e-01 1.7438018e+00 1.6356845e+00 1.7637315e+00 3.7622328e-01 1.8511762e+00 1.9311191e+00 1.4699785e+00 1.1016806e+00 1.1928774e+00 1.6354514e+00 6.5233704e-01 2.0052498e+00 1.7681972e+00 1.2007144e+00 2.1235855e+00 2.2484715e+00 1.6942544e+00 1.0546367e+00 5.1386894e-01 2.3251407e+00 1.7093881e+00 2.0273081e+00 2.2255325e+00 6.9493020e-01 2.3316649e+00 1.8640280e+00 2.3781796e+00 1.4043141e+00 1.6126002e+00 1.5456113e+00 2.4814136e+00 1.5467508e+00 1.8811164e+00 1.5963715e+00 2.0694478e+00 1.7422855e+00 2.4276706e+00 2.4143104e+00 1.5837613e+00 1.7028549e+00 2.6643250e-01 7.3283576e-01 6.1619693e-01 1.4102704e+00 1.5157660e+00 3.3107238e+00 7.0702759e-01 4.6964680e-01 7.3731902e-01 2.0564363e+00 1.8389778e+00 9.9058911e-01 7.8305765e-01 1.2692102e+00 1.3637808e+00 1.3483908e+00 6.2818221e-01 1.5635907e+00 1.7874832e+00 1.0817627e+00 4.8284931e-01 7.9664122e-01 1.7693113e+00 8.5141186e-01 7.7538587e-01 1.4522625e+00 1.1678338e+00 1.2100516e+00 1.1294987e+00 1.4712028e+00 1.8985225e+00 1.2171256e+00 1.5155373e+00 1.9939611e+00 1.4342819e+00 6.6653737e-01 7.1511757e-01 1.2680818e+00 5.4772790e-01 6.0868934e-01 6.7484334e-01 1.8112028e+00 3.8639663e-01 5.2413598e-01 7.9227302e-01 1.9656037e+00 7.9656884e-01 7.1789533e-01 1.2970125e+00 1.6649242e+00 1.5382030e+00 1.4107908e+00 5.4248468e-01 9.6424206e-01 1.6581712e+00 1.4527629e+00 1.5643033e+00 2.0014001e+00 1.0048958e+00 1.4365091e+00 1.0328871e+00 1.6659456e+00 6.7424840e-01 1.0427348e+00 9.3481345e-01 2.1131807e+00 8.1596583e-01 1.1349095e+00 1.1009910e+00 2.0312552e+00 1.0961859e+00 1.4886316e+00 1.7139439e+00 4.8016385e-01 6.4687084e-01 1.4356300e+00 1.6309773e+00 3.2287360e+00 7.3391501e-01 4.4499696e-01 8.4121419e-01 2.1133414e+00 1.6592561e+00 8.3333283e-01 5.2066928e-01 1.2097457e+00 1.2911242e+00 1.2850973e+00 5.7672351e-01 1.4812088e+00 1.6720834e+00 1.0285902e+00 7.2340544e-01 6.8124108e-01 1.5683551e+00 6.1067563e-01 8.0990117e-01 1.4468934e+00 1.7768340e+00 2.9867606e+00 8.8098199e-01 6.6217390e-01 1.1327663e+00 2.1506380e+00 1.2980342e+00 5.4779717e-01 1.3340137e-01 1.1051628e+00 1.1634940e+00 1.1952607e+00 7.9999102e-01 1.2953104e+00 1.4320028e+00 9.9155078e-01 1.1867923e+00 5.7526462e-01 1.1726810e+00 2.6680274e-01 1.2602457e+00 1.2678174e+00 2.9703757e+00 1.3103399e-01 8.4439576e-01 1.0887336e+00 1.6811909e+00 1.4435947e+00 7.9778097e-01 8.9789119e-01 9.2528705e-01 8.7435479e-01 9.9613800e-01 8.5275415e-01 1.0871867e+00 1.3186900e+00 6.8124108e-01 8.2317311e-01 5.4397563e-01 1.4334280e+00 9.0121513e-01 6.7419212e-01 2.1234744e+00 1.3330747e+00 1.2524426e+00 1.6423187e+00 1.1112287e+00 1.7731481e+00 1.0412209e+00 1.5779602e+00 8.1552831e-01 1.2367326e+00 1.0877340e+00 1.9102507e+00 1.1360631e+00 1.4957547e+00 1.1495900e+00 1.6944472e+00 1.0511712e+00 1.7894533e+00 1.6403454e+00 2.3936810e+00 1.3012342e+00 1.5364148e+00 1.7852089e+00 7.8659640e-01 2.0467146e+00 1.4387140e+00 1.9055720e+00 1.0341095e+00 1.3049404e+00 1.1996837e+00 2.0267836e+00 1.2898173e+00 1.6473842e+00 1.2092432e+00 1.6223502e+00 1.2928268e+00 2.1087983e+00 1.9576761e+00 2.9790337e+00 3.1661621e+00 3.6343153e+00 1.9094387e+00 2.2505084e+00 2.4932991e+00 3.0919113e+00 2.0983540e+00 2.2774299e+00 2.1822207e+00 3.6643554e+00 1.9784646e+00 2.0041121e+00 2.4741311e+00 3.6564145e+00 2.5932898e+00 2.3540393e+00 3.1383476e+00 9.6758101e-01 1.2012033e+00 1.6658010e+00 1.4135473e+00 8.6985276e-01 
9.6249568e-01 9.3451915e-01 8.2380019e-01 9.6993876e-01 9.0168685e-01 1.0632334e+00 1.2723027e+00 6.4232366e-01 8.7376399e-01 5.8942278e-01 1.4153467e+00 9.6559725e-01 6.0709980e-01 2.1192618e+00 1.8400866e+00 8.3619405e-01 7.2626021e-01 1.2848171e+00 1.4775384e+00 1.4500356e+00 8.3216780e-01 1.5874797e+00 1.8427499e+00 1.2442620e+00 8.6985276e-01 8.3878265e-01 1.7484907e+00 7.7500385e-01 2.4608044e+00 2.2758533e+00 1.3186900e+00 1.1601403e+00 1.7655861e+00 1.8899115e+00 1.9324044e+00 8.3306409e-01 2.0122449e+00 2.2830055e+00 1.6787550e+00 8.1019167e-01 1.3243478e+00 2.1959912e+00 1.1244567e+00 1.9511379e+00 1.7500667e+00 2.2774574e+00 1.1011934e+00 1.2684800e+00 1.1435764e+00 2.5178144e+00 1.1861264e+00 1.4342819e+00 1.3109392e+00 2.2026274e+00 1.5858048e+00 2.0711809e+00 2.3400013e+00 1.0557584e+00 1.3438727e+00 1.0821769e+00 8.4383266e-01 1.0493821e+00 1.8656026e+00 7.8957903e-01 5.5399712e-01 1.0737552e+00 2.2030214e+00 1.1115276e+00 2.1119253e-01 1.3352177e+00 6.6627781e-01 7.2272795e-01 8.6751530e-01 9.1936743e-01 1.2019259e+00 8.7588404e-01 1.0946184e+00 8.0162421e-01 1.4236959e+00 4.0664863e-01 9.8463602e-01 6.8496652e-01 1.2266388e+00 1.2615426e+00 1.3010124e+00 7.6362786e-01 1.4014424e+00 1.5148689e+00 1.0932736e+00 1.2210779e+00 6.9618131e-01 1.2059294e+00 2.0855006e-01 4.7509249e-01 3.1239235e-01 1.6472011e+00 4.6557224e-01 7.5810578e-01 4.3937875e-01 1.5999820e+00 5.6262711e-01 1.1233867e+00 1.3019241e+00 3.9472619e-01 1.5943283e+00 3.3742167e-01 4.8284931e-01 3.4893361e-01 1.6410601e+00 6.6154242e-01 9.3451915e-01 1.2980649e+00 1.7001179e+00 5.2283051e-01 6.7484334e-01 3.3872939e-01 1.6485749e+00 6.6653737e-01 1.1055440e+00 1.3931316e+00 1.8078806e+00 1.9570111e+00 1.3920954e+00 7.6195008e-01 1.1016806e+00 1.7729341e+00 7.1446962e-01 3.8639663e-01 6.2027457e-01 1.8723846e+00 8.0990117e-01 9.0447834e-01 1.4243850e+00 7.9227302e-01 2.1007225e+00 1.0230346e+00 7.1789533e-01 1.5391678e+00 1.3603920e+00 4.6137216e-01 1.1083720e+00 1.1686836e+00 1.1908452e+00 2.1561807e+00 1.2583645e+00 1.0722301e+00 7.7259801e-01 1.2001902e+00 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-seuclidean-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-seuclidean-ml.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce80cb1ead3f88bec0fbaf6d48cb4fc584e52168 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-seuclidean-ml.txt @@ -0,0 +1 @@ + 1.4330520e+01 1.4635426e+01 1.3450855e+01 1.4761140e+01 1.3508642e+01 1.5434417e+01 1.3887693e+01 1.5166776e+01 1.3966038e+01 1.4950451e+01 1.4564587e+01 1.3834201e+01 1.4347008e+01 1.5641962e+01 1.4689053e+01 1.4418720e+01 1.4545856e+01 1.4151822e+01 1.4669017e+01 1.5150750e+01 1.3770166e+01 1.3288969e+01 1.4048191e+01 1.4049959e+01 1.4164158e+01 1.3727834e+01 1.4074687e+01 1.4321303e+01 1.2497330e+01 1.3820273e+01 1.4441030e+01 1.4780222e+01 1.2504339e+01 1.5022245e+01 1.4263650e+01 1.3704507e+01 1.3694385e+01 1.3667517e+01 1.3177468e+01 1.4391931e+01 1.4893903e+01 1.4475753e+01 1.4440707e+01 1.3603096e+01 1.6889651e+01 1.4731174e+01 1.3337775e+01 1.5187532e+01 1.5667271e+01 1.4226037e+01 1.4203554e+01 1.5272898e+01 1.6031460e+01 1.5991549e+01 1.1855060e+01 1.4844776e+01 1.2475182e+01 1.4408126e+01 1.4836870e+01 1.3472986e+01 1.4089281e+01 1.1018298e+01 1.3183296e+01 1.4590802e+01 1.4404230e+01 1.2717623e+01 1.3983283e+01 1.4017133e+01 1.4608005e+01 1.4402553e+01 1.3977803e+01 1.4091040e+01 1.3977459e+01 1.2630449e+01 1.4160109e+01 1.3029417e+01 1.2654432e+01 1.2794946e+01 1.3194978e+01 1.4378745e+01 1.2431908e+01 
1.3852651e+01 1.3748358e+01 1.4003568e+01 1.5066681e+01 1.5192826e+01 1.4370013e+01 1.5792545e+01 1.3547546e+01 1.4411543e+01 1.4794215e+01 1.4924312e+01 1.4789153e+01 1.4875055e+01 1.4208537e+01 1.2786148e+01 1.4882476e+01 1.3302010e+01 1.4354774e+01 1.4542129e+01 1.5889633e+01 1.2928185e+01 1.4877868e+01 1.2890902e+01 1.4406165e+01 1.4498123e+01 1.4303273e+01 1.3207002e+01 1.3954732e+01 1.4841248e+01 1.5427799e+01 1.4363463e+01 1.3976277e+01 1.4284878e+01 1.4457991e+01 1.3369469e+01 1.5246610e+01 1.4487573e+01 1.4525176e+01 1.4505865e+01 1.5037347e+01 1.3834927e+01 1.3758988e+01 1.3424987e+01 1.4914766e+01 1.3783923e+01 1.3434291e+01 1.2895927e+01 1.3870360e+01 1.3342977e+01 1.3094322e+01 1.3057847e+01 1.3322375e+01 1.4940650e+01 1.4476829e+01 1.4197503e+01 1.4597035e+01 1.2963234e+01 1.4011414e+01 1.3181409e+01 1.3339615e+01 1.3928735e+01 1.3508015e+01 1.3170749e+01 1.3529133e+01 1.3454724e+01 1.4883437e+01 1.4564565e+01 1.2474313e+01 1.4435790e+01 1.5285703e+01 1.3701736e+01 1.3578312e+01 1.4807311e+01 1.4281072e+01 1.2920213e+01 1.4427803e+01 1.1408611e+01 1.4097334e+01 1.2868115e+01 1.3903683e+01 1.3800332e+01 1.3439339e+01 1.4062651e+01 1.3242107e+01 1.4400424e+01 1.3826132e+01 1.5991146e+01 1.3118258e+01 1.5377390e+01 1.2858378e+01 1.5249567e+01 1.4081585e+01 1.4458052e+01 1.4175623e+01 1.4850069e+01 1.5506668e+01 1.5014770e+01 1.4337030e+01 1.5214705e+01 1.4803729e+01 1.3188675e+01 1.3437739e+01 1.3409394e+01 1.4607386e+01 1.5394271e+01 1.5946451e+01 1.3769364e+01 1.4181208e+01 1.2551765e+01 diff --git a/voice_bridge/scipy/spatial/tests/data/pdist-spearman-ml.txt b/voice_bridge/scipy/spatial/tests/data/pdist-spearman-ml.txt new file mode 100644 index 0000000000000000000000000000000000000000..b50fe3af1912d20aaa737eedc1b6e096a7005876 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/pdist-spearman-ml.txt @@ -0,0 +1 @@ + 9.3540954e-01 9.7904590e-01 8.6703870e-01 1.1569997e+00 8.7174317e-01 1.0627183e+00 9.1272727e-01 1.1593999e+00 9.7573357e-01 1.0072127e+00 1.0536814e+00 9.6276028e-01 9.7700570e-01 1.1513951e+00 1.0719592e+00 9.2178818e-01 1.0004680e+00 9.3689769e-01 9.8205821e-01 1.0332673e+00 9.4517852e-01 8.9437744e-01 9.7556556e-01 9.0460246e-01 9.7210921e-01 9.2230423e-01 9.9605161e-01 9.6852085e-01 8.4162016e-01 9.6667267e-01 9.7759376e-01 9.9757576e-01 7.6992499e-01 1.0151695e+00 9.8691869e-01 9.0325833e-01 8.6665467e-01 8.8844884e-01 8.4553255e-01 9.7700570e-01 9.5159916e-01 9.8906691e-01 1.0551935e+00 9.1973597e-01 1.3266247e+00 1.0982778e+00 8.4531653e-01 1.0887369e+00 1.0984938e+00 9.9851185e-01 9.0701470e-01 1.0639304e+00 1.2392919e+00 1.1422502e+00 8.1725773e-01 1.1844944e+00 7.8219022e-01 1.0817162e+00 1.2196100e+00 1.0003120e+00 1.0164536e+00 7.0724272e-01 9.7981398e-01 1.1134953e+00 1.0671107e+00 9.3600960e-01 9.9984398e-01 1.0356916e+00 1.1248005e+00 1.0696310e+00 1.0634263e+00 9.6472847e-01 9.9365137e-01 8.5724572e-01 1.1257846e+00 8.9930993e-01 9.4903090e-01 9.0667867e-01 9.1231923e-01 1.0573777e+00 9.0105011e-01 9.5255926e-01 1.0177978e+00 1.0606901e+00 1.1966997e+00 1.0891929e+00 1.0085089e+00 1.2640264e+00 9.3246925e-01 1.0198020e+00 1.2055806e+00 1.1237924e+00 1.1060666e+00 1.0517252e+00 1.0684668e+00 7.6844884e-01 1.0572697e+00 8.7373537e-01 9.6283228e-01 9.9350735e-01 1.2412601e+00 7.6322832e-01 1.0298950e+00 8.6148215e-01 1.0042724e+00 9.7012901e-01 9.3712571e-01 8.5845785e-01 8.5862586e-01 1.0336634e+00 1.0955536e+00 9.5302730e-01 9.8696670e-01 1.0633063e+00 1.0026643e+00 9.6380438e-01 1.1711251e+00 9.9273927e-01 1.0260906e+00 1.0863966e+00 
1.0482808e+00 9.0361836e-01 9.2358836e-01 8.7794779e-01 1.2461206e+00 9.2985299e-01 1.0418962e+00 9.4660666e-01 9.5636364e-01 9.0646265e-01 9.9113111e-01 8.3027903e-01 9.3341734e-01 1.1378938e+00 1.0548215e+00 1.0086889e+00 1.1998920e+00 8.6063006e-01 1.0255506e+00 8.4786079e-01 1.0090729e+00 9.2542454e-01 9.5176718e-01 9.3477348e-01 9.0091809e-01 9.6404440e-01 1.1158716e+00 9.9614761e-01 7.7682568e-01 1.0605461e+00 1.0895650e+00 9.0065407e-01 8.7173117e-01 9.9821182e-01 1.2165617e+00 8.6127813e-01 1.1111071e+00 7.9015902e-01 1.0433843e+00 8.6510651e-01 1.0019202e+00 1.0154815e+00 9.4381038e-01 9.8646265e-01 1.0062526e+00 9.7426943e-01 9.8191419e-01 1.3038944e+00 8.6277828e-01 1.0830243e+00 8.6851485e-01 1.1192559e+00 9.9120312e-01 9.6540054e-01 9.1072307e-01 1.1775698e+00 1.1139154e+00 1.1083468e+00 9.9593159e-01 1.0825923e+00 1.1115032e+00 9.7430543e-01 9.5605161e-01 9.2800480e-01 9.4369037e-01 1.1136034e+00 1.1382898e+00 9.5937594e-01 9.8843084e-01 7.4563456e-01 diff --git a/voice_bridge/scipy/spatial/tests/data/random-bool-data.txt b/voice_bridge/scipy/spatial/tests/data/random-bool-data.txt new file mode 100644 index 0000000000000000000000000000000000000000..df0d838f517f6c0afd8954ad87bc83886e0e3de4 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/random-bool-data.txt @@ -0,0 +1,100 @@ +0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 1 0 0 1 1 +1 1 1 1 1 1 1 0 0 1 1 1 0 0 0 0 1 0 1 0 1 1 1 0 1 0 1 1 1 1 +0 1 0 1 1 0 0 1 1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 1 0 1 1 1 0 1 +1 1 1 0 0 1 1 0 0 1 1 1 0 0 1 1 0 1 1 1 0 1 1 0 0 0 0 1 0 0 +1 0 0 0 0 1 1 0 1 1 0 1 0 0 0 0 1 0 0 1 0 1 0 0 1 1 1 1 0 0 +1 0 1 1 0 0 0 1 1 1 1 1 0 1 1 0 1 0 1 0 1 0 0 0 0 0 0 0 1 1 +0 1 0 0 1 0 0 0 1 0 0 1 1 0 0 0 0 1 1 0 0 1 0 1 1 1 1 0 1 0 +1 0 1 1 1 0 0 0 0 1 1 0 0 0 0 1 0 1 0 0 0 1 1 1 0 1 0 0 1 0 +1 1 1 0 0 1 1 0 0 1 0 0 1 0 0 1 0 1 1 0 1 1 0 1 1 1 0 0 1 1 +1 1 0 1 0 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0 +1 0 1 0 1 1 0 1 1 0 1 1 0 1 1 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 +1 1 1 1 0 1 0 0 0 0 0 1 0 1 1 1 1 0 1 1 1 1 1 1 0 1 0 1 1 1 +1 1 1 1 1 1 1 1 1 0 1 1 0 0 1 0 1 0 1 0 1 0 0 0 1 0 0 1 0 1 +0 1 1 0 0 1 1 0 0 0 0 1 0 1 1 0 1 0 1 0 1 1 0 1 0 0 1 1 1 1 +1 0 0 1 0 0 1 0 1 0 0 1 0 0 0 1 1 0 0 0 1 0 1 0 0 1 1 0 1 1 +1 0 0 1 1 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 1 1 1 0 0 +1 1 0 0 1 0 0 0 1 0 0 1 0 1 0 0 1 0 1 1 0 1 0 0 0 1 1 1 1 1 +0 0 0 1 1 1 1 1 0 1 0 1 1 1 1 0 0 1 1 1 1 1 0 0 1 0 1 0 0 0 +1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 0 1 1 0 1 1 +0 0 0 0 1 0 1 0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 1 1 1 +0 1 0 0 1 1 0 0 1 1 1 0 0 0 1 0 0 0 0 1 1 0 0 1 0 1 1 0 1 0 +1 0 1 0 1 1 1 0 0 0 1 0 1 1 0 0 0 0 0 0 0 1 0 0 1 1 1 0 1 1 +0 0 1 0 0 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 0 0 0 0 0 0 1 0 +0 1 0 1 1 1 0 1 1 1 0 1 0 1 1 1 0 0 0 0 1 1 1 0 0 1 1 0 0 1 +0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 1 1 0 1 1 0 0 +1 0 0 0 1 0 1 0 0 1 0 1 1 0 1 0 1 0 1 0 1 1 1 0 0 0 1 1 1 0 +1 0 0 0 1 1 1 0 0 1 0 1 1 1 0 0 0 1 1 1 0 0 0 0 1 0 0 0 1 1 +0 1 0 0 0 1 1 1 0 1 1 1 0 1 0 0 1 1 1 1 0 1 0 1 0 1 1 0 1 1 +0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 1 0 1 0 1 0 1 +0 0 1 0 1 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 1 0 1 0 +1 1 0 1 1 1 1 1 0 1 0 0 0 1 1 1 0 1 0 0 0 1 1 0 1 0 0 0 0 1 +0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 1 1 1 0 0 1 1 0 1 1 0 0 1 0 1 +1 1 0 0 0 0 0 1 1 0 1 1 0 0 1 0 1 1 0 0 0 1 0 1 0 1 0 1 0 1 +1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1 +0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1 1 0 0 1 0 +1 1 1 1 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 0 1 1 0 0 1 0 1 0 0 0 +0 0 0 0 1 1 1 0 1 1 0 0 1 1 1 1 0 1 0 
1 1 1 1 1 1 0 0 0 0 0 +0 1 1 1 0 0 0 1 1 1 0 1 0 0 1 1 1 1 1 0 1 0 0 1 0 0 0 0 1 1 +0 1 0 0 1 1 1 1 0 0 1 0 1 0 1 1 0 0 1 0 0 1 1 0 0 0 0 1 0 0 +1 1 0 1 0 0 1 1 0 0 1 1 1 0 0 1 1 1 0 0 0 0 1 1 1 0 1 0 0 1 +0 1 1 0 1 0 1 1 0 0 0 1 1 0 0 0 0 0 0 1 0 0 1 1 0 1 0 0 1 1 +0 0 1 1 1 0 1 0 0 1 1 0 0 0 1 1 1 0 1 0 0 0 0 1 1 0 1 1 0 0 +1 0 1 1 1 1 1 1 1 1 0 1 0 0 0 1 0 1 0 0 0 1 1 0 0 1 0 0 0 0 +1 0 1 1 1 0 1 1 1 1 0 0 1 0 1 1 1 0 0 0 0 1 1 1 1 1 0 1 0 0 +1 0 0 0 1 1 1 0 1 1 0 0 1 1 1 0 1 0 0 1 0 1 0 1 1 1 0 0 0 1 +1 0 1 0 1 0 0 0 1 0 0 1 1 0 1 1 0 0 0 1 0 1 1 0 1 0 0 1 0 0 +0 1 1 0 1 0 1 1 1 1 1 0 0 0 0 1 0 1 0 0 1 1 1 1 0 1 0 1 1 1 +0 1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 1 0 1 0 0 0 1 1 1 0 0 1 0 1 +1 0 1 1 1 0 1 0 1 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 +1 1 1 1 1 1 1 1 1 1 0 0 1 0 0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1 +1 1 1 1 0 0 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 0 0 0 1 0 1 0 0 +1 0 1 0 0 1 1 1 1 0 1 1 0 0 1 0 0 1 1 0 1 1 1 1 1 1 0 0 0 0 +0 1 1 0 0 1 0 0 0 0 0 1 0 1 0 0 1 1 0 1 0 1 0 0 0 1 0 0 1 0 +0 0 0 1 0 0 0 1 1 1 1 1 0 0 0 1 1 0 0 0 1 1 1 0 1 0 1 1 1 0 +1 1 0 0 0 0 1 1 1 0 1 0 1 1 1 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0 +1 0 1 1 1 0 1 0 1 0 0 1 1 1 1 1 0 0 1 1 0 1 1 1 1 0 0 0 0 1 +0 0 1 1 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 1 0 0 1 0 0 +0 0 1 1 1 1 1 0 1 0 1 0 0 1 1 1 1 0 0 0 1 0 1 1 0 1 1 1 0 0 +0 0 0 0 0 1 0 0 1 1 0 1 1 0 0 0 0 1 0 1 1 0 0 1 0 0 1 0 1 0 +1 0 0 1 0 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 1 0 1 0 0 0 1 1 1 1 +0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 1 0 0 1 1 0 0 0 +1 0 0 1 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 0 1 1 0 0 1 0 1 0 +0 1 0 1 1 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 0 1 1 0 1 1 0 0 0 1 +1 0 1 1 1 0 0 0 1 0 0 1 0 0 0 1 0 1 1 1 0 0 1 1 1 1 0 0 0 1 +0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1 +0 0 1 0 1 1 1 0 0 0 1 0 1 0 1 1 0 0 1 1 0 1 0 1 1 0 0 1 0 1 +0 1 1 1 1 1 0 0 0 0 0 1 0 1 1 1 1 1 0 1 1 1 0 0 1 0 0 1 1 1 +1 1 1 1 0 1 1 1 1 1 1 0 0 1 1 0 1 1 0 1 0 1 0 1 0 1 1 0 0 0 +1 0 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 1 1 1 1 1 0 0 1 1 1 1 1 0 +0 0 0 0 1 1 1 0 1 0 0 1 1 0 0 1 1 1 1 0 0 1 0 1 0 0 0 1 0 0 +1 1 1 1 1 0 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 0 0 0 1 1 0 1 1 0 +1 0 1 1 0 1 0 1 0 1 1 0 1 1 1 0 0 1 0 0 1 1 0 0 1 1 0 1 0 1 +1 1 1 1 1 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 1 +0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 1 1 +1 1 1 0 1 1 1 1 1 0 0 0 0 1 0 0 1 0 1 0 1 1 1 0 0 1 0 0 1 1 +1 1 0 1 0 1 0 1 0 0 1 0 0 0 1 0 1 1 0 1 1 0 1 0 0 1 0 0 1 0 +1 0 1 1 0 0 1 1 0 0 1 1 0 0 0 1 1 0 0 1 0 0 0 0 0 1 0 1 1 0 +1 1 1 1 1 0 0 1 0 0 1 1 1 0 1 0 0 1 1 1 0 1 1 1 1 1 1 1 1 1 +1 0 1 1 0 0 1 1 0 1 1 1 0 0 0 1 0 1 0 0 0 1 1 1 1 1 0 0 1 0 +0 0 0 0 0 1 1 1 0 0 0 0 0 1 1 1 1 1 0 0 1 1 0 0 1 0 0 1 0 0 +1 1 1 0 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 1 1 1 0 1 0 +1 0 0 1 0 1 0 0 0 0 0 0 1 0 1 0 1 1 0 1 0 1 1 0 0 1 0 1 0 1 +1 0 0 0 1 0 1 1 0 1 0 0 0 1 0 1 0 0 0 0 1 1 1 0 1 0 1 1 0 1 +0 1 0 0 0 0 1 0 1 1 1 0 1 1 0 1 0 1 0 1 1 0 0 0 0 0 0 1 1 1 +0 1 0 0 1 0 1 1 0 0 0 0 1 1 0 1 1 1 0 0 1 1 0 0 1 0 1 0 0 0 +0 1 0 1 1 1 1 1 1 1 0 0 1 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0 +0 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 1 0 0 +1 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 0 +1 0 0 0 1 0 1 0 0 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1 1 0 1 1 1 0 +0 0 0 1 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 1 1 1 0 1 0 0 0 1 1 0 +1 0 0 0 0 0 1 0 1 0 1 0 0 1 1 1 0 1 1 1 0 0 1 0 1 1 1 0 1 0 +0 1 0 0 1 1 1 0 0 1 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 1 1 1 0 1 +0 0 0 1 1 0 1 0 1 0 1 0 0 0 1 1 1 0 1 1 0 0 0 1 1 0 0 1 0 1 +1 1 1 1 1 1 1 1 0 0 1 1 0 0 0 1 0 1 0 1 0 0 0 1 1 0 1 0 1 0 +0 1 1 0 0 0 1 1 0 0 1 1 0 1 1 1 1 1 0 1 0 0 0 0 1 0 1 
0 0 0 +1 1 1 0 1 1 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 1 0 1 1 0 0 1 +0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 1 1 1 1 0 1 0 0 0 1 1 0 +1 1 1 0 1 1 0 1 1 0 1 1 0 1 0 0 1 0 0 0 1 1 1 1 0 1 1 0 1 1 +0 0 1 1 1 0 0 0 0 1 1 0 0 1 1 0 1 0 1 0 0 1 0 0 0 1 1 0 0 1 +0 0 0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 0 0 0 0 1 1 1 1 1 1 0 0 diff --git a/voice_bridge/scipy/spatial/tests/data/random-double-data.txt b/voice_bridge/scipy/spatial/tests/data/random-double-data.txt new file mode 100644 index 0000000000000000000000000000000000000000..039ac506f5590f953ffc6598c11197d3fade2bbd --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/random-double-data.txt @@ -0,0 +1,100 @@ +1.172993630434470589e+02 1.905532343119886605e+02 2.613653823499444115e+02 1.570270816248337269e+02 2.373767637129340642e+02 2.175366144750510671e+02 2.609909144757107242e+02 2.086671686166440622e+02 2.674986450118991002e+02 1.395992762090408235e+02 1.115453060949917159e+02 1.531034842395609701e+02 2.621042034264289668e+02 2.958729454449504033e+02 2.137960368830719062e+02 2.606436280968571282e+02 2.492136530687155869e+02 2.770806237064748530e+02 2.667325121892417883e+02 2.909243437665674037e+02 1.570328417294508085e+02 1.738762543815240917e+02 1.514157955792608163e+02 2.264748814818163396e+02 1.911869834397498380e+02 2.083706054660671043e+02 2.778115921852293013e+02 1.330374814391803397e+02 2.988697222234711717e+02 2.534167825404447001e+02 +2.090964891529778242e+02 1.322006104643973003e+02 1.443415945355371832e+02 2.991388772264980389e+02 1.649302344777914868e+02 2.839528641910463875e+02 1.677159709681393736e+02 2.597553832458208944e+02 2.861055547321268477e+02 1.866431671806918189e+02 2.131812134614140177e+02 1.881465139477683124e+02 1.271865139985419262e+02 1.821608865941132649e+02 2.793653846657656459e+02 1.745982981552271838e+02 2.440893905635109888e+02 1.926469151980868446e+02 2.752453852984189098e+02 1.333479229516146347e+02 1.756311805755703404e+02 2.039367615619088383e+02 2.441861159155101575e+02 2.136111324500645594e+02 2.893808960992043922e+02 2.723220466017930335e+02 2.367879096909125565e+02 2.831541206793258425e+02 2.017643187924728068e+02 1.293072046241175030e+02 +2.311242818257193221e+02 2.180694109009666306e+02 2.728791416531455525e+02 1.239345918565636993e+02 2.885729762050686418e+02 2.082619393005260804e+02 2.331416004257805525e+02 1.003112528445347778e+02 2.796331120515330895e+02 2.804679740148056339e+02 2.466936828597247597e+02 1.422398585800914361e+02 1.312115029632765015e+02 1.324417143647877708e+02 2.161716508991076466e+02 1.791489656100356171e+02 2.239038785146145472e+02 2.456511993086799919e+02 2.885023077068626662e+02 2.127338775308419940e+02 2.468090724782538246e+02 2.704135008577740109e+02 1.144148504575758665e+02 1.641571759150080538e+02 2.473349551308716343e+02 2.366620528761779667e+02 1.208143167141831498e+02 1.403705034199327599e+02 2.061073908129479548e+02 1.482034962693051057e+02 +1.938319500339997035e+02 2.000523826243218650e+02 1.356134735235139317e+02 1.224357428573656250e+02 1.262840705282213918e+02 1.112797762573139977e+02 1.727826315738305993e+02 2.199559683100150664e+02 1.817290208723558180e+02 2.185579898773881951e+02 1.772844462934412491e+02 1.589145011846130728e+02 1.017520743541414703e+02 2.836990856171782980e+02 1.265544072638776640e+02 2.503473341476423855e+02 2.178539278172635534e+02 2.063574432066289432e+02 1.473169457524925861e+02 1.112719632489760784e+02 1.195996070145015722e+02 1.345099678548529312e+02 2.992645259487585463e+02 2.692242364540683752e+02 2.139649193607747861e+02 
2.313659165106297451e+02 2.524185025119667785e+02 2.678714004815313388e+02 1.111457754393238702e+02 1.296443575800298902e+02 +1.183944097426736306e+02 2.750477277868330930e+02 1.688558971333346790e+02 1.432283295687057034e+02 2.226043174503911359e+02 1.825124733235978169e+02 1.806485153578007612e+02 2.270256019866706936e+02 2.852913053786990076e+02 2.867562520175486043e+02 2.795056496733417362e+02 1.142488895870292822e+02 1.502985045661773427e+02 2.246907359526948937e+02 2.051158858061974115e+02 2.663351441156772808e+02 2.864853431806749882e+02 2.276548949573071070e+02 2.678087640355958001e+02 2.266463576941352187e+02 1.886763304826383774e+02 1.150603609957262563e+02 1.596187994714221929e+02 1.844565420383776484e+02 1.730173420200940768e+02 1.427940137102308995e+02 1.774757620992130001e+02 2.563086691508434001e+02 1.666317348809653822e+02 1.878143419608473437e+02 +1.642344698640436036e+02 1.591648429561690818e+02 1.561851029939521140e+02 1.854367091922420059e+02 1.494951311500319093e+02 2.443780767043579942e+02 2.741090240793212160e+02 1.519200656263381006e+02 1.391711947382712538e+02 1.482414334940778815e+02 2.574425018646875287e+02 1.455120022089010945e+02 1.620904376421240727e+02 2.098493186451893848e+02 2.377904829227144887e+02 2.881187570801528750e+02 1.785609418793050054e+02 1.500483139796714340e+02 1.697371065898091729e+02 1.824143324642365087e+02 2.329862749140337712e+02 1.372006180078979298e+02 2.250666134242961789e+02 1.760894707637434067e+02 1.874161150869196035e+02 2.860410495381969440e+02 1.539271628213176086e+02 1.051658254213322152e+02 1.501619097950496666e+02 1.205717364486104515e+02 +1.275638286377957371e+02 2.620802183565458563e+02 2.290828196339760723e+02 2.591630015014513333e+02 2.102568650793322149e+02 2.385080320420775593e+02 2.683788150825365619e+02 1.808700201925492763e+02 1.972184450648797451e+02 2.382313686117472287e+02 1.733526990293641177e+02 2.369802981553972074e+02 1.835652530901061823e+02 1.274084560526275141e+02 2.403488205519001326e+02 2.713515297463850402e+02 1.455311801633137065e+02 1.889430214806582171e+02 1.676324321357484735e+02 2.327799977696781184e+02 2.846419393176552148e+02 1.510702433968490936e+02 1.361559014852734606e+02 1.732199851325496525e+02 2.451323003571785364e+02 1.833444866660036894e+02 2.451280287301300405e+02 1.669088211440060832e+02 2.768492228383354359e+02 2.445882168033535038e+02 +2.905092787520428601e+02 2.948076984760371033e+02 1.731080208454208673e+02 2.825532355845657548e+02 1.108820315678514845e+02 2.862013985457700755e+02 2.111453776876104769e+02 2.614428154999528147e+02 1.461523265575596042e+02 2.304914832379158156e+02 2.502987607420118934e+02 2.474276046141548875e+02 1.739607960146905725e+02 2.098700376203710789e+02 2.373226438948917121e+02 1.258493219462072119e+02 2.692932028872633055e+02 2.819145908444669999e+02 1.941653933285864468e+02 1.666395497972145847e+02 2.371919109091950588e+02 1.978302896313488191e+02 1.951483674191611613e+02 2.694357972099330141e+02 2.387068160427941450e+02 2.826084316255729618e+02 1.350954172043159929e+02 1.414479610501084039e+02 1.407657276334374501e+02 2.725513503737778365e+02 +2.055761393809777360e+02 1.070553196069381556e+02 1.045726024365074096e+02 1.611577217417760153e+02 1.258091705742062629e+02 1.038769334534844120e+02 2.956016304760584035e+02 1.586570076132481972e+02 1.636816353299032585e+02 2.375674325770941095e+02 2.085436646116971531e+02 2.088922128397473443e+02 2.316234644183506930e+02 2.623581653234684268e+02 1.714245300492981698e+02 2.844387943099641234e+02 1.469270259610659650e+02 
1.157700922187784727e+02 2.367694595159086361e+02 1.548671738744121740e+02 2.013687686570863207e+02 1.860374943080277887e+02 1.733446602950305930e+02 2.488507085609763010e+02 2.929099979257852056e+02 1.825615338506695480e+02 1.338575452835397925e+02 1.491478381149757979e+02 1.116052925520655066e+02 2.341983606431906537e+02 +1.014445800974648222e+02 2.539987638010908597e+02 1.871788778457793399e+02 1.454231386314719998e+02 2.284640297096368045e+02 1.174773591296971915e+02 1.395683165851895637e+02 1.137193571402578414e+02 2.370662356797280950e+02 1.767292649815032064e+02 2.688513591587910696e+02 2.913902923086397436e+02 1.122392290694582897e+02 1.366157623619356229e+02 2.667409125457835444e+02 1.834435599491967537e+02 1.437174343391732236e+02 1.130622879516462120e+02 2.898543289046954214e+02 1.559795378531963479e+02 1.765577834073310157e+02 2.422955620302867885e+02 2.384835032255701321e+02 1.708163174135501094e+02 2.012159081107001839e+02 2.825663186839160517e+02 2.627299211659199045e+02 2.173916205317264883e+02 1.878835852278120910e+02 2.578733373077019451e+02 +2.843897417914848802e+02 2.685865547709703378e+02 2.810255710736182664e+02 2.572690897085278152e+02 2.416998564827035523e+02 1.770932574976374099e+02 2.021652319180342943e+02 1.414744641219446351e+02 1.464677002516696405e+02 1.831165552459343644e+02 1.157177632931430651e+02 2.625289386264841482e+02 2.972225480003540952e+02 1.024156386789293265e+02 2.305099741095138768e+02 2.241903749843916671e+02 1.157222019118702292e+02 1.533205318359311775e+02 1.179505454242311799e+02 2.666741766563739020e+02 2.792728900733587238e+02 1.222170248460037811e+02 2.573772727215269924e+02 1.535874607134987286e+02 1.231830862844115728e+02 2.584552954023608891e+02 2.541883057030129862e+02 1.001259630352790566e+02 2.332879439260797767e+02 2.240027888381033563e+02 +1.537092645679641123e+02 1.737278083620151392e+02 1.736358049797527201e+02 2.251608985235982630e+02 1.812387130195175473e+02 1.605621432944637377e+02 1.880655312831545700e+02 2.234500385148787700e+02 1.156918728696038272e+02 2.243685096423413654e+02 1.934342626327970720e+02 1.850952349553267027e+02 2.629944548485545965e+02 1.410418270562070973e+02 1.442479234012843960e+02 2.244518961458842909e+02 1.350755563946989923e+02 1.207094763037939913e+02 1.849900977633715797e+02 1.712315707730903398e+02 1.136025349108833495e+02 2.266901327137990734e+02 2.049289406654929735e+02 2.168279721613268407e+02 2.802488024880154285e+02 2.288593244920211873e+02 2.512942787545493957e+02 1.605416563468323261e+02 1.449848598254574483e+02 1.444073785399158396e+02 +1.576600406756634243e+02 1.316580100950168912e+02 2.530050469343043460e+02 1.319013133578224028e+02 2.708693079386434306e+02 1.256852413190491689e+02 1.471714019119002046e+02 1.119112141125198576e+02 1.482405279774543772e+02 2.151504825709631064e+02 1.449998801809978488e+02 2.163638771503673581e+02 1.272949254250747657e+02 2.476027791419436141e+02 2.891208457332292028e+02 2.642744540427622724e+02 1.972643066216432999e+02 2.480891057982425423e+02 1.265454595896786003e+02 2.957735252703171227e+02 1.831389323451852533e+02 2.674516147697771657e+02 1.404389674972707667e+02 1.350952754772052913e+02 2.169062951790871807e+02 2.445227715623778408e+02 1.771545655819627427e+02 2.729961759152714649e+02 2.655105689521545855e+02 1.887977700062222084e+02 +1.336462666694000632e+02 1.333709897858500995e+02 2.263366393511863350e+02 1.847175439991091821e+02 1.121699721143812383e+02 1.985314153845103533e+02 2.097626398761568396e+02 1.994292542548276970e+02 2.119822099620050722e+02 
1.121578896112172430e+02 2.285640262135607372e+02 1.530452060058861719e+02 2.280757825791220625e+02 1.002584314437652893e+02 1.549763597162410349e+02 1.962603185897801836e+02 1.520023734031539107e+02 2.188357004065238129e+02 2.078620274892635678e+02 2.253215106546470281e+02 1.707542413836397373e+02 2.818584030117174279e+02 2.256862624833151472e+02 1.123882683852972377e+02 2.188298604829752776e+02 1.623779544769217296e+02 2.272253780943444212e+02 1.236449568833132560e+02 1.456708971140968174e+02 2.173334506159979753e+02 +1.355111076933105210e+02 2.882277378633141325e+02 1.458332953325788139e+02 2.038461345794760007e+02 2.077052275373579278e+02 2.430957456359013804e+02 2.398926697516154150e+02 1.861334604823129553e+02 1.056851094080089695e+02 1.250491536199931772e+02 1.475324860190441427e+02 2.446126161547439324e+02 2.283994822545897705e+02 1.411463500178549850e+02 1.017206978570942510e+02 2.805514386584911790e+02 1.128847993259780083e+02 2.326583828053989862e+02 1.968387029218569069e+02 2.013375618903088480e+02 2.981010702857409456e+02 1.018614681114941902e+02 1.799507821883679526e+02 1.133741465580100396e+02 1.235533581072856038e+02 1.980629645203880500e+02 2.289642287691829097e+02 1.596082722591768288e+02 1.905110471998515322e+02 1.789448781159623820e+02 +2.588286452268601465e+02 1.978130463173739599e+02 1.052689337312009599e+02 1.316763830509305251e+02 2.659236586726388509e+02 1.637014132384438767e+02 1.416031833329826668e+02 2.638665530652568236e+02 1.007257384115875425e+02 1.143900271701907769e+02 2.977834670475828602e+02 1.589765734727692745e+02 1.903975572290986520e+02 2.371635535037608804e+02 1.840341975670916668e+02 2.047003785265828242e+02 2.798969769773281655e+02 2.731706896262927557e+02 1.266878907904394254e+02 1.882415083052244427e+02 2.273996647906652129e+02 1.051754139634791869e+02 1.949647447346334843e+02 2.153583447980240919e+02 2.763468452623635585e+02 1.126493843527773322e+02 1.566047572050934491e+02 1.655928523150526246e+02 1.733528322945315949e+02 1.292815908595541146e+02 +1.453195062153936874e+02 1.443849872704900008e+02 2.393030362110915519e+02 2.203850914291498668e+02 2.628192548589183275e+02 1.142161203389242132e+02 2.954875947743198594e+02 1.914138981839176950e+02 1.956478457154231023e+02 1.282875398486639824e+02 2.801001077571227142e+02 2.478095646281364566e+02 2.467477848581343949e+02 2.819656424464902784e+02 2.951823714077539194e+02 1.777239847229775478e+02 1.197979896746704185e+02 1.481181033052623661e+02 1.906710229153984528e+02 2.142395628283543658e+02 2.300980272040501973e+02 2.228884003748859186e+02 2.473330601440014220e+02 1.391193242835927322e+02 2.836257563055140736e+02 1.510096324299383923e+02 2.202302141125946946e+02 1.931468179284185851e+02 1.332427495686727639e+02 2.591048546650930575e+02 +1.878681542531208208e+02 1.576240359584147654e+02 2.653849736815447500e+02 2.963544993865212973e+02 2.044592436730770828e+02 2.022626486161902903e+02 2.692262675681025144e+02 2.660999355751699227e+02 2.275843495473382347e+02 1.090849337992742818e+02 2.095602584555617227e+02 1.896271059113536808e+02 1.103822849104477513e+02 2.916911739044173260e+02 1.131212278363718582e+02 2.998892666268029643e+02 2.476782245756396605e+02 2.259689579913920738e+02 1.853942231198421950e+02 1.358270117521841200e+02 1.538630682720535674e+02 1.002148317174243601e+02 2.538393939061405433e+02 1.631649956267838206e+02 2.086654853664906000e+02 2.065167771482954322e+02 2.184161808630845485e+02 2.204789814939956045e+02 2.876785893506615821e+02 2.415299687386639675e+02 +2.578989465605797591e+02 
2.309888943086805853e+02 2.139372792253111584e+02 1.438019921733897775e+02 2.686852572045135616e+02 1.347038004304963579e+02 2.662658866335509060e+02 2.378358170108797367e+02 2.901455078003721155e+02 2.653867524737770509e+02 1.011162296015096302e+02 1.236447329941733528e+02 2.440241295351771669e+02 1.285889645706482725e+02 1.234088480316093808e+02 2.765916670935633874e+02 1.132915304101479421e+02 2.967043774237617413e+02 2.960414394814537786e+02 1.923965028192617410e+02 2.177448618307050765e+02 2.328047369831131732e+02 1.702256773965170282e+02 2.320080409490440729e+02 2.962065584958517093e+02 1.421971909775941185e+02 1.416181340866144183e+02 2.318260414882616374e+02 1.990521696869427046e+02 1.291045564046920333e+02 +1.562042774178686386e+02 1.004265446278790392e+02 2.987714610921041185e+02 1.843637355858842284e+02 1.975513718825063165e+02 2.869996482942455032e+02 1.598134132589713943e+02 1.814921031876193638e+02 2.433389905907341983e+02 2.220363745053336970e+02 1.548306942100590504e+02 2.274512269554506361e+02 2.173006200058655963e+02 2.139515436667214772e+02 1.820439741095771353e+02 2.954110718222074183e+02 2.706126458816278273e+02 2.546812106115172583e+02 1.499899738326257363e+02 1.498010641912065921e+02 1.897725780579399668e+02 2.531561160917130167e+02 2.568891780637028432e+02 2.223136077092870551e+02 1.518604819103856585e+02 1.610422120589223027e+02 1.090455809489133259e+02 1.950503873748027388e+02 1.235704160644129388e+02 2.711492093024702967e+02 +2.039597038432034424e+02 2.026680584622021684e+02 1.365818873512059213e+02 2.909476552420245525e+02 1.721994194158640425e+02 1.854386667051114443e+02 2.287109571295530372e+02 1.912591665763447963e+02 1.607322994166321450e+02 2.949516230628389053e+02 2.522065912002103403e+02 1.869433122585654701e+02 1.235797649248940644e+02 1.522422059501078024e+02 2.738245135411146975e+02 1.059681837441489307e+02 1.013027238331489173e+02 1.660100598156148237e+02 2.454471731623151243e+02 2.467503196183328100e+02 2.584564749953993896e+02 2.079587352810677316e+02 1.650926041957846451e+02 2.269719270682073784e+02 2.376254891983122093e+02 1.510146656008620596e+02 2.672848371954185041e+02 2.692845974117340688e+02 2.180714754246087921e+02 2.186797802447831884e+02 +1.704231257711912519e+02 1.993416036368699906e+02 2.293703655438095268e+02 1.494582642918422266e+02 1.988970317734676030e+02 2.329763291241497711e+02 2.594871448385057420e+02 2.168089936885102134e+02 1.825320854593447280e+02 1.816754553181755796e+02 2.164740515812325725e+02 2.676208645391697019e+02 1.298365075936954725e+02 1.802664596093496243e+02 1.015344620621038132e+02 1.955048336384612639e+02 1.938953913674110083e+02 2.716932071347151805e+02 2.391085978949223829e+02 1.852300387899809380e+02 2.933293185307651356e+02 2.502753353909542966e+02 1.326128348575908262e+02 1.132638325194699433e+02 1.382024010322260494e+02 1.899310337488860796e+02 2.577639546186944699e+02 2.130234590296898887e+02 2.056292296528304746e+02 2.070746044453983927e+02 +2.712524956603344890e+02 1.103212761114690750e+02 1.501201791543782917e+02 1.588084859702673555e+02 1.780379814134324192e+02 1.938691258391782810e+02 1.322057441019641146e+02 1.105823874551086590e+02 2.879365916037821194e+02 2.457617763012990224e+02 1.036189749330240488e+02 1.682919366264929124e+02 2.271749409116763161e+02 2.468308259697249127e+02 2.530034131464132088e+02 2.481420904342841709e+02 1.546080547019561209e+02 1.278414739842506265e+02 2.234886960240669111e+02 2.535365186455997843e+02 1.599130733896959669e+02 1.151371295028686035e+02 2.378656188176093451e+02 
2.901072209563180877e+02 2.524076257924749882e+02 2.849501171254129304e+02 1.802791659856764568e+02 1.527418387706650833e+02 2.578820596338672431e+02 1.208856989199291263e+02 +1.884906470590645711e+02 2.304295185581007672e+02 1.035923344330140736e+02 1.647061655195892627e+02 1.910201770870304472e+02 1.752788518438422614e+02 2.763014227316762117e+02 2.545709641405486252e+02 1.642694881393259152e+02 1.850698110761380804e+02 2.423689469305483328e+02 2.821007056776016384e+02 1.440765548977453250e+02 1.082195827231368952e+02 1.292487205530619008e+02 2.136496853657876613e+02 2.268509220579896635e+02 2.999629735037570981e+02 2.135306905316524535e+02 2.807718279523737692e+02 1.079256111018183759e+02 2.233050677333321801e+02 1.960571416898615951e+02 2.930642308139058514e+02 1.350490077967585307e+02 2.626074042719769750e+02 2.812196827814445328e+02 2.812753678081913336e+02 1.893738913514469004e+02 1.237248675858835725e+02 +2.024005284879252144e+02 2.663611407988397559e+02 2.687079844301063076e+02 1.583164038086077312e+02 1.451019436850150441e+02 1.100558451420041450e+02 2.083655450975085159e+02 2.034012033819327598e+02 2.745375932717230398e+02 1.454718097055225599e+02 1.519068131933423729e+02 2.522666952972969625e+02 2.409340029943109300e+02 1.697386944425205115e+02 1.092659514648129289e+02 2.785598218078254149e+02 1.404092026094307357e+02 2.152301424167146990e+02 1.170396027347833723e+02 2.495323893679063474e+02 2.070836095469416591e+02 2.187978925167305135e+02 1.478606128149070855e+02 1.189323178954538207e+02 2.012925160284665651e+02 2.080878545398990127e+02 1.510128433840351647e+02 1.657302151838663065e+02 2.177026636795220043e+02 1.221198981216710422e+02 +1.411258561955272341e+02 1.419717097672817374e+02 2.247481951315160984e+02 2.805973971111802712e+02 2.755562061324142178e+02 2.039769327420251557e+02 2.994080883760036045e+02 2.417843309736466040e+02 1.023751441731232319e+02 1.491356884971497152e+02 2.542464200475323821e+02 1.496044144381669128e+02 2.829129207809560285e+02 2.479316882407134699e+02 2.441205876677642550e+02 2.045492313770996020e+02 2.855582203360229414e+02 2.884005586284110336e+02 2.039668453101600676e+02 1.690279206477617890e+02 2.136822090795746760e+02 1.254275901194574772e+02 1.084851042192170922e+02 1.656011685190305229e+02 1.415195951026897774e+02 1.578115814760412263e+02 2.619737257057257693e+02 1.492347147839753347e+02 1.627213988646173561e+02 1.343297485726322691e+02 +2.544675070683062756e+02 1.367461330002975899e+02 2.928364121110963652e+02 2.024865028281971036e+02 2.758937379397792142e+02 1.293527538914390220e+02 1.003170531204512059e+02 1.514803620238746760e+02 2.603616046431354789e+02 1.790387290949859960e+02 1.954717187769221027e+02 1.325226280128280223e+02 1.522166198122710625e+02 1.162911821325583048e+02 2.798489406348742250e+02 2.521718932296424498e+02 2.622327475379161115e+02 1.027798265388270949e+02 2.437256510683693023e+02 1.911771820917219884e+02 2.722604457055863350e+02 2.850557929858495640e+02 1.953760157441756746e+02 2.473572905253965644e+02 1.891404804097296051e+02 1.514672503279451803e+02 2.213565012031598940e+02 2.253356064978207769e+02 2.044629345029305227e+02 2.805872739342098612e+02 +2.859142434488251183e+02 1.016009480575973356e+02 1.779351649172412522e+02 2.205171340775500539e+02 2.104472905774927369e+02 1.755755724600441567e+02 2.751836189782782185e+02 2.820692049982218350e+02 1.337557428916256015e+02 1.569761138230965969e+02 1.991757527032745543e+02 2.615974376894962461e+02 1.944849272958306017e+02 1.868411694165790777e+02 2.994394032068257729e+02 
2.802783326794233290e+02 2.693871918204162625e+02 1.750293298802730249e+02 1.468161278725061720e+02 1.272003326865558108e+02 2.233103517167062932e+02 2.103066399402185027e+02 2.720825853079193735e+02 2.728915492341989193e+02 2.160004538807991992e+02 1.325145501710478015e+02 2.549827549782140466e+02 2.921469675413995901e+02 1.846231529604695822e+02 1.391152989663993651e+02 +2.538717579982014456e+02 1.450483481068324352e+02 2.720200816305956550e+02 1.120834821105324011e+02 1.703801876168104741e+02 1.091293661435919233e+02 1.410263490040598526e+02 1.910022197757120352e+02 2.505223413771657022e+02 2.069613533172621374e+02 1.367200764291426935e+02 1.269156762039037574e+02 1.459486945063737267e+02 1.585863332989725905e+02 1.433846106215619329e+02 2.893202513225785424e+02 1.754070497414596730e+02 1.678900237854272746e+02 2.363821059303507752e+02 1.088858921730617908e+02 1.962435837543239927e+02 2.151311182954276831e+02 1.943029551670006754e+02 1.670799798236046172e+02 1.348235227224938910e+02 2.005836112104490212e+02 2.601588534628079969e+02 1.194827586439497935e+02 2.131891535893303740e+02 1.835674362703964277e+02 +2.872207377280434457e+02 1.680389491751975299e+02 2.268072198735419533e+02 1.324343035526375729e+02 2.746241572770433095e+02 2.142161570690199710e+02 1.852290440736100550e+02 1.772431485621305285e+02 1.144750125154023266e+02 2.162070901557998468e+02 1.490690769171257557e+02 2.904041493178549445e+02 2.673617561413327621e+02 2.904362235840736730e+02 1.438791831406123833e+02 2.596893065528289526e+02 2.617155941751458386e+02 2.388486986717779246e+02 2.718819501315180105e+02 1.265484539827731680e+02 2.508989305854047700e+02 1.677208481362706323e+02 1.527665277518251230e+02 2.069026506407369084e+02 2.223100964495413336e+02 2.859845330217733022e+02 1.430291068893224349e+02 1.186508486537613436e+02 2.043257492072551713e+02 2.909823892985461953e+02 +2.385945641230763670e+02 2.011887933217761031e+02 1.622448188725907983e+02 1.738874847453056987e+02 1.669498482708885376e+02 1.853462372214463016e+02 1.514500885098960907e+02 1.569159134451362547e+02 2.521399095730983504e+02 1.246878140446721659e+02 1.758330561641313352e+02 2.722601647479554003e+02 1.679012078705679869e+02 1.710944469563905272e+02 2.012619557548435978e+02 2.130692925302264200e+02 2.489118511754019778e+02 1.553758318484749452e+02 2.531318516516165857e+02 1.895498740333992487e+02 2.010265603399928409e+02 1.805605111948569856e+02 2.471772127430102159e+02 2.822665908577009759e+02 1.256656757093761314e+02 1.218957078832023626e+02 2.851942693987446660e+02 2.434079459678487751e+02 2.183256665756584312e+02 1.881473862468819220e+02 +2.878274557836845133e+02 1.654481949983921254e+02 1.215681808546938214e+02 2.567820905945674781e+02 2.104106688330284101e+02 2.960796083414018085e+02 2.020680111052573693e+02 2.328934707961639106e+02 1.081575190462602336e+02 1.003340046261853189e+02 2.009697278729638299e+02 2.231963192062537757e+02 1.203849639323555323e+02 1.187994179134823156e+02 2.211937485225296030e+02 1.667300587261732119e+02 1.727379541915926211e+02 2.085029285798690353e+02 2.440827389167183981e+02 2.864522928573259151e+02 2.974890568790378893e+02 2.102945085846974393e+02 1.972598274048171447e+02 1.762889209976547136e+02 1.346946323322499666e+02 1.554434255958064170e+02 2.915634104756007901e+02 1.434053307556222876e+02 1.055800565037633163e+02 2.043924431141962259e+02 +1.494596010135965116e+02 1.369114048625681335e+02 1.414146701131132886e+02 1.383970135097982848e+02 1.734304788623498155e+02 1.594301265610334610e+02 1.040146208229407137e+02 
2.208381597698417806e+02 2.904998286250861383e+02 1.300157615397056929e+02 2.667076669416877621e+02 1.062418844419948556e+02 2.717657999079561364e+02 1.054097765488278640e+02 2.401074677516734823e+02 1.045408432466875297e+02 1.330046749931937882e+02 2.297648034226271534e+02 1.488059718063634307e+02 1.725671935994615183e+02 1.330818497812682608e+02 2.341687919103425770e+02 2.983144736799429211e+02 2.798846823197050071e+02 2.218705077010061473e+02 2.681931695329894865e+02 2.339384973461015420e+02 2.893058480095726281e+02 1.539801301873031321e+02 2.746688360458649640e+02 +1.330701439354522222e+02 1.727884450558678395e+02 2.309082669627648272e+02 2.027633892073664299e+02 2.725503026364725656e+02 1.999882667367585896e+02 1.904108867169430255e+02 2.952458047945178805e+02 2.903769421220866320e+02 1.593020200554085477e+02 1.236139458806368623e+02 2.670862420061573062e+02 2.910830183895285472e+02 1.860711175093342149e+02 2.161724988935532963e+02 2.564488756979296795e+02 1.231566645138573648e+02 1.554206254375235403e+02 1.148558104746345521e+02 1.512714227454516163e+02 1.953024826710307025e+02 1.296022137194406127e+02 1.500450396815122076e+02 2.611742573447975246e+02 1.601671705158374550e+02 2.391666762859087214e+02 2.566415095930981352e+02 1.923304801412870404e+02 1.194174883996373353e+02 1.970722090829630986e+02 +1.912113734453868688e+02 1.498407015577022605e+02 2.038188614169363007e+02 1.315017316695561647e+02 2.564290419741012101e+02 1.890015309531812022e+02 2.451565642315005960e+02 2.794356592632736920e+02 2.286941218755985972e+02 1.959549984609147941e+02 1.183834182035568716e+02 2.102820643179567242e+02 1.748108698585573393e+02 1.534379248653211221e+02 1.919662859034699522e+02 1.273611408042816464e+02 1.848163823983119585e+02 1.719445827292381637e+02 1.098466009889928898e+02 2.781108902268393877e+02 2.089286134506138524e+02 2.324518337977864348e+02 1.983840049195213169e+02 1.897881971862217370e+02 1.057077761008814605e+02 2.693629461665184408e+02 1.359710117509105487e+02 2.191184409971657487e+02 1.295811391257115304e+02 1.272165218667991553e+02 +1.987244486959793903e+02 1.516360617950651317e+02 2.198509518241761498e+02 2.494181713303175911e+02 2.903223989223247372e+02 2.847249789220907132e+02 1.747037051964282171e+02 1.610307305098726829e+02 1.866621867053561061e+02 1.016530888490581503e+02 2.606194448419089440e+02 1.820037020201941402e+02 2.650669443765450524e+02 1.137210849453726098e+02 1.329244106101075715e+02 1.741312140090854257e+02 2.301425980066611885e+02 1.051708772384664030e+02 1.994040172335078864e+02 1.874773290907829733e+02 2.745616984783777070e+02 2.354781865911449756e+02 1.598287033335407159e+02 2.650689470710170212e+02 1.643692352330562017e+02 2.991199217036622713e+02 2.713535332162406348e+02 2.516280148665988463e+02 1.124367393830256532e+02 1.725070309959049837e+02 +1.637875882282461077e+02 1.407642428016634426e+02 2.759741260511348173e+02 1.982469453863400304e+02 2.966736241669494802e+02 2.756530253528777052e+02 1.426661371226006167e+02 1.585144634205103102e+02 2.836415355000413001e+02 2.468213340046699784e+02 2.898204535963063790e+02 1.711408259966125343e+02 1.900542569026269177e+02 1.112151031999617032e+02 2.679918109779015936e+02 2.737346364036235400e+02 2.597479311885246602e+02 1.719445390286030886e+02 2.361360157374418236e+02 1.123330408578339785e+02 1.214203690485689719e+02 2.552722899309185891e+02 2.436705678248840456e+02 1.596697357728296254e+02 2.533254006866929444e+02 2.066863222258713790e+02 1.194370826184286329e+02 2.943584774485435673e+02 1.636272134478143130e+02 
1.191267138602315185e+02 +2.350924626651462006e+02 2.263138093076711357e+02 2.206572605284771385e+02 1.704171521239532296e+02 2.000250897638135257e+02 2.966317084215347109e+02 1.350543763227695138e+02 1.248113195978286285e+02 1.480602782771696297e+02 2.391913401309390679e+02 1.908758915801345779e+02 2.476074601271855045e+02 2.408834383325319095e+02 1.009169451940341560e+02 2.567526834523320645e+02 1.791854948779896688e+02 1.412277552146151152e+02 2.660711025781407670e+02 2.073940326990519054e+02 2.509760072499196610e+02 1.358593750308925223e+02 2.127422683140523532e+02 1.874643773621423293e+02 2.844455725631112273e+02 2.197223292953194118e+02 2.049519862750077266e+02 1.674367936692717365e+02 2.806316262053937294e+02 2.040091003350897836e+02 2.675290975004411962e+02 +1.483513543637005796e+02 2.384008274111940011e+02 2.834409911154408519e+02 1.344593118283445392e+02 2.346883831968173979e+02 1.381882879805813218e+02 1.241165074750676638e+02 2.186327911062819567e+02 2.466602279029802673e+02 1.573094529523951906e+02 1.568918412618390903e+02 2.289205163045023710e+02 1.170165333644822283e+02 1.742406104080407658e+02 2.082974381484526702e+02 1.600869123712819260e+02 2.399160913983472199e+02 2.877189278027444743e+02 2.845252294036096146e+02 2.342337907657317544e+02 1.496264758341107779e+02 2.905797831387872066e+02 2.824703799011629144e+02 1.047015685176013307e+02 1.056531628249932169e+02 2.778559625738202499e+02 1.693549799118289343e+02 1.654193764711911570e+02 1.062077606699500762e+02 1.159643419206647792e+02 +2.694780377267857716e+02 2.229138360502907403e+02 2.407432883969363218e+02 1.240072643521201741e+02 2.128611568148922970e+02 2.114050669978733481e+02 1.042337934877265297e+02 1.044783539591350490e+02 2.706611056394938259e+02 1.972285130309975898e+02 1.959046941044780681e+02 2.915493579522836853e+02 1.131994346897827342e+02 1.197362406389762839e+02 2.877593780027675621e+02 1.089470964294721824e+02 1.996015695685267417e+02 2.185569019121031999e+02 2.102686704320404374e+02 2.955299037924150980e+02 2.987478446256551479e+02 2.517129931888254646e+02 1.552463625479420557e+02 2.295020326441428153e+02 2.886454895961533111e+02 1.869792800456660871e+02 2.703426621835664037e+02 1.873514421416134326e+02 2.714620374401066556e+02 1.623625260081516331e+02 +1.457420078291350194e+02 1.926195242081234369e+02 1.841639049563959247e+02 1.397830290030836125e+02 1.287503203163068406e+02 1.684614546803193775e+02 2.820658047345126533e+02 2.986548244924653090e+02 2.631399932039782925e+02 2.870930868530864473e+02 1.141938207690214426e+02 2.868552010662050407e+02 2.019110175402121286e+02 2.840219745246005232e+02 2.848478851173646262e+02 1.902287203163165259e+02 2.696968940302964484e+02 1.690355482825476656e+02 2.171695948786692725e+02 1.960363641465239652e+02 2.930566891688549731e+02 1.380341365242818483e+02 1.769912313914243214e+02 1.164985277343077996e+02 2.079184380436491324e+02 2.871364788135472850e+02 1.796231479741346391e+02 1.115892945700443875e+02 1.922852518794877028e+02 1.851500906627327083e+02 +2.894943401361737187e+02 1.972990286414578804e+02 2.801948561309920933e+02 1.993490085147259947e+02 2.539099743775018112e+02 2.972486389690005240e+02 1.162404922698449354e+02 1.801898545246462504e+02 1.283416456049016858e+02 2.289248555429664407e+02 2.419505668531598985e+02 2.755101537543703216e+02 2.786083442131507013e+02 2.461931811431258552e+02 2.699066237266536064e+02 1.088542193903703179e+02 2.302113104476973149e+02 2.158136503417114227e+02 2.797451432348925096e+02 2.832754349673875822e+02 2.207567008139471909e+02 
2.920947868166995249e+02 1.300092217647513735e+02 2.953259288980694350e+02 2.539624465668687492e+02 1.304833679125420645e+02 1.051395153781939484e+02 1.855592224876973830e+02 2.160289702497469477e+02 1.227895712666205981e+02 +1.029685235386965587e+02 1.410297052380113882e+02 1.832105986621241982e+02 1.016727951098498579e+02 2.130361696974732126e+02 1.817578553203918830e+02 2.644724203174304193e+02 1.713346250427240420e+02 1.297164370175517547e+02 1.072810924841072193e+02 1.083932811014470161e+02 2.860684171745337494e+02 2.893854146138399983e+02 1.677808320623732925e+02 2.343535290724524600e+02 1.209564642240636090e+02 1.329537830609780542e+02 2.924542956964438645e+02 2.733376468658280487e+02 1.397146179999238598e+02 1.103570089598620285e+02 2.231457082965310690e+02 1.056672424832338635e+02 2.887779644840117612e+02 1.127167878193751704e+02 1.387640376146708263e+02 1.791595456124304633e+02 2.709107895779202408e+02 2.238624693992912569e+02 1.773395240564728397e+02 +2.317578772498348769e+02 1.294950944138938667e+02 1.126253428029936572e+02 1.371351849575549693e+02 1.785990678455200964e+02 1.021081186758702444e+02 1.471984209931611360e+02 2.907355141803875540e+02 1.881128962816476644e+02 2.776434621780599628e+02 2.231668573818950279e+02 1.905362514139340817e+02 1.921875823712000226e+02 1.027725913116546792e+02 2.939602582690168902e+02 1.776540079128602656e+02 2.761214484196684111e+02 1.042033722248946646e+02 1.812858538041361385e+02 1.739774673118114663e+02 2.626640185867897799e+02 1.702975408841979288e+02 2.558138050153142729e+02 1.733257751657050392e+02 2.918973111180089859e+02 2.499103812623473857e+02 1.210050998380505973e+02 2.819910650801346605e+02 1.887952629909842699e+02 1.910084514453274380e+02 +2.212539479167726029e+02 2.774434360961662378e+02 2.337566454731646104e+02 2.345785537275947661e+02 2.365459264006348405e+02 1.983982238092833086e+02 2.030822332599765332e+02 1.995891111618029186e+02 2.834365683300363798e+02 1.036872616932399609e+02 2.192093181482490252e+02 2.601252995545215754e+02 2.498786393235831724e+02 2.102914196276636858e+02 1.344974807588668000e+02 2.319076536245909210e+02 2.769341510052834110e+02 2.705990780330756138e+02 1.679097240924248240e+02 2.394521666103182724e+02 2.042111123157340842e+02 1.679545908808316028e+02 1.638112120198904051e+02 2.498667640522866407e+02 1.298749690282424183e+02 2.953546510122243944e+02 2.420377599473625025e+02 1.972281420856064642e+02 1.511153679243939223e+02 1.785899871179086063e+02 +2.568297621323404201e+02 2.469847896802298237e+02 2.766623631158322496e+02 2.476135901735717937e+02 1.788596740963971570e+02 1.849716544556056874e+02 2.568516536462929594e+02 1.692762419184084877e+02 1.468834240718183537e+02 2.716053370235183593e+02 1.674083895790932957e+02 2.340636951853666687e+02 1.637725360284847227e+02 1.316562872243186177e+02 2.850086566701365882e+02 2.066513343106022944e+02 2.990778363456342390e+02 1.780020440519503495e+02 2.906711993591478631e+02 2.149926413975278479e+02 2.151504627144789765e+02 1.458362697904619836e+02 2.339644011324822657e+02 1.740513991402896181e+02 1.804876886135730842e+02 1.706585538790989176e+02 1.113370339871644603e+02 2.032819788543359039e+02 1.225434838619497526e+02 1.558188197132453183e+02 +2.752385657001058803e+02 1.704994416021052643e+02 1.607090409105587696e+02 2.031247490318933444e+02 1.333383797740430339e+02 1.922643047184382112e+02 2.665685682619526915e+02 2.611043497447243453e+02 2.444450591022788615e+02 1.012899678037660181e+02 2.236752860048796947e+02 1.164606756896235993e+02 1.768812782093617955e+02 
2.532808672341815850e+02 1.308823477633827395e+02 1.683394957344131626e+02 1.787390150786144716e+02 1.962681762314343530e+02 1.178176219749694980e+02 2.151624908275416885e+02 2.951256579216935734e+02 2.058583926262361388e+02 2.348769662163374790e+02 2.500118096543036472e+02 2.065978549387351109e+02 1.732426267043477139e+02 2.575950640438621804e+02 1.826939497339359946e+02 1.586062531006688801e+02 1.141086110094916819e+02 +2.107478059550890066e+02 1.212326460542207940e+02 2.154852140069355073e+02 2.624147598788578648e+02 1.169795422214265699e+02 1.682202484364929660e+02 2.987700686247625299e+02 2.259973608163532504e+02 1.912690930240648015e+02 1.896338093439390775e+02 2.747727757049322008e+02 2.388804299971102978e+02 2.538821160842531128e+02 1.839990833334872491e+02 2.839611350159472067e+02 2.953225980324958755e+02 1.674336071760058076e+02 1.609172697163818953e+02 2.902596210806400450e+02 1.513824951234124114e+02 1.873458283487339600e+02 1.695960935104061491e+02 2.116215526550050470e+02 1.849422962892989233e+02 1.434256749723924713e+02 1.304784783123307079e+02 2.632948417544853328e+02 1.656472047377057777e+02 2.303125851744007377e+02 1.681993961373014486e+02 +1.104191565760665128e+02 1.750924257030650040e+02 1.242494131306669090e+02 1.541741282893887899e+02 2.585460716706878657e+02 2.286423505464783261e+02 1.890990979891397501e+02 2.707781238779197679e+02 2.619171833457787670e+02 2.695823002806438353e+02 1.941989480397771786e+02 1.389058748786196134e+02 1.283479072532797431e+02 2.347481590897206729e+02 1.518985431591505630e+02 1.757095590143896402e+02 2.225334593093496096e+02 2.231309387578290568e+02 1.039310896134069395e+02 2.614149485334186238e+02 2.212890027388380076e+02 1.425609106790709859e+02 1.376620423520403733e+02 2.403640719649376933e+02 1.152284694789922526e+02 2.108068210397188409e+02 2.526640691383259991e+02 2.323633859683563969e+02 2.720522122905912283e+02 2.498034621012949685e+02 +2.223449436042899947e+02 2.823923482876032267e+02 1.728419664392092727e+02 1.542710015610415724e+02 2.699062389875002737e+02 1.776741825057288793e+02 1.800001384193664080e+02 1.819433000632012636e+02 1.436484983468620840e+02 2.344086094824976954e+02 2.824459866922626361e+02 1.860318500101035681e+02 1.749968777772715498e+02 2.792448396035428004e+02 2.134719239619671498e+02 2.649346822194891047e+02 2.535109715864082602e+02 1.651109960016319178e+02 2.407385671793928736e+02 2.276937454871455770e+02 2.965404491761371446e+02 1.771850291606413634e+02 2.317902380753697855e+02 2.233400563607936817e+02 2.471010629200553694e+02 2.999085009765063319e+02 1.263611681933084725e+02 2.954593528043474180e+02 2.279026703099021915e+02 2.630592311905735414e+02 +1.662671322607742752e+02 1.600442354914371208e+02 2.476541290397616137e+02 1.471310870365195740e+02 2.302232198157895198e+02 2.833854716762933776e+02 1.464787719165046553e+02 1.913553080525503560e+02 1.014594285276723156e+02 2.182963956218923158e+02 1.629807715448000636e+02 2.692152036144454428e+02 2.287521686048013976e+02 2.982465613581407524e+02 1.646080094271899839e+02 1.685350412843276899e+02 2.638506951547767585e+02 2.931520510309920837e+02 1.395453733045734168e+02 2.192750645467382355e+02 1.118562057344099543e+02 2.210439168983162972e+02 1.977199388190010438e+02 2.248771354041466566e+02 2.967583759675493411e+02 1.144799677712354793e+02 2.877369511761256149e+02 2.831237961244747225e+02 2.909105411130262269e+02 2.550977837950437390e+02 +1.519738194711488006e+02 1.042788193386050608e+02 1.298121344332743377e+02 1.827398187867084971e+02 2.371985543371917800e+02 
1.647119082252074236e+02 2.792046599520904238e+02 1.737333830141970452e+02 2.019611337599129968e+02 2.402390448779260623e+02 2.107045415433176174e+02 2.447101973248666411e+02 1.584507446746840174e+02 2.877533155913679366e+02 1.209142860803932251e+02 1.903846717728129931e+02 1.485923447895592631e+02 1.040627746119376695e+02 2.329784390325348795e+02 1.136264746597146882e+02 1.019818146651219024e+02 2.395077159260278847e+02 2.571474008697522322e+02 2.507839876514990465e+02 2.649762964978717719e+02 1.398370322453145889e+02 1.116668292809188614e+02 1.262068209877756289e+02 2.561228606182183967e+02 1.019925993853918413e+02 +2.525550526067758881e+02 2.649927164229666232e+02 1.457764901336312846e+02 1.519121804298574148e+02 1.112983565335166247e+02 2.979018464293943680e+02 2.517559946611144142e+02 1.257251989750113239e+02 2.377842966816966737e+02 2.692916709774201536e+02 1.558791612193160745e+02 2.988101508442036334e+02 1.264682305510686575e+02 2.586186621657187743e+02 2.397705732393993969e+02 1.799773948514575750e+02 2.289212202830902072e+02 2.551439950194432242e+02 2.270410183155361210e+02 2.624250216967006395e+02 2.894508375480465361e+02 1.106681053253299183e+02 1.696755343387707171e+02 2.302155275158106917e+02 1.445113211107399138e+02 1.886794441144848236e+02 2.129906512422033131e+02 2.340704769023953986e+02 1.082933010325512981e+02 1.977265970892881626e+02 +2.874406426475449052e+02 1.913451373833616742e+02 2.647704607931181044e+02 1.881279366057496532e+02 2.840067538093052804e+02 2.179159896935567247e+02 1.839859875309309132e+02 1.189702187115672132e+02 2.794517441847542614e+02 2.815599370853284427e+02 1.258259904677427699e+02 1.428483537633051412e+02 2.541426109645265967e+02 1.338781623221585164e+02 2.877181693280556374e+02 2.041742222547631513e+02 2.429167887622087392e+02 1.861891141000048435e+02 2.815058357304060337e+02 2.932279451804108703e+02 1.428092602118218792e+02 1.129541128601477595e+02 1.104970415865426503e+02 1.361068733124779726e+02 1.702082770497633533e+02 1.583852379729134157e+02 1.614070717213254511e+02 1.054529192214523476e+02 1.116913943762218366e+02 1.806474879921846366e+02 +1.904583320230821926e+02 1.477903225290235980e+02 2.926623631581093150e+02 2.267002240281469199e+02 1.643763662729302268e+02 2.199235242233247902e+02 1.853923849032223359e+02 2.941726936508506469e+02 2.665966841434134835e+02 1.199566433868006357e+02 2.951991052054676175e+02 1.594510101065885124e+02 1.458298791153635534e+02 1.532145001211049475e+02 1.411023254500616133e+02 2.140513226665028128e+02 1.678784758049908419e+02 1.708308530430679184e+02 2.099440033407245778e+02 2.664570659333852518e+02 2.959905162222905801e+02 2.829445582187913715e+02 2.588706049990775000e+02 1.722199615074994483e+02 2.869184560072056343e+02 1.681559218785307053e+02 1.503240659973911306e+02 2.588597461006905291e+02 2.678295026364270939e+02 2.154561503934444886e+02 +2.071927904539387839e+02 2.171736003654224305e+02 1.593735315924418785e+02 2.947356579175152547e+02 1.742775794491871011e+02 2.184611101357660914e+02 2.225198306238390842e+02 2.168369296352294668e+02 1.755672175076374231e+02 2.252214925755263835e+02 1.563369877784152209e+02 2.085332604119019209e+02 2.572482649031854862e+02 2.951800051631508950e+02 1.079183556031880329e+02 1.218838648771928774e+02 2.685371616407055626e+02 2.419162624723466877e+02 1.022244855205179022e+02 1.101224552326326602e+02 2.597819405832950679e+02 1.134555412120959517e+02 2.870491931154815575e+02 1.374365654160442318e+02 2.645641258978021142e+02 2.531141673781916666e+02 2.361747183362105886e+02 
1.893108861581111171e+02 1.539026912190118139e+02 2.501170032332128415e+02 +2.547888423116186232e+02 1.853670755857669974e+02 1.389074705955763420e+02 2.709929622842061008e+02 1.228800068832790515e+02 2.778321736112652616e+02 1.309641642706778555e+02 1.156980811627219055e+02 1.431313378740429982e+02 1.646591400066212714e+02 1.920182917083556049e+02 2.178001706163468043e+02 2.235489712948179886e+02 1.079088316874027242e+02 2.447091545393394370e+02 2.320303973549428065e+02 2.359105911115680101e+02 2.382951907588607128e+02 1.062067779247245483e+02 2.905379355334102911e+02 2.023335418134440715e+02 2.128348219019524095e+02 2.865957710750057004e+02 1.782427960783044796e+02 2.856139874187100531e+02 1.139905905655008098e+02 2.264676166669663360e+02 2.479179013019825675e+02 1.746165350218777803e+02 2.255842464851874070e+02 +1.883869033800616819e+02 1.965817072065136699e+02 1.890868666652849015e+02 1.898737766004000491e+02 2.779218373710688184e+02 2.134628932560298722e+02 1.100835458783813436e+02 2.768750976313177148e+02 2.547073561014202880e+02 2.728160162818061281e+02 1.733645011505617504e+02 1.625036971255624394e+02 2.977754324167240156e+02 1.632372616873928450e+02 2.174045665187836107e+02 2.606964806055048030e+02 1.625508452643421720e+02 1.715067940576683441e+02 1.218481476549646629e+02 2.842560845538128547e+02 1.928678337146606623e+02 2.708765321293922739e+02 2.077020047066411621e+02 2.923591890868326004e+02 2.230876482822842206e+02 2.689925468225608256e+02 1.036588336737814586e+02 2.052618530546818363e+02 2.648220111560104897e+02 1.868396012623422280e+02 +1.785937212608853315e+02 2.973454718025594161e+02 2.368986004504845084e+02 1.146953890760472348e+02 1.265905165006724644e+02 2.255973396401841455e+02 2.163675674740596264e+02 1.527913853500098185e+02 2.283358642424602465e+02 2.759303134283557597e+02 2.876072117803540777e+02 2.029362495845153944e+02 1.212425121544320490e+02 1.100001317370093830e+02 2.335268996183764330e+02 2.375268130741384027e+02 2.336339660612213436e+02 2.462747325703657282e+02 2.841981652294566061e+02 1.081959034831858446e+02 1.291296469376330833e+02 2.602425849072438950e+02 2.575669438145553727e+02 2.135342654708205714e+02 2.294373105308322067e+02 2.706502840281193016e+02 2.928412927772634475e+02 1.330151104176747765e+02 1.533759962548247131e+02 2.744006234275867655e+02 +2.257735103076358882e+02 2.728385269717355186e+02 2.290872800510813363e+02 2.330934692803050154e+02 1.037274604992595215e+02 2.674079561164307961e+02 1.195755645916240866e+02 1.402804464035359047e+02 2.170516922702277611e+02 2.744725918691634661e+02 2.930458735600458908e+02 1.496408395971007224e+02 1.595562419103408729e+02 2.835538666488008630e+02 1.780163567793609332e+02 2.906408145890961237e+02 1.133853019218590248e+02 1.494630592331960770e+02 1.214592101712915451e+02 2.263015460193574881e+02 2.598100406717117608e+02 1.963383361449393192e+02 2.235083985338561376e+02 2.946475410923074492e+02 1.758055989844200724e+02 2.637780439251395137e+02 2.875400021086666698e+02 1.577781508415756662e+02 2.146553072676672684e+02 1.798181279868336446e+02 +2.620574340171276617e+02 2.153711882285265915e+02 2.245961661539886904e+02 2.054509343172356921e+02 2.926008719008261210e+02 2.432564531143420652e+02 2.303655720936658611e+02 1.615953803481287991e+02 2.918921003884012748e+02 2.760746977013722017e+02 1.909442200188182710e+02 1.596536528765051060e+02 2.491411570718119037e+02 2.924629085319008936e+02 2.587604848561293807e+02 1.524605619386706792e+02 2.737599884275671798e+02 2.090365453766356723e+02 1.610548024559351461e+02 
1.018774121963877803e+02 2.410901898572944049e+02 1.875862586601133444e+02 2.588626077539996686e+02 2.579873618626863845e+02 2.838744453525392828e+02 2.580071516854936817e+02 2.114887112935771256e+02 2.675506009048368696e+02 1.260391751775616029e+02 1.858866479221875920e+02 +1.963224789638335892e+02 2.444908535968891954e+02 1.962779352478895589e+02 1.553096436749702889e+02 2.483662294276224429e+02 1.067992874414757978e+02 2.633849667942634483e+02 2.454321751613854588e+02 1.854433418739394028e+02 2.562889653665436072e+02 2.506342746416453622e+02 1.900819942764665598e+02 1.704565979131312474e+02 2.916979173024495822e+02 1.898592592817412310e+02 2.687872145548625440e+02 1.525347862509104004e+02 2.786582104923993484e+02 2.310813531087783872e+02 1.166208530157265386e+02 2.602471623613457723e+02 2.102772607982462034e+02 2.183751071150112466e+02 1.065011561509572999e+02 2.813176394708128782e+02 1.792292558016025623e+02 2.804083600455996361e+02 1.557890480883644102e+02 2.439522159916458861e+02 2.652201783594097719e+02 +1.425266334964659904e+02 2.075049705342416928e+02 1.704914602333145126e+02 1.886474594627911756e+02 1.252313163849750595e+02 2.836097447326676502e+02 1.406399617929505439e+02 2.414245225193989768e+02 2.576349788827002385e+02 1.486724691707949262e+02 1.092388214497626961e+02 1.685935770192617724e+02 2.033388664740227227e+02 1.390809359458484948e+02 1.056188661648174758e+02 2.350581131530574055e+02 1.964295662906907012e+02 2.578831766420791496e+02 1.109952979966328144e+02 2.027546721440710940e+02 2.501377690830167637e+02 2.111868593440530617e+02 2.324728205186171692e+02 2.453971856382445935e+02 1.723822394524685819e+02 2.872924628066301693e+02 1.140766727214026446e+02 2.221345013854892159e+02 1.728173248741775296e+02 2.676400838220500873e+02 +1.711571121866394947e+02 1.085759247733173396e+02 2.001753766691515750e+02 2.760446855018309407e+02 2.056587091496190567e+02 1.121827347031253197e+02 2.274644480946081444e+02 2.571858980756533128e+02 2.945439217283808375e+02 1.913312305877045674e+02 1.500446430731354894e+02 1.650397772114545489e+02 2.581660073502400792e+02 2.094009769144933273e+02 1.731816092302842094e+02 2.727903589313663133e+02 2.606648610353666982e+02 1.460656197586831695e+02 2.016951883706858268e+02 1.247477859691891240e+02 1.732157361502286221e+02 1.195560196858487245e+02 1.253893910664414904e+02 2.455457677441618216e+02 1.778732818035962850e+02 2.490436815297808266e+02 1.487573988963908960e+02 1.937302250034929898e+02 1.502426775501600389e+02 1.110841009912817583e+02 +2.382535443835092508e+02 1.972031918916456732e+02 2.576267295349729807e+02 1.730194312205534288e+02 1.301593684828995094e+02 1.624008376323430127e+02 2.060036399923972681e+02 1.233366573394677630e+02 2.194763391620297739e+02 1.701495187616251314e+02 1.223397596968992218e+02 1.987622577877627350e+02 2.511738650001373117e+02 2.130204435763062634e+02 1.993899817227978133e+02 1.597764561560970265e+02 1.205224890815559604e+02 2.184250491898233690e+02 1.755709834516516139e+02 2.741081010321077542e+02 2.104755291992826187e+02 2.698148014221883386e+02 1.299106544858947814e+02 2.008369880697999292e+02 2.938716155581552130e+02 2.671516623028076083e+02 1.332347035771324215e+02 1.291435420390463378e+02 1.835021202063177554e+02 2.002866194329941720e+02 +2.554906544300547182e+02 2.365682876454178540e+02 2.924004211094360244e+02 1.662852505275750730e+02 1.123350814405425808e+02 1.910015128879867632e+02 1.341551373493250594e+02 1.313122940860927770e+02 2.397311819484906152e+02 1.559268654058377024e+02 1.407120959783594003e+02 
2.371419051640040152e+02 2.217591327496910480e+02 1.881187811266301537e+02 1.632462641154496907e+02 2.970940639140721373e+02 2.422917505999918433e+02 1.356966040631749593e+02 1.702398486895437486e+02 2.608644720933497183e+02 2.783751927848827563e+02 2.951746624002826138e+02 1.720706565846523688e+02 1.275268866601749096e+02 1.880990845238362681e+02 1.129502795714700625e+02 2.919985401845127626e+02 2.747497807112307555e+02 2.667734033775608395e+02 1.373740617490475699e+02 +2.115416415080857746e+02 1.431719947715498336e+02 1.718744824503889674e+02 1.075365968452523902e+02 2.220100335193473029e+02 1.965127222891928795e+02 1.062726056237197838e+02 2.631794488147562561e+02 1.658640190278337627e+02 1.169182569761068464e+02 1.645780782039788619e+02 2.940728738870184316e+02 2.979920277570993790e+02 2.125849825405138631e+02 1.533327700316632161e+02 2.655551337415409421e+02 1.329075684859120088e+02 2.686536376777100941e+02 2.299223677315555676e+02 2.123135030200585334e+02 1.474417961566917654e+02 2.899688778344954017e+02 1.439992490259426461e+02 1.606165457016644780e+02 2.854253601360321682e+02 2.837928223954166924e+02 1.868865943198568402e+02 1.809928275876523571e+02 1.583918020284682484e+02 2.384217495701244331e+02 +1.181670050605631417e+02 1.525653020190297582e+02 2.615084872177121724e+02 1.755024420886775829e+02 2.989795566898581001e+02 1.573585789513378188e+02 1.903575226478752711e+02 1.641861715477102166e+02 2.943146494922903003e+02 2.038802368327418719e+02 2.581560000437879694e+02 1.504995935930718076e+02 1.095655891680627008e+02 2.628623226127134558e+02 1.069018430130149255e+02 2.750818506761686422e+02 1.121786007219489818e+02 1.106710601660877415e+02 1.217291564359016149e+02 2.915199334459504144e+02 1.325859381653097557e+02 1.737237090326784141e+02 1.036075961875061751e+02 2.392327113385031510e+02 2.486092083099548233e+02 1.259492139939950306e+02 2.665249241620523435e+02 2.103119814995928039e+02 2.718465347096271216e+02 2.018653364759854298e+02 +2.085808638159350608e+02 2.977621083099649582e+02 1.394173606621695285e+02 2.232898484647512873e+02 1.347812725162832521e+02 1.574683348766579627e+02 1.827258429860655724e+02 2.827887224427595356e+02 2.608349632236463549e+02 2.370910079389979046e+02 2.033290260845359398e+02 1.566531500677691042e+02 2.982287288081304837e+02 2.998057140577807900e+02 1.906108269451214596e+02 2.023344526730545851e+02 1.717672594576409040e+02 2.093320563180507747e+02 2.649028095061802333e+02 2.840422446800275793e+02 2.111868958418739908e+02 1.803076798272542760e+02 2.311954915496957312e+02 1.563425451766251513e+02 2.610066662710300989e+02 1.855286443040786537e+02 1.478912573842241045e+02 2.544380211258828410e+02 2.799416317427427430e+02 2.238937193404353252e+02 +1.269470316997365131e+02 1.895539822645488357e+02 2.443421824114378467e+02 2.632321641240823737e+02 2.164919638664115951e+02 1.042697198382110884e+02 2.896061632271033659e+02 2.068164163046922681e+02 2.059671371408958294e+02 2.352532326493898722e+02 1.046233655847859296e+02 2.755187319279126541e+02 2.344641322699609987e+02 1.434858288567621969e+02 1.255438908126368176e+02 2.548141480364848803e+02 1.466719626681152704e+02 2.020892715394597872e+02 1.195107046056347713e+02 2.012968701954913797e+02 1.996902768982717191e+02 1.560547951636197013e+02 2.162555170020900164e+02 1.483278604161245084e+02 2.615607136845001151e+02 2.424344777210258997e+02 2.524090919470299070e+02 1.726167614603126026e+02 2.199373130240069258e+02 2.318614758097714912e+02 +1.590143031424979370e+02 1.933970326403360502e+02 1.227042846200323112e+02 
2.107086401017011781e+02 2.844049872407889552e+02 1.420899421875644464e+02 1.736571760246831673e+02 1.130876049831349661e+02 1.470306210908964317e+02 2.959723384067232246e+02 1.438030965279091049e+02 1.685928342779160403e+02 1.351720793691902713e+02 1.909711091249450590e+02 1.477005416416634205e+02 1.010528808923594681e+02 2.205493627613245167e+02 2.367352422049318079e+02 1.224997665062844305e+02 1.620949451166091251e+02 1.270634404764108467e+02 2.673321646154778932e+02 1.618882934467209225e+02 1.208967331765591524e+02 2.073956586593529607e+02 1.223277950209799059e+02 2.625820210851194361e+02 2.262632377752408672e+02 2.222881433937307349e+02 1.716205611551696961e+02 +2.376094214038359667e+02 2.287867757784330820e+02 2.035778067022395703e+02 2.546588007138803391e+02 1.514832565507949198e+02 1.736683542684334327e+02 1.991020520349750598e+02 1.873563480883249213e+02 1.589186331386689801e+02 1.042563150975229149e+02 2.019924784676414902e+02 1.136537158101241971e+02 1.091264020137841158e+02 1.352770409719844054e+02 2.178414513482917414e+02 1.831380105899948489e+02 1.114225947990316570e+02 1.736029819106907439e+02 1.354612112967272424e+02 1.996055424300992627e+02 2.905125217944571432e+02 2.980326934372309893e+02 1.560898949881966473e+02 1.943286005606112212e+02 2.429797193518882636e+02 2.652714760000731076e+02 2.863852813340179182e+02 1.838252831614893239e+02 1.814799327205894315e+02 2.338290144642930954e+02 +2.526381992552952340e+02 2.089745531365245483e+02 1.869938021147821701e+02 2.864405091884094645e+02 1.736924996547539877e+02 1.479914815134324613e+02 2.132537252074255321e+02 1.830098172980584934e+02 2.476607236946428827e+02 1.066503395377639265e+02 1.405219898965278276e+02 2.743866427972425299e+02 2.269305408710248173e+02 2.791638036143738191e+02 1.824422387811073634e+02 1.852994662516045423e+02 2.777032940597408128e+02 2.109153407914434126e+02 2.214759900082639490e+02 1.857033490029854761e+02 1.302118293337227328e+02 1.889562709124264188e+02 1.844813915245081546e+02 2.875482403705134402e+02 2.022892465111445404e+02 2.230217175841083872e+02 2.843056043891419904e+02 2.350834055358549222e+02 2.080929758762673032e+02 2.770814576487081240e+02 +2.389430507965955428e+02 2.463651891862864147e+02 2.369578462650186452e+02 1.902366989508459199e+02 2.003468797600664004e+02 2.681735461841141728e+02 2.362787745532336601e+02 2.323782975776413480e+02 2.525302892415198812e+02 2.828059530799229151e+02 2.840327053185673662e+02 1.223941816187275435e+02 1.056255174412387134e+02 1.386503050117574105e+02 1.384325506562210535e+02 1.176641636239777426e+02 1.670688688422628161e+02 2.506322552784647826e+02 1.181229702988334083e+02 2.607048520072489737e+02 1.667476448166365515e+02 1.310085831735554223e+02 1.553111545647699927e+02 2.907454039462255651e+02 2.844644695877585718e+02 1.989933906493695019e+02 2.662036190025202131e+02 1.792754658114438371e+02 1.073664330563030944e+02 2.793141822468826376e+02 +2.640306978448612654e+02 2.458161373226257069e+02 1.015510894380497575e+02 1.527048938693112916e+02 2.893334394723561900e+02 2.994916089563248534e+02 1.054055716033572452e+02 2.278819528330843127e+02 1.890909183007994443e+02 2.134436011261824433e+02 2.654189934957544210e+02 1.780852604264427725e+02 2.222277079756825628e+02 2.689688042831336361e+02 2.232046857529678050e+02 1.778434593737022169e+02 1.336418515516146783e+02 2.739064893378349552e+02 2.065065746675076355e+02 1.329712924393647313e+02 2.176938186185978736e+02 1.918043587714230114e+02 2.280421349429639122e+02 1.182282112372680842e+02 1.370131137248831692e+02 
1.716251366233928195e+02 2.412427837766657888e+02 2.738208811966829899e+02 1.471415247536169488e+02 1.638288393831292353e+02 +2.669085627842696908e+02 2.477147782526785136e+02 1.718200513884793565e+02 2.299346472745743597e+02 2.016242169414389309e+02 1.631378839470685307e+02 1.859938403107781255e+02 1.609729169019194330e+02 1.536303039404505171e+02 2.234728543554556950e+02 1.953401084257108096e+02 2.920381588589057174e+02 2.034966688752892310e+02 1.019427894404581139e+02 2.980736970140829953e+02 1.738263823108001418e+02 1.531314323312329293e+02 1.400030133312995702e+02 1.802287961283190043e+02 1.719909696301723443e+02 1.974918793689569725e+02 1.666882741246514001e+02 2.879569025675030502e+02 1.334044307903087088e+02 1.016937569869423896e+02 1.660343944328368764e+02 2.214967229035601974e+02 2.539424882366704992e+02 1.211914878013190133e+02 2.835892388637473687e+02 +1.704109091340931741e+02 1.337843054639438378e+02 1.570106251098002588e+02 2.123587857442842335e+02 2.788290802167920219e+02 2.795601449888932848e+02 1.220747715539721696e+02 1.179984498565524405e+02 1.552783750686872963e+02 1.257256444039083192e+02 2.312614004137946893e+02 1.971625968209403084e+02 1.208837070227885135e+02 2.231693789143681386e+02 2.332576722664892941e+02 1.659208209363902711e+02 1.979623049620595907e+02 2.497459328714609512e+02 2.540243570817084446e+02 1.309045902221261599e+02 2.376613837929333499e+02 2.140333351750954023e+02 2.231625169053620539e+02 2.869160136215916737e+02 1.282002159167354023e+02 1.029173927424986488e+02 2.432034421383394545e+02 1.495648010251883306e+02 1.971910657968611247e+02 1.358409247687675361e+02 +1.833826243837603442e+02 2.960483510370855811e+02 2.343723986770386318e+02 1.560358896543934293e+02 2.499669478251469172e+02 1.762005778153444169e+02 1.918050503412152921e+02 2.089352602085182866e+02 2.770127170480132008e+02 1.268157216157417224e+02 2.670673189640755822e+02 1.547628252866769287e+02 2.602514896343354849e+02 1.557532905756793866e+02 2.574076233589491949e+02 2.646855654359934533e+02 1.749681240869035719e+02 2.465698370051858035e+02 1.076897610845538082e+02 2.337637497458482301e+02 1.791847918196868932e+02 1.967068388721293104e+02 2.340964493346380095e+02 2.762770912600988140e+02 1.174465260954359564e+02 2.950490567997024982e+02 1.354710376622284116e+02 2.342233227246520642e+02 1.617966271393036379e+02 2.107879984327653915e+02 +2.493754578342164336e+02 2.275093847135933061e+02 1.466148442335522191e+02 2.261697123059220189e+02 1.213252451599347950e+02 1.628949300801819504e+02 2.100466501082228206e+02 1.508908296808102989e+02 1.488199564735201079e+02 1.727131563468088302e+02 2.306747713688439205e+02 2.570279850661015644e+02 2.309125192178541113e+02 2.422081718543400370e+02 1.769407234272878782e+02 2.688532243604371956e+02 2.276780878660686085e+02 1.065345319601523641e+02 1.535069430280279050e+02 1.717902253122074967e+02 2.876755354986605084e+02 1.683056100689713332e+02 1.120105413679224569e+02 1.755508096146901664e+02 2.095863991316655870e+02 1.523590730880595174e+02 2.944635547123552897e+02 1.444697311944634066e+02 2.165062978405008494e+02 1.410128743297030098e+02 +1.434402193906418006e+02 2.368914090178307106e+02 1.963465933374949941e+02 1.914557752364961516e+02 2.870767419320768568e+02 2.044699144835463187e+02 1.223520556576680036e+02 2.352284247043744472e+02 2.917945011866975165e+02 2.225925999946875322e+02 2.240309397680480288e+02 2.048455962243571093e+02 1.188048963943729035e+02 2.200553599997707579e+02 1.885605934416515765e+02 2.863412817843446874e+02 2.913876692311304737e+02 
2.446563674684449552e+02 2.981153955140326843e+02 1.111775924383378253e+02 2.239868361016714857e+02 2.540473271011064469e+02 1.343930974769885438e+02 2.368686732696482409e+02 1.175691554116390591e+02 1.014879352562223715e+02 1.330784448687188046e+02 2.045426156006566885e+02 1.168174380391246245e+02 1.704438548713551995e+02 +2.696784010384477597e+02 2.991318545155386346e+02 2.120364825583467336e+02 1.950895785161033018e+02 1.216112431291165592e+02 2.438998438799096391e+02 1.588292735755803733e+02 2.347670069791354024e+02 1.862846309471772770e+02 2.258642611266068343e+02 1.423367506635381119e+02 2.692888471853933083e+02 2.950212092401994255e+02 2.331327670110776467e+02 1.542291422318579635e+02 2.809064569107727038e+02 2.358857646534314654e+02 2.378124255062788563e+02 2.664164586086786812e+02 1.387157904298663880e+02 2.297158046581682243e+02 2.386372312695162634e+02 1.246509391338716171e+02 2.338956320284196408e+02 1.820257170558419944e+02 1.957425768708682767e+02 1.680974560138464540e+02 1.288235048549348676e+02 1.483029350020115089e+02 1.744880718659300669e+02 +2.512494238114035738e+02 1.112846425403449615e+02 2.472643304237797395e+02 1.241745840646870818e+02 1.808849124644312099e+02 2.524760780760417731e+02 1.836118621524309447e+02 1.408362492891266982e+02 1.099623406752946693e+02 2.383967522197594064e+02 2.436606913384966049e+02 2.770699525768120566e+02 2.597573569531676867e+02 2.935649366424795517e+02 2.702790297508025219e+02 2.563597369995835606e+02 2.279477293752616447e+02 2.477470305460766440e+02 1.962131167814513333e+02 2.859744526791636190e+02 2.703401534622389590e+02 2.763052603711840902e+02 2.934416645125817809e+02 2.193475948646207030e+02 2.822891098008749395e+02 1.085391177109117820e+02 1.782208012387337703e+02 2.335496863699061976e+02 1.715066387390946829e+02 1.948062204233656303e+02 +2.879262290016004613e+02 1.676743911135137068e+02 1.403503828589753937e+02 2.744454339345198832e+02 2.935124358491533485e+02 2.920282649929100671e+02 1.390240222956847447e+02 2.426642861805074745e+02 1.217336684570653489e+02 1.311823750440439085e+02 1.647679902066092836e+02 2.962811279981685288e+02 2.945746172932865647e+02 2.005257587949587332e+02 2.072045953580022228e+02 2.893049469033056766e+02 1.913962360581630833e+02 1.823675529874825543e+02 1.830342103129283373e+02 1.222396004373517400e+02 2.248239872372262482e+02 1.170253438297526429e+02 2.853825568202013301e+02 2.214973458763422514e+02 2.563932510909227176e+02 2.144837192650675206e+02 1.793062298958048473e+02 2.920176466690815005e+02 1.515607839109829627e+02 1.981203765908239802e+02 +1.733053660232129403e+02 1.312183264386245583e+02 1.276233157677672807e+02 2.020942572504836789e+02 2.314817368496994732e+02 2.242589617101967008e+02 2.160504620978007893e+02 2.360595788588375399e+02 2.952977074031120992e+02 2.334652590044975682e+02 1.243453875174208747e+02 1.916144242306085630e+02 1.092365115042800596e+02 1.478765005471206280e+02 2.191946613400726278e+02 2.879274886834762697e+02 2.733443652356662597e+02 1.858481832262083344e+02 2.193747651131673706e+02 2.695165737089945424e+02 2.960753121523491700e+02 1.890691006834304631e+02 2.638343907584013550e+02 1.510492177865631334e+02 1.878288206285384661e+02 2.726561149875388992e+02 1.704246795027074199e+02 1.006381753343381718e+02 2.153734239260733148e+02 2.551451126036402854e+02 +1.591849792872858984e+02 1.304671215023752779e+02 1.427456440770346831e+02 2.882324895344759170e+02 1.680635293254793510e+02 1.205800311663507642e+02 2.861305963205076637e+02 1.219224106654408928e+02 2.467003871618023538e+02 
2.830287806498602095e+02 1.445950870572595193e+02 2.496562286252286640e+02 1.464987579205844099e+02 2.848280464142704318e+02 2.785616857190397013e+02 1.837468579783306950e+02 1.246964377230690673e+02 1.251791080124520050e+02 1.496399061799681363e+02 1.375936265087168522e+02 2.547928467777094852e+02 2.554856419260690927e+02 1.285559318166884850e+02 2.092144446410586909e+02 2.868951534942014518e+02 1.178319347908447270e+02 1.347784205269015274e+02 2.851299399919766984e+02 1.754694686670390809e+02 1.016886128619324694e+02 +2.606618423405234353e+02 2.125366732076933545e+02 2.822772640751277322e+02 1.096405633955119185e+02 2.437561663288932721e+02 2.129146561548243994e+02 1.148823764090175530e+02 1.516868774610028368e+02 2.090025176018670265e+02 1.817684320186263562e+02 1.584667226055155709e+02 1.501973711988126468e+02 2.530199923706828713e+02 1.847948752811591930e+02 1.778871618489498303e+02 1.664551902511519188e+02 1.100020157933824265e+02 1.352000835393275509e+02 1.710981737682794801e+02 1.530513645967782566e+02 2.588476693974693035e+02 1.775587245068043956e+02 2.006331886716666588e+02 1.389709403689849694e+02 2.489553638298030194e+02 1.673604491791948021e+02 1.991154502489720812e+02 2.423848982654565418e+02 2.882603768001737308e+02 1.620650086718309240e+02 +2.723642490909132903e+02 1.680927290528325670e+02 1.005734627393615455e+02 1.598916606218045047e+02 1.672547346703738071e+02 2.361420151042074451e+02 2.741857058408131707e+02 2.533004150866734392e+02 2.036092771261417340e+02 1.091915011443997230e+02 1.145604210422382323e+02 1.209982156413156247e+02 2.749595368914399387e+02 2.177794513808643160e+02 2.054163746311436967e+02 2.185860861470465579e+02 1.504022045473846845e+02 1.713704456854883347e+02 2.175221629008602804e+02 1.230663148243889253e+02 2.419648244223723168e+02 1.383010418990747326e+02 2.040260833828849059e+02 2.966316994044250919e+02 1.630596872908637351e+02 2.562534082821714492e+02 2.549425872735235998e+02 1.983522705781282127e+02 1.524860865223137694e+02 2.736848821358530586e+02 +1.277021385004174192e+02 2.448445434866889343e+02 1.296687360965440803e+02 1.874271582575348702e+02 1.145742775945452792e+02 1.884744688522491742e+02 1.336298647132909423e+02 1.523816963142488419e+02 2.658270705367647224e+02 1.781637174983711134e+02 1.154610011723892171e+02 2.005342781476718415e+02 1.303166615041172918e+02 2.397284110571510496e+02 1.612912854182502542e+02 2.821645080329541315e+02 2.544831471501324813e+02 2.622237400581972224e+02 1.417212269902922230e+02 2.054005404298748658e+02 1.092142219674599062e+02 1.652051184306486107e+02 2.825679563619778492e+02 2.056286073102957630e+02 1.772062144904277545e+02 1.163520479257007310e+02 1.006186351926139366e+02 1.734025793931427586e+02 1.446958902579306709e+02 2.025820689614877779e+02 +1.798382687901162740e+02 1.604629760861514001e+02 2.668981169240885265e+02 2.763242846779806996e+02 1.318105471716862098e+02 2.191362245125996537e+02 2.770758446308884686e+02 2.308910816293108326e+02 2.956895796828827656e+02 1.566426856848869988e+02 2.326210561246332418e+02 1.206555816723871715e+02 2.603144096756907970e+02 1.172571782204154829e+02 2.219493974369055991e+02 2.385109304229506790e+02 2.599678734377965839e+02 2.850516346518521686e+02 1.472948582444382168e+02 2.234296740595885922e+02 1.427895312415343199e+02 2.848238578369252423e+02 2.260232767550441508e+02 1.544648385858973541e+02 1.163971462755376791e+02 1.762731012775239492e+02 1.089523563056807660e+02 1.663966154222005116e+02 1.342495772836978745e+02 2.922401077696804350e+02 +2.806557294060240224e+02 
1.077657131130299604e+02 1.622983596366119059e+02 1.723469481204717795e+02 2.678046848873893850e+02 1.442059922525422451e+02 2.629931208031973711e+02 2.741083495447689415e+02 1.194142462414748707e+02 1.688961325073638022e+02 2.967954354880449728e+02 1.822107331135221671e+02 1.292333403080546645e+02 1.856814508383810391e+02 2.103923137448445573e+02 2.517859299913771451e+02 2.551152596962431574e+02 2.077883190793959898e+02 2.986930461834413677e+02 1.196764061335889551e+02 2.378823960447958257e+02 1.692017967083341432e+02 1.471250494556689432e+02 2.608355254883699672e+02 1.757172426071724942e+02 2.629426236813185369e+02 1.040244734248400533e+02 1.533558690719498827e+02 2.011860465194789072e+02 1.720545334339216765e+02 +2.966488050331527688e+02 1.809989340563203086e+02 1.871527370563514978e+02 2.315558973515319394e+02 2.657682292004950000e+02 2.237816732699509998e+02 2.282045922056215090e+02 1.846236325909775928e+02 1.644827554373339353e+02 2.760250360653360531e+02 2.492622345937652995e+02 1.483432536002697191e+02 1.527550390024584601e+02 1.573429964258168070e+02 2.090721206423400247e+02 2.535819867756219708e+02 2.420536340362719159e+02 1.691914404667937788e+02 2.388696721384086459e+02 2.593840245957078423e+02 1.331872961625781500e+02 1.116342264469163581e+02 1.680964276125217793e+02 1.555020753508222526e+02 2.422052215908822177e+02 2.626184375196450560e+02 2.674230788003709449e+02 1.948146659156083729e+02 2.663681889818526543e+02 2.795342087705012659e+02 +1.674728956867265310e+02 2.635505920196726493e+02 1.395353777027027604e+02 1.883233466008314565e+02 1.249441512057495913e+02 2.512189370435067417e+02 2.719913755602378842e+02 1.237326636617429614e+02 2.939951219495833357e+02 1.686366002602222807e+02 1.800181056076297068e+02 2.288525977776352818e+02 2.717306800175948638e+02 1.565292507387619594e+02 1.445460932655216766e+02 2.092313282690445249e+02 2.370375511382032698e+02 2.880525812713749474e+02 1.172567175017127141e+02 1.112412797274302250e+02 2.246954385922853135e+02 2.812359340959551446e+02 1.004168603505609241e+02 1.005387863078678805e+02 1.815971195408835683e+02 2.811251817522295937e+02 2.605765849402707772e+02 2.298114360271968621e+02 2.557293814584297706e+02 2.542416589790913122e+02 +2.943583269632734414e+02 1.442274778682184717e+02 2.700917391987959491e+02 2.527420049761408904e+02 1.527279900348522688e+02 1.841979337126335281e+02 2.902442440856567600e+02 2.889101481258517765e+02 1.828125218264408716e+02 1.133179379993730862e+02 1.484787634874768116e+02 2.676352293304336740e+02 1.452118425579454311e+02 2.636966617786087568e+02 1.313546620759107100e+02 1.834019443937838787e+02 2.892465421328221282e+02 2.575015388377624959e+02 1.970702343003932242e+02 2.507528167727347181e+02 1.724897096143170074e+02 2.664268628760375464e+02 1.365257050051324370e+02 1.198011035974838308e+02 1.176831988053894520e+02 1.070946883963453899e+02 1.964638491125322446e+02 2.570844982939356100e+02 1.593905150913052466e+02 1.202569936867807598e+02 +2.734271498156417692e+02 2.352133531486530842e+02 2.590835237087205769e+02 2.260994493040042528e+02 1.805421354394846105e+02 2.728408805160995598e+02 2.367263522625478913e+02 2.580210451062748689e+02 1.204524877415260562e+02 2.946465680607327613e+02 1.547220269335912803e+02 1.186203172746691337e+02 1.923878728892914864e+02 1.094127410697402354e+02 2.222837240826847278e+02 1.529333599077602628e+02 1.861450256630199647e+02 2.125583079944122176e+02 1.527591657960447264e+02 2.694001797345342766e+02 1.986063989766776388e+02 2.192493126389772442e+02 2.986827335637019587e+02 
2.790660387254000625e+02 2.781487003899754313e+02 2.564198676846006606e+02 2.597551240338123648e+02 2.358970425952163907e+02 1.951628676328612357e+02 1.078208269500064347e+02 +1.190762776130697205e+02 2.951075493308472346e+02 1.091043363430719069e+02 2.824365312299846664e+02 2.445811468414383398e+02 2.538090805786315514e+02 1.230092364266577363e+02 2.633887649939744051e+02 1.865216093980499181e+02 1.540388898662323243e+02 2.047343894245035756e+02 1.431412534309083640e+02 2.857794001060171922e+02 1.492366175285521592e+02 1.380934567887849198e+02 1.331831467466375898e+02 1.149412013934811796e+02 2.205070844660474734e+02 2.939252657951740844e+02 2.049464694042562769e+02 2.047902832862141054e+02 1.810793422252176015e+02 2.005356992447976836e+02 1.381400138775680375e+02 2.582445444487385657e+02 1.698212931623984616e+02 2.252085951830697468e+02 1.808378144669676999e+02 1.307311344108444473e+02 1.050024101356033697e+02 +1.722314120162143354e+02 2.530014253763471856e+02 1.298340795948372772e+02 2.948664870226410812e+02 2.383106068289312702e+02 1.822969205106659558e+02 2.285226769051377005e+02 2.759417691711663565e+02 2.120970517474504220e+02 2.831046044310812704e+02 2.320579821788242612e+02 1.286125039667014960e+02 1.609837368065715282e+02 2.931112965353385107e+02 1.441758663366052531e+02 2.810263276191118962e+02 1.239857273771131077e+02 2.399447548605567988e+02 1.460208836055017514e+02 1.205325462037979491e+02 2.112513935912650993e+02 1.036793750016967692e+02 1.113202625217208777e+02 1.646612561683649574e+02 1.018350908838390581e+02 1.263835026124204859e+02 2.766683711501553944e+02 1.682407929561517506e+02 2.677103056024840271e+02 2.147294480454548307e+02 +2.763536852866382105e+02 1.511976958084401872e+02 1.026794659371155944e+02 1.805990415690671398e+02 2.442493962549426385e+02 1.881796213041043018e+02 1.028768312506858535e+02 2.787706953534510603e+02 2.589640601731795755e+02 1.730107396932538677e+02 2.218419822849910190e+02 2.651646152747807719e+02 1.476149140151474342e+02 1.986450675254654072e+02 1.050693447352362853e+02 1.819666738706916931e+02 2.873544952103893593e+02 1.472060704631180954e+02 1.297023844405691761e+02 2.824778443572924971e+02 2.918073394139615289e+02 2.128134400148996974e+02 2.223096450508596149e+02 2.761940547406351811e+02 1.348708672340777639e+02 1.857009592938832441e+02 1.062906640064134649e+02 2.104442283262811202e+02 2.812954268214299418e+02 2.739038950945439979e+02 +1.837264129055918147e+02 2.399207190527903322e+02 2.843910623120511900e+02 1.773207161532972975e+02 2.056581469496123873e+02 1.558029517788254168e+02 1.458438122541016924e+02 1.893030782939712253e+02 1.139027557376393673e+02 2.228775749423569437e+02 1.367670384452707140e+02 2.854480456674787092e+02 2.424985140340279202e+02 2.940521113211518696e+02 1.330693282221190259e+02 1.212599008475133076e+02 2.754747741586869552e+02 1.062856492128348549e+02 1.212724485003486166e+02 2.100514698158626743e+02 2.547262582240854272e+02 1.999488755181088777e+02 2.578561029518564283e+02 2.784200494851090752e+02 2.728829168298310606e+02 2.071711407548560544e+02 1.708729380756020362e+02 2.726254883308487251e+02 1.104364015278258364e+02 1.175773277008901090e+02 +2.554381337818412305e+02 1.634513906120204183e+02 2.309962436793083214e+02 2.460443770945291249e+02 1.618890365991254896e+02 1.046310291743186980e+02 2.772116654811295575e+02 2.098555252827713957e+02 2.309383801112169863e+02 2.845300950466865402e+02 1.268119123926061320e+02 1.697885006171669602e+02 1.901887742560337529e+02 2.605757830463372215e+02 2.755463791239279772e+02 
1.771647294768940810e+02 2.403902735905423356e+02 1.774352552408031443e+02 1.796883744424403631e+02 2.736192366006921475e+02 2.118505050785533967e+02 1.873353967662169453e+02 1.802980863638028950e+02 1.869858546159753132e+02 1.200946851663063342e+02 2.350811068219035178e+02 2.018941614745772313e+02 1.010158706413519525e+02 1.661546933057649937e+02 2.570882207683835077e+02 +2.856134023048114159e+02 1.356279054667102741e+02 1.225310201562991494e+02 1.529777144242077327e+02 2.936506440162480658e+02 2.589580133771784176e+02 1.864782805190425279e+02 1.931182124516369640e+02 2.913608028278327993e+02 1.555662042949096531e+02 1.173676742008071301e+02 2.242990267171766732e+02 2.651338851871976203e+02 1.128980005738893482e+02 1.283582653966309408e+02 2.071495534530326097e+02 1.241509031508031740e+02 2.393403040292282640e+02 2.829812266966206380e+02 2.294799861563923287e+02 2.129576840814710295e+02 2.165539860914115877e+02 1.357366103660294243e+02 2.396252028023287153e+02 1.395106368224716107e+02 1.700689743264745744e+02 1.253435651632085950e+02 1.508112259783626428e+02 2.310267786371028933e+02 2.311667616985857876e+02 diff --git a/voice_bridge/scipy/spatial/tests/data/random-int-data.txt b/voice_bridge/scipy/spatial/tests/data/random-int-data.txt new file mode 100644 index 0000000000000000000000000000000000000000..4fd11b7509e65b01393a6af6125d1b304d524bd7 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/random-int-data.txt @@ -0,0 +1,100 @@ +-67 65 82 64 51 1 -12 2 -84 -52 12 82 -45 -84 -41 31 -49 36 -70 40 -74 -99 32 64 -6 43 -53 -43 43 96 +-58 20 25 99 -25 78 -6 59 -23 30 36 25 -8 83 -43 -7 -8 42 -90 96 46 88 31 12 68 -21 -6 7 78 -19 +-66 -51 0 13 42 -43 -30 -29 20 10 -24 -5 -42 38 -56 6 1 -80 -65 -91 89 64 -21 49 -84 41 6 -78 71 -2 +-50 -84 -50 -66 46 -88 -10 -28 -25 6 -7 10 -35 86 41 -17 72 -67 13 -67 -76 -84 -15 35 67 40 90 38 -1 -47 +-51 27 -48 26 -73 -46 -68 -56 -38 -4 49 -64 57 -86 -80 70 50 34 84 97 -76 3 -54 -89 -7 -53 15 36 -28 85 +2 -59 4 30 70 -42 -26 -1 27 -90 -18 95 -10 -36 43 24 86 -8 -100 92 80 -40 17 -93 -81 54 -8 84 -53 38 +-80 0 -71 -41 -33 9 -61 0 26 80 64 67 74 68 -72 78 -72 -52 -19 37 -33 -24 -11 -71 -53 -16 25 56 -74 0 +71 -23 49 -36 -43 -70 82 69 -100 -27 50 20 30 84 -33 90 49 39 -52 -51 -86 -76 -72 -88 12 91 -96 -61 -87 -47 +21 39 1 78 68 -80 -54 71 17 -94 34 -20 14 -5 -24 55 -84 -50 -90 -24 -79 -81 53 -50 22 -13 -92 78 -22 -50 +-47 -73 77 -93 -20 51 -37 -14 -37 -18 -8 -14 -71 29 -27 -5 54 77 -7 -2 15 -23 98 -34 -65 -78 -77 -90 -5 -35 +92 -33 71 24 43 -19 50 -40 -48 -33 -51 -14 23 40 -78 -14 -76 1 52 69 93 5 -13 30 -60 -20 -54 49 -52 93 +32 -86 21 -41 -86 -38 97 -35 -37 -89 -15 -18 -46 -37 8 63 -63 -61 57 50 43 -27 -45 98 -56 -81 16 -38 -25 -28 +-18 19 -52 -86 92 -72 23 35 20 57 69 -22 52 -66 -74 -29 -1 -10 -97 22 -97 -93 -70 87 85 -31 42 -29 -10 -36 +78 80 -93 68 41 84 -37 -62 38 -9 99 -60 90 47 -33 -40 -59 97 -28 9 35 -6 -60 -83 -39 -97 -25 -78 95 40 +79 -35 -45 -46 69 10 29 -88 98 -44 66 11 45 -58 -11 -25 51 -44 54 30 59 98 35 -28 93 86 99 19 -27 -83 +80 77 -72 57 -35 -27 86 -67 11 77 -28 -89 -30 -31 -72 64 -95 -75 92 -32 -96 -14 6 -83 -66 -58 71 -17 58 -53 +-1 17 -72 82 -57 -48 -7 -44 -80 85 -99 -9 27 -11 24 13 86 18 67 -9 12 77 98 49 49 12 -82 45 31 -68 +-13 -75 -26 17 91 12 -95 -62 -54 -60 22 50 86 58 -11 -11 -21 31 16 -15 67 90 1 80 -57 -98 35 -54 51 91 +28 -75 -31 49 0 73 75 -66 50 -77 -20 82 -40 -90 -28 32 -44 89 -75 -33 -11 -19 -55 79 18 2 -39 -49 78 -72 +14 56 78 69 -40 -20 -39 71 99 -89 60 -82 -1 -77 -42 94 -41 35 72 11 -13 89 -52 -41 -93 43 -39 -61 68 
-4 +88 18 -90 -75 -49 46 -28 -48 -69 -64 77 -8 91 -65 62 -27 -19 34 10 78 82 49 -34 63 78 -88 -17 -37 -85 91 +4 36 -77 -75 -12 70 42 8 7 -31 -69 -74 -65 18 85 -92 91 16 -15 24 -74 -56 71 -70 -90 20 13 73 -68 -65 +92 22 -31 -73 -59 -78 -20 -11 -61 36 -40 34 -96 -12 51 -45 -12 12 -3 -42 -71 68 -8 -91 50 -73 -96 -46 -38 -4 +-87 44 -58 -83 70 -81 32 29 -79 45 -64 -52 57 73 -80 69 7 -22 31 -71 -34 -33 47 79 -17 6 -77 -89 3 50 +85 2 73 -88 -99 -13 -76 1 -90 51 30 -52 75 -2 -8 10 -83 -40 -5 -79 82 19 79 94 49 4 66 -76 6 -48 +29 -34 66 -93 45 -1 -98 92 -92 29 -10 64 -23 -81 -73 -62 -18 37 -29 -50 -52 90 -28 24 -4 -67 -33 25 -78 93 +57 -46 36 -16 34 -59 -96 -86 64 2 28 42 -32 6 -17 37 38 -40 -92 55 -22 -42 11 -77 12 81 -89 -39 -30 -39 +-72 -68 -41 -5 93 55 24 -6 84 77 30 33 -51 -62 6 -5 -83 60 -1 -64 7 -7 -92 31 5 -21 -34 -14 21 -33 +26 -75 -36 -54 -21 -38 -49 -20 82 73 -84 -5 -69 84 -87 12 7 -67 -40 -50 -35 -65 80 -83 -2 1 34 -16 91 82 +61 -21 1 -64 -56 -61 74 16 0 38 51 34 -35 37 -28 -52 -14 61 14 58 50 27 -43 -27 14 56 -16 -78 50 -89 +45 -47 -61 68 -41 -70 14 -51 49 -84 64 -65 88 -39 -88 28 -55 -18 81 -2 -1 -45 65 -6 62 16 71 71 -1 47 +47 60 22 -42 -5 -74 12 66 89 -82 -85 65 74 0 -18 56 -39 84 -65 -42 -33 -60 23 33 -8 -72 3 -64 -3 -25 +-70 11 -19 -12 -1 -50 -89 -61 78 28 55 92 -17 86 -17 -45 -31 68 -24 -99 -59 27 79 -2 21 -80 54 9 14 -70 +-38 52 -99 50 -46 -63 -74 -41 -43 -62 -81 38 -99 17 -94 -6 44 -20 -13 -30 71 -43 43 -28 -8 57 -93 98 4 42 +-17 -27 -60 -22 86 -49 39 -83 72 -16 82 74 73 -29 16 -59 81 -60 -96 51 -62 -55 -79 -31 -15 -67 -18 -83 -61 -86 +28 37 -44 7 -17 -10 -65 8 -78 -17 -46 -5 -35 -86 13 -16 27 24 60 -12 -48 -45 16 -33 70 -45 -63 -60 21 70 +-75 -89 -93 -93 62 -44 -39 46 31 57 72 30 -65 29 66 -53 2 -2 71 -90 -73 -40 -63 32 68 30 25 98 38 92 +88 3 5 73 -2 -61 -94 79 99 94 71 -83 -40 80 -79 -14 -34 -99 -52 27 23 13 13 -35 -74 13 43 -19 2 -62 +92 -47 -27 9 -68 -86 -57 43 9 -81 -9 69 52 -28 80 -13 -6 -44 -81 -89 -10 30 -64 86 -76 -11 -100 15 12 -62 +76 -42 39 70 74 79 84 -52 18 -58 78 53 89 58 -32 20 -51 35 12 37 -70 -21 5 97 67 -25 -25 -10 2 30 +-84 26 -60 -34 11 -27 47 85 -89 29 54 -53 66 -9 12 4 92 70 2 -12 -55 72 -62 -79 -8 68 -19 12 -8 -100 +78 -97 -76 86 -47 42 99 -3 9 49 -84 86 26 43 -26 90 23 -66 -37 -35 25 -12 -42 -12 96 -15 48 87 -95 -12 +-60 57 -30 -4 -84 24 -82 -5 34 56 76 81 -64 23 32 34 -41 -48 -6 77 -42 64 87 92 82 59 9 -71 -56 -45 +-74 -90 -27 93 33 15 -35 -73 78 23 17 -28 9 63 9 35 15 32 0 -4 -32 54 -76 14 -14 -8 16 -43 -81 57 +-2 22 85 -33 -48 74 64 -59 -27 17 -65 27 -50 -81 41 -69 -26 -29 -83 48 -81 51 58 -62 -63 -55 -63 39 32 -34 +98 -99 13 25 -10 43 -62 50 82 -90 -51 40 -71 82 27 -73 19 -62 37 10 -21 45 -94 -45 -41 -3 44 86 -2 27 +-80 -89 -57 87 -42 19 32 -49 37 -4 -30 54 46 -3 -92 89 60 37 -86 38 61 93 45 -45 -86 54 21 45 50 -53 +7 -68 71 -6 41 -72 67 45 15 46 85 59 82 19 65 75 -62 -35 47 -51 23 41 -54 27 -99 14 9 69 60 62 +99 -51 83 -47 -19 -57 -22 51 -52 52 92 80 69 1 -31 0 -19 -54 73 -5 3 82 -86 -84 -95 -83 -92 -52 -90 -79 +43 -75 62 99 66 -43 -38 -21 23 35 -63 -61 -46 5 3 -90 -28 55 87 89 -29 -46 23 -61 -5 10 -70 -63 50 -14 +39 38 10 66 -24 -45 55 -33 31 29 44 31 73 44 6 69 -21 -58 -3 93 -51 86 -16 -88 88 -30 75 78 -20 -12 +-11 11 -19 40 82 6 10 22 90 -78 -88 -49 72 69 -62 42 -23 22 -38 -98 0 -3 -43 20 9 18 -67 -7 22 21 +99 80 -55 74 43 -31 60 -26 -29 -6 75 60 92 -42 85 18 1 1 -74 -44 -12 72 -57 -98 99 62 45 -40 -39 -75 +50 30 -18 -29 -80 -59 -96 46 -99 -76 -13 -75 -93 -95 -45 62 -37 53 -96 57 -40 3 14 -45 -84 58 75 16 37 -6 +1 -47 87 -99 -22 -22 -20 71 -91 
13 35 -80 75 65 -87 16 -37 99 -60 49 52 18 55 -11 18 24 -65 -80 8 -79 +-8 -87 86 -9 -64 -76 59 -52 -89 18 13 70 44 93 99 62 39 49 83 28 72 -71 -13 -71 -22 44 -87 73 -68 80 +41 -26 44 -63 -26 -83 -44 63 -51 -48 52 -8 55 73 -45 84 40 45 32 -34 -78 -46 -79 57 -40 11 34 -75 -20 91 +94 9 -35 -5 3 59 -63 2 -7 -72 -34 -70 78 99 -29 37 11 91 61 29 85 -15 59 79 47 41 19 -18 -92 47 +-59 -89 57 -72 -79 88 -85 18 -35 -96 -57 33 83 70 -55 -16 -21 72 -53 89 -44 -86 9 -44 -26 78 2 -93 -75 6 +55 73 89 80 -69 -93 -39 -88 62 49 91 -68 87 -26 40 16 -49 -53 -57 23 -97 39 -78 44 -15 1 60 -87 43 -42 +-2 -23 -74 -80 -59 52 -58 68 64 97 -86 -41 -88 35 49 3 -40 90 34 -2 3 13 -95 8 -1 6 75 92 19 -31 +57 76 65 3 37 -72 -43 57 64 -23 41 87 26 76 -18 -32 28 47 11 47 -33 -12 4 81 -92 -47 -81 43 -2 5 +68 74 66 -89 -95 -40 -78 -58 -54 -20 2 20 94 -35 58 -20 41 77 0 95 39 14 36 -40 -85 -60 -63 82 0 58 +-61 -99 61 10 -2 -31 -70 37 -77 -10 85 95 -28 70 -81 -78 -68 -33 -77 77 -6 42 -100 -68 -59 -86 -42 -74 35 -32 +64 -1 -1 -64 51 11 -65 47 -87 -8 5 58 22 -80 68 -25 24 59 -25 -75 95 -22 -73 27 86 -39 -98 -1 -17 -32 +94 -50 -53 -62 -53 46 50 38 -95 -77 40 -38 -23 -14 -68 -20 -47 23 -8 -12 -92 -69 -97 30 94 -45 47 -81 82 -60 +28 67 -48 4 74 27 -30 12 -32 35 91 -83 30 -55 -7 79 97 11 93 -45 -79 31 78 65 84 -23 -26 17 -61 43 +44 60 -88 72 31 98 55 -4 66 -14 10 -81 -40 66 -15 21 69 -98 34 3 75 18 98 -6 47 -39 31 -19 30 -51 +-6 18 -93 31 51 -20 -16 -33 -38 -19 71 4 -53 23 97 1 -28 -72 -44 -48 45 33 -76 86 64 49 -45 -34 -9 -76 +-19 8 28 -27 -51 -58 -36 63 -92 -95 70 41 -38 -49 -95 -100 43 97 -60 -5 -56 45 -13 -3 20 -10 -21 -85 -5 63 +-74 -74 -74 -39 -57 -12 51 11 -11 -22 -26 -54 71 24 -37 77 -90 77 75 86 -53 3 69 -99 -82 -59 30 81 -21 -86 +67 63 87 -15 60 -82 87 51 -39 -49 -16 74 51 17 6 47 98 89 -20 -98 97 -61 18 34 37 -36 37 -96 90 44 +53 -8 37 -76 -61 70 -77 -11 98 -80 12 -80 6 -89 8 -59 -69 -100 -52 -30 95 -58 61 29 52 -64 -51 10 16 -58 +54 -10 49 62 76 -25 80 36 13 5 59 -65 14 41 26 -78 23 -45 -51 -85 91 -43 -61 -37 94 27 -11 49 98 48 +53 -51 27 34 28 -53 18 17 31 -31 59 71 -34 25 54 -84 -34 -24 76 38 -36 15 -1 56 2 -12 0 26 -38 -62 +4 -94 -63 -21 -95 -42 -12 86 14 -86 -1 80 -48 62 -47 -52 3 91 -86 11 79 32 -24 -33 -54 19 -17 28 -33 -97 +-18 41 84 1 -83 48 -99 -64 26 -52 3 -64 68 -98 93 -79 -97 11 88 74 41 -31 -42 -35 -66 18 97 -30 19 -93 +-19 42 61 -91 -20 59 -11 -64 -60 85 -6 -71 33 -52 46 51 -86 -77 74 -4 74 -81 1 -39 -30 12 -12 20 66 60 +86 1 -67 -91 -92 -22 91 -90 -45 26 53 -6 99 46 -29 -40 -99 57 -45 -47 -3 -86 90 -78 -33 73 90 -51 -75 2 +88 -34 -2 30 -18 35 -23 90 99 -49 90 -79 94 -38 48 67 -35 -58 81 -24 18 -54 83 65 -58 -12 13 89 -59 57 +92 -99 94 -73 97 -78 -93 98 -78 95 -21 -17 -11 -92 69 -60 86 9 -36 -18 -33 -39 -65 74 -65 37 -49 87 -28 -81 +-95 2 -18 20 93 54 86 -63 -5 -89 17 -9 75 -66 -64 -82 -46 -48 82 5 -89 19 -32 -45 53 -47 21 -9 40 34 +86 87 55 -41 49 -10 -6 -7 -99 23 90 -50 -9 -81 77 65 29 -21 22 -82 19 48 -24 -72 75 -66 -69 -17 72 6 +13 37 96 31 -65 -54 -91 -27 84 52 -9 -28 85 96 14 63 -34 -29 -85 78 -75 -44 -30 -5 4 72 -45 6 13 71 +96 -69 67 59 69 46 80 42 81 30 89 -45 -10 -44 25 31 89 16 -36 86 31 92 1 5 -2 92 -11 77 20 40 +-48 98 -100 30 54 9 84 -88 5 48 93 56 -94 -89 81 33 44 -30 -95 -98 29 -33 13 -26 -59 -80 -68 -40 12 11 +82 -63 -30 -67 54 -68 50 -63 -91 -68 -45 -66 -58 16 -25 9 -50 -59 -55 4 -2 0 -63 67 30 -21 -8 55 21 -68 +9 -8 56 -6 84 81 -63 -35 81 56 -50 -54 96 -51 86 0 66 -4 -18 65 -26 -57 8 78 -54 17 18 86 21 68 +9 38 33 16 3 86 -57 28 -6 -44 -42 -2 3 -71 -86 23 34 -29 33 -30 67 63 -11 76 -65 92 
30 -66 61 1 +-72 -85 -1 64 -79 -78 -1 15 -35 -32 80 33 -36 -82 24 -65 -23 29 38 -31 87 55 -18 -52 -77 -22 -11 54 62 -48 +65 -77 50 16 41 -94 -21 16 85 24 60 86 -78 -13 69 46 55 5 -27 -18 -6 -1 59 -62 -58 -99 -49 -84 89 18 +-21 -15 -55 60 78 98 67 94 58 -5 -36 42 36 73 13 72 -78 -68 41 -37 -33 -46 -80 40 13 -44 -71 -8 15 -77 +16 -93 -42 -10 14 57 -54 -3 -44 -21 30 -93 71 25 -60 -94 93 5 -94 -84 -72 1 -50 -34 23 -15 15 18 72 -29 +-22 -82 -30 -87 -88 -25 46 32 -30 -55 -79 -85 71 -89 -57 -88 21 53 -100 -64 -92 -97 56 -51 -17 -34 -31 6 -68 84 +-53 -51 90 -38 -61 57 -63 67 22 22 70 44 43 97 20 -62 -74 72 83 -32 35 -66 -29 5 -88 55 -94 94 -19 55 +57 51 29 -42 -21 63 -57 7 -48 -87 -60 -55 -77 -53 -1 -85 64 60 53 71 41 59 -61 -73 -12 86 90 10 -60 -38 +2 -9 14 67 -2 70 11 -78 26 -55 -86 -25 99 66 63 64 46 59 66 -37 -78 -70 63 1 -20 2 46 50 34 19 +-87 -40 75 -11 -88 -80 -95 -20 -92 -28 83 24 88 -39 83 -36 -61 56 99 -73 -59 -85 -49 -10 91 12 -79 -18 -15 6 +35 -74 -4 -15 40 -87 81 -22 -12 -46 14 9 98 -35 -2 -12 57 -74 -52 71 70 -70 -61 -47 89 44 33 -100 54 42 +-4 -34 80 -12 -15 -9 -8 -29 89 -55 -33 89 16 -33 -73 -82 98 27 88 59 48 20 -67 -21 -86 11 -50 46 64 -8 diff --git a/voice_bridge/scipy/spatial/tests/data/random-uint-data.txt b/voice_bridge/scipy/spatial/tests/data/random-uint-data.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1ec7a5d64e540428507ee5a3358743b6c034ebc --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/random-uint-data.txt @@ -0,0 +1,100 @@ +52 34 59 34 64 20 89 69 26 93 95 32 17 93 77 49 51 60 51 27 60 10 61 2 16 30 41 68 65 0 +43 74 11 37 32 61 72 29 47 21 7 47 68 58 22 33 29 37 14 45 71 1 67 79 69 9 6 6 95 78 +86 20 68 67 43 5 77 70 96 37 79 71 35 30 22 4 56 28 33 50 97 17 85 52 21 5 57 19 35 97 +15 21 99 4 54 39 15 29 68 21 50 76 64 51 79 0 24 5 65 95 90 51 99 82 9 80 61 32 2 38 +46 97 53 96 51 84 18 42 30 52 82 77 72 59 1 67 72 16 14 63 70 94 20 27 38 70 86 95 41 75 +2 35 45 63 92 76 81 60 62 72 90 46 47 33 1 30 54 22 50 85 63 61 22 79 45 53 45 33 8 28 +43 41 14 79 2 77 95 16 74 19 17 78 47 12 68 55 3 2 77 10 35 86 52 33 47 26 98 42 48 86 +18 32 85 4 91 10 69 68 15 42 58 77 88 64 91 43 56 30 92 11 52 23 43 92 65 50 68 8 80 81 +20 57 38 44 62 10 80 25 32 11 70 32 13 50 41 55 44 0 28 83 5 1 34 94 55 52 56 24 76 21 +36 43 59 28 10 59 4 41 64 98 54 66 44 3 37 41 67 10 85 23 58 35 58 34 35 79 46 18 1 51 +72 63 85 51 23 91 3 56 35 72 38 26 91 0 68 98 27 10 12 71 30 1 14 47 47 88 17 68 78 46 +53 47 1 89 95 53 11 45 46 6 91 20 57 35 58 79 60 3 21 45 4 18 59 96 36 12 13 83 52 46 +33 91 82 24 97 28 50 43 65 22 14 44 32 57 33 10 34 77 58 6 27 90 26 77 62 81 87 96 0 32 +96 44 59 3 47 18 0 91 83 68 48 26 67 82 39 18 88 47 80 0 57 40 30 7 57 74 49 37 57 65 +18 44 0 46 47 30 65 79 53 8 26 42 80 76 30 61 82 93 78 25 89 49 55 15 86 63 35 74 41 11 +18 14 40 90 91 79 80 36 33 72 25 56 73 28 65 27 62 17 60 84 23 70 32 26 77 97 47 94 72 1 +82 36 68 10 83 83 40 42 51 55 82 6 37 69 93 82 64 13 54 30 45 36 87 59 1 80 39 93 11 61 +78 34 53 39 64 52 52 22 33 69 71 82 57 37 78 52 62 31 87 68 70 5 85 94 41 75 38 45 84 22 +36 23 51 15 61 76 88 85 36 96 21 60 34 61 72 60 69 81 5 17 16 82 30 61 39 96 40 70 42 71 +45 30 60 50 78 90 36 40 11 85 42 14 61 3 66 53 68 14 41 30 97 74 79 91 64 8 1 53 52 33 +55 24 35 4 49 51 44 70 93 78 25 65 1 29 96 12 93 94 13 65 4 47 84 10 90 12 36 48 21 36 +17 74 61 54 21 83 35 97 47 90 57 11 16 39 95 78 23 40 23 55 17 51 20 73 98 93 50 32 58 4 +84 76 78 33 50 29 11 20 5 93 63 22 91 92 44 85 62 25 63 92 36 26 57 33 8 74 69 64 78 91 +58 34 91 71 
37 84 28 90 28 37 97 7 26 44 59 18 58 64 31 83 16 17 50 36 65 81 19 63 66 64 +20 71 1 35 87 5 47 27 6 95 86 75 74 9 94 93 26 5 61 3 97 88 0 57 21 64 46 24 86 12 +23 53 31 39 37 77 29 51 85 10 41 91 67 82 50 91 53 72 75 81 50 63 52 92 83 49 92 50 26 9 +38 43 13 87 11 45 28 16 27 61 70 52 77 9 57 42 73 22 32 95 23 91 93 63 16 44 26 9 93 83 +77 68 21 96 44 45 9 2 14 2 67 90 55 82 67 21 18 64 31 16 2 27 86 42 34 72 22 98 91 33 +89 66 87 76 0 32 81 39 55 76 23 56 51 53 75 79 30 86 1 66 64 14 46 84 92 19 95 47 77 97 +88 79 61 26 66 92 54 22 15 25 26 0 76 27 17 59 48 4 42 61 65 91 0 62 55 79 29 88 10 11 +24 89 91 39 56 36 16 86 41 31 14 35 7 71 77 74 33 11 49 7 96 83 31 63 90 49 96 22 58 86 +45 7 93 44 50 54 83 80 3 36 11 38 14 17 10 84 96 94 26 34 26 75 72 0 41 89 96 47 39 88 +0 95 2 22 68 38 0 3 51 6 13 10 14 49 75 69 25 39 63 67 12 80 37 77 10 90 60 35 84 37 +98 56 99 75 49 66 3 33 65 86 1 79 91 23 69 98 91 73 95 45 64 26 99 75 49 77 71 55 42 18 +80 39 26 94 85 42 91 27 14 57 36 34 10 44 38 77 23 39 54 25 32 5 17 9 66 3 67 94 20 11 +88 80 30 77 72 67 16 75 84 87 60 89 21 94 24 11 63 8 79 89 37 18 6 82 76 70 81 95 67 95 +92 36 55 55 43 18 76 94 30 74 95 38 45 95 54 87 22 57 4 65 15 90 90 38 73 24 67 24 36 25 +98 30 34 68 11 48 42 38 80 23 12 91 77 22 65 2 88 31 70 12 46 63 17 63 27 76 21 71 70 7 +76 29 56 12 41 66 22 96 8 6 7 13 27 10 77 90 2 76 30 24 81 88 19 16 93 13 30 24 98 96 +45 94 89 41 52 14 71 88 80 74 7 85 44 69 65 88 4 15 84 97 86 5 53 15 39 34 9 10 45 20 +95 47 45 96 71 10 36 10 90 49 7 68 14 46 97 89 82 58 69 34 93 77 90 9 27 91 29 27 22 17 +80 6 29 26 34 59 10 55 32 53 18 72 39 40 29 35 52 64 2 64 38 83 16 46 53 20 19 8 10 67 +47 44 79 32 58 82 26 69 0 26 4 73 95 98 61 96 20 38 3 92 6 5 25 24 42 49 15 92 80 16 +74 37 86 84 47 15 56 36 43 59 72 72 74 73 49 54 26 5 40 80 78 48 4 65 31 70 14 91 88 72 +91 45 73 62 83 40 49 3 27 79 80 90 3 3 58 44 7 66 77 42 37 25 20 91 47 63 71 7 72 22 +51 3 36 90 45 84 18 55 75 78 42 62 86 63 65 67 46 75 1 79 2 85 85 60 36 92 34 89 66 99 +36 99 0 63 89 65 54 58 52 28 98 27 67 1 45 71 35 52 55 55 44 23 46 89 83 37 8 2 92 75 +51 13 71 2 9 95 23 60 24 98 86 43 32 16 75 70 92 78 26 84 29 14 35 55 61 89 73 59 76 44 +59 57 28 92 33 50 70 94 89 67 70 38 53 16 35 70 35 92 39 78 88 80 71 1 93 21 87 64 49 84 +29 6 17 45 38 65 41 48 81 69 34 12 2 14 41 71 16 92 69 27 61 74 58 20 75 19 39 66 57 82 +12 8 14 85 97 31 58 31 20 76 6 42 29 95 60 94 15 84 86 69 73 52 73 57 12 66 89 65 60 84 +20 74 96 34 83 41 8 37 22 36 30 25 20 8 58 73 9 75 76 73 84 38 16 24 95 95 68 66 43 19 +33 15 25 80 48 69 63 39 16 45 6 77 14 46 38 15 64 85 49 5 59 28 9 4 23 68 59 26 1 75 +35 45 3 6 34 59 55 51 81 59 59 93 18 41 8 44 88 7 86 4 88 90 24 54 73 62 89 13 44 92 +72 60 68 83 39 32 30 15 98 92 69 94 51 48 9 0 4 1 30 92 40 1 61 82 66 4 39 10 93 87 +12 20 34 72 33 31 67 71 67 47 98 76 53 29 17 17 13 31 43 76 25 37 8 39 9 5 96 41 87 66 +96 30 2 57 57 10 14 17 86 76 35 94 42 54 18 24 19 34 12 42 18 11 83 65 86 38 45 17 60 70 +19 62 71 99 35 60 96 30 44 80 78 15 14 5 32 43 10 26 81 72 41 98 30 87 75 8 53 33 25 95 +22 0 38 57 88 7 47 83 49 41 52 1 14 93 41 3 18 42 15 57 28 74 97 2 18 48 64 25 77 69 +36 95 65 81 44 41 6 74 62 16 72 81 15 72 31 5 22 17 19 6 7 15 82 10 31 93 11 45 41 11 +22 76 14 62 34 65 82 5 57 51 51 5 1 6 17 43 28 31 90 99 48 14 96 49 95 40 87 85 40 51 +95 13 99 46 52 80 4 18 95 94 0 46 10 80 3 34 60 15 86 10 28 59 6 35 14 93 18 8 3 65 +57 37 6 31 45 85 42 34 47 92 48 40 7 17 5 74 67 62 0 74 58 21 23 3 5 24 50 54 99 19 +24 14 10 4 36 33 88 51 40 66 40 56 65 23 43 13 82 62 27 
88 89 91 36 37 19 11 50 39 96 68 +82 7 39 80 52 90 57 17 61 15 51 71 82 15 21 44 4 46 75 50 78 18 63 75 98 45 6 16 57 25 +0 26 56 74 62 84 71 42 25 86 68 10 73 0 71 6 15 99 1 51 45 42 5 49 3 35 84 29 15 36 +60 78 76 3 95 73 36 57 35 44 50 42 85 57 18 69 37 42 75 79 15 12 74 72 51 36 79 3 58 71 +69 24 16 96 17 25 21 94 71 78 74 39 7 96 3 12 13 16 7 99 65 72 12 28 75 44 55 8 75 67 +3 13 92 9 92 83 69 91 65 92 29 63 46 1 4 62 29 85 47 93 81 3 15 23 63 50 17 9 13 13 +9 18 46 53 0 86 10 41 87 89 24 25 70 73 8 23 27 76 66 46 58 39 28 1 99 64 59 13 7 68 +72 57 90 50 47 57 34 27 94 39 23 31 74 77 45 74 18 49 96 8 95 50 20 81 73 55 72 2 32 15 +87 77 74 5 99 86 5 65 97 39 17 74 48 87 20 66 28 2 18 58 49 22 79 23 36 30 64 20 71 32 +35 43 66 96 63 77 18 90 47 86 94 19 88 79 23 12 38 4 56 42 36 2 77 1 3 17 64 52 31 24 +80 2 4 39 61 60 74 83 28 28 61 10 71 82 44 29 55 30 1 58 81 79 34 41 85 82 84 55 22 12 +76 77 58 92 90 0 54 28 77 68 58 12 1 81 37 28 19 60 71 59 25 83 8 49 52 11 28 65 59 70 +14 1 92 90 5 48 28 78 1 42 54 43 60 83 72 19 28 33 12 52 18 15 56 95 39 33 37 70 53 23 +53 76 26 31 18 81 83 79 25 1 82 43 50 24 63 49 5 23 66 37 80 41 63 77 2 28 15 21 32 93 +80 41 81 7 37 95 19 42 57 30 12 25 29 34 41 45 87 8 20 95 63 16 99 55 16 61 16 36 81 25 +32 30 2 81 23 25 88 30 37 76 52 77 79 58 21 58 10 0 13 32 72 80 3 75 75 25 21 9 79 18 +26 13 36 63 43 2 50 41 65 18 88 44 82 75 73 24 1 30 54 68 15 18 22 50 41 99 27 96 51 53 +22 4 76 11 85 88 28 75 1 2 92 66 63 3 58 43 53 5 1 24 99 90 87 87 41 1 85 37 98 92 +16 39 13 88 60 55 35 11 34 23 23 85 79 41 79 87 65 78 47 83 88 78 35 84 30 61 37 58 25 55 +27 33 15 76 82 79 73 92 93 78 18 38 22 96 63 92 41 9 50 96 14 55 8 60 15 61 97 56 43 22 +42 34 94 11 35 70 50 49 36 34 59 14 87 84 88 83 4 69 29 99 35 24 2 18 97 97 74 88 91 49 +33 25 71 12 60 2 48 22 81 33 27 95 54 25 53 14 20 43 26 96 98 37 64 27 72 33 78 45 22 61 +61 21 91 38 92 47 26 90 78 96 58 41 21 72 81 61 55 9 55 60 28 25 25 74 73 81 64 16 49 39 +90 89 12 93 91 23 82 36 63 58 73 81 49 32 60 39 4 84 73 16 18 26 58 85 46 28 82 91 72 7 +79 41 28 76 33 70 47 6 18 64 40 54 45 61 28 63 87 83 38 9 65 68 62 45 80 63 89 29 20 40 +20 59 58 23 61 79 35 19 78 2 26 48 90 34 69 31 31 42 92 33 18 74 28 47 45 52 36 89 19 40 +58 13 72 24 31 26 73 72 84 29 85 99 20 32 54 92 8 80 86 58 23 80 59 21 76 75 90 76 92 57 +74 53 80 51 8 88 84 63 82 99 97 77 38 9 51 61 37 20 68 47 65 21 53 82 85 96 62 65 35 4 +71 82 14 18 88 79 38 76 66 27 10 10 62 54 80 21 6 57 83 33 52 10 97 37 6 38 12 51 0 84 +95 30 75 92 84 30 55 57 32 44 53 24 77 81 34 84 69 85 91 33 50 72 62 79 62 12 59 75 99 81 +38 42 47 1 11 34 27 77 70 85 89 84 79 15 14 54 78 93 72 68 63 39 98 72 55 32 93 0 13 21 +3 15 10 15 3 31 84 89 53 5 60 41 66 77 45 12 68 68 50 68 99 64 46 54 30 56 2 90 99 78 +66 10 27 89 42 16 9 98 16 2 68 51 0 22 73 60 69 96 37 69 30 36 20 21 51 26 65 13 74 86 +94 58 34 97 77 88 90 75 47 30 6 36 89 66 48 9 20 6 52 45 0 37 99 46 11 53 53 72 94 40 +5 71 50 96 89 71 80 43 27 95 49 9 74 28 62 65 64 97 2 55 58 11 69 0 31 22 73 20 66 11 +63 39 84 62 64 5 56 92 26 86 19 20 56 85 42 48 56 51 54 29 26 95 72 38 70 61 16 54 57 19 +76 97 40 99 73 68 98 92 97 62 73 1 29 72 18 70 90 4 98 95 70 36 65 45 86 36 88 38 64 54 diff --git a/voice_bridge/scipy/spatial/tests/data/selfdual-4d-polytope.txt b/voice_bridge/scipy/spatial/tests/data/selfdual-4d-polytope.txt new file mode 100644 index 0000000000000000000000000000000000000000..47ce4a7ae522fc2a2bbaa9d8ca285913b8ef0712 --- /dev/null +++ b/voice_bridge/scipy/spatial/tests/data/selfdual-4d-polytope.txt @@ -0,0 
+1,27 @@
+# The facets of a self-dual 4-dim regular polytope
+# with 24 octahedron facets. Taken from cddlib.
+# Format b + Ax >= 0
+ 1 1 1 1 1
+ 1 1 1 1 -1
+ 1 1 1 -1 1
+ 1 1 1 -1 -1
+ 1 1 -1 1 1
+ 1 1 -1 1 -1
+ 1 1 -1 -1 1
+ 1 1 -1 -1 -1
+ 1 -1 1 1 1
+ 1 -1 1 1 -1
+ 1 -1 1 -1 1
+ 1 -1 1 -1 -1
+ 1 -1 -1 1 1
+ 1 -1 -1 1 -1
+ 1 -1 -1 -1 1
+ 1 -1 -1 -1 -1
+ 1 2 0 0 0
+ 1 0 2 0 0
+ 1 0 0 2 0
+ 1 0 0 0 2
+ 1 -2 0 0 0
+ 1 0 -2 0 0
+ 1 0 0 -2 0
+ 1 0 0 0 -2
diff --git a/voice_bridge/scipy/spatial/transform/rotation.pyd b/voice_bridge/scipy/spatial/transform/rotation.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..9c5dd9b0ae79a872b84615eb332b6abce1689df0
Binary files /dev/null and b/voice_bridge/scipy/spatial/transform/rotation.pyd differ
diff --git a/voice_bridge/scipy/special.pxd b/voice_bridge/scipy/special.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..62cb82807aa7b34bd882332316ee154753e55273
--- /dev/null
+++ b/voice_bridge/scipy/special.pxd
@@ -0,0 +1 @@
+from .special cimport cython_special
diff --git a/voice_bridge/scipy/special/_comb.pyd b/voice_bridge/scipy/special/_comb.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..022a5ca9ebe1228888f954dc225affa23835a492
Binary files /dev/null and b/voice_bridge/scipy/special/_comb.pyd differ
diff --git a/voice_bridge/scipy/special/_ellip_harm_2.pyd b/voice_bridge/scipy/special/_ellip_harm_2.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..c8516fad81a7a6c8957a2c94a9ed71cc1692e7e9
Binary files /dev/null and b/voice_bridge/scipy/special/_ellip_harm_2.pyd differ
diff --git a/voice_bridge/scipy/special/_ufuncs.pyd b/voice_bridge/scipy/special/_ufuncs.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..c482e75a84962fe125820a5e0cce0d47d12592f1
Binary files /dev/null and b/voice_bridge/scipy/special/_ufuncs.pyd differ
diff --git a/voice_bridge/scipy/special/_ufuncs_cxx.pyd b/voice_bridge/scipy/special/_ufuncs_cxx.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..659380a58c6afddf4ca3ae15df3d878a5c2421cf
Binary files /dev/null and b/voice_bridge/scipy/special/_ufuncs_cxx.pyd differ
diff --git a/voice_bridge/scipy/special/cython_special.pxd b/voice_bridge/scipy/special/cython_special.pxd
new file mode 100644
index 0000000000000000000000000000000000000000..a5581a577a9a9c83e45fabd9683b339697583a13
--- /dev/null
+++ b/voice_bridge/scipy/special/cython_special.pxd
@@ -0,0 +1,248 @@
+# This file is automatically generated by _generate_pyx.py.
+# Do not edit manually!
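Aside: the selfdual-4d-polytope.txt file added above stores one facet per row as [b, a1, a2, a3, a4] under the stated convention b + Ax >= 0. A minimal sketch of consuming that file with SciPy's own HalfspaceIntersection, an editor's illustration rather than part of the diff, with the path assumed to be resolved from the repository root:

import numpy as np
from scipy.spatial import HalfspaceIntersection

# Rows are [b, a1..a4] meaning b + a.x >= 0; np.loadtxt skips the '#' comments.
data = np.loadtxt("voice_bridge/scipy/spatial/tests/data/selfdual-4d-polytope.txt")
b, A = data[:, 0], data[:, 1:]

# HalfspaceIntersection expects rows [a, b] with a.x + b <= 0, so flip signs:
# b + a.x >= 0  is equivalent to  (-a).x + (-b) <= 0.
halfspaces = np.hstack([-A, -b[:, None]])
origin = np.zeros(4)  # strictly interior, since b + A @ origin = 1 > 0 for every facet
hs = HalfspaceIntersection(halfspaces, origin)
print(data.shape[0], "facets,", hs.intersections.shape[0], "vertex intersections")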
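Aside: the cython_special.pxd declarations below expose scipy.special kernels to Cython code with C-level calling overhead via "cimport scipy.special.cython_special". The fused ctypedefs (e.g. Dd_number_t) let a single name cover both the double and double complex kernels, dispatching on the argument type. A hedged illustration, not part of the diff; the module is also importable from plain Python (with no speed benefit), which is enough to show the dispatch:

from scipy.special import cython_special

# j0 is declared 'cpdef double j0(double x0) nogil' below: real Bessel J0.
print(cython_special.j0(1.0))
# gamma is fused Dd_number_t; a complex argument selects the complex kernel.
print(cython_special.gamma(0.5 + 0j))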
+
+ctypedef fused number_t:
+    double complex
+    double
+
+cpdef number_t spherical_jn(long n, number_t z, bint derivative=*) nogil
+cpdef number_t spherical_yn(long n, number_t z, bint derivative=*) nogil
+cpdef number_t spherical_in(long n, number_t z, bint derivative=*) nogil
+cpdef number_t spherical_kn(long n, number_t z, bint derivative=*) nogil
+
+ctypedef fused Dd_number_t:
+    double complex
+    double
+
+ctypedef fused dfg_number_t:
+    double
+    float
+    long double
+
+ctypedef fused dl_number_t:
+    double
+    long
+
+cpdef double voigt_profile(double x0, double x1, double x2) nogil
+cpdef double agm(double x0, double x1) nogil
+cdef void airy(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil
+cdef void airye(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1, Dd_number_t *y2, Dd_number_t *y3) nogil
+cpdef double bdtr(double x0, dl_number_t x1, double x2) nogil
+cpdef double bdtrc(double x0, dl_number_t x1, double x2) nogil
+cpdef double bdtri(double x0, dl_number_t x1, double x2) nogil
+cpdef double bdtrik(double x0, double x1, double x2) nogil
+cpdef double bdtrin(double x0, double x1, double x2) nogil
+cpdef double bei(double x0) nogil
+cpdef double beip(double x0) nogil
+cpdef double ber(double x0) nogil
+cpdef double berp(double x0) nogil
+cpdef double besselpoly(double x0, double x1, double x2) nogil
+cpdef double beta(double x0, double x1) nogil
+cpdef double betainc(double x0, double x1, double x2) nogil
+cpdef double betaincinv(double x0, double x1, double x2) nogil
+cpdef double betaln(double x0, double x1) nogil
+cpdef double binom(double x0, double x1) nogil
+cpdef double boxcox(double x0, double x1) nogil
+cpdef double boxcox1p(double x0, double x1) nogil
+cpdef double btdtr(double x0, double x1, double x2) nogil
+cpdef double btdtri(double x0, double x1, double x2) nogil
+cpdef double btdtria(double x0, double x1, double x2) nogil
+cpdef double btdtrib(double x0, double x1, double x2) nogil
+cpdef double cbrt(double x0) nogil
+cpdef double chdtr(double x0, double x1) nogil
+cpdef double chdtrc(double x0, double x1) nogil
+cpdef double chdtri(double x0, double x1) nogil
+cpdef double chdtriv(double x0, double x1) nogil
+cpdef double chndtr(double x0, double x1, double x2) nogil
+cpdef double chndtridf(double x0, double x1, double x2) nogil
+cpdef double chndtrinc(double x0, double x1, double x2) nogil
+cpdef double chndtrix(double x0, double x1, double x2) nogil
+cpdef double cosdg(double x0) nogil
+cpdef double cosm1(double x0) nogil
+cpdef double cotdg(double x0) nogil
+cpdef Dd_number_t dawsn(Dd_number_t x0) nogil
+cpdef double ellipe(double x0) nogil
+cpdef double ellipeinc(double x0, double x1) nogil
+cdef void ellipj(double x0, double x1, double *y0, double *y1, double *y2, double *y3) nogil
+cpdef double ellipkinc(double x0, double x1) nogil
+cpdef double ellipkm1(double x0) nogil
+cpdef double ellipk(double x0) nogil
+cpdef double entr(double x0) nogil
+cpdef Dd_number_t erf(Dd_number_t x0) nogil
+cpdef Dd_number_t erfc(Dd_number_t x0) nogil
+cpdef Dd_number_t erfcx(Dd_number_t x0) nogil
+cpdef Dd_number_t erfi(Dd_number_t x0) nogil
+cpdef double erfinv(double x0) nogil
+cpdef double erfcinv(double x0) nogil
+cpdef Dd_number_t eval_chebyc(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_chebys(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_chebyt(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_chebyu(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_gegenbauer(dl_number_t x0, double x1, Dd_number_t x2) nogil
+cpdef Dd_number_t eval_genlaguerre(dl_number_t x0, double x1, Dd_number_t x2) nogil
+cpdef double eval_hermite(long x0, double x1) nogil
+cpdef double eval_hermitenorm(long x0, double x1) nogil
+cpdef Dd_number_t eval_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil
+cpdef Dd_number_t eval_laguerre(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_legendre(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_sh_chebyt(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_sh_chebyu(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t eval_sh_jacobi(dl_number_t x0, double x1, double x2, Dd_number_t x3) nogil
+cpdef Dd_number_t eval_sh_legendre(dl_number_t x0, Dd_number_t x1) nogil
+cpdef Dd_number_t exp1(Dd_number_t x0) nogil
+cpdef double exp10(double x0) nogil
+cpdef double exp2(double x0) nogil
+cpdef Dd_number_t expi(Dd_number_t x0) nogil
+cpdef dfg_number_t expit(dfg_number_t x0) nogil
+cpdef Dd_number_t expm1(Dd_number_t x0) nogil
+cpdef double expn(dl_number_t x0, double x1) nogil
+cpdef double exprel(double x0) nogil
+cpdef double fdtr(double x0, double x1, double x2) nogil
+cpdef double fdtrc(double x0, double x1, double x2) nogil
+cpdef double fdtri(double x0, double x1, double x2) nogil
+cpdef double fdtridfd(double x0, double x1, double x2) nogil
+cdef void fresnel(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil
+cpdef Dd_number_t gamma(Dd_number_t x0) nogil
+cpdef double gammainc(double x0, double x1) nogil
+cpdef double gammaincc(double x0, double x1) nogil
+cpdef double gammainccinv(double x0, double x1) nogil
+cpdef double gammaincinv(double x0, double x1) nogil
+cpdef double gammaln(double x0) nogil
+cpdef double gammasgn(double x0) nogil
+cpdef double gdtr(double x0, double x1, double x2) nogil
+cpdef double gdtrc(double x0, double x1, double x2) nogil
+cpdef double gdtria(double x0, double x1, double x2) nogil
+cpdef double gdtrib(double x0, double x1, double x2) nogil
+cpdef double gdtrix(double x0, double x1, double x2) nogil
+cpdef double complex hankel1(double x0, double complex x1) nogil
+cpdef double complex hankel1e(double x0, double complex x1) nogil
+cpdef double complex hankel2(double x0, double complex x1) nogil
+cpdef double complex hankel2e(double x0, double complex x1) nogil
+cpdef double huber(double x0, double x1) nogil
+cpdef Dd_number_t hyp0f1(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t hyp1f1(double x0, double x1, Dd_number_t x2) nogil
+cpdef Dd_number_t hyp2f1(double x0, double x1, double x2, Dd_number_t x3) nogil
+cpdef double hyperu(double x0, double x1, double x2) nogil
+cpdef double i0(double x0) nogil
+cpdef double i0e(double x0) nogil
+cpdef double i1(double x0) nogil
+cpdef double i1e(double x0) nogil
+cpdef double inv_boxcox(double x0, double x1) nogil
+cpdef double inv_boxcox1p(double x0, double x1) nogil
+cdef void it2i0k0(double x0, double *y0, double *y1) nogil
+cdef void it2j0y0(double x0, double *y0, double *y1) nogil
+cpdef double it2struve0(double x0) nogil
+cdef void itairy(double x0, double *y0, double *y1, double *y2, double *y3) nogil
+cdef void iti0k0(double x0, double *y0, double *y1) nogil
+cdef void itj0y0(double x0, double *y0, double *y1) nogil
+cpdef double itmodstruve0(double x0) nogil
+cpdef double itstruve0(double x0) nogil
+cpdef Dd_number_t iv(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t ive(double x0, Dd_number_t x1) nogil
+cpdef double j0(double x0) nogil
+cpdef double j1(double x0) nogil
+cpdef Dd_number_t jv(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t jve(double x0, Dd_number_t x1) nogil
+cpdef double k0(double x0) nogil
+cpdef double k0e(double x0) nogil
+cpdef double k1(double x0) nogil
+cpdef double k1e(double x0) nogil
+cpdef double kei(double x0) nogil
+cpdef double keip(double x0) nogil
+cdef void kelvin(double x0, double complex *y0, double complex *y1, double complex *y2, double complex *y3) nogil
+cpdef double ker(double x0) nogil
+cpdef double kerp(double x0) nogil
+cpdef double kl_div(double x0, double x1) nogil
+cpdef double kn(dl_number_t x0, double x1) nogil
+cpdef double kolmogi(double x0) nogil
+cpdef double kolmogorov(double x0) nogil
+cpdef Dd_number_t kv(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t kve(double x0, Dd_number_t x1) nogil
+cpdef Dd_number_t log1p(Dd_number_t x0) nogil
+cpdef Dd_number_t log_ndtr(Dd_number_t x0) nogil
+cpdef Dd_number_t loggamma(Dd_number_t x0) nogil
+cpdef dfg_number_t logit(dfg_number_t x0) nogil
+cpdef double lpmv(double x0, double x1, double x2) nogil
+cpdef double mathieu_a(double x0, double x1) nogil
+cpdef double mathieu_b(double x0, double x1) nogil
+cdef void mathieu_cem(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modcem1(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modcem2(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modsem1(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_modsem2(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void mathieu_sem(double x0, double x1, double x2, double *y0, double *y1) nogil
+cdef void modfresnelm(double x0, double complex *y0, double complex *y1) nogil
+cdef void modfresnelp(double x0, double complex *y0, double complex *y1) nogil
+cpdef double modstruve(double x0, double x1) nogil
+cpdef double nbdtr(dl_number_t x0, dl_number_t x1, double x2) nogil
+cpdef double nbdtrc(dl_number_t x0, dl_number_t x1, double x2) nogil
+cpdef double nbdtri(dl_number_t x0, dl_number_t x1, double x2) nogil
+cpdef double nbdtrik(double x0, double x1, double x2) nogil
+cpdef double nbdtrin(double x0, double x1, double x2) nogil
+cpdef double ncfdtr(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtri(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtridfd(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtridfn(double x0, double x1, double x2, double x3) nogil
+cpdef double ncfdtrinc(double x0, double x1, double x2, double x3) nogil
+cpdef double nctdtr(double x0, double x1, double x2) nogil
+cpdef double nctdtridf(double x0, double x1, double x2) nogil
+cpdef double nctdtrinc(double x0, double x1, double x2) nogil
+cpdef double nctdtrit(double x0, double x1, double x2) nogil
+cpdef Dd_number_t ndtr(Dd_number_t x0) nogil
+cpdef double ndtri(double x0) nogil
+cpdef double nrdtrimn(double x0, double x1, double x2) nogil
+cpdef double nrdtrisd(double x0, double x1, double x2) nogil
+cdef void obl_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void obl_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cpdef double obl_cv(double x0, double x1, double x2) nogil
+cdef void obl_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil
+cdef void obl_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil
+cdef void obl_rad2(double x0, double x1, double x2,
double x3, double *y0, double *y1) nogil +cdef void obl_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cpdef double owens_t(double x0, double x1) nogil +cdef void pbdv(double x0, double x1, double *y0, double *y1) nogil +cdef void pbvv(double x0, double x1, double *y0, double *y1) nogil +cdef void pbwa(double x0, double x1, double *y0, double *y1) nogil +cpdef double pdtr(double x0, double x1) nogil +cpdef double pdtrc(double x0, double x1) nogil +cpdef double pdtri(dl_number_t x0, double x1) nogil +cpdef double pdtrik(double x0, double x1) nogil +cpdef double poch(double x0, double x1) nogil +cdef void pro_ang1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cdef void pro_ang1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cpdef double pro_cv(double x0, double x1, double x2) nogil +cdef void pro_rad1(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cdef void pro_rad1_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cdef void pro_rad2(double x0, double x1, double x2, double x3, double *y0, double *y1) nogil +cdef void pro_rad2_cv(double x0, double x1, double x2, double x3, double x4, double *y0, double *y1) nogil +cpdef double pseudo_huber(double x0, double x1) nogil +cpdef Dd_number_t psi(Dd_number_t x0) nogil +cpdef double radian(double x0, double x1, double x2) nogil +cpdef double rel_entr(double x0, double x1) nogil +cpdef Dd_number_t rgamma(Dd_number_t x0) nogil +cpdef double round(double x0) nogil +cdef void shichi(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil +cdef void sici(Dd_number_t x0, Dd_number_t *y0, Dd_number_t *y1) nogil +cpdef double sindg(double x0) nogil +cpdef double smirnov(dl_number_t x0, double x1) nogil +cpdef double smirnovi(dl_number_t x0, double x1) nogil +cpdef Dd_number_t spence(Dd_number_t x0) nogil +cpdef double complex sph_harm(dl_number_t x0, dl_number_t x1, double x2, double x3) nogil +cpdef double stdtr(double x0, double x1) nogil +cpdef double stdtridf(double x0, double x1) nogil +cpdef double stdtrit(double x0, double x1) nogil +cpdef double struve(double x0, double x1) nogil +cpdef double tandg(double x0) nogil +cpdef double tklmbda(double x0, double x1) nogil +cpdef double complex wofz(double complex x0) nogil +cpdef Dd_number_t wrightomega(Dd_number_t x0) nogil +cpdef Dd_number_t xlog1py(Dd_number_t x0, Dd_number_t x1) nogil +cpdef Dd_number_t xlogy(Dd_number_t x0, Dd_number_t x1) nogil +cpdef double y0(double x0) nogil +cpdef double y1(double x0) nogil +cpdef double yn(dl_number_t x0, double x1) nogil +cpdef Dd_number_t yv(double x0, Dd_number_t x1) nogil +cpdef Dd_number_t yve(double x0, Dd_number_t x1) nogil +cpdef double zetac(double x0) nogil +cpdef double wright_bessel(double x0, double x1, double x2) nogil +cpdef double ndtri_exp(double x0) nogil \ No newline at end of file diff --git a/voice_bridge/scipy/special/cython_special.pyd b/voice_bridge/scipy/special/cython_special.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e5d34575d0ca0d1599c0ff0f804cb2b69302e855 --- /dev/null +++ b/voice_bridge/scipy/special/cython_special.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:679c027fde3831c3db60c6b70632587fdb4092ffc4d88548b52cc0ecb66e8f16 +size 1348096 diff --git a/voice_bridge/scipy/special/specfun.pyd b/voice_bridge/scipy/special/specfun.pyd new file mode 100644 index 
0000000000000000000000000000000000000000..a30187759dde16a0c4fdcf2f6134e3d821dd153e Binary files /dev/null and b/voice_bridge/scipy/special/specfun.pyd differ diff --git a/voice_bridge/scipy/special/tests/data/README b/voice_bridge/scipy/special/tests/data/README new file mode 100644 index 0000000000000000000000000000000000000000..da0b0fd6ab53426bf50d6cd2d5ce28066acc43a3 --- /dev/null +++ b/voice_bridge/scipy/special/tests/data/README @@ -0,0 +1,578 @@ +This directory contains numerical data for testing special functions. +The data is in version control as text files. + +The data is automatically packed into npz files by setup.py. +The npz files should not be checked in version control. + +The data in gsl is computed using the GNU scientific library, the data +in local is computed using mpmath, and the data in boost is a copy of +data distributed with the boost library and comes with the following +license: + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +========= + +Copyright holders of each file are listed here: + +Jamfile.v2:# Copyright Daryle Walker, Hubert Holin, John Maddock 2006 - 2007 +acosh_data.ipp:// Copyright John Maddock 2008. +acosh_test.hpp:// (C) Copyright Hubert Holin 2003. +almost_equal.ipp:// Copyright (c) 2006 Johan Rade +asinh_data.ipp:// Copyright John Maddock 2008. +asinh_test.hpp:// (C) Copyright Hubert Holin 2003. +assoc_legendre_p.ipp:// (C) Copyright John Maddock 2006-7. +atanh_data.ipp:// Copyright John Maddock 2008. +atanh_test.hpp:// (C) Copyright Hubert Holin 2003. +bessel_i_data.ipp:// Copyright (c) 2007 John Maddock +bessel_i_int_data.ipp:// Copyright (c) 2007 John Maddock +bessel_j_data.ipp:// Copyright (c) 2007 John Maddock +bessel_j_int_data.ipp:// Copyright (c) 2007 John Maddock +bessel_j_large_data.ipp:// Copyright (c) 2007 John Maddock +bessel_k_data.ipp:// Copyright (c) 2007 John Maddock +bessel_k_int_data.ipp:// Copyright (c) 2007 John Maddock +bessel_y01_data.ipp:// Copyright (c) 2007 John Maddock +bessel_yn_data.ipp:// Copyright (c) 2007 John Maddock +bessel_yv_data.ipp:// Copyright (c) 2007 John Maddock +beta_exp_data.ipp:// (C) Copyright John Maddock 2006. +beta_med_data.ipp:// (C) Copyright John Maddock 2006. 
+beta_small_data.ipp:// (C) Copyright John Maddock 2006. +binomial_data.ipp:// (C) Copyright John Maddock 2006-7. +binomial_large_data.ipp:// (C) Copyright John Maddock 2006-7. +binomial_quantile.ipp:// (C) Copyright John Maddock 2006-7. +cbrt_data.ipp:// (C) Copyright John Maddock 2006-7. +common_factor_test.cpp:// (C) Copyright Daryle Walker 2001, 2006. +compile_test/tools_rational_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_real_cast_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_remez_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_chi_squared_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_complement_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sign_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_digamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_trunc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/constants_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/sf_sinc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_binomial_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_binomial_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_test_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_normal_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sinhc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_rc_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sin_pi_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sph_harm_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_poisson_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/test_traits.cpp:// Copyright John Maddock 2007. +compile_test/dist_gamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_cos_pi_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_logistic_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_fpclassify_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_atanh_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_precision_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_hankel_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/sf_cbrt_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_beta_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_legendre_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_stats_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_polynomial_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_config_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_exponential_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_students_t_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_inv_gamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_acosh_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_beta_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_fisher_f_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_triangular_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/instantiate.hpp:// Copyright John Maddock 2006. +compile_test/instantiate.hpp:// Copyright Paul A. Bristow 2007, 2010. +compile_test/tools_solve_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_next_incl_test.cpp:// Copyright John Maddock 2006. 
+compile_test/generate.sh:// Copyright John Maddock 2006. +compile_test/generate.sh:// Copyright John Maddock 2006. +compile_test/generate.sh:// Copyright John Maddock 2006. +compile_test/distribution_concept_check.cpp:// Copyright John Maddock 2006. +compile_test/sf_laguerre_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tr1_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_ellint_rj_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_chi_squ_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/dist_skew_norm_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_modf_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_find_location_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_acos_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_rd_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_roots_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_test_data_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_abs_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_t_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_factorials_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_gamma_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_atan_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_powm1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_hypot_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_pareto_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_round_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_weibull_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/std_real_concept_check.cpp:// Copyright John Maddock 2006. +compile_test/dist_hypergeo_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/dist_inv_chi_sq_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_sqrt1pm1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_log1p_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_jacobi_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/dist_neg_binom_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_nc_f_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/dist_find_scale_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_bessel_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_minima_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_asin_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_extreme_val_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_lanczos_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_uniform_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/test_compile_result.hpp:// Copyright John Maddock 2007. +compile_test/tools_series_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_3_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_rf_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_2_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_hermite_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/poison.hpp:// Copyright John Maddock 2013. +compile_test/sf_zeta_incl_test.cpp:// Copyright John Maddock 2007. +compile_test/dist_laplace_incl_test.cpp:// Copyright John Maddock 2008. +compile_test/sf_expint_incl_test.cpp:// Copyright John Maddock 2007. 
+compile_test/sf_expm1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_bernoulli_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/compl_asinh_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_beta_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/tools_fraction_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_owens_t_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/tools_toms748_inc_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_ellint_1_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_erf_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/main.cpp:// Copyright John Maddock 2009. +compile_test/sf_math_fwd_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/sf_airy_incl_test.cpp:// Copyright John Maddock 2012. +compile_test/dist_lognormal_incl_test.cpp:// Copyright John Maddock 2006. +compile_test/dist_cauchy_incl_test.cpp:// Copyright John Maddock 2006. +complex_test.cpp:// (C) Copyright John Maddock 2005. +digamma_data.ipp:// (C) Copyright John Maddock 2006-7. +digamma_neg_data.ipp:// (C) Copyright John Maddock 2006-7. +digamma_root_data.ipp:// (C) Copyright John Maddock 2006-7. +digamma_small_data.ipp:// (C) Copyright John Maddock 2006-7. +e_float_concept_check.cpp:// Copyright John Maddock 2011. +ellint_e2_data.ipp:// Copyright (c) 2006 John Maddock +ellint_e_data.ipp:// Copyright (c) 2006 John Maddock +ellint_f_data.ipp:// Copyright (c) 2006 John Maddock +ellint_k_data.ipp:// (C) Copyright John Maddock 2006-7. +ellint_pi2_data.ipp:// Copyright (c) 2006 John Maddock +ellint_pi3_data.ipp:// Copyright (c) 2006 John Maddock +ellint_pi3_large_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rc_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rd_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rf_data.ipp:// Copyright (c) 2006 John Maddock +ellint_rj_data.ipp:// Copyright (c) 2006 John Maddock +erf_data.ipp:// (C) Copyright John Maddock 2006-7. +erf_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +erf_large_data.ipp:// (C) Copyright John Maddock 2006-7. +erf_small_data.ipp:// (C) Copyright John Maddock 2006. +erfc_inv_big_data.ipp:// (C) Copyright John Maddock 2006-7. +erfc_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +expint_1_data.ipp:// Copyright John Maddock 2008. +expint_data.ipp:// Copyright John Maddock 2008. +expint_small_data.ipp:// Copyright John Maddock 2008. +expinti_data.ipp:// Copyright John Maddock 2008. +expinti_data_double.ipp:// Copyright John Maddock 2008. +expinti_data_long.ipp:// Copyright John Maddock 2008. +functor.hpp:// (C) Copyright John Maddock 2007. +gamma_inv_big_data.ipp:// (C) Copyright John Maddock 2006-7. +gamma_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +gamma_inv_small_data.ipp:// (C) Copyright John Maddock 2006-7. +handle_test_result.hpp:// (C) Copyright John Maddock 2006-7. +hermite.ipp:// (C) Copyright John Maddock 2006-7. +hypergeometric_dist_data2.ipp:// Copyright John Maddock 2008 +hypergeometric_test_data.ipp:// Copyright Gautam Sewani 2008 +hypot_test.cpp:// (C) Copyright John Maddock 2005. +ibeta_data.ipp:// (C) Copyright John Maddock 2006. +ibeta_int_data.ipp:// (C) Copyright John Maddock 2006-7. +ibeta_inv_data.ipp:// (C) Copyright John Maddock 2006-7. +ibeta_inva_data.ipp:// (C) Copyright John Maddock 2006-7. +ibeta_large_data.ipp:// (C) Copyright John Maddock 2006. +ibeta_small_data.ipp:// (C) Copyright John Maddock 2006. +igamma_big_data.ipp:// (C) Copyright John Maddock 2006. 
+igamma_int_data.ipp:// (C) Copyright John Maddock 2006-7. +igamma_inva_data.ipp:// (C) Copyright John Maddock 2006-7. +igamma_med_data.ipp:// (C) Copyright John Maddock 2006. +igamma_small_data.ipp:// (C) Copyright John Maddock 2006. +jacobi_elliptic.ipp:// Copyright John Maddock 2012. +jacobi_elliptic_small.ipp:// Copyright John Maddock 2012. +jacobi_large_phi.ipp:// Copyright John Maddock 2012. +jacobi_near_1.ipp:// Copyright John Maddock 2012. +laguerre2.ipp:// (C) Copyright John Maddock 2006-7. +laguerre3.ipp:// (C) Copyright John Maddock 2006-7. +legendre_p.ipp:// (C) Copyright John Maddock 2006-7. +legendre_p_large.ipp:// (C) Copyright John Maddock 2006-7. +log1p_expm1_data.ipp:// (C) Copyright John Maddock 2006-7. +log1p_expm1_test.cpp:// Copyright John Maddock 2005. +log1p_expm1_test.cpp:// Copyright Paul A. Bristow 2010 +log1p_expm1_test.hpp:// Copyright John Maddock 2005. +log1p_expm1_test.hpp:// Copyright Paul A. Bristow 2010 +mpfr_concept_check.cpp:// Copyright John Maddock 2007-8. +mpreal_concept_check.cpp:// Copyright John Maddock 2007-8. +multiprc_concept_check_1.cpp:// Copyright John Maddock 2013. +multiprc_concept_check_2.cpp:// Copyright John Maddock 2013. +multiprc_concept_check_3.cpp:// Copyright John Maddock 2013. +multiprc_concept_check_4.cpp:// Copyright John Maddock 2013. +ncbeta.ipp:// Copyright John Maddock 2008. +ncbeta_big.ipp:// Copyright John Maddock 2008. +nccs.ipp:// Copyright John Maddock 2008. +nccs_big.ipp:// Copyright John Maddock 2008. +nct.ipp:// Copyright John Maddock 2008. +nct_asym.ipp:// Copyright John Maddock 2012. +nct_small_delta.ipp:// Copyright John Maddock 2012. +negative_binomial_quantile.ipp:// (C) Copyright John Maddock 2006-7. +ntl_concept_check.cpp:// Copyright John Maddock 2007-8. +ntl_concept_check.cpp:// Copyright Paul A. Bristow 2009, 2011 +owens_t.ipp:// Copyright John Maddock 2012. +owens_t_T7.hpp:// Copyright (C) Benjamin Sobotta 2012 +owens_t_large_data.ipp:// Copyright John Maddock 2012. +pch.hpp:// Copyright John Maddock 2008. +pch_light.hpp:// Copyright John Maddock 2008. +poisson_quantile.ipp:// (C) Copyright John Maddock 2006-7. +pow_test.cpp:// (C) Copyright Bruno Lalande 2008. +powm1_sqrtp1m1_test.cpp:// (C) Copyright John Maddock 2006. +powm1_sqrtp1m1_test.hpp:// Copyright John Maddock 2006. +s_.ipp:// Copyright (c) 2006 Johan Rade +s_.ipp:// Copyright (c) 2012 Paul A. Bristow +sinc_test.hpp:// (C) Copyright Hubert Holin 2003. +sinhc_test.hpp:// (C) Copyright Hubert Holin 2003. +special_functions_test.cpp:// (C) Copyright Hubert Holin 2003. +special_functions_test.cpp: BOOST_TEST_MESSAGE("(C) Copyright Hubert Holin 2003-2005."); +sph_bessel_data.ipp:// Copyright (c) 2007 John Maddock +sph_neumann_data.ipp:// Copyright (c) 2007 John Maddock +spherical_harmonic.ipp:// (C) Copyright John Maddock 2006-7. +std_real_concept_check.cpp:// Copyright John Maddock 2006. +table_type.hpp:// Copyright John Maddock 2012. +test_airy.cpp:// Copyright John Maddock 2012 +test_archive.cpp:// Copyright (c) 2006 Johan Rade +test_archive.cpp:// Copyright (c) 2011 Paul A. Bristow - filename changes for boost-trunk. +test_basic_nonfinite.cpp:// Copyright (c) 2006 Johan Rade +test_basic_nonfinite.cpp:// Copyright (c) 2011 Paul A. Bristow comments +test_basic_nonfinite.cpp:// Copyright (c) 2011 John Maddock +test_bernoulli.cpp:// Copyright John Maddock 2006. +test_bernoulli.cpp:// Copyright Paul A. Bristow 2007, 2012. +test_bessel_airy_zeros.cpp:// Copyright John Maddock 2013 +test_bessel_airy_zeros.cpp:// Copyright Christopher Kormanyos 2013. 
+test_bessel_airy_zeros.cpp:// Copyright Paul A. Bristow 2013. +test_bessel_hooks.hpp:// (C) Copyright John Maddock 2007. +test_bessel_i.cpp:// (C) Copyright John Maddock 2007. +test_bessel_i.hpp:// (C) Copyright John Maddock 2007. +test_bessel_j.cpp:// (C) Copyright John Maddock 2007. +test_bessel_j.hpp:// (C) Copyright John Maddock 2007. +test_bessel_k.cpp:// Copyright John Maddock 2006, 2007 +test_bessel_k.cpp:// Copyright Paul A. Bristow 2007 +test_bessel_k.hpp:// (C) Copyright John Maddock 2007. +test_bessel_y.cpp:// (C) Copyright John Maddock 2007. +test_bessel_y.hpp:// (C) Copyright John Maddock 2007. +test_beta.cpp:// Copyright John Maddock 2006. +test_beta.cpp:// Copyright Paul A. Bristow 2007, 2009 +test_beta.hpp:// Copyright John Maddock 2006. +test_beta.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_beta_dist.cpp:// Copyright John Maddock 2006. +test_beta_dist.cpp:// Copyright Paul A. Bristow 2007, 2009, 2010, 2012. +test_beta_hooks.hpp:// (C) Copyright John Maddock 2006. +test_binomial.cpp:// Copyright John Maddock 2006. +test_binomial.cpp:// Copyright Paul A. Bristow 2007. +test_binomial_coeff.cpp:// (C) Copyright John Maddock 2006. +test_binomial_coeff.hpp:// Copyright John Maddock 2006. +test_binomial_coeff.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_carlson.cpp:// Copyright 2006 John Maddock +test_carlson.cpp:// Copyright Paul A. Bristow 2007. +test_carlson.hpp:// Copyright John Maddock 2006. +test_carlson.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_cauchy.cpp:// Copyright John Maddock 2006, 2007. +test_cauchy.cpp:// Copyright Paul A. Bristow 2007 +test_cbrt.cpp:// Copyright John Maddock 2006. +test_cbrt.cpp:// Copyright Paul A. Bristow 2010 +test_cbrt.hpp:// Copyright John Maddock 2006. +test_cbrt.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_chi_squared.cpp:// Copyright Paul A. Bristow 2006. +test_chi_squared.cpp:// Copyright John Maddock 2007. +test_classify.cpp:// Copyright John Maddock 2006. +test_classify.cpp:// Copyright Paul A. Bristow 2007 +test_common_factor_gmpxx.cpp:// (C) Copyright John Maddock 2010. +test_constant_generate.cpp:// Copyright John Maddock 2010. +test_constants.cpp:// Copyright Paul Bristow 2007, 2011. +test_constants.cpp:// Copyright John Maddock 2006, 2011. +test_digamma.cpp:// (C) Copyright John Maddock 2006. +test_digamma.hpp:// Copyright John Maddock 2006. +test_digamma.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_dist_overloads.cpp:// Copyright John Maddock 2006. +test_dist_overloads.cpp:// Copyright Paul A. Bristow 2007. +test_ellint_1.cpp:// Copyright Xiaogang Zhang 2006 +test_ellint_1.cpp:// Copyright John Maddock 2006, 2007 +test_ellint_1.cpp:// Copyright Paul A. Bristow 2007 +test_ellint_1.hpp:// Copyright John Maddock 2006. +test_ellint_1.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ellint_2.cpp:// Copyright Xiaogang Zhang 2006 +test_ellint_2.cpp:// Copyright John Maddock 2006, 2007 +test_ellint_2.cpp:// Copyright Paul A. Bristow 2007 +test_ellint_2.hpp:// Copyright John Maddock 2006. +test_ellint_2.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ellint_3.cpp:// Copyright Xiaogang Zhang 2006 +test_ellint_3.cpp:// Copyright John Maddock 2006, 2007 +test_ellint_3.cpp:// Copyright Paul A. Bristow 2007 +test_ellint_3.hpp:// Copyright John Maddock 2006. +test_ellint_3.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_erf.cpp:// Copyright John Maddock 2006. +test_erf.cpp:// Copyright Paul A. Bristow 2007 +test_erf.hpp:// Copyright John Maddock 2006. +test_erf.hpp:// Copyright Paul A. 
Bristow 2007, 2009 +test_erf_hooks.hpp:// (C) Copyright John Maddock 2006. +test_error_handling.cpp:// Copyright Paul A. Bristow 2006-7. +test_error_handling.cpp:// Copyright John Maddock 2006-7. +test_expint.cpp:// (C) Copyright John Maddock 2007. +test_expint.hpp:// Copyright John Maddock 2006. +test_expint.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_expint_hooks.hpp:// (C) Copyright John Maddock 2006. +test_exponential_dist.cpp:// Copyright John Maddock 2006. +test_exponential_dist.cpp:// Copyright Paul A. Bristow 2007. +test_extreme_value.cpp:// Copyright John Maddock 2006. +test_factorials.cpp:// Copyright John Maddock 2006. +test_find_location.cpp:// Copyright John Maddock 2007. +test_find_location.cpp:// Copyright Paul A. Bristow 2007. +test_find_scale.cpp:// Copyright John Maddock 2007. +test_find_scale.cpp:// Copyright Paul A. Bristow 2007. +test_fisher_f.cpp:// Copyright Paul A. Bristow 2006. +test_fisher_f.cpp:// Copyright John Maddock 2007. +test_fisher_f.cpp: // Distcalc version 1.2 Copyright 2002 H Lohninger, TU Wien +test_gamma.cpp:// (C) Copyright John Maddock 2006. +test_gamma.hpp:// Copyright John Maddock 2006. +test_gamma.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_gamma_data.ipp:// (C) Copyright John Maddock 2006. +test_gamma_dist.cpp:// Copyright John Maddock 2006. +test_gamma_dist.cpp:// Copyright Paul A. Bristow 2007, 2010. +test_gamma_hooks.hpp:// (C) Copyright John Maddock 2006. +test_geometric.cpp:// Copyright Paul A. Bristow 2010. +test_geometric.cpp:// Copyright John Maddock 2010. +test_hankel.cpp:// Copyright John Maddock 2012 +test_hermite.cpp:// Copyright John Maddock 2006, 2007 +test_hermite.cpp:// Copyright Paul A. Bristow 2007 +test_hermite.hpp:// Copyright John Maddock 2006. +test_hermite.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_hypergeometric_dist.cpp:// Copyright John Maddock 2008 +test_hypergeometric_dist.cpp:// Copyright Paul A. Bristow +test_hypergeometric_dist.cpp:// Copyright Gautam Sewani +test_ibeta.cpp:// (C) Copyright John Maddock 2006. +test_ibeta.hpp:// Copyright John Maddock 2006. +test_ibeta.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ibeta_inv.cpp:// (C) Copyright John Maddock 2006. +test_ibeta_inv.hpp:// Copyright John Maddock 2006. +test_ibeta_inv.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_ibeta_inv_ab.cpp:// (C) Copyright John Maddock 2006. +test_ibeta_inv_ab.hpp:// Copyright John Maddock 2006. +test_ibeta_inv_ab.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_igamma.cpp:// (C) Copyright John Maddock 2006. +test_igamma.hpp:// Copyright John Maddock 2006. +test_igamma.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_igamma_inv.cpp:// (C) Copyright John Maddock 2006. +test_igamma_inv.hpp:// Copyright John Maddock 2006. +test_igamma_inv.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_igamma_inva.cpp:// (C) Copyright John Maddock 2006. +test_igamma_inva.hpp:// Copyright John Maddock 2006. +test_igamma_inva.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_instances/double_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/Jamfile.v2:# Copyright John Maddock 2012 +test_instances/real_concept_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_6.cpp:// Copyright John Maddock 2011.
+test_instances/real_concept_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_7.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_6.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_6.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_7.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_7.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_6.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/pch.hpp:// Copyright John Maddock 2012. +test_instances/ldouble_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/long_double_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_7.cpp:// Copyright John Maddock 2011. +test_instances/test_instances.hpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_10.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_9.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_4.cpp:// Copyright John Maddock 2011. +test_instances/real_concept_test_instances_3.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_2.cpp:// Copyright John Maddock 2011. +test_instances/float_test_instances_1.cpp:// Copyright John Maddock 2011. +test_instances/double_test_instances_8.cpp:// Copyright John Maddock 2011. +test_instances/ldouble_test_instances_5.cpp:// Copyright John Maddock 2011. +test_instantiate1.cpp:// Copyright John Maddock 2006. +test_instantiate2.cpp:// Copyright John Maddock 2006. +test_inv_hyp.cpp:// (C) Copyright John Maddock 2006. +test_inverse_chi_squared.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_chi_squared.cpp:// Copyright John Maddock 2010. +test_inverse_chi_squared_distribution.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_chi_squared_distribution.cpp:// Copyright John Maddock 2010. +test_inverse_gamma_distribution.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_gamma_distribution.cpp:// Copyright John Maddock 2010. +test_inverse_gaussian.cpp:// Copyright Paul A. Bristow 2010. +test_inverse_gaussian.cpp:// Copyright John Maddock 2010. 
+test_jacobi.cpp:// Copyright John Maddock 2012 +test_jacobi.hpp:// Copyright John Maddock 2006. +test_jacobi.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_laguerre.cpp:// (C) Copyright John Maddock 2006. +test_laguerre.hpp:// Copyright John Maddock 2006. +test_laguerre.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_laplace.cpp:// Copyright Thijs van den Berg, 2008. +test_laplace.cpp:// Copyright John Maddock 2008. +test_laplace.cpp:// Copyright Paul A. Bristow 2008, 2009. +test_ldouble_simple.cpp:// Copyright John Maddock 2013. +test_legacy_nonfinite.cpp:// Copyright (c) 2006 Johan Rade +test_legacy_nonfinite.cpp:// Copyright (c) 2011 Paul A. Bristow comments +test_legendre.cpp:// (C) Copyright John Maddock 2006. +test_legendre.hpp:// Copyright John Maddock 2006. +test_legendre.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_legendre_hooks.hpp:// (C) Copyright John Maddock 2006. +test_lexical_cast.cpp:// Copyright (c) 2006 Johan Rade +test_lexical_cast.cpp:// Copyright (c) 2011 Paul A. Bristow incorporated Boost.Math +test_logistic_dist.cpp:// Copyright 2008 Gautam Sewani +test_lognormal.cpp:// Copyright John Maddock 2006. +test_lognormal.cpp:// Copyright Paul A. Bristow 2007 +test_long_double_support.cpp:// Copyright John Maddock 2009 +test_math_fwd.cpp:// Copyright John Maddock 2010. +test_math_fwd.cpp:// Copyright Paul A. Bristow 2010. +test_minima.cpp:// Copyright John Maddock 2006. +test_minima.cpp:// Copyright Paul A. Bristow 2007. +test_nc_beta.cpp:// Copyright John Maddock 2008. +test_nc_chi_squared.cpp:// Copyright John Maddock 2008. +test_nc_f.cpp:// Copyright John Maddock 2008. +test_nc_t.cpp:// Copyright John Maddock 2008, 2012. +test_nc_t.cpp:// Copyright Paul A. Bristow 2012. +test_ncbeta_hooks.hpp:// (C) Copyright John Maddock 2008. +test_nccs_hooks.hpp:// (C) Copyright John Maddock 2008. +test_negative_binomial.cpp:// Copyright Paul A. Bristow 2007. +test_negative_binomial.cpp:// Copyright John Maddock 2006. +test_next.cpp:// (C) Copyright John Maddock 2008. +test_nonfinite_io.cpp:// Copyright 2011 Paul A. Bristow +test_nonfinite_trap.cpp:// Copyright (c) 2006 Johan Rade +test_nonfinite_trap.cpp:// Copyright (c) 2011 Paul A. Bristow To incorporate into Boost.Math +test_normal.cpp:// Copyright Paul A. Bristow 2010. +test_normal.cpp:// Copyright John Maddock 2007. +test_out_of_range.hpp:// Copyright John Maddock 2012. +test_owens_t.cpp:// Copyright Paul A. Bristow 2012. +test_owens_t.cpp:// Copyright Benjamin Sobotta 2012. +test_pareto.cpp:// Copyright Paul A. Bristow 2007, 2009. +test_pareto.cpp:// Copyright John Maddock 2006. +test_poisson.cpp:// Copyright Paul A. Bristow 2007. +test_poisson.cpp:// Copyright John Maddock 2006. +test_policy.cpp:// Copyright John Maddock 2007. +test_policy_2.cpp:// Copyright John Maddock 2007. +test_policy_3.cpp:// Copyright John Maddock 2007. +test_policy_4.cpp:// Copyright John Maddock 2007. +test_policy_5.cpp:// Copyright John Maddock 2007. +test_policy_6.cpp:// Copyright John Maddock 2007. +test_policy_7.cpp:// Copyright John Maddock 2007. +test_policy_8.cpp:// Copyright John Maddock 2007. +test_policy_sf.cpp:// (C) Copyright John Maddock 2007. +test_print_info_on_type.cpp:// Copyright John Maddock 2010. +test_rational_instances/test_rational_ldouble2.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_float2.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double2.cpp:// (C) Copyright John Maddock 2006-7. 
+test_rational_instances/test_rational_double3.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_ldouble1.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_float4.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double5.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double4.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_real_concept1.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_real_concept3.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational.hpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_ldouble3.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_float3.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_real_concept5.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_ldouble5.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_ldouble4.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_double1.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_real_concept4.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_real_concept2.cpp:// (C) Copyright John Maddock 2006-7. +test_rational_instances/test_rational_float1.cpp:// (C) Copyright John Maddock 2006-7. +test_rationals.cpp:// (C) Copyright John Maddock 2006. +test_rayleigh.cpp:// Copyright John Maddock 2006. +test_real_concept.cpp:// Copyright John Maddock 2010 +test_real_concept_neg_bin.cpp:// Copyright Paul A. Bristow 2010. +test_real_concept_neg_bin.cpp:// Copyright John Maddock 2010. +test_remez.cpp:// Copyright John Maddock 2006 +test_remez.cpp:// Copyright Paul A. Bristow 2007 +test_roots.cpp:// (C) Copyright John Maddock 2006. +test_round.cpp:// (C) Copyright John Maddock 2007. +test_sign.cpp:#define BOOST_TEST_MAIN// Copyright John Maddock 2008 +test_sign.cpp:// (C) Copyright Paul A. Bristow 2011 (added tests for changesign) +test_signed_zero.cpp:// Copyright 2006 Johan Rade +test_signed_zero.cpp:// Copyright 2011 Paul A. Bristow To incorporate into Boost.Math +test_signed_zero.cpp:// Copyright 2012 Paul A. Bristow with new tests. +test_skew_normal.cpp:// Copyright Paul A. Bristow 2012. +test_skew_normal.cpp:// Copyright John Maddock 2012. +test_skew_normal.cpp:// Copyright Benjamin Sobotta 2012 +test_spherical_harmonic.cpp:// (C) Copyright John Maddock 2006. +test_students_t.cpp:// Copyright Paul A. Bristow 2006. +test_students_t.cpp:// Copyright John Maddock 2006. +test_tgamma_ratio.cpp:// (C) Copyright John Maddock 2006. +test_tgamma_ratio.hpp:// Copyright John Maddock 2006. +test_tgamma_ratio.hpp:// Copyright Paul A. Bristow 2007, 2009 +test_toms748_solve.cpp:// (C) Copyright John Maddock 2006. +test_tr1.c:/* (C) Copyright John Maddock 2008. +test_tr1.cpp:// (C) Copyright John Maddock 2008. +test_triangular.cpp:// Copyright Paul Bristow 2006, 2007. +test_triangular.cpp:// Copyright John Maddock 2006, 2007. +test_uniform.cpp:// Copyright Paul Bristow 2007. +test_uniform.cpp:// Copyright John Maddock 2006. +test_weibull.cpp:// Copyright John Maddock 2006, 2012. +test_weibull.cpp:// Copyright Paul A. Bristow 2007, 2012. +test_zeta.cpp:// (C) Copyright John Maddock 2006. +test_zeta.hpp:// Copyright John Maddock 2006. +test_zeta.hpp:// Copyright Paul A. 
Bristow 2007, 2009 +test_zeta_hooks.hpp:// (C) Copyright John Maddock 2006. +tgamma_delta_ratio_data.ipp:// (C) Copyright John Maddock 2006-7. +tgamma_delta_ratio_int.ipp:// (C) Copyright John Maddock 2006-7. +tgamma_delta_ratio_int2.ipp:// (C) Copyright John Maddock 2006-7. +tgamma_ratio_data.ipp:// (C) Copyright John Maddock 2006-7. +zeta_1_below_data.ipp:// Copyright John Maddock 2008. +zeta_1_up_data.ipp:// Copyright John Maddock 2008. +zeta_data.ipp:// Copyright John Maddock 2008. +zeta_neg_data.ipp:// Copyright John Maddock 2008. +ztest_max_digits10.cpp: // Copyright 2010 Paul A. Bristow +zztest_max_digits10.cpp:// Copyright 2010 Paul A. Bristow diff --git a/voice_bridge/scipy/special/tests/data/boost.npz b/voice_bridge/scipy/special/tests/data/boost.npz new file mode 100644 index 0000000000000000000000000000000000000000..6a488a0b7a02a43f1c88b658ea86b4de836fb941 --- /dev/null +++ b/voice_bridge/scipy/special/tests/data/boost.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa0ac102c04291ee869877042b2a0257750850fa654b7cf57f85e371ff5969c9 +size 1085678 diff --git a/voice_bridge/scipy/special/tests/data/gsl.npz b/voice_bridge/scipy/special/tests/data/gsl.npz new file mode 100644 index 0000000000000000000000000000000000000000..d7cabb19e7771edbf1e4f0ae93cdf8b724554c37 --- /dev/null +++ b/voice_bridge/scipy/special/tests/data/gsl.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f281ee896d4e53cad8cdf9f68efd4447ff928a42458a5acb903bad340839f8e1 +size 51433 diff --git a/voice_bridge/scipy/special/tests/data/local.npz b/voice_bridge/scipy/special/tests/data/local.npz new file mode 100644 index 0000000000000000000000000000000000000000..2e6596a605749ec6f2f9ad50388d42f47c34309e --- /dev/null +++ b/voice_bridge/scipy/special/tests/data/local.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:417a1871385e3bf89d3853930a8313d70bc2876d5b8fc49b869479a8aea9e2d2 +size 203438 diff --git a/voice_bridge/scipy/stats/_boost/beta_ufunc.pyd b/voice_bridge/scipy/stats/_boost/beta_ufunc.pyd new file mode 100644 index 0000000000000000000000000000000000000000..71c797813b5986970f22325bb1b5e3de662371c4 Binary files /dev/null and b/voice_bridge/scipy/stats/_boost/beta_ufunc.pyd differ diff --git a/voice_bridge/scipy/stats/_boost/binom_ufunc.pyd b/voice_bridge/scipy/stats/_boost/binom_ufunc.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e40f404d379ba440c6fb6b6fd131a21a9b3307d6 Binary files /dev/null and b/voice_bridge/scipy/stats/_boost/binom_ufunc.pyd differ diff --git a/voice_bridge/scipy/stats/_boost/nbinom_ufunc.pyd b/voice_bridge/scipy/stats/_boost/nbinom_ufunc.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d165fa15d5d5e3480ee26495ecdcb85098e8db69 Binary files /dev/null and b/voice_bridge/scipy/stats/_boost/nbinom_ufunc.pyd differ diff --git a/voice_bridge/scipy/stats/_qmc_cy.pyd b/voice_bridge/scipy/stats/_qmc_cy.pyd new file mode 100644 index 0000000000000000000000000000000000000000..8a78b60056b445379cab2402e7aa2098ebcc56ce Binary files /dev/null and b/voice_bridge/scipy/stats/_qmc_cy.pyd differ diff --git a/voice_bridge/scipy/stats/_sobol.pyd b/voice_bridge/scipy/stats/_sobol.pyd new file mode 100644 index 0000000000000000000000000000000000000000..1880945f4ac664727b820ef8d4a1b9c530f75119 Binary files /dev/null and b/voice_bridge/scipy/stats/_sobol.pyd differ diff --git a/voice_bridge/scipy/stats/_sobol_direction_numbers.npz b/voice_bridge/scipy/stats/_sobol_direction_numbers.npz new 
file mode 100644 index 0000000000000000000000000000000000000000..44f1f1e9ebd1eb188289ca9adb8027855c1a23b6 --- /dev/null +++ b/voice_bridge/scipy/stats/_sobol_direction_numbers.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4859931147d42ce465b8605cb277f957d98b839d03194fdf06579357906d193b +size 589334 diff --git a/voice_bridge/scipy/stats/_stats.pyd b/voice_bridge/scipy/stats/_stats.pyd new file mode 100644 index 0000000000000000000000000000000000000000..e438541c204a025390781061dd77efc6ddceb8c8 Binary files /dev/null and b/voice_bridge/scipy/stats/_stats.pyd differ diff --git a/voice_bridge/scipy/stats/biasedurn.pxd b/voice_bridge/scipy/stats/biasedurn.pxd new file mode 100644 index 0000000000000000000000000000000000000000..92785f08dbec30a4db286fcb85b42d7221e2228e --- /dev/null +++ b/voice_bridge/scipy/stats/biasedurn.pxd @@ -0,0 +1,27 @@ +# Declare the class with cdef +cdef extern from "biasedurn/stocc.h" nogil: + cdef cppclass CFishersNCHypergeometric: + CFishersNCHypergeometric(int, int, int, double, double) except + + int mode() + double mean() + double variance() + double probability(int x) + double moments(double * mean, double * var) + + cdef cppclass CWalleniusNCHypergeometric: + CWalleniusNCHypergeometric() except + + CWalleniusNCHypergeometric(int, int, int, double, double) except + + int mode() + double mean() + double variance() + double probability(int x) + double moments(double * mean, double * var) + + cdef cppclass StochasticLib3: + StochasticLib3(int seed) except + + double Random() except + + void SetAccuracy(double accur) + int FishersNCHyp (int n, int m, int N, double odds) except + + int WalleniusNCHyp (int n, int m, int N, double odds) except + + double(*next_double)() + double(*next_normal)(const double m, const double s) diff --git a/voice_bridge/scipy/stats/biasedurn.pyd b/voice_bridge/scipy/stats/biasedurn.pyd new file mode 100644 index 0000000000000000000000000000000000000000..5fc8c70737d3818573589bb1f95e11d0e21a21aa Binary files /dev/null and b/voice_bridge/scipy/stats/biasedurn.pyd differ diff --git a/voice_bridge/scipy/stats/mvn.pyd b/voice_bridge/scipy/stats/mvn.pyd new file mode 100644 index 0000000000000000000000000000000000000000..486085b13e9b1b8d3fe46ea89ad83083fb4efb3d Binary files /dev/null and b/voice_bridge/scipy/stats/mvn.pyd differ diff --git a/voice_bridge/scipy/stats/statlib.pyd b/voice_bridge/scipy/stats/statlib.pyd new file mode 100644 index 0000000000000000000000000000000000000000..67bcb54cf897e4a790431c98fd5e7de1717b0fad Binary files /dev/null and b/voice_bridge/scipy/stats/statlib.pyd differ diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/AtmWtAg.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/AtmWtAg.dat new file mode 100644 index 0000000000000000000000000000000000000000..30537565fe8c47f74da0e63a39f4b46600f7768f --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/AtmWtAg.dat @@ -0,0 +1,108 @@ +NIST/ITL StRD +Dataset Name: AtmWtAg (AtmWtAg.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 108) + + +Procedure: Analysis of Variance + + +Reference: Powell, L.J., Murphy, T.J. and Gramlich, J.W. (1982). + "The Absolute Isotopic Abundance & Atomic Weight + of a Reference Sample of Silver". + NBS Journal of Research, 87, pp. 9-19. 
+ + +Data: 1 Factor + 2 Treatments + 24 Replicates/Cell + 48 Observations + 7 Constant Leading Digits + Average Level of Difficulty + Observed Data + + +Model: 3 Parameters (mu, tau_1, tau_2) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + + +Between Instrument 1 3.63834187500000E-09 3.63834187500000E-09 1.59467335677930E+01 +Within Instrument 46 1.04951729166667E-08 2.28155932971014E-10 + + Certified R-Squared 2.57426544538321E-01 + + Certified Residual + Standard Deviation 1.51048314446410E-05 + + + + + + + + + + + +Data: Instrument AgWt + 1 107.8681568 + 1 107.8681465 + 1 107.8681572 + 1 107.8681785 + 1 107.8681446 + 1 107.8681903 + 1 107.8681526 + 1 107.8681494 + 1 107.8681616 + 1 107.8681587 + 1 107.8681519 + 1 107.8681486 + 1 107.8681419 + 1 107.8681569 + 1 107.8681508 + 1 107.8681672 + 1 107.8681385 + 1 107.8681518 + 1 107.8681662 + 1 107.8681424 + 1 107.8681360 + 1 107.8681333 + 1 107.8681610 + 1 107.8681477 + 2 107.8681079 + 2 107.8681344 + 2 107.8681513 + 2 107.8681197 + 2 107.8681604 + 2 107.8681385 + 2 107.8681642 + 2 107.8681365 + 2 107.8681151 + 2 107.8681082 + 2 107.8681517 + 2 107.8681448 + 2 107.8681198 + 2 107.8681482 + 2 107.8681334 + 2 107.8681609 + 2 107.8681101 + 2 107.8681512 + 2 107.8681469 + 2 107.8681360 + 2 107.8681254 + 2 107.8681261 + 2 107.8681450 + 2 107.8681368 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SiRstv.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SiRstv.dat new file mode 100644 index 0000000000000000000000000000000000000000..18ea8971fd7a4d67800dafe98ac5ea5acef53025 --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SiRstv.dat @@ -0,0 +1,85 @@ +NIST/ITL StRD +Dataset Name: SiRstv (SiRstv.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 85) + + +Procedure: Analysis of Variance + + +Reference: Ehrstein, James and Croarkin, M. Carroll. + Unpublished NIST dataset. + + +Data: 1 Factor + 5 Treatments + 5 Replicates/Cell + 25 Observations + 3 Constant Leading Digits + Lower Level of Difficulty + Observed Data + + +Model: 6 Parameters (mu,tau_1, ... , tau_5) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Instrument 4 5.11462616000000E-02 1.27865654000000E-02 1.18046237440255E+00 +Within Instrument 20 2.16636560000000E-01 1.08318280000000E-02 + + Certified R-Squared 1.90999039051129E-01 + + Certified Residual + Standard Deviation 1.04076068334656E-01 + + + + + + + + + + + + +Data: Instrument Resistance + 1 196.3052 + 1 196.1240 + 1 196.1890 + 1 196.2569 + 1 196.3403 + 2 196.3042 + 2 196.3825 + 2 196.1669 + 2 196.3257 + 2 196.0422 + 3 196.1303 + 3 196.2005 + 3 196.2889 + 3 196.0343 + 3 196.1811 + 4 196.2795 + 4 196.1748 + 4 196.1494 + 4 196.1485 + 4 195.9885 + 5 196.2119 + 5 196.1051 + 5 196.1850 + 5 196.0052 + 5 196.2090 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs01.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs01.dat new file mode 100644 index 0000000000000000000000000000000000000000..945b24bf35422152a5faba73ed054ab78fda1bdf --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs01.dat @@ -0,0 +1,249 @@ +NIST/ITL StRD +Dataset Name: SmLs01 (SmLs01.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 249) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). 
+ "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 21 Replicates/Cell + 189 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01 +Within Treatment 180 1.80000000000000E+00 1.00000000000000E-02 + + Certified R-Squared 4.82758620689655E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 2 1.3 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 3 1.5 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 4 1.3 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 5 1.5 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 7 1.5 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 9 1.5 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs02.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs02.dat new file mode 100644 index 0000000000000000000000000000000000000000..ee76633a660a48225064bbb86a25f6a2f36c6d9a --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs02.dat @@ -0,0 +1,1869 @@ +NIST/ITL StRD +Dataset Name: SmLs02 (SmLs02.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 1869) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 201 Replicates/Cell + 1809 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... 
, tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.60800000000000E+01 2.01000000000000E+00 2.01000000000000E+02 +Within Treatment 1800 1.80000000000000E+01 1.00000000000000E-02 + + Certified R-Squared 4.71830985915493E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 2 1.3 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 
2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 3 1.5 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 4 1.3 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 4 1.2 + 4 1.4 + 5 1.5 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 
1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 5 1.4 + 5 1.6 + 6 1.3 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 6 1.2 + 6 1.4 + 7 1.5 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 
1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 9 1.5 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 
1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs03.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs03.dat new file mode 100644 index 0000000000000000000000000000000000000000..55dfa2313ffb152709c58b47c0058567b710d903 --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs03.dat @@ -0,0 +1,18069 @@ +NIST/ITL StRD +Dataset Name: SmLs03 (SmLs03.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 18069) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 2001 Replicates/Cell + 18009 Observations + 1 Constant Leading Digit + Lower Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.60080000000000E+02 2.00100000000000E+01 2.00100000000000E+03 +Within Treatment 18000 1.80000000000000E+02 1.00000000000000E-02 + + Certified R-Squared 4.70712773465067E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1.4 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 
1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 
1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 
1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 
1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 
1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 1 1.3 + 1 1.5 + 2 1.3 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 
1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 
1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 
1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 
1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 2 1.2 + 2 1.4 + 3 1.5 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 
1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 1.6 + 3 1.4 + 3 
1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 7 1.4 + 7 1.6 + 8 1.3 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 
1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 8 1.2 + 8 1.4 + 9 1.5 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 
1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 
1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 
1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 
1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 
1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 + 9 1.4 + 9 1.6 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs04.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs04.dat new file mode 100644 index 0000000000000000000000000000000000000000..6a2a9fc935a56989b166de9b23f3df3bc4f64879 --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs04.dat @@ -0,0 +1,249 @@ +NIST/ITL StRD +Dataset Name: SmLs04 (SmLs04.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 249) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 21 Replicates/Cell + 189 Observations + 7 Constant Leading Digits + Average Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01 +Within Treatment 180 1.80000000000000E+00 1.00000000000000E-02 + + Certified R-Squared 4.82758620689655E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1000000.4 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 1 1000000.3 + 1 1000000.5 + 2 1000000.3 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 3 1000000.5 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 4 1000000.3 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 5 1000000.5 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 5 1000000.4 + 5 1000000.6 + 6 1000000.3 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 1000000.2 + 6 1000000.4 + 6 
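A minimal sketch of how the certified values above can be cross-checked, assuming the cells are regenerated from the alternating pattern just described rather than parsed from the .dat file (the cell helper and centers table are illustrative, not part of scipy or of this diff):

import numpy as np
from scipy import stats

def cell(center, pairs):
    # one leading observation at the cell mean, then `pairs` of (mean - 0.1, mean + 0.1)
    return np.array([center] + [center + d for _ in range(pairs) for d in (-0.1, 0.1)])

# treatment means read off the SmLs04 data section: 1000000.4, then .3/.5 alternating
centers = [1000000.4] + [1000000.3, 1000000.5] * 4
groups = [cell(c, 10) for c in centers]        # 9 treatments x 21 replicates each

f_stat, p_value = stats.f_oneway(*groups)
print(f_stat)                                  # ~2.1e+01, the certified F statistic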
diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs05.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs05.dat
new file mode 100644
index 0000000000000000000000000000000000000000..fe11c40b5f51aefc81d4d1501a74e627f2b2d992
--- /dev/null
+++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs05.dat
@@ -0,0 +1,1869 @@
+NIST/ITL StRD
+Dataset Name:  SmLs05 (SmLs05.dat)
+
+File Format:   ASCII
+               Certified Values (lines 41 to 47)
+               Data (lines 61 to 1869)
+
+Procedure:     Analysis of Variance
+
+Reference:     Simon, Stephen D. and Lesage, James P. (1989).
+               "Assessing the Accuracy of ANOVA Calculations in
+               Statistical Software".
+               Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:          1 Factor
+               9 Treatments
+               201 Replicates/Cell
+               1809 Observations
+               7 Constant Leading Digits
+               Average Level of Difficulty
+               Generated Data
+
+Model:         10 Parameters (mu, tau_1, ..., tau_9)
+               y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of             Sums of               Mean
+Variation         df  Squares               Squares               F Statistic
+
+Between Treatment   8 1.60800000000000E+01  2.01000000000000E+00  2.01000000000000E+02
+Within Treatment 1800 1.80000000000000E+01  1.00000000000000E-02
+
+   Certified R-Squared  4.71830985915493E-01
+
+   Certified Residual
+   Standard Deviation   1.00000000000000E-01
+
+Data:   Treatment   Response
[1809 data lines omitted — same "+ <treatment> <response>" layout and alternating pattern as SmLs04, with 201 replicates per treatment; this excerpt of the diff breaks off partway through treatment 7]
7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 8 1000000.3 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 
+ 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 9 1000000.5 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 
1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs06.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs06.dat new file mode 100644 index 0000000000000000000000000000000000000000..602e4fbdaa26bbb8d95ce78d1f48dbbfa883e7e9 --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs06.dat @@ -0,0 +1,18069 @@ +NIST/ITL StRD +Dataset Name: SmLs06 (SmLs06.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 18069) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 2001 Replicates/Cell + 18009 Observations + 7 Constant Leading Digits + Average Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... 
+
+
+Model:          10 Parameters (mu, tau_1, ..., tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+
+Certified Values:
+
+Source of                   Sums of               Mean
+Variation            df     Squares               Squares               F Statistic
+
+Between Treatment        8  1.60080000000000E+02  2.00100000000000E+01  2.00100000000000E+03
+Within Treatment     18000  1.80000000000000E+02  1.00000000000000E-02
+
+                            Certified R-Squared   4.70712773465067E-01
+
+                            Certified Residual
+                            Standard Deviation    1.00000000000000E-01
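As a worked check (not part of the NIST file): the certified values follow directly from the generation pattern visible in the data, where the nine cell means are 1000000.4 for treatment 1, 1000000.3 for the even treatments, 1000000.5 for odd treatments 3 through 9, and each cell lists its mean once followed by 2000 responses 0.1 below and above it.

\begin{align*}
\bar{y} &= 10^{6} + \tfrac{1}{9}\,(0.4 + 4 \cdot 0.3 + 4 \cdot 0.5) = 1000000.4,\\
SS_{\text{between}} &= 2001 \sum_{i=1}^{9} (\bar{y}_i - \bar{y})^2 = 2001 \cdot 8 \cdot (0.1)^2 = 160.08,\\
SS_{\text{within}} &= 9 \cdot 2000 \cdot (0.1)^2 = 180,\\
F &= \frac{SS_{\text{between}}/8}{SS_{\text{within}}/18000} = \frac{20.01}{0.01} = 2001,
\qquad R^2 = \frac{160.08}{340.08} \approx 0.470712773.
\end{align*}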
+
+
+Data:   Treatment   Response
+            1       1000000.4
+            1       1000000.3
+            1       1000000.5
+ [ ... 2001 rows for treatment 1, alternating 1000000.3 / 1000000.5 ... ]
+            2       1000000.3
+            2       1000000.2
+            2       1000000.4
+ [ ... 18009 data rows in all; the remaining rows follow the same alternating
+   pattern for treatments 2 through 9 and continue below ... ]
2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 
+ 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 2 1000000.2 + 2 1000000.4 + 3 1000000.5 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 
1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 
3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 
+ 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 
1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 
3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 
+ 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 
1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 
3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 3 1000000.4 + 3 1000000.6 + 4 1000000.3 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 
+ 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 
1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 
4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 
+ 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 
1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 4 1000000.2 + 4 1000000.4 + 
[elided data payload: the diffed file continues with long repeated runs of "+ n 1000000.2 + n 1000000.4" and "+ n 1000000.4 + n 1000000.6" weight pairs, with the leading index n stepping from 4 through 7; no other information is recoverable from this span]
+ 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 
1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 
7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 
+ 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 
1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 
7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 
+ 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 
1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 7 1000000.4 + 7 1000000.6 + 8 1000000.3 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 
8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 
+ 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 
1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 
8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 
+ 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 
1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 
8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 
+ 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 8 1000000.2 + 8 1000000.4 + 9 1000000.5 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 
1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 
9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 
+ 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 
1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 
9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 
+ 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 
1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 
9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 + 9 1000000.4 + 9 1000000.6 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs07.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs07.dat new file mode 100644 index 0000000000000000000000000000000000000000..deeac955e65ffaf55838568baa54951efaf2662b --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs07.dat @@ -0,0 +1,249 @@ +NIST/ITL StRD +Dataset Name: SmLs07 (SmLs07.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 249) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 21 Replicates/Cell + 189 Observations + 13 Constant Leading Digits + Higher Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... , tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.68000000000000E+00 2.10000000000000E-01 2.10000000000000E+01 +Within Treatment 180 1.80000000000000E+00 1.00000000000000E-02 + + Certified R-Squared 4.82758620689655E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1000000000000.4 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 2 1000000000000.3 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 3 1000000000000.5 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 4 1000000000000.3 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 5 1000000000000.5 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 6 1000000000000.3 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 7 1000000000000.5 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 7 1000000000000.4 + 7 1000000000000.6 + 8 1000000000000.3 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 9 1000000000000.5 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs08.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs08.dat new file mode 100644 index 0000000000000000000000000000000000000000..c5ee643fb8c6ef849ab8e34352bc60f15c715a45 --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs08.dat @@ -0,0 +1,1869 @@ +NIST/ITL StRD +Dataset Name: SmLs08 (SmLs08.dat) + + +File Format: ASCII + Certified Values (lines 41 to 47) + Data (lines 61 to 1869) + + +Procedure: Analysis of Variance + + +Reference: Simon, Stephen D. and Lesage, James P. (1989). + "Assessing the Accuracy of ANOVA Calculations in + Statistical Software". + Computational Statistics & Data Analysis, 8, pp. 325-332. + + +Data: 1 Factor + 9 Treatments + 201 Replicates/Cell + 1809 Observations + 13 Constant Leading Digits + Higher Level of Difficulty + Generated Data + + +Model: 10 Parameters (mu,tau_1, ... 
, tau_9) + y_{ij} = mu + tau_i + epsilon_{ij} + + + + + + +Certified Values: + +Source of Sums of Mean +Variation df Squares Squares F Statistic + +Between Treatment 8 1.60800000000000E+01 2.01000000000000E+00 2.01000000000000E+02 +Within Treatment 1800 1.80000000000000E+01 1.00000000000000E-02 + + Certified R-Squared 4.71830985915493E-01 + + Certified Residual + Standard Deviation 1.00000000000000E-01 + + + + + + + + + + + + +Data: Treatment Response + 1 1000000000000.4 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 
1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 1 1000000000000.3 + 1 1000000000000.5 + 2 1000000000000.3 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 3 1000000000000.5 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 
1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 4 1000000000000.3 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 
1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 4 1000000000000.2 + 4 1000000000000.4 + 5 1000000000000.5 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 6 1000000000000.3 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 
[... tail of the preceding nist_anova data file: treatments 6-9, each cell opening with one response at its mean and then alternating 0.1 below/above it (treatments 6 and 8: 1000000000000.2/1000000000000.4; treatments 7 and 9: 1000000000000.4/1000000000000.6) ...]
diff --git a/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs09.dat b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs09.dat
new file mode 100644
index 0000000000000000000000000000000000000000..887905e355a2a13801f1b004187631f2301f7eef
--- /dev/null
+++ b/voice_bridge/scipy/stats/tests/data/nist_anova/SmLs09.dat
@@ -0,0 +1,18069 @@
+NIST/ITL StRD
+Dataset Name:   SmLs09 (SmLs09.dat)
+
+File Format:    ASCII
+                Certified Values (lines 41 to 47)
+                Data             (lines 61 to 18069)
+
+Procedure:      Analysis of Variance
+
+Reference:      Simon, Stephen D. and Lesage, James P. (1989).
+                "Assessing the Accuracy of ANOVA Calculations in
+                Statistical Software".
+                Computational Statistics & Data Analysis, 8, pp. 325-332.
+
+Data:           1 Factor
+                9 Treatments
+                2001 Replicates/Cell
+                18009 Observations
+                13 Constant Leading Digits
+                Higher Level of Difficulty
+                Generated Data
+
+Model:          10 Parameters (mu, tau_1, ..., tau_9)
+                y_{ij} = mu + tau_i + epsilon_{ij}
+
+Certified Values:
+
+Source of                  Sums of               Mean
+Variation          df      Squares               Squares               F Statistic
+
+Between Treatment      8   1.60080000000000E+02  2.00100000000000E+01  2.00100000000000E+03
+Within Treatment   18000   1.80000000000000E+02  1.00000000000000E-02
+
+                   Certified R-Squared   4.70712773465067E-01
+
+                   Certified Residual
+                   Standard Deviation    1.00000000000000E-01
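The certified table is internally consistent under the standard one-way ANOVA identities (df_between = 9 - 1 = 8, df_within = 18009 - 9 = 18000):

```latex
\begin{aligned}
\mathrm{MS_{between}} &= \frac{\mathrm{SS_{between}}}{\mathrm{df_{between}}} = \frac{160.08}{8} = 20.01,\\
\mathrm{MS_{within}}  &= \frac{\mathrm{SS_{within}}}{\mathrm{df_{within}}}  = \frac{180}{18000} = 0.01,\\
F &= \frac{\mathrm{MS_{between}}}{\mathrm{MS_{within}}} = \frac{20.01}{0.01} = 2001,\\
R^2 &= \frac{\mathrm{SS_{between}}}{\mathrm{SS_{between}}+\mathrm{SS_{within}}} = \frac{160.08}{340.08} \approx 0.470712773465067,\\
\hat{\sigma} &= \sqrt{\mathrm{MS_{within}}} = 0.1.
\end{aligned}
```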
+Data:   Treatment   Response
+        1           1000000000000.4
+        1           1000000000000.3
+        1           1000000000000.5
[... 18,009 observation lines in total: each treatment's 2001 replicates open with one response at the cell mean, then alternate 0.1 below/above it (treatment 1: 1000000000000.3/1000000000000.5; treatment 2: 1000000000000.2/1000000000000.4); the listing continues beyond this excerpt ...]
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 
1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 
1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 2 1000000000000.2 + 2 1000000000000.4 + 3 1000000000000.5 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 
1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 
1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 
1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 
1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 
1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 
1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 1000000000000.6 + 3 1000000000000.4 + 3 
[elided: several thousand duplicated numeric data entries — alternating pairs of the form 1000000000000.6 + 3 / 1000000000000.4 + 3, then 1000000000000.4 + 4 / 1000000000000.2 + 4, then 1000000000000.6 + 5 / 1000000000000.4 + 5 — no other content in this span]
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 
1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 
1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 5 1000000000000.4 + 5 1000000000000.6 + 6 1000000000000.3 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 
1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 
1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 
1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 
1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 
1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 
1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 1000000000000.2 + 6 1000000000000.4 + 6 
[data payload omitted: hundreds of repeated entries alternating "1000000000000.2 + 6" / "1000000000000.4 + 6", then "1000000000000.4 + 7" / "1000000000000.6 + 7", then "1000000000000.2 + 8" / "1000000000000.4 + 8"]
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 
1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 
1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 8 1000000000000.2 + 8 1000000000000.4 + 9 1000000000000.5 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 
1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 
1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 
1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 
1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 
1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 
1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 + 9 1000000000000.4 + 9 
1000000000000.6 + 9 1000000000000.4 + 9 1000000000000.6 [the remainder of this data file repeats the same alternating pair of values, one observation per added line, and is elided here] + 9 1000000000000.4 + 9 1000000000000.6 diff --git a/voice_bridge/scipy/stats/tests/data/nist_linregress/Norris.dat b/voice_bridge/scipy/stats/tests/data/nist_linregress/Norris.dat new file mode 100644 index 0000000000000000000000000000000000000000..4bf8ed911cae75824b27e5f5d5e444e17fa8eae8 --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/nist_linregress/Norris.dat @@ -0,0 +1,97 @@ +NIST/ITL StRD +Dataset Name: Norris (Norris.dat) + +File Format: ASCII + Certified Values (lines 31 to 46)
+ Data (lines 61 to 96) + +Procedure: Linear Least Squares Regression + +Reference: Norris, J., NIST. + Calibration of Ozone Monitors. + +Data: 1 Response Variable (y) + 1 Predictor Variable (x) + 36 Observations + Lower Level of Difficulty + Observed Data + +Model: Linear Class + 2 Parameters (B0,B1) + + y = B0 + B1*x + e + + + + Certified Regression Statistics + + Standard Deviation + Parameter Estimate of Estimate + + B0 -0.262323073774029 0.232818234301152 + B1 1.00211681802045 0.429796848199937E-03 + + Residual + Standard Deviation 0.884796396144373 + + R-Squared 0.999993745883712 + + + Certified Analysis of Variance Table + +Source of Degrees of Sums of Mean +Variation Freedom Squares Squares F Statistic + +Regression 1 4255954.13232369 4255954.13232369 5436385.54079785 +Residual 34 26.6173985294224 0.782864662630069 + + + + + + + + + + + + + +Data: y x + 0.1 0.2 + 338.8 337.4 + 118.1 118.2 + 888.0 884.6 + 9.2 10.1 + 228.1 226.5 + 668.5 666.3 + 998.5 996.3 + 449.1 448.6 + 778.9 777.0 + 559.2 558.2 + 0.3 0.4 + 0.1 0.6 + 778.1 775.5 + 668.8 666.9 + 339.3 338.0 + 448.9 447.5 + 10.8 11.6 + 557.7 556.0 + 228.3 228.1 + 998.0 995.8 + 888.8 887.6 + 119.6 120.2 + 0.3 0.3 + 0.6 0.3 + 557.6 556.8 + 339.3 339.1 + 888.0 887.2 + 998.5 999.0 + 778.9 779.0 + 10.2 11.1 + 117.6 118.3 + 228.9 229.2 + 668.4 669.1 + 449.2 448.9 + 0.2 0.5 + diff --git a/voice_bridge/scipy/stats/tests/data/stable-cdf-sample-data.npy b/voice_bridge/scipy/stats/tests/data/stable-cdf-sample-data.npy new file mode 100644 index 0000000000000000000000000000000000000000..7bd9bebeb7586ee84e74e65d9c8927aa9c5ab01f --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/stable-cdf-sample-data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cdb10fd30e9481b1b045bae3a534b12aea9ceec043053fd35c48cb06590ffec +size 27008 diff --git a/voice_bridge/scipy/stats/tests/data/stable-pdf-sample-data.npy b/voice_bridge/scipy/stats/tests/data/stable-pdf-sample-data.npy new file mode 100644 index 0000000000000000000000000000000000000000..7ec2fe1ccb82e05233e508ba10a4f764f75f48fa --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/stable-pdf-sample-data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ab32662a656b97dc1ecf1dca011252eb8672942d47e053469d4895639fbed01 +size 27008 diff --git a/voice_bridge/scipy/stats/tests/data/studentized_range_mpmath_ref.json b/voice_bridge/scipy/stats/tests/data/studentized_range_mpmath_ref.json new file mode 100644 index 0000000000000000000000000000000000000000..bb971286cf85b28738a80bacececfb90c2566782 --- /dev/null +++ b/voice_bridge/scipy/stats/tests/data/studentized_range_mpmath_ref.json @@ -0,0 +1,1499 @@ +{ + "COMMENT": "!!!!!! 
THIS FILE WAS AUTOGENERATED BY RUNNING `python studentized_range_mpmath_ref.py` !!!!!!", + "moment_data": [ + { + "src_case": { + "m": 0, + "k": 3, + "v": 10, + "expected_atol": 1e-09, + "expected_rtol": 1e-09 + }, + "mp_result": 1.0 + }, + { + "src_case": { + "m": 1, + "k": 3, + "v": 10, + "expected_atol": 1e-09, + "expected_rtol": 1e-09 + }, + "mp_result": 1.8342745127927962 + }, + { + "src_case": { + "m": 2, + "k": 3, + "v": 10, + "expected_atol": 1e-09, + "expected_rtol": 1e-09 + }, + "mp_result": 4.567483357831711 + }, + { + "src_case": { + "m": 3, + "k": 3, + "v": 10, + "expected_atol": 1e-09, + "expected_rtol": 1e-09 + }, + "mp_result": 14.412156886227011 + }, + { + "src_case": { + "m": 4, + "k": 3, + "v": 10, + "expected_atol": 1e-09, + "expected_rtol": 1e-09 + }, + "mp_result": 56.012250366720444 + } + ], + "cdf_data": [ + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0027502772229359594 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.8544145010066327e-12 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0027520560662338336 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.39089126131273e-13 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.002752437649536182 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.0862189999210748e-12 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.002752755744313648 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0027527430186246545 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.002752666667812431 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.505275157135514e-24 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.8546698113384126e-25 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.7362668562706085e-11 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 5.571947730052616e-26 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.032619249089036e-27 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.539763646681808e-22 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.618313512511099e-12 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 4.919231733354114e-28 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 
9.159348906295542e-13 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.22331624289542043 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.2395624637676257 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23510918942128056 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23786536230099864 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.000651656693149116 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.2401356460422021 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.003971273224673166 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0008732969319364606 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.24023154593376422 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.001300816146573152 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.5682573722040226e-07 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0005841098057517027 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.2267674885784e-05 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0005731712496327297 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.746798012658064e-06 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 5.807700350854172e-07 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.147637957472628e-08 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 8.306675539750552e-08 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.8711786295203324 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9818862781476212 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9566506502400175 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9849546621386962 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9731488893573804 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + 
"mp_result": 0.8450530667988544 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.6164875232404174 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9845292772767739 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.8079691517949077 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.7573606942645745 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.8587525248147736 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.8611036193280976 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.46523135355387657 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.6318042819232383 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.5574947140294286 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.5970517763141937 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.6493671527818267 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.6466699776044968 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9881335633712994 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999861266821 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.999908236635449 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999978467928313 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999996690216 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999993640496 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9570401457077894 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999997977351971 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9991738325963548 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999730883609333 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999905199205 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 
0.9999999950566264 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9312318042339768 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999991743904675 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9977643922032399 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999054426012515 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999602948055 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.9999999792458618 + } + ], + "pdf_data": [ + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05487847613526332 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.564099684606509e-10 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05494947290360002 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 8.442593793786411e-11 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.054964710604860405 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.764441961563576e-11 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05497690690332341 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.05497385731702228 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 4.758021225803992e-22 + }, + { + "src_case": { + "q": 0.1, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.054977415200879516 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.8004731453548083e-19 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.5564176176604816e-09 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.342768070688728e-24 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.454372265306114e-10 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.9138464398429654e-25 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 5.266341131767418e-23 + }, + { + "src_case": { + "q": 0.1, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 8.234556126446594e-11 + }, + { + "src_case": { + "q": 0.1, + "k": 20, + 
"v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.32929780487562e-26 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.36083736990527154 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4137959132282269 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4080239698771056 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.398772020275752 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4160873922094346 + }, + { + "src_case": { + "q": 1, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.4157583991350054 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.005210720148451848 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.02575314059867804 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.009782573637596617 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.006818708302379005 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0047089182958790715 + }, + { + "src_case": { + "q": 1, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.004627085294166373 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0010886280311369462 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.630674470916427e-06 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 4.121713278199428e-05 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 9.319506007252685e-06 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.5585754418789747e-06 + }, + { + "src_case": { + "q": 1, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.4190335899441991e-06 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.07185383302009114 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.050268901219386576 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.03321056847176124 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.04044172384981084 + }, + { + "src_case": { + "q": 4, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.030571365659999617 + }, + { + "src_case": { + "q": 4, + 
"k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.030120779149073032 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.17501664247670937 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.22374394725370736 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23246597521020534 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23239043677504484 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23057775622748988 + }, + { + "src_case": { + "q": 4, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.23012666145240815 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.2073676639537027 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.3245990542431859 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0033733228559870584 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 7.728665739003835e-05 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.38244500549096866 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.45434978340834464 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.43334135870667473 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.159522630228393e-09 + }, + { + "src_case": { + "q": 4, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.45807877248528855 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.5303467191175695e-08 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.121281850105421e-06 + }, + { + "src_case": { + "q": 10, + "k": 3, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.1901591191700855e-09 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0006784051704217357 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.011845582636101885 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.844183552674918e-05 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 3.215093171597309e-08 + }, + { + "src_case": { + "q": 10, + "k": 10, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 5.125792577534542e-07 + }, + { 
+ "src_case": { + "q": 10, + "k": 10, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.7759015355532446e-08 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 10, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.0017957646258393628 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 3, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.018534407764819284 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 20, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 0.00013316083413164858 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 50, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 2.082489228991225e-06 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 100, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 1.3444226792257012e-07 + }, + { + "src_case": { + "q": 10, + "k": 20, + "v": 120, + "expected_atol": 1e-11, + "expected_rtol": 1e-11 + }, + "mp_result": 7.446912854228521e-08 + } + ] +} \ No newline at end of file diff --git a/voice_bridge/select.pyd b/voice_bridge/select.pyd new file mode 100644 index 0000000000000000000000000000000000000000..001f63a7e278fe8881a3de751580d9b8b52912ec Binary files /dev/null and b/voice_bridge/select.pyd differ diff --git a/voice_bridge/sentencepiece/_sentencepiece.pyd b/voice_bridge/sentencepiece/_sentencepiece.pyd new file mode 100644 index 0000000000000000000000000000000000000000..c8ec17fad4c96f4e66e00e9d38664bcb961299d6 --- /dev/null +++ b/voice_bridge/sentencepiece/_sentencepiece.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8fb38357c97cde1c10650be7921d817cccf9f54f9b2d92b7d424e13d8e996d4 +size 2216960 diff --git a/voice_bridge/shm.dll b/voice_bridge/shm.dll new file mode 100644 index 0000000000000000000000000000000000000000..b80e6b4a9e5aa85a38f024c43cd0fff4aa9ddba0 Binary files /dev/null and b/voice_bridge/shm.dll differ diff --git a/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/icons/0.png b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/icons/0.png new file mode 100644 index 0000000000000000000000000000000000000000..8a27f0d7d8746e9807f9e9ceef8282d7cffd63f3 Binary files /dev/null and b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/icons/0.png differ diff --git a/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/policy.md b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/policy.md new file mode 100644 index 0000000000000000000000000000000000000000..c9bcc2cea42f727c8e43c934fc38163144848882 --- /dev/null +++ b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/policy.md @@ -0,0 +1,3 @@ +dummy1 policy + +https://voicevox.hiroshiba.jp/ diff --git a/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/portrait.png b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/portrait.png new file mode 100644 index 0000000000000000000000000000000000000000..5e5d74fce44315082dab503700aab6698bd02cc9 Binary files /dev/null and b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/portrait.png differ diff --git a/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_001.wav b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..7ad74b1ce4ad8e0adf4565b6115e04d6a3b948c8 Binary files /dev/null and 
b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_001.wav differ diff --git a/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_002.wav b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..3189647215051524df79142f6c52b2f6533529e3 Binary files /dev/null and b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_002.wav differ diff --git a/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_003.wav b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..31b66454ac62546d36dd9d85f85a841a4ecaf52d Binary files /dev/null and b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_003.wav differ diff --git a/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_004.wav b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_004.wav new file mode 100644 index 0000000000000000000000000000000000000000..4f2bb6afa2818567201bb979909edd9dd21d7dc8 Binary files /dev/null and b/voice_bridge/speaker_info/aa33c99b-a43b-49b0-a2c8-6a81922f8213/voice_samples/0_004.wav differ diff --git a/voice_bridge/sqlite3.dll b/voice_bridge/sqlite3.dll new file mode 100644 index 0000000000000000000000000000000000000000..8bc1dd422163ff24afbc9eb523c8c319d6abe7e3 --- /dev/null +++ b/voice_bridge/sqlite3.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53aa551e62267b887017a95fe14a610c2bb3b53c4be62ddc4dc3548df3720a68 +size 1504944 diff --git a/voice_bridge/tcl86t.dll b/voice_bridge/tcl86t.dll new file mode 100644 index 0000000000000000000000000000000000000000..e2878f728f2639f46da0dd6fd7a7aa79b0d823f3 --- /dev/null +++ b/voice_bridge/tcl86t.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f650d6bc321bcda3fc3ac3dec3ac4e473fb0b7b68b6c948581bcfc54653e6768 +size 1705120 diff --git a/voice_bridge/tk86t.dll b/voice_bridge/tk86t.dll new file mode 100644 index 0000000000000000000000000000000000000000..6b10a41ffe0d6baa2163df26bab8e69cdd0fda56 --- /dev/null +++ b/voice_bridge/tk86t.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a6d0871be2fa7153de22be008a20a5257b721657e6d4b24da8b1f940345d0d5 +size 1468064 diff --git a/voice_bridge/torch/_C.pyd b/voice_bridge/torch/_C.pyd new file mode 100644 index 0000000000000000000000000000000000000000..d51c1c08e7069cd41a21a4a728747744134a6ebd Binary files /dev/null and b/voice_bridge/torch/_C.pyd differ diff --git a/voice_bridge/torch/_C_flatbuffer.pyd b/voice_bridge/torch/_C_flatbuffer.pyd new file mode 100644 index 0000000000000000000000000000000000000000..ddc002805f5dbb5cd635ecb3072cc5aaffad172f Binary files /dev/null and b/voice_bridge/torch/_C_flatbuffer.pyd differ diff --git a/voice_bridge/torch/bin/protoc.exe b/voice_bridge/torch/bin/protoc.exe new file mode 100644 index 0000000000000000000000000000000000000000..e6b83caf5d8591132ea48904d96d52929d88bcfa --- /dev/null +++ b/voice_bridge/torch/bin/protoc.exe @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8e8745ee9b7b33db862f62f365ac7e45abff675dcf3f2cf73792f502557a0a4 +size 2823168 diff --git a/voice_bridge/torch/include/ATen/ATen.h b/voice_bridge/torch/include/ATen/ATen.h new file mode 100644 index 
0000000000000000000000000000000000000000..1be43cbe7def623731b8b323fe738c73dfcb87a8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ATen.h @@ -0,0 +1,33 @@ +#pragma once + +#if !defined(_MSC_VER) && __cplusplus < 201402L +#error C++14 or later compatible compiler is required to use ATen. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/voice_bridge/torch/include/ATen/AccumulateType.h b/voice_bridge/torch/include/ATen/AccumulateType.h new file mode 100644 index 0000000000000000000000000000000000000000..9b5b9b74d7069f17ec1ffee04fd083f243862d0a --- /dev/null +++ b/voice_bridge/torch/include/ATen/AccumulateType.h @@ -0,0 +1,179 @@ +#pragma once +#include +#include +#include +#include + +// Defines the accumulation type for a scalar type. +// Example: +// using accscalar_t = acc_type<scalar_t, true>; +// +// Accumulation types are an important concept in numeric computing +// because you frequently want to perform intermediate computations +// at a higher precision than the input and output precision, to avoid +// compounding internal rounding errors. Accumulation is the most +// well-known intermediate computation (it is of great importance for +// sum reduction and matrix multiply, for example), but in PyTorch +// acc_type ends up getting used for all sorts of other intermediate +// computations, so it perhaps would be more accurately (ahem) called an +// "accurate" type. acc_type is especially important for reduced +// precision operations like float16 and bfloat16, where relatively +// benign looking inputs can easily end up overflowing/underflowing. +// +// acc_type is parametrized by whether or not you are running on CUDA +// or not, because on CUDA double precision operations are expensive +// and so by default, we don't actually want to use double as an +// acc_type on CUDA. A lot of things are typed out below, but +// basically, the table is generated by a few rules: +// +// If bool: +// Use 'bool' as acc_type. +// If floating point: +// If CUDA, use 'float' as acc_type (unless scalar_t is double), +// otherwise (CPU) use 'double' +// If integral: +// Use 'int64_t' as acc_type +// +// You're not forced to use this template; if you happen to know +// something specific about your use case, you can specify your own +// desired behavior. This template, however, will give you a reasonable +// default that will work for all dtypes supported in PyTorch.
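+// Illustrative compile-time checks (an editorial sketch, not part of the
+// upstream header). Assuming the specializations below and <type_traits>,
+// the accumulation rules can be verified directly:
+//
+//   static_assert(std::is_same<at::acc_type<float, /*is_cuda=*/false>, double>::value,
+//                 "on CPU, float accumulates in double");
+//   static_assert(std::is_same<at::acc_type<int16_t, /*is_cuda=*/false>, int64_t>::value,
+//                 "integral types accumulate in int64_t");
+//   static_assert(std::is_same<at::acc_type<at::Half, /*is_cuda=*/true>, float>::value,
+//                 "on CUDA, Half accumulates in float");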
+ +#if defined(__CUDACC__) +#include <cuda.h> +#include <cuda_fp16.h> +#elif defined(__HIPCC__) +#include <hip/hip_runtime.h> +#include <hip/hip_fp16.h> +#endif + +namespace at { + +template <typename T, bool is_cuda> +struct AccumulateType {}; + +#if defined(__CUDACC__) || defined(__HIPCC__) +template <> +struct AccumulateType<half, true> { + using type = float; +}; +#endif +template <> +struct AccumulateType<BFloat16, true> { + using type = float; +}; +template <> +struct AccumulateType<Half, true> { + using type = float; +}; +template <> +struct AccumulateType<float, true> { + using type = float; +}; +template <> +struct AccumulateType<double, true> { + using type = double; +}; +template <> +struct AccumulateType<int8_t, true> { + using type = int64_t; +}; +template <> +struct AccumulateType<uint8_t, true> { + using type = int64_t; +}; +template <> +struct AccumulateType<char, true> { + using type = int64_t; +}; +template <> +struct AccumulateType<int16_t, true> { + using type = int64_t; +}; +template <> +struct AccumulateType<int32_t, true> { + using type = int64_t; +}; +template <> +struct AccumulateType<int64_t, true> { + using type = int64_t; +}; +template <> +struct AccumulateType<bool, true> { + using type = bool; +}; +template <> +struct AccumulateType<Half, false> { + using type = float; +}; +template <> +struct AccumulateType<BFloat16, false> { + using type = float; +}; +template <> +struct AccumulateType<c10::complex<Half>, false> { + using type = c10::complex<float>; +}; +template <> +struct AccumulateType<c10::complex<float>, false> { + using type = c10::complex<double>; +}; +template <> +struct AccumulateType<c10::complex<double>, false> { + using type = c10::complex<double>; +}; +template <> +struct AccumulateType<c10::complex<Half>, true> { + using type = c10::complex<float>; +}; +template <> +struct AccumulateType<c10::complex<float>, true> { + using type = c10::complex<float>; +}; +template <> +struct AccumulateType<c10::complex<double>, true> { + using type = c10::complex<double>; +}; +template <> +struct AccumulateType<float, false> { + using type = double; +}; +template <> +struct AccumulateType<double, false> { + using type = double; +}; +template <> +struct AccumulateType<int8_t, false> { + using type = int64_t; +}; +template <> +struct AccumulateType<uint8_t, false> { + using type = int64_t; +}; +template <> +struct AccumulateType<char, false> { + using type = int64_t; +}; +template <> +struct AccumulateType<int16_t, false> { + using type = int64_t; +}; +template <> +struct AccumulateType<int32_t, false> { + using type = int64_t; +}; +template <> +struct AccumulateType<int64_t, false> { + using type = int64_t; +}; +template <> +struct AccumulateType<bool, false> { + using type = bool; +}; + +template <typename T, bool is_cuda> +using acc_type = typename AccumulateType<T, is_cuda>::type; + +TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ArrayRef.h b/voice_bridge/torch/include/ATen/ArrayRef.h new file mode 100644 index 0000000000000000000000000000000000000000..0461d5953ed8a7783c82402ca4523b0b0a1ad465 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ArrayRef.h @@ -0,0 +1,2 @@ +#pragma once +#include <c10/util/ArrayRef.h> diff --git a/voice_bridge/torch/include/ATen/Backend.h b/voice_bridge/torch/include/ATen/Backend.h new file mode 100644 index 0000000000000000000000000000000000000000..9651469e190085d913ba9b5d1ca02085886fc4e1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Backend.h @@ -0,0 +1,2 @@ +#pragma once +#include <c10/core/Backend.h> diff --git a/voice_bridge/torch/include/ATen/Backtrace.h b/voice_bridge/torch/include/ATen/Backtrace.h new file mode 100644 index 0000000000000000000000000000000000000000..bdef9f4a9de439bf8af9a7c5a35a958caa7b8b41 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Backtrace.h @@ -0,0 +1,2 @@ +#pragma once +#include <c10/util/Backtrace.h> diff --git a/voice_bridge/torch/include/ATen/BatchedFallback.h b/voice_bridge/torch/include/ATen/BatchedFallback.h new file mode 100644 index 0000000000000000000000000000000000000000..beef24a6ed9c5b8373a0db5bcc16f268b8c18726 --- /dev/null +++ 
b/voice_bridge/torch/include/ATen/BatchedFallback.h @@ -0,0 +1,25 @@ +#pragma once +#include +#include +#include + +namespace at { + +// If an operator doesn't have a batching rule implemented then we fallback +// to this implementation. The fallback only works on out-of-place operators +// that return only tensors with new memory. (e.g., no in-place operators, no +// view operations). +// +// The fallback effectively takes all of the BatchedTensors in `stack`, slices +// them, and runs `op` on all of the corresponding slices to produce slices +// of the outputs. The output slices then get `torch.stack`ed to create the +// final returns. +// +// The performance of the fallback is not very good because it introduces an +// extra copy from stacking the sliced outputs. Because of this, we prefer to +// write batching rules for operators whenever possible. +void batchedTensorForLoopFallback( + const c10::OperatorHandle& op, + torch::jit::Stack* stack); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/BatchedTensorImpl.h b/voice_bridge/torch/include/ATen/BatchedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..b832c34e3ac7cba37775a06d68926073c8783947 --- /dev/null +++ b/voice_bridge/torch/include/ATen/BatchedTensorImpl.h @@ -0,0 +1,160 @@ +#pragma once + +#include + +#include +#include +#include + +namespace at { + +// We assume this in a few other places in the codebase, +// but there isn't a centralized definition. +constexpr int64_t kVmapMaxTensorDims = 64; + +// The valid vmap levels range from [0, 64). This effectively means that we +// support a maximum of 64 nested vmaps. +constexpr int64_t kVmapNumLevels = 64; + +// Store this number of elements of BatchDims on the stack. Most people will +// probably use <= 5 nested vmaps, but adjust this number as necessary. +constexpr int64_t kBatchDimsStackSize = 5; + +// a BatchDim represents a "private" dimension on a Tensor created inside of +// vmap. It is a (level, dim) tuple, with the `dim` indicating which dimension +// is being vmap'ed over and the `level` being an identifier for which vmap +// said dimension was created inside. The `dim` corresponds to a "physical +// dim" - it is a dimension index on the underlying physical tensor that is +// being vmapped over. +struct BatchDim { + BatchDim(int64_t level, int64_t dim) : dim_(dim), level_(level) {} + int64_t dim() const { + return dim_; + } + int64_t level() const { + return level_; + } + + private: + int64_t dim_; + int64_t level_; +}; + +using BatchDims = SmallVector<BatchDim, kBatchDimsStackSize>; +using BatchDimsRef = ArrayRef<BatchDim>; + +// A BatchedTensorImpl holds an underlying Tensor and a list of BatchDim +// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a +// BatchedTensorImpl. +// +// The batch dimensions are treated as being "private"; they are not +// user-visible. For example, in the following Tensor, +// bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2, dim=1)]) +// dimensions 0 and 1 are batch dimensions. +// +// bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public) +// dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7) +// tensor. +struct TORCH_API BatchedTensorImpl : public c10::TensorImpl { + explicit BatchedTensorImpl(Tensor value, BatchDims bdims); + + // Returns a reference to BatchDims that represent which dimensions of this + // tensor are private.
+ BatchDimsRef bdims() const { + return bdims_; + } + + // BatchedTensorImpl wraps a Tensor + const Tensor& value() const { + return value_; + }; + + // Given a public dimension index, return the dimension index in the + // underlying value() tensor. For example, if we have + // bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2, + // dim=2)]) + // bt.actualDim(0) -> 1 + // bt.actualDim(1) -> 3 + // bt.actualDim(2) -> Error + int64_t actualDim(int64_t dim, bool wrap_dim = true) const; + + // We have to override this because we opted into CustomStrides + IntArrayRef strides_custom() const override; + // Override a bunch of methods inherited from TensorImpl to return error + // messages. + bool is_contiguous_custom(at::MemoryFormat memory_format) const override; + void set_size(int64_t dim, int64_t new_size) override; + void set_stride(int64_t dim, int64_t new_stride) override; + void set_storage_offset(int64_t storage_offset) override; +#ifdef DEBUG + bool has_storage() const override; +#endif + + private: + // see NOTE: [BatchedTensorImpl levels invariant] + void checkInvariants() const; + const char* tensorimpl_type_name() const override; + + Tensor value_; + + // Note: [BatchedTensorImpl levels invariant] + // There is an invariant that the BatchDims must be stored in increasing + // `level` order. That is, for i < j, bdims_[i].level must be less than + // bdims_[j].level. + BatchDims bdims_; +}; + +// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a +// BatchedTensorImpl. +inline bool isBatchedTensor(const Tensor& tensor) { + return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::Batched); +} + +// It is unsafe to call this on a Tensor that is not backed by a +// BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible. +inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) { + return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl()); +} + +inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) { + if (!isBatchedTensor(tensor)) { + return nullptr; + } + return unsafeGetBatchedImpl(tensor); +} + +// Returns a bitset. If bit i is set, then that means dim i is a batchdim. +inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset( + BatchDimsRef bdims) { + std::bitset<kVmapMaxTensorDims> is_bdim; + for (const auto& bdim : bdims) { + is_bdim.set(bdim.dim()); + } + return is_bdim; +} + +// Creates a bitset for all of the levels present in `bdims` +inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(BatchDimsRef bdims) { + std::bitset<kVmapNumLevels> result; + for (const auto& bdim : bdims) { + result.set(bdim.level()); + } + return result; +} + +inline std::ostream& operator<<(std::ostream& out, const BatchDim& bdim) { + out << "(lvl=" << bdim.level() << ", dim=" << bdim.dim() << ")"; + return out; +} + +// Use this to construct a BatchedTensor from a regular Tensor +TORCH_API Tensor makeBatched(const Tensor& tensor, BatchDims bdims); + +// Adds a batch dim to `tensor`, returning a BatchedTensor +TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t level, int64_t dim); + +// Checks if an inplace operation on self and other is "vmap compatible". +// See NOTE: [vmap-incompatible in-place operations] for the definition of this.
+TORCH_API bool inplaceIsVmapCompatible(const Tensor& self, const Tensor& other); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/CPUApplyUtils.h b/voice_bridge/torch/include/ATen/CPUApplyUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..d98e07527293634fc30768b6e3e51db2cf361372 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CPUApplyUtils.h @@ -0,0 +1,343 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +/* + * The basic strategy for apply is as follows: + * + * 1. Starting with the outermost index, loop until we reach a dimension where + * the data is no longer contiguous, i.e. the stride at that dimension is not + * equal to the size of the tensor defined by the outer dimensions. Let's call + * this outer (contiguous) tensor A. Note that if the Tensor is contiguous, then + * A is equal to the entire Tensor. Let's call the inner tensor B. + * + * 2. We loop through the indices in B, starting at its outermost dimension. For + * example, if B is a 2x2 matrix, then we do: + * + * B[0][0] + * B[0][1] + * B[1][0] + * B[1][1] + * + * We set the offset into the underlying storage as (storageOffset + stride_B * + * index_B), i.e. basically we compute the offset into the storage as we would + * normally for a Tensor. But because we are guaranteed the subsequent data is + * contiguous in memory, we can simply loop for sizeof(A) iterations and perform + * the operation, without having to follow the order described by the strides of + * A. + * + * 3. As an optimization, we merge dimensions of A that are contiguous in + * memory. For example, if A is a 3x3x3x3 tensor narrowed from a 3x3x4x3 tensor, + * then the first two dimensions can be merged for the purposes of APPLY, + * reducing the number of nested loops. 
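+ *
+ * A worked instance of step 3 (an editorial illustration, not part of the
+ * upstream comment): narrowing a contiguous 3x3x4x3 tensor (strides
+ * 36, 12, 3, 1) down to 3x3x3x3 keeps the strides but shrinks dim 2.
+ * Dim 0 satisfies stride[0] == size[1] * stride[1] (36 == 3 * 12), so dims
+ * 0 and 1 merge into a single dim of size 9; dim 1 does not (12 != 3 * 3).
+ * APPLY therefore iterates over sizes (9, 3, 3) with strides (12, 3, 1),
+ * three nested loops instead of four.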
+ */ + +inline Tensor sort_strides(Tensor& tensor_) { + IntArrayRef strides = tensor_.strides(); + std::vector<int64_t> indices; + indices.reserve(tensor_.ndimension()); + for (const auto i : c10::irange(tensor_.ndimension())) { + indices.push_back(i); + } + std::sort(indices.begin(), indices.end(), [&strides](int64_t i1, int64_t i2) { + return strides[i1] > strides[i2]; + }); + Tensor tensor = tensor_.permute(indices); + return tensor; +} + +template <typename T, int N> +struct strided_tensor_iter_fixed { + public: + T* data_ = NULL; + int64_t dim_ = 0; + + int64_t counter_[N] = {0}; + int64_t sizes_[N] = {0}; + int64_t strides_[N] = {0}; + + strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete; + void operator=(strided_tensor_iter_fixed const& x) = delete; + strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default; + strided_tensor_iter_fixed(Tensor& tensor, bool sort_strides = false) + : data_(tensor.data_ptr<T>()) { + (void)sort_strides; // Suppress unused variable warning + std::memset(counter_, 0, sizeof(int64_t) * N); + if (tensor.dim() > 0) { + std::memcpy( + sizes_, tensor.sizes().data(), tensor.dim() * sizeof(int64_t)); + std::memcpy( + strides_, tensor.strides().data(), tensor.dim() * sizeof(int64_t)); + } + dim_ = std::get<1>(collapse_dims(sizes_, strides_, tensor.ndimension())); + } +}; + +template <typename T> +struct strided_tensor_iter { + private: + public: + T* data_ = NULL; + int64_t dim_; + + std::vector<int64_t> counter_; + std::vector<int64_t> sizes_; + std::vector<int64_t> strides_; + + strided_tensor_iter(strided_tensor_iter const&) = delete; + void operator=(strided_tensor_iter const& x) = delete; + strided_tensor_iter(strided_tensor_iter&&) = default; + strided_tensor_iter(Tensor& tensor) + : data_(tensor.data_ptr<T>()), + dim_(tensor.ndimension()), + counter_(dim_, 0), + sizes_(tensor.sizes().vec()), + strides_(tensor.strides().vec()) { + dim_ = std::get<1>(collapse_dims(sizes_.data(), strides_.data(), dim_)); + } +}; + +inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) { + if (tensors.size() == 0) + return true; + int64_t all_numel = tensors[0].numel(); + for (const auto i : c10::irange(1, tensors.size())) { + if (tensors[i].numel() != all_numel) + return false; + } + return true; +} + +inline std::string _all_equal_numel_error(at::ArrayRef<Tensor> tensors) { + std::ostringstream oss; + oss << "inconsistent tensor size, expected "; + for (size_t i = 0; i < tensors.size() - 1; i++) { + oss << tensors[i].sizes() << ", "; + } + oss << "and " << tensors[tensors.size() - 1].sizes() + << " to have the same number of elements, but got "; + for (size_t i = 0; i < tensors.size() - 1; i++) { + oss << tensors[i].numel() << ", "; + } + oss << "and " << tensors[tensors.size() - 1].numel() + << " elements respectively"; + return oss.str(); +} + +inline bool _apply_preamble(ArrayRef<Tensor> tensors) { + checkDeviceType("CPU_tensor_apply", tensors, kCPU); + checkLayout("CPU_tensor_apply", tensors, kStrided); + if (!_all_equal_numel(tensors)) + AT_ERROR(_all_equal_numel_error(tensors)); + // An empty tensor has no elements + for (auto& t : tensors) + if (t.numel() == 0) + return false; + return true; +} + +inline int64_t _max_dim_tensors(ArrayRef<Tensor> tensors) { + int64_t dim = 0; + for (auto& t : tensors) + dim = std::max(dim, t.ndimension()); + return dim; +} + +inline void iterate(int64_t /*size*/){}; + +template <typename Arg, typename... Args> +inline void iterate(int64_t size, Arg& iter, Args&...
+  iter.counter_[iter.dim_ - 1] += size;
+  iter.data_ = iter.data_ + size * iter.strides_[iter.dim_ - 1];
+  iterate(size, iter_tail...);
+}
+
+inline bool iterate_continue() {
+  return true;
+};
+
+template <typename Arg, typename... Args>
+inline bool iterate_continue(Arg& iter, Args&... iter_tail) {
+  return iter.counter_[iter.dim_ - 1] < iter.sizes_[iter.dim_ - 1] &&
+      iterate_continue(iter_tail...);
+}
+
+inline int64_t max_iterate_size() {
+  return std::numeric_limits<int64_t>::max();
+};
+
+template <typename Arg, typename... Args>
+inline int64_t max_iterate_size(Arg& iter, Args&... iter_tail) {
+  return std::min(
+      (iter.sizes_[iter.dim_ - 1] - iter.counter_[iter.dim_ - 1]),
+      max_iterate_size(iter_tail...));
+}
+
+inline void iterate_overflow(){};
+
+template <typename Arg, typename... Args>
+inline void iterate_overflow(Arg& iter, Args&... iter_tail) {
+  if (iter.counter_[iter.dim_ - 1] == iter.sizes_[iter.dim_ - 1]) {
+    for (int64_t i = iter.dim_ - 1; i > 0; i--) {
+      if (iter.counter_[i] == iter.sizes_[i]) {
+        iter.counter_[i] = 0;
+        iter.counter_[i - 1]++;
+        iter.data_ = iter.data_ - (iter.sizes_[i] * iter.strides_[i]) +
+            iter.strides_[i - 1];
+      }
+    }
+  }
+  iterate_overflow(iter_tail...);
+}
+
+inline void forward(int64_t /*offset*/){};
+
+template <typename Arg, typename... Args>
+inline void forward(int64_t offset, Arg& iter, Args&... iter_tail) {
+  int64_t multi = offset;
+  for (int64_t i = iter.dim_ - 1; i >= 0; i--) {
+    int64_t inc = multi % iter.sizes_[i];
+    multi = multi / iter.sizes_[i];
+    iter.data_ = iter.data_ + inc * iter.strides_[i];
+    iter.counter_[i] += inc;
+  }
+  forward(offset, iter_tail...);
+}
+
+inline int64_t max_dim() {
+  return 0;
+}
+
+template <typename Arg, typename... Args>
+inline int64_t max_dim(Arg& iter, Args&... iter_tail) {
+  return std::max(iter.dim_, max_dim(iter_tail...));
+}
+
+inline void apply_op(){};
+
+template <typename Op, typename... Args>
+inline void apply_op(
+    int64_t numel,
+    int64_t offset,
+    const Op& op,
+    Args... iters) {
+  // For 0-dim tensors
+  if (numel == 1 && max_dim(iters...) == 0) {
+    op(*iters.data_...);
+    return;
+  }
+  if (offset > 0)
+    forward(offset, iters...);
+  // Splitting this into chunks helps the compiler create faster assembly
+  for (int64_t i = 0; i < numel;) {
+    for (; iterate_continue(iters...) && i < numel;) {
+      op(*iters.data_...);
+      iterate(1, iters...);
+      i++;
+    }
+    iterate_overflow(iters...);
+  }
+}
+
+/*
+  Apply a pointwise operator to a sequence of tensors
+
+  The calling convention for op is a function/functor that takes the same
+  number of pointers of type scalar as the number of given tensors.
+  For example, to compute a = b * c, op would be of the form:
+  [](scalar* a_val, const scalar* b_val, const scalar* c_val) { a_val[0] =
+  b_val[0] * c_val[0]; };
+*/
+
+template <typename scalar1, typename scalar2, typename Op>
+inline void CPU_tensor_apply2(Tensor tensor1, Tensor tensor2, const Op op) {
+  if (!_apply_preamble({tensor1, tensor2}))
+    return;
+  if (_max_dim_tensors({tensor1, tensor2}) <= 8) {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+        strided_tensor_iter_fixed<scalar2, 8>(tensor2));
+  } else {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter<scalar1>(tensor1),
+        strided_tensor_iter<scalar2>(tensor2));
+  }
+}
+
+template <typename scalar1, typename scalar2, typename scalar3, typename Op>
+inline void CPU_tensor_apply3(
+    Tensor tensor1,
+    Tensor tensor2,
+    Tensor tensor3,
+    const Op op) {
+  if (!_apply_preamble({tensor1, tensor2, tensor3}))
+    return;
+  if (_max_dim_tensors({tensor1, tensor2, tensor3}) <= 8) {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+        strided_tensor_iter_fixed<scalar2, 8>(tensor2),
+        strided_tensor_iter_fixed<scalar3, 8>(tensor3));
+  } else {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter<scalar1>(tensor1),
+        strided_tensor_iter<scalar2>(tensor2),
+        strided_tensor_iter<scalar3>(tensor3));
+  }
+}
+
+template <
+    typename scalar1,
+    typename scalar2,
+    typename scalar3,
+    typename scalar4,
+    typename Op>
+inline void CPU_tensor_apply4(
+    Tensor tensor1,
+    Tensor tensor2,
+    Tensor tensor3,
+    Tensor tensor4,
+    const Op op) {
+  if (!_apply_preamble({tensor1, tensor2, tensor3, tensor4}))
+    return;
+  if (_max_dim_tensors({tensor1, tensor2, tensor3, tensor4}) <= 8) {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
+        strided_tensor_iter_fixed<scalar2, 8>(tensor2),
+        strided_tensor_iter_fixed<scalar3, 8>(tensor3),
+        strided_tensor_iter_fixed<scalar4, 8>(tensor4));
+  } else {
+    apply_op(
+        tensor1.numel(),
+        0,
+        op,
+        strided_tensor_iter<scalar1>(tensor1),
+        strided_tensor_iter<scalar2>(tensor2),
+        strided_tensor_iter<scalar3>(tensor3),
+        strided_tensor_iter<scalar4>(tensor4));
+  }
+}
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/CPUFixedAllocator.h b/voice_bridge/torch/include/ATen/CPUFixedAllocator.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf621f34cc63735d7f7557f48146bb76467b8afc
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/CPUFixedAllocator.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <c10/core/Allocator.h>
+#include <c10/util/Exception.h>
+
+// This file creates a fake allocator that just throws exceptions if
+// it is actually used.
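+//
+// (Illustrative sketch for CPU_tensor_apply2 from the preceding
+// CPUApplyUtils.h, not part of the original headers. Note that apply_op
+// dereferences each iterator before invoking op, so in practice the functor
+// receives scalar references; `a` and `b` are hypothetical tensors, and
+// <ATen/ATen.h> is assumed to be available.)
+//
+//   at::Tensor a = at::randn({4, 5});
+//   at::Tensor b = at::randn({4, 5});
+//   at::CPU_tensor_apply2<float, float>(
+//       a, b, [](float& a_val, const float& b_val) { a_val += b_val; });
+//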
+
+// state passed to the allocator is the std::function<void(void*)> called
+// when the blob is released by ATen
+
+namespace at {
+
+static void* cpu_fixed_malloc(void*, ptrdiff_t) {
+  AT_ERROR("attempting to resize a tensor view of an external blob");
+}
+
+static void* cpu_fixed_realloc(void*, void*, ptrdiff_t) {
+  AT_ERROR("attempting to resize a tensor view of an external blob");
+}
+
+static void cpu_fixed_free(void* state, void* allocation) {
+  auto on_release = static_cast<std::function<void(void*)>*>(state);
+  (*on_release)(allocation);
+  delete on_release;
+}
+
+static Allocator CPU_fixed_allocator = {
+    cpu_fixed_malloc,
+    cpu_fixed_realloc,
+    cpu_fixed_free};
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/CPUFunctions.h b/voice_bridge/torch/include/ATen/CPUFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..17c4ddd92f1d469abb771ed0392eed0df0508b1a
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/CPUFunctions.h
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
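+//
+// A minimal sketch of the split this note describes (illustrative only;
+// `Foo` stands in for a dispatch key and the declaration shown is invented):
+//
+//   // FooFunctions.h -- dummy entry point
+//   #include <ATen/core/TensorBody.h>   // finish defining Tensor first
+//   #include <ATen/FooFunctions_inl.h>  // then pull in the fastpath API
+//
+//   // FooFunctions_inl.h -- the actual fastpath declarations
+//   namespace at { namespace foo {
+//   TORCH_API Tensor add(const Tensor& self, const Tensor& other);
+//   }} // namespace at::foo
+//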
+#include diff --git a/voice_bridge/torch/include/ATen/CPUFunctions_inl.h b/voice_bridge/torch/include/ATen/CPUFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..04ec4e1aa257283acabe1ec8c99403a0fa47a0b2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CPUFunctions_inl.h @@ -0,0 +1,557 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/voice_bridge/torch/include/ATen/CPUGeneratorImpl.h b/voice_bridge/torch/include/ATen/CPUGeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..7cba81626a68228eb2549a8585db5386565014b0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CPUGeneratorImpl.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl { + // Constructors + CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val); + ~CPUGeneratorImpl() override = default; + + // CPUGeneratorImpl methods + std::shared_ptr clone() const; + void set_current_seed(uint64_t seed) override; + uint64_t current_seed() const override; + uint64_t seed() override; + void set_state(const 
c10::TensorImpl& new_state) override;
+  c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
+  static DeviceType device_type();
+  uint32_t random();
+  uint64_t random64();
+  c10::optional<float> next_float_normal_sample();
+  c10::optional<double> next_double_normal_sample();
+  void set_next_float_normal_sample(c10::optional<float> randn);
+  void set_next_double_normal_sample(c10::optional<double> randn);
+  at::mt19937 engine();
+  void set_engine(at::mt19937 engine);
+
+ private:
+  CPUGeneratorImpl* clone_impl() const override;
+  at::mt19937 engine_;
+  c10::optional<float> next_float_normal_sample_;
+  c10::optional<double> next_double_normal_sample_;
+};
+
+namespace detail {
+
+TORCH_API const Generator& getDefaultCPUGenerator();
+TORCH_API Generator
+createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
+
+} // namespace detail
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/CUDAFunctions.h b/voice_bridge/torch/include/ATen/CUDAFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..10223723e47fda0105448b72b6f081b92d85f5bc
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/CUDAFunctions.h
@@ -0,0 +1,29 @@
+#include <ATen/core/TensorBody.h>
+
+// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
+// Code introduced to avoid cyclic dependency in static dispatch is no longer
+// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
+// to Operators.cpp for supporting multiple backends with multiple kernels.
+//
+// Note [Avoiding Include Cycles In Static Dispatch]
+// In order to avoid #include cycles in the static dispatch build, we've carefully split out
+// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
+//
+// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
+//   all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
+//   directly inlined into TensorBody.h.
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
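+//
+// (Hypothetical usage sketch for the CPUGeneratorImpl API declared in
+// CPUGeneratorImpl.h above; not part of the original headers. It relies only
+// on the declarations visible there plus at::Generator's forwarding methods.)
+//
+//   at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
+//   std::lock_guard<std::mutex> lock(gen.mutex()); // generators are not thread-safe
+//   uint64_t s = gen.current_seed(); // 42
+//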
+#include diff --git a/voice_bridge/torch/include/ATen/CUDAFunctions_inl.h b/voice_bridge/torch/include/ATen/CUDAFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..52c71aa74c84b9a82bfd2c69afa73f32117ab228 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CUDAFunctions_inl.h @@ -0,0 +1,586 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/voice_bridge/torch/include/ATen/CollapseDims.h b/voice_bridge/torch/include/ATen/CollapseDims.h new file mode 100644 index 0000000000000000000000000000000000000000..4e25112e7d4490096e6340184a3a4813511f93b6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CollapseDims.h @@ -0,0 +1,94 @@ +#include +#include + +namespace at { + +/* +[collapse dims] Updates sizes, and strides to reflect a "collapse" of +the info, possibly excluding the optional excludeDim. 
A "collapsed" version +of the info is the fewest dims that order the tensor's elements in the same +way as the original info. If excludeDim is specified, the collapse is the +fewest dims that order the tensor's elements as the original and preserve the +excluded dimension, unless the tensor collapses to a point. + +This function returns a pair of values. + +1) The (new) index of the preserved dimension if excludeDim is +specified. 0 if the tensor is collapsed to a point. -1 +otherwise. + +2) The new number of dimensions. +*/ +template +inline std::pair collapse_dims( + T* sizes, + T* strides, + int64_t dims, + const int excludeDim = -1) { + TORCH_CHECK( + excludeDim >= -1 && excludeDim < dims, + "expected excluded dim between -1 and dims - 1"); + + int64_t stopDim = (excludeDim == -1) ? dims : excludeDim; + int64_t newIndex = -1; + int64_t oldIndex = 0; + int64_t remappedExcludedDim = -1; + + while (oldIndex < dims) { + // Finds a dimension to collapse into + for (; oldIndex < stopDim; ++oldIndex) { + if (sizes[oldIndex] == 1) { + continue; + } + + ++newIndex; + sizes[newIndex] = sizes[oldIndex]; + strides[newIndex] = strides[oldIndex]; + ++oldIndex; + break; + } + + // Collapses dims + for (; oldIndex < stopDim; ++oldIndex) { + if (sizes[oldIndex] == 1) { + continue; + } + + if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) { + sizes[newIndex] *= sizes[oldIndex]; + strides[newIndex] = strides[oldIndex]; + } else { + ++newIndex; + sizes[newIndex] = sizes[oldIndex]; + strides[newIndex] = strides[oldIndex]; + } + } + + // Handles excludeDim being set (oldIndex == excludeDim) + if (oldIndex != dims) { + // Preserves excluded dimension + ++newIndex; + sizes[newIndex] = sizes[oldIndex]; + strides[newIndex] = strides[oldIndex]; + remappedExcludedDim = newIndex; + + // Restarts iteration after excludeDim + ++oldIndex; + stopDim = dims; + } + } + + // Handles special case of all dims size 1 + if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) { + dims = 1; + sizes[0] = 1; + strides[0] = 1; + + return std::pair(0, 1); + } + + dims = newIndex + 1; + return std::pair(remappedExcludedDim, dims); +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/CompositeExplicitAutogradFunctions.h b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..bdaee888e89bd9e1dd22c54a72d5d1b96affeda1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. 
+// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
+//   which include functions that have defaultable optional arguments.
+//   That requires knowing the full Tensor class definition.
+//
+// We break the cycle by doing the following:
+// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
+// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
+// - CPUFunctions_inl.h includes everything else
+// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
+//   and then it includes CPUFunctions_inl.h.
+// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
+// - This also means that in the static dispatch build, CPUFunctions.h only needs to
+//   #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
+#include <ATen/CompositeExplicitAutogradFunctions_inl.h>
diff --git a/voice_bridge/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h
new file mode 100644
index 0000000000000000000000000000000000000000..d66bdcf59637bbbc5ca09720f3bc957196bfd07a
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradFunctions_inl.h
@@ -0,0 +1,532 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the \
+  file will need to be re-compiled every time an operator is changed or added. \
+  Consider including a specific operator from \
+  <ATen/ops/{my_operator}_{dispatch_namespace}_dispatch.h>. \
+  See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/voice_bridge/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..b3b7c537ca6e8ad8fe7a30a5a7af956af7994d6e --- /dev/null +++ b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. 
+// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. +#include diff --git a/voice_bridge/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..c85729c1f9babc4e1daa2610e109c99e437b3f03 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CompositeExplicitAutogradNonFunctionalFunctions_inl.h @@ -0,0 +1,311 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/voice_bridge/torch/include/ATen/CompositeImplicitAutogradFunctions.h b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..fde0a471ac0135f1dcb55f78e10d0818c5cff2e2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. 
+#include diff --git a/voice_bridge/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..8c2e910beb3aed0acedade33b6aa96044e03e7d3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradFunctions_inl.h @@ -0,0 +1,495 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/voice_bridge/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..1432163225387fc9614419529c63e4ce402ca239 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions.h @@ -0,0 +1,29 @@ +#include + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h. +// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. 
The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunction.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunction.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl., +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. +#include diff --git a/voice_bridge/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..c9a5e5524b1469e42768fe354a60a5513945ad83 --- /dev/null +++ b/voice_bridge/torch/include/ATen/CompositeImplicitAutogradNestedTensorFunctions_inl.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + . \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include + + + diff --git a/voice_bridge/torch/include/ATen/Config.h b/voice_bridge/torch/include/ATen/Config.h new file mode 100644 index 0000000000000000000000000000000000000000..2701377028ffbba0dbb516c8938dd63b52b2f5bc --- /dev/null +++ b/voice_bridge/torch/include/ATen/Config.h @@ -0,0 +1,22 @@ +#pragma once + +// Test these using #if AT_MKL_ENABLED(), not #ifdef, so that it's +// obvious if you forgot to include Config.h +// c.f. 
https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined +// +// DO NOT put the macros for CUDA libraries in this file; they belong in cuda/CUDAConfig.h + +#define AT_MKLDNN_ENABLED() 1 +#define AT_MKL_ENABLED() 1 +#define AT_MKL_SEQUENTIAL() 0 +#define AT_FFTW_ENABLED() 0 +#define AT_POCKETFFT_ENABLED() 0 +#define AT_NNPACK_ENABLED() 0 +#define CAFFE2_STATIC_LINK_CUDA() 0 +#define AT_BUILD_WITH_BLAS() 1 +#define AT_BUILD_WITH_LAPACK() 1 +#define AT_PARALLEL_OPENMP 1 +#define AT_PARALLEL_NATIVE 0 +#define AT_PARALLEL_NATIVE_TBB 0 +#define AT_BLAS_F2C() 0 +#define AT_BLAS_USE_CBLAS_DOT() 0 diff --git a/voice_bridge/torch/include/ATen/Context.h b/voice_bridge/torch/include/ATen/Context.h new file mode 100644 index 0000000000000000000000000000000000000000..43f4433b7ce99cd4e4254445e8f4a6f38bf89795 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Context.h @@ -0,0 +1,447 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace at { + +class Tensor; + +enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM }; + +class TORCH_API Context { + public: + Context(); + + const Generator& defaultGenerator(Device device) { + DeviceType device_type = device.type(); + initCUDAIfNeeded(device_type); + initHIPIfNeeded(device_type); + if (device_type == at::kCPU) { + return at::detail::getDefaultCPUGenerator(); + } else if (device_type == at::kCUDA) { + return at::detail::getCUDAHooks().getDefaultCUDAGenerator(device.index()); + } else { + AT_ERROR(DeviceTypeName(device_type), " device type not enabled."); + } + } + Device getDeviceFromPtr(void* data, DeviceType device_type) { + initCUDAIfNeeded(device_type); + initHIPIfNeeded(device_type); + if (device_type == at::kCPU) { + return DeviceType::CPU; + } else if (device_type == at::kCUDA) { + return at::detail::getCUDAHooks().getDeviceFromPtr(data); + } else { + AT_ERROR(DeviceTypeName(device_type), " device type not enabled."); + } + } + static bool isPinnedPtr(void* data) { + return detail::getCUDAHooks().isPinnedPtr(data); + } + static bool hasOpenMP(); + static bool hasMKL(); + static bool hasLAPACK(); + static bool hasMKLDNN(); + static bool hasMAGMA() { + return detail::getCUDAHooks().hasMAGMA(); + } + static bool hasCUDA() { + return detail::getCUDAHooks().hasCUDA(); + } + static bool hasCUDART() { + return detail::getCUDAHooks().hasCUDART(); + } + static long versionCUDART() { + return detail::getCUDAHooks().versionCUDART(); + } + static bool hasCuDNN() { + return detail::getCUDAHooks().hasCuDNN(); + } + static long versionCuDNN() { + return detail::getCUDAHooks().versionCuDNN(); + } + static bool hasCuSOLVER() { + return detail::getCUDAHooks().hasCuSOLVER(); + } + static bool hasHIP() { + return detail::getHIPHooks().hasHIP(); + } + static bool hasIPU() { + return c10::impl::hasDeviceGuardImpl(at::DeviceType::IPU); + } + static bool hasXLA() { + return c10::impl::hasDeviceGuardImpl(at::DeviceType::XLA); + } + static bool hasLazy() { + return c10::impl::hasDeviceGuardImpl(at::DeviceType::Lazy); + } + static bool hasMPS(); + + static bool hasORT() { + return c10::impl::hasDeviceGuardImpl(at::DeviceType::ORT); + } + // defined in header so that getNonVariableType has ability to inline + // call_once check. 
getNonVariableType is called fairly frequently + void lazyInitCUDA() { + c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); }); + } + void lazyInitHIP() { + c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); }); + } + static const at::cuda::NVRTC& getNVRTC() { + return detail::getCUDAHooks().nvrtc(); + } + + static bool setFlushDenormal(bool on); + + // NB: This method is *purely* whether or not a user requested + // that CuDNN was enabled, it doesn't actually say anything about + // whether or not CuDNN is actually usable. Use cudnn_is_acceptable + // to test this instead + bool userEnabledCuDNN() const; + void setUserEnabledCuDNN(bool e); + bool userEnabledMkldnn() const; + void setUserEnabledMkldnn(bool e); + bool benchmarkCuDNN() const; + void setBenchmarkCuDNN(bool); + int benchmarkLimitCuDNN() const; + void setBenchmarkLimitCuDNN(int); + bool deterministicCuDNN() const; + void setDeterministicCuDNN(bool); + + // Note [Disabling Fused SDP Kernels] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Flash SDP kernels are enabled by default. However, they can be disabled + // by setting at::globalContext().setUserEnabledFlashSDP(false) flag. + // This is useful for debugging purposes. For example, if you want to + // compare the performance of the flash SDP kernels with the unfused + // kernel, you can disable the flash SDP kernels. By disabling + // the math SDP kernel, you can force your code to use flash kernels. + // The math SDP kernel can be disabled by setting + // at::globalContext().setUserEnabledMathSDP(false) flag. + void setSDPUseFlash(bool); + bool userEnabledFlashSDP() const; + + void setSDPUseMath(bool); + bool userEnabledMathSDP() const; + + at::LinalgBackend linalgPreferredBackend() const; + void setLinalgPreferredBackend(at::LinalgBackend); + + // Note [Enabling Deterministic Operations] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Operations in PyTorch that normally act nondeterministically, but have an + // alternate deterministic implementation, should satisfy the following + // requirements: + // + // * Include this comment: "See Note [Enabling Deterministic Operations]" + // + // * Check the value of `at::globalContext().deterministicAlgorithms()` to + // toggle + // between nondeterministic and deterministic implementations. + // + // * Have an entry in the list of PyTorch operations that toggle between + // nondeterministic + // and deterministic implementations, in the docstring of + // `use_deterministic_algorithms()` in torch/__init__.py + // + // `example_func()` below shows an example of toggling between + // nondeterministic and deterministic implementations: + // + // void example_func() { + // // See Note [Enabling Deterministic Operations] + // if (at::globalContext().deterministicAlgorithms()) { + // example_func_deterministic(); + // } else { + // example_func_nondeterministic(); + // } + // } + + bool deterministicAlgorithms() const; + bool deterministicAlgorithmsWarnOnly() const; + void setDeterministicAlgorithms(bool, bool); + + // Note [Writing Nondeterministic Operations] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Operations in PyTorch that act nondeterministically and do not have an + // alternate deterministic implementation should satisfy the following + // requirements: + // + // * Include this comment: "See Note [Writing Nondeterministic Operations]" + // + // * Include a comment explaining why the operation is nondeterministic. + // + // * Throw an error when `Context::deterministicAlgorithms()` is true. 
Most + // of the time, this should be accomplished by calling + // `at::globalContext().alertNotDeterministic()`. However, if the + // nondeterministic behavior is caused by the CuBLAS workspace + // configuration in CUDA >= 10.2, + // `at::globalContext().alertCuBLASConfigNotDeterministic()` should be + // called instead (in this case, a comment explaining why the operation is + // nondeterministic is not necessary). See below for details on these + // methods. + // + // * Have an entry in the list of nondeterministic PyTorch operations in the + // docstring of `use_deterministic_algorithms()` in torch/__init__.py + // + // * Have a test function in `test/test_torch.py` whose name begins with + // `test_nondeterministic_alert_`. Alternatively, if CuBLAS workspace + // configuration is the reason for nondeterminism, the operation should be + // included in the `test_cublas_config_nondeterministic_alert` test. Any new + // tests should ideally follow a pattern similar to the existing ones. + // + // `example_func()` below shows an example of the comments and error-throwing + // code for a nondeterministic operation: + // + // void example_func() { + // // See Note [Writing Nondeterministic Operations] + // // Nondeterministic because + // at::globalContext().alertNotDeterministic("example_func"); + // ... + // } + + // Throws an error if `Context::deterministicAlgorithms()` is true + static void alertNotDeterministic(c10::string_view const& caller); + + // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA + // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or + // ":4096:8". For more details: + // https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility + void alertCuBLASConfigNotDeterministic() const; + + void setFloat32MatmulPrecision(const std::string& s); + bool allowTF32CuDNN() const; + void setAllowTF32CuDNN(bool); + bool allowTF32CuBLAS() const; + void setAllowTF32CuBLAS(bool); + Float32MatmulPrecision float32MatmulPrecision() const; + void setFloat32MatmulPrecision(Float32MatmulPrecision p); + bool allowFP16ReductionCuBLAS() const; + void setAllowFP16ReductionCuBLAS(bool); + at::QEngine qEngine() const; + void setQEngine(at::QEngine e); + static const std::vector& supportedQEngines(); + static bool isXNNPACKAvailable(); + // This method is used to release the original weight after pre-packing. + // It should be called once before loading/running the model. + // NB: By default it is set to true for mobile builds.
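+ // (Illustrative usage, not part of the original header: a memory-constrained
+ // deployment might opt in explicitly, once, before loading any module, via
+ // `at::globalContext().setReleaseWeightsWhenPrepacking(true);`.)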
+ void setReleaseWeightsWhenPrepacking(bool e); + bool releaseWeightsWhenPrepacking() const; + + void setDisplayVmapFallbackWarnings(bool enabled); + bool areVmapFallbackWarningsEnabled() const; + + void setDefaultMobileCPUAllocator(); + void unsetDefaultMobileCPUAllocator(); + + private: + void initCUDAIfNeeded(DeviceType p) { + if (p == DeviceType::CUDA) { + lazyInitCUDA(); + } + } + void initHIPIfNeeded(DeviceType p) { + if (p == DeviceType::HIP) { + lazyInitHIP(); + } + } + static bool checkCuBLASConfigDeterministic(); + c10::once_flag thc_init; + c10::once_flag thh_init; + bool enabled_cudnn = true; + bool deterministic_cudnn = false; + bool _deterministic_algorithms = false; + bool _deterministic_algorithms_warn_only = false; + bool enabled_flashSDP = true; + bool enabled_mathSDP = true; +#ifdef USE_ROCM + bool benchmark_cudnn = true; +#else + bool benchmark_cudnn = false; +#endif + Float32MatmulPrecision float32_matmul_precision = + at::Float32MatmulPrecision::HIGHEST; + int benchmark_limit_cudnn = 10; + bool allow_tf32_cudnn = true; + bool allow_fp16_reduction_cublas = true; + bool enabled_mkldnn = true; + at::LinalgBackend linalg_preferred_backend = at::LinalgBackend::Default; +#ifdef C10_MOBILE + bool release_original_weights = true; +#else + bool release_original_weights = false; +#endif + bool display_vmap_fallback_warnings_ = false; + c10::optional quantized_engine = c10::nullopt; + + Allocator* prev_allocator_ptr_{nullptr}; +}; + +TORCH_API Context& globalContext(); + +static inline void init() { + globalContext(); +} + +TORCH_API Allocator* getCPUAllocator(); + +static inline DeprecatedTypeProperties& getDeprecatedTypeProperties( + Backend p, + ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + p, s); +} + +static inline DeprecatedTypeProperties& CPU(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::CPU, s); +} + +static inline DeprecatedTypeProperties& CUDA(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::CUDA, s); +} + +static inline DeprecatedTypeProperties& HIP(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::HIP, s); +} + +static inline DeprecatedTypeProperties& MPS(ScalarType s) { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + Backend::MPS, s); +} + +static inline bool hasCUDA() { + return globalContext().hasCUDA(); +} + +static inline bool hasHIP() { + return globalContext().hasHIP(); +} + +static inline bool hasIPU() { + return globalContext().hasIPU(); +} + +static inline bool hasXLA() { + return globalContext().hasXLA(); +} + +static inline bool hasMPS() { + return globalContext().hasMPS(); +} + +static inline bool hasORT() { + return globalContext().hasORT(); +} + +// Despite its name, this function returns the number of *CUDA* GPUs. +static inline size_t getNumGPUs() { + // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS + // FUNCTION. If you are interested in interrogating the number of + // devices for a specific device type, add that function to the + // relevant library (e.g., similar to at::cuda::device_count()) + if (hasCUDA() && hasHIP()) { + throw std::runtime_error( + "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades " + "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually " + "means HIP. 
Rebuild PyTorch with one or the other disabled."); + } else if (hasCUDA()) { + return detail::getCUDAHooks().getNumGPUs(); + } else if (hasHIP()) { + return detail::getHIPHooks().getNumGPUs(); + } else { + return 0; + } +} + +static inline bool hasOpenMP() { + return globalContext().hasOpenMP(); +} + +static inline bool hasMKL() { + return globalContext().hasMKL(); +} + +static inline bool hasLAPACK() { + return globalContext().hasLAPACK(); +} + +static inline bool hasMAGMA() { + return globalContext().hasMAGMA(); +} + +static inline bool hasMKLDNN() { + return globalContext().hasMKLDNN(); +} + +static inline void manual_seed(uint64_t seed) { + auto gen = globalContext().defaultGenerator(DeviceType::CPU); + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(gen.mutex()); + gen.set_current_seed(seed); + } + // NB: Sometimes we build with CUDA, but we don't have any GPUs + // available. In that case, we must not seed CUDA; it will fail! + const auto num_gpus = detail::getCUDAHooks().getNumGPUs(); + if (hasCUDA() && num_gpus > 0) { + for (const auto i : c10::irange(num_gpus)) { + auto cuda_gen = globalContext().defaultGenerator( + Device(at::kCUDA, static_cast(i))); + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(cuda_gen.mutex()); + cuda_gen.set_current_seed(seed); + } + } + } +} + +// When the global flag `allow_tf32` is set to true, cuBLAS handles are +// automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH. +// For some operators, such as addmv, TF32 offers no performance improvement +// but causes precision loss. To help this case, this class implements +// a RAII guard that can be used to quickly disable TF32 within its scope. +// +// Usage: +// NoTF32Guard disable_tf32; +struct TORCH_API NoTF32Guard { + NoTF32Guard(); + ~NoTF32Guard(); + static bool should_disable_tf32(); + + private: + bool changed = false; +}; + +#ifdef USE_ROCM +struct TORCH_API ROCmBackwardPassGuard { + ROCmBackwardPassGuard(); + ~ROCmBackwardPassGuard(); + static bool is_backward_pass(); + + private: + static thread_local bool is_backward_pass_; +}; +#endif + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/DLConvertor.h b/voice_bridge/torch/include/ATen/DLConvertor.h new file mode 100644 index 0000000000000000000000000000000000000000..9ee55ed5df2f9d5e6ab0cd4a1009e137fb22608d --- /dev/null +++ b/voice_bridge/torch/include/ATen/DLConvertor.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include + +// this convertor will: +// 1) take a Tensor object and wrap it in the DLPack tensor +// 2) take a dlpack tensor and convert it to the ATen Tensor + +namespace at { + +TORCH_API ScalarType toScalarType(const DLDataType& dtype); +TORCH_API DLManagedTensor* toDLPack(const Tensor& src); +TORCH_API Tensor fromDLPack(const DLManagedTensor* src); +TORCH_API DLDataType getDLDataType(const Tensor& t); +TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/Device.h b/voice_bridge/torch/include/ATen/Device.h new file mode 100644 index 0000000000000000000000000000000000000000..6c515580363c9e9aab3ee322678fd0cb0283aec8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Device.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/voice_bridge/torch/include/ATen/DeviceGuard.h b/voice_bridge/torch/include/ATen/DeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..83bb31d7fd425cf7b085c252bd8022cd7ec79ad8 --- 
/dev/null +++ b/voice_bridge/torch/include/ATen/DeviceGuard.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include +#include +#include // TensorList whyyyyy + +namespace at { + +// Are you here because you're wondering why DeviceGuard(tensor) no +// longer works? For code organization reasons, we have temporarily(?) +// removed this constructor from DeviceGuard. The new way to +// spell it is: +// +// OptionalDeviceGuard guard(device_of(tensor)); + +/// Return the Device of a Tensor, if the Tensor is defined. +inline c10::optional device_of(const Tensor& t) { + if (t.defined()) { + return c10::make_optional(t.device()); + } else { + return c10::nullopt; + } +} + +inline c10::optional device_of(const c10::optional& t) { + return t.has_value() ? device_of(t.value()) : c10::nullopt; +} + +/// Return the Device of a TensorList, if the list is non-empty and +/// the first Tensor is defined. (This function implicitly assumes +/// that all tensors in the list have the same device.) +inline c10::optional device_of(ITensorListRef t) { + if (!t.empty()) { + return device_of(t.front()); + } else { + return c10::nullopt; + } +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/DimVector.h b/voice_bridge/torch/include/ATen/DimVector.h new file mode 100644 index 0000000000000000000000000000000000000000..cb652fffcb14819d8ca5292daa012ad47f4c3fad --- /dev/null +++ b/voice_bridge/torch/include/ATen/DimVector.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/voice_bridge/torch/include/ATen/Dimname.h b/voice_bridge/torch/include/ATen/Dimname.h new file mode 100644 index 0000000000000000000000000000000000000000..71836a9e25d3d82d9cd5024b2f33e147e14bf87e --- /dev/null +++ b/voice_bridge/torch/include/ATen/Dimname.h @@ -0,0 +1 @@ +#include diff --git a/voice_bridge/torch/include/ATen/Dispatch.h b/voice_bridge/torch/include/ATen/Dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d2f5a244ad573fddc2c4a4c116f11334c95807a0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Dispatch.h @@ -0,0 +1,536 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __CUDACC__ +#include // For CUDA_VERSION +#endif + +#ifdef TEMPLATE_SELECTIVE_BUILD +#include +#else +namespace at { +/** + * The method should_include_kernel_dtype() returns true/false + * based on whether the switching code for a specific dtype should be + * included based on build time constants generated from tracing model + * execution. This method will be implemented via code-generation and + * included in this file when code-gen is ready. + */ +inline constexpr bool should_include_kernel_dtype( + const char* /*kernel_tag_str*/, + at::ScalarType /*scalar_type*/ +) { + return true; +} +} // namespace at +#endif + +/** + * In the Facebook internal build (using BUCK), this macro is enabled by + * passing in -c pt.enable_record_kernel_dtype=1 when building the tracer + * binary.
+ */ +#if defined ENABLE_RECORD_KERNEL_FUNCTION_DTYPE +namespace at { +namespace detail { +TORCH_API void record_kernel_function_dtype(std::string name); +} +} // namespace at + +#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) \ + at::detail::record_kernel_function_dtype( \ + std::string(NAME) + "$" + toString(enum_type)); +#else +#define RECORD_KERNEL_FUNCTION_DTYPE(NAME, enum_type) +#endif + +// Avoid if_constexpr if possible, as it's more expensive to compile +#if defined __cpp_if_constexpr +#define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) \ + do { \ + if constexpr (!at::should_include_kernel_dtype( \ + at_dispatch_name, enum_type)) { \ + AT_ERROR( \ + "dtype '", \ + toString(enum_type), \ + "' not selected for kernel tag ", \ + at_dispatch_name); \ + } \ + } while (0) +#else // defined __cpp_if_constexpr +#define AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type) \ + at::guts::if_constexpr([&] { \ + AT_ERROR( \ + "dtype '", \ + toString(enum_type), \ + "' not selected for kernel tag ", \ + at_dispatch_name); \ + }) +#endif + +// Workaround for C10_UNUSED because CUDA 10.2 and below fails to handle unused +// attribute in the type aliasing context. Keep name long and verbose to avoid +// macro collisions. +#if defined(__CUDACC__) && CUDA_VERSION < 11000 +#define C10_UNUSED_DISPATCH_CUDA_WORKAROUND +#else +#define C10_UNUSED_DISPATCH_CUDA_WORKAROUND C10_UNUSED +#endif + +#define AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, HINT, ...) \ + case enum_type: { \ + AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \ + using HINT C10_UNUSED_DISPATCH_CUDA_WORKAROUND = \ + c10::impl::ScalarTypeToCPPTypeT; \ + return __VA_ARGS__(); \ + } + +#define AT_DISPATCH_CASE(enum_type, ...) \ + AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__) + +#define AT_DISPATCH_CASE_QINT(enum_type, scalar_type, ...) \ + case enum_type: { \ + AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \ + using scalar_t = scalar_type; \ + using underlying_t C10_UNUSED = typename scalar_t::underlying; \ + const auto& SCALAR_TYPE C10_UNUSED = enum_type; \ + const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \ + return __VA_ARGS__(); \ + } + +#define AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \ + enum_type, scalar_type, bitwidth, qmin, qmax, ...) \ + case enum_type: { \ + AT_PRIVATE_CHECK_SELECTIVE_BUILD(enum_type); \ + using scalar_t = scalar_type; \ + using underlying_t C10_UNUSED = typename scalar_t::underlying; \ + const auto& SCALAR_TYPE C10_UNUSED = enum_type; \ + const auto& UNDERLYING_TYPE C10_UNUSED = toUnderlying(enum_type); \ + C10_UNUSED int bit_width = bitwidth; \ + C10_UNUSED int64_t quant_min = qmin; \ + C10_UNUSED int64_t quant_max = qmax; \ + return __VA_ARGS__(); \ + } + +namespace detail { + +inline at::ScalarType scalar_type(at::ScalarType s) { + return s; +} + +C10_DEPRECATED_MESSAGE( + "passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, " + "pass an at::ScalarType instead") +inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties& t) { + return t.scalarType(); +} + +C10_DEPRECATED_MESSAGE( + "AT_DISPATCH_ALL_TYPES_AND_HALF is deprecated, " + "use AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, ...) instead") +inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF() {} + +C10_DEPRECATED_MESSAGE( + "AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX is deprecated, " + "use AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(at::ScalarType::Half, ...)
" + "instead") +inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {} + +} // namespace detail + +// The AT_DISPATCH_* family of macros provides the ability to +// conveniently generate specializations of a kernel over all of the +// dtypes we care about in PyTorch. We call it "dispatch" because +// we are "dispatching" to the correct, dtype-specific kernel. +// +// A standard usage looks like: +// +// AT_DISPATCH_ALL_TYPES(self.scalar_type(), "op_name", [&] { +// // Your code here, with 'scalar_t' now defined to +// // be the dtype in question +// }); +// +// There are many variations of this macro, so it's important to +// understand exactly /which/ dtypes you want to get instantiated, as +// well as what the "default" set is. +// +// The default set of dtypes that are instantiated (e.g., by +// AT_DISPATCH_ALL_TYPES) are floating point types (float, double), +// and integral types (int32_t, int64_t, int16_t, int8_t, uint8_t), +// but NOT booleans (bool), half-precision floats (Half) or +// complex number (c10::complex, c10::complex). +// This "cut" is somewhat historical (the default types are the +// ones that TH historically supported), but it also reflects the +// fact that the non-default types are "poorly" behaved (booleans +// are NOT integers mod 2, half precision operations ~essentially +// don't exist on CPU, complex numbers are an experimental application). +// +// Here are the questions you should generally ask to decide which +// dispatch you want: +// +// 1. Is this an integral or floating point specific operation? +// (If so, you'll want one of the FLOATING or INTEGRAL macros.) +// +// 2. Should half be supported? (If you're on CPU, the answer is almost +// definitely no. If you do want support, use one of the AND_HALF +// macros) +// +// Much rarer situations: +// +// 3. Should bool be supported? (You often have to write your kernel +// differently if arithmetic operations are involved.) If so, +// Use AT_DISPATCH_ALL_TYPES_AND along with ScalarType::Bool +// +// 4. Should complex be supported? The answer is almost always no, +// unless you are working on "generic" code that should work on +// all dtypes. +// +// Parameters: +// ----------- +// +// 1. The NAME argument is a "tag" that is used to trace and then +// conditionally compile fragments of the case statements such +// that the kernel functions are specialized only for the dtypes +// that are needed. The NAME parameter *must* be a build time +// const char* (can't be std::string, etc...) +// +// Please ensure that the NAME is unique for every implementation +// or you run the risk of over-including code for the kernel +// functions. There is no risk of missing out on any code, so +// it's mostly a risk of a Type-2 error, and not a Type-1 error. +// +// Switch-like syntax: +// ------------------- +// There is also a switch-case like syntax which is useful if a kernel +// needs to be specialized for particular scalar types +// +// AT_DISPATCH_SWITCH(self.scalar_type(), "op_name", +// AT_DISPATCH_CASE_INTEGRAL_TYPES([&] { +// op_integral(iter); +// }) +// AT_DISPATCH_CASE_FLOATING_TYPES([&] { +// op_floating(iter); +// }) +// AT_DISPATCH_CASE(kBool, [&] { +// op_bool(iter); +// }) +// ); +// +// For each AT_DISPATCH_FOO macro, there is a corresponding +// AT_DISPATCH_CASE_FOO macro which can be used inside of an +// AT_DISPATCH_SWITCH block. + +// NB: the the_type variable is not used, but we have kept it for +// backwards compatibility. 
It's probably not used by anyone though; +// but we're just being safe (and it doesn't hurt.) Note we must +// use it to shut up warnings about unused store. + +#define AT_DISPATCH_SWITCH(TYPE, NAME, ...) \ + [&] { \ + const auto& the_type = TYPE; \ + constexpr const char* at_dispatch_name = NAME; \ + /* don't use TYPE again in case it is an expensive or side-effect op */ \ + at::ScalarType _st = ::detail::scalar_type(the_type); \ + RECORD_KERNEL_FUNCTION_DTYPE(at_dispatch_name, _st); \ + switch (_st) { \ + __VA_ARGS__ \ + default: \ + AT_ERROR( \ + '"', \ + at_dispatch_name, \ + "\" not implemented for '", \ + toString(_st), \ + "'"); \ + } \ + }() + +#define AT_DISPATCH_CASE_FLOATING_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) + +#define AT_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(...) \ + AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) + +#define AT_DISPATCH_FLOATING_TYPES_AND_HALF(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, ...) \ + AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) + +#define AT_DISPATCH_FLOATING_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_FLOATING_TYPES_AND(SCALARTYPE, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_FLOATING_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \ + AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) + +#define AT_DISPATCH_FLOATING_TYPES_AND2( \ + SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_FLOATING_TYPES_AND2( \ + SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_COMPLEX_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::ComplexDouble, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::ComplexFloat, __VA_ARGS__) + +#define AT_DISPATCH_COMPLEX_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, ...) \ + AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) + +#define AT_DISPATCH_COMPLEX_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, AT_DISPATCH_CASE_COMPLEX_TYPES_AND(SCALARTYPE, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(...) \ + AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) + +#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1(SCALARTYPE, ...) \ + AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) + +#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1( \ + SCALARTYPE, TYPE, NAME, ...) 
\ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND1( \ + SCALARTYPE, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \ + SCALARTYPE1, SCALARTYPE2, ...) \ + AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) + +#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( \ + SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND2( \ + SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \ + AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) + +#define AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_FLOATING_AND_COMPLEX_TYPES_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_INTEGRAL_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) + +#define AT_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, ...) \ + AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) + +#define AT_DISPATCH_INTEGRAL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_INTEGRAL_TYPES_AND(SCALARTYPE, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES(...) \ + AT_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_QINT_TYPES(...) \ + AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \ + AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) \ + AT_DISPATCH_CASE_QINT(at::kQInt32, at::qint32, __VA_ARGS__) + +#define AT_DISPATCH_QINT_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_QINT_BYTE_TYPES(...) \ + AT_DISPATCH_CASE_QINT(at::kQInt8, at::qint8, __VA_ARGS__) \ + AT_DISPATCH_CASE_QINT(at::kQUInt8, at::quint8, __VA_ARGS__) + +#define AT_DISPATCH_QINT_BYTE_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_QINT_BYTE_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(...) 
\ + AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \ + at::kQInt8, at::qint8, CHAR_BIT, SCHAR_MIN, SCHAR_MAX, __VA_ARGS__) \ + AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \ + at::kQUInt8, at::quint8, CHAR_BIT, 0, UCHAR_MAX, __VA_ARGS__) \ + AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \ + at::kQInt32, \ + at::qint32, \ + CHAR_BIT * sizeof(int), \ + INT_MIN, \ + INT_MAX, \ + __VA_ARGS__) \ + AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \ + at::kQUInt4x2, at::quint4x2, 4, 0, 15, __VA_ARGS__) \ + AT_QINT_SUB_BYTE_PRIVATE_CASE_TYPE( \ + at::kQUInt2x4, at::quint2x4, 2, 0, 3, __VA_ARGS__) + +#define AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, AT_DISPATCH_CASE_QINT_AND_SUB_BYTE_TYPES(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(...) \ + AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE_COMPLEX_TYPES(__VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, ...) \ + AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND(SCALARTYPE, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, NAME, AT_DISPATCH_CASE_ALL_TYPES_AND(SCALARTYPE, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, ...) \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE, __VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND(SCALARTYPE, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, ...) \ + AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \ + SCALARTYPE1, SCALARTYPE2, ...) \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( \ + SCALARTYPE1, SCALARTYPE2, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND2( \ + SCALARTYPE1, SCALARTYPE2, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \ + AT_DISPATCH_CASE_ALL_TYPES(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, ...) \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, TYPE, NAME, ...) 
\ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND3( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, __VA_ARGS__)) + +#define AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, ...) \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX(__VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE1, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE2, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE3, __VA_ARGS__) \ + AT_DISPATCH_CASE(SCALARTYPE4, __VA_ARGS__) + +#define AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \ + SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, SCALARTYPE4, __VA_ARGS__)) + +#define AT_DISPATCH_INDEX_TYPES(TYPE, NAME, ...) \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_PRIVATE_CASE_TYPE_USING_HINT( \ + at::ScalarType::Int, index_t, __VA_ARGS__) \ + AT_PRIVATE_CASE_TYPE_USING_HINT( \ + at::ScalarType::Long, index_t, __VA_ARGS__)) + +// ---------------------------------------------------------------------------- +// DEPRECATED MACROS, DON'T USE THESE +// ---------------------------------------------------------------------------- + +#define AT_DISPATCH_ALL_TYPES_AND_HALF(TYPE, NAME, ...) \ + detail::deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF(); \ + AT_DISPATCH_SWITCH( \ + TYPE, \ + NAME, \ + AT_DISPATCH_CASE_ALL_TYPES_AND(at::ScalarType::Half, __VA_ARGS__)) diff --git a/voice_bridge/torch/include/ATen/DynamicLibrary.h b/voice_bridge/torch/include/ATen/DynamicLibrary.h new file mode 100644 index 0000000000000000000000000000000000000000..523a21985f225eb72ac23c562e990fc105bd1ed4 --- /dev/null +++ b/voice_bridge/torch/include/ATen/DynamicLibrary.h @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +class DynamicLibraryError : public Error { + using Error::Error; +}; + +} // namespace c10 + +namespace at { + +struct DynamicLibrary { + AT_DISALLOW_COPY_AND_ASSIGN(DynamicLibrary); + + TORCH_API DynamicLibrary( + const char* name, + const char* alt_name = nullptr, + bool leak_handle = false); + + TORCH_API void* sym(const char* name); + + TORCH_API ~DynamicLibrary(); + + private: + bool leak_handle; + void* handle = nullptr; +}; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/EmptyTensor.h b/voice_bridge/torch/include/ATen/EmptyTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..969eeb6dc5eed21d02e3621748cfc98b3531dcd8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/EmptyTensor.h @@ -0,0 +1,148 @@ +#pragma once +#include + +namespace at { +namespace detail { + +template +inline void check_size_nonnegative(ArrayRefType size) { + for (auto x : size) { + TORCH_CHECK( + x >= 0, + "Trying to create tensor with negative dimension ", + x, + ": ", + size); + } +} + +TORCH_API size_t computeStorageNbytesContiguous( + IntArrayRef sizes, + size_t itemsize, + size_t storage_offset = 0); +TORCH_API size_t computeStorageNbytes( + IntArrayRef sizes, + IntArrayRef strides, + size_t itemsize, + size_t storage_offset = 0); +TORCH_API SymInt computeStorageNbytes( + SymIntArrayRef sizes, + SymIntArrayRef strides, + SymInt itemsize, + SymInt storage_offset = 0); + +TORCH_API TensorBase empty_generic( + IntArrayRef size, + c10::Allocator* allocator, + c10::DispatchKeySet ks, + ScalarType scalar_type, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_strided_generic( + IntArrayRef size, + IntArrayRef stride, 
+ c10::Allocator* allocator, + c10::DispatchKeySet ks, + ScalarType scalar_type); + +TORCH_API TensorBase empty_strided_symint_generic( + SymIntArrayRef size, + SymIntArrayRef stride, + c10::Allocator* allocator, + c10::DispatchKeySet ks, + ScalarType scalar_type); + +TORCH_API TensorBase empty_cpu( + IntArrayRef size, + ScalarType dtype, + bool pin_memory = false, + c10::optional memory_format_opt = c10::nullopt); + +TORCH_API TensorBase empty_cpu( + IntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options); + +TORCH_API TensorBase empty_strided_cpu( + IntArrayRef size, + IntArrayRef stride, + ScalarType dtype, + bool pin_memory = false); + +TORCH_API TensorBase empty_strided_cpu( + IntArrayRef size, + IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_API TensorBase empty_strided_cpu( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions& options); + +TORCH_API TensorBase empty_meta( + IntArrayRef size, + ScalarType dtype, + c10::optional memory_format_opt = c10::nullopt); + +TORCH_API TensorBase empty_meta( + IntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_symint_meta( + SymIntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options); + +TORCH_API TensorBase +empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype); + +TORCH_API TensorBase empty_strided_meta( + IntArrayRef size, + IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_API TensorBase empty_strided_meta( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions& options); + +TORCH_API TensorBase empty_strided_symint_meta( + SymIntArrayRef size, + SymIntArrayRef stride, + ScalarType dtype); + +TORCH_API TensorBase empty_strided_symint_meta( + SymIntArrayRef size, + SymIntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_API TensorBase empty_strided_symint_meta( + SymIntArrayRef size, + SymIntArrayRef stride, + const TensorOptions& options); + +} // namespace detail +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ExpandBase.h b/voice_bridge/torch/include/ATen/ExpandBase.h new file mode 100644 index 0000000000000000000000000000000000000000..8db6be6a643c8cb60cab8487478f9a2f0c817d8b --- /dev/null +++ b/voice_bridge/torch/include/ATen/ExpandBase.h @@ -0,0 +1,30 @@ +#include + +// Broadcasting utilities for working with TensorBase +namespace at { +namespace internal { +TORCH_API TensorBase expand_slow_path(const TensorBase& self, IntArrayRef size); +} // namespace internal + +inline c10::MaybeOwned expand_size( + const TensorBase& self, + IntArrayRef size) { + if (size.equals(self.sizes())) { + return c10::MaybeOwned::borrowed(self); + } + return c10::MaybeOwned::owned( + at::internal::expand_slow_path(self, size)); +} +c10::MaybeOwned expand_size(TensorBase&& self, IntArrayRef size) 
= + delete; + +inline c10::MaybeOwned expand_inplace( + const TensorBase& tensor, + const TensorBase& to_expand) { + return expand_size(to_expand, tensor.sizes()); +} +c10::MaybeOwned expand_inplace( + const TensorBase& tensor, + TensorBase&& to_expand) = delete; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ExpandUtils.h b/voice_bridge/torch/include/ATen/ExpandUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..a54853b259e730029be5e79622ab93aa711db2c1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ExpandUtils.h @@ -0,0 +1,512 @@ +#pragma once + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace at { + +TORCH_API std::vector infer_size(IntArrayRef a, IntArrayRef b); +TORCH_API DimVector infer_size_dimvector(IntArrayRef a, IntArrayRef b); + +// Named type instead of a pair/tuple so that we can be sure to +// construct the vectors in place and get NRVO. +template +struct InferExpandGeometryResult { + Container sizes; + Container strides; + explicit InferExpandGeometryResult(size_t ndim) + : sizes(ndim), strides(ndim) {} + explicit InferExpandGeometryResult(IntArrayRef sizes_, size_t ndim) + : sizes(sizes_.begin(), sizes_.end()), strides(ndim) {} +}; + +TORCH_API std::tuple, std::vector> +inferExpandGeometry( + IntArrayRef tensor_sizes, + IntArrayRef tensor_strides, + IntArrayRef sizes); + +TORCH_API InferExpandGeometryResult inferExpandGeometry_dimvector( + IntArrayRef tensor_sizes, + IntArrayRef tensor_strides, + IntArrayRef sizes); + +TORCH_API std::vector infer_dense_strides( + IntArrayRef tensor_sizes, + IntArrayRef tensor_strides); + +// True if input shapes are expandable +// NOTE: infer_size did a similar check, please keep them sync if change is +// needed +inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) { + size_t ndim1 = shape1.size(); + size_t ndim2 = shape2.size(); + size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2; + + for (int64_t i = ndim - 1; i >= 0; --i) { + if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 || + shape2[ndim2] == 1) { + continue; + } + return false; + } + return true; +} + +// avoid copy-construction of Tensor by using a reference_wrapper. +inline void check_defined( + std::initializer_list> tensors, + const char* api_name) { + for (auto& t : tensors) { + if (!t.get().defined()) { + AT_ERROR(api_name, "(...) called with an undefined Tensor"); + } + } +} + +// NOTE [ ExpandUtils Borrowing ] +// +// Functions in ExpandUtils return `c10::MaybeOwned` because +// expansion may not actually be needed, in which case we can improve +// efficiency by returning +// `c10::MaybeOwned::borrowed(to_expand)`. However, this means +// that you need to be careful: the returned `c10::MaybeOwned` +// must not outlive the original `Tensor` object that `to_expand` +// referred to! The deleted rvalue reference overloads of these +// functions help with this by preventing trivial use of a temporary +// resulting from a function call, but it is still possible to make a +// mistake. 
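+//
+// An illustrative sketch of that contract (the helper below is hypothetical,
+// not part of this header): the caller must keep `a` and `b` alive for as
+// long as the borrowed results are in use.
+//
+//   Tensor broadcast_add(const Tensor& a, const Tensor& b) {
+//     auto expanded = expand_outplace(a, b, "broadcast_add");
+//     // MaybeOwned<Tensor> dereferences like a pointer: borrowed when no
+//     // expansion was needed, owned otherwise.
+//     return std::get<0>(expanded)->add(*std::get<1>(expanded));
+//   }
+//
+//   // The deleted rvalue overloads make the obvious mistake fail to compile:
+//   // expand_size(Tensor{a}, sizes); // error: call to deleted function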
+ +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + const Tensor& to_expand) { + if (tensor.sizes().equals(to_expand.sizes())) { + return c10::MaybeOwned::borrowed(to_expand); + } + return c10::MaybeOwned::owned(to_expand.expand(tensor.sizes())); +} + +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + Tensor&& to_expand) = delete; + +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + const Tensor& to_expand, + const char* api_name) { + check_defined({tensor, to_expand}, api_name); + return expand_inplace(tensor, to_expand); +} + +inline c10::MaybeOwned expand_inplace( + const Tensor& tensor, + Tensor&& to_expand, + const char* api_name) = delete; + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + const Tensor& to_expand2) { + if (tensor.sizes().equals(to_expand1.sizes()) && + tensor.sizes().equals((to_expand2.sizes()))) { + return std::make_tuple( + c10::MaybeOwned::borrowed(to_expand1), + c10::MaybeOwned::borrowed(to_expand2)); + } + + return std::make_tuple( + c10::MaybeOwned::owned(to_expand1.expand(tensor.sizes())), + c10::MaybeOwned::owned(to_expand2.expand(tensor.sizes()))); +} + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + Tensor&& to_expand1, + const Tensor& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + Tensor&& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace(const Tensor& tensor, Tensor&& to_expand1, Tensor&& to_expand2) = + delete; + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + const Tensor& to_expand2, + const char* api_name) { + check_defined({tensor, to_expand1, to_expand2}, api_name); + return expand_inplace(tensor, to_expand1, to_expand2); +} + +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + Tensor&& to_expand1, + const Tensor& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + const Tensor& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_inplace( + const Tensor& tensor, + Tensor&& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; + +// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation. 
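+// For example (illustrative): if `x` has shape [4, 1] and `y` has shape [3],
+// expand_outplace(x, y) below computes the common broadcast shape [4, 3] via
+// infer_size_dimvector and returns both tensors expanded (owned) to that
+// shape; if the two shapes already match, both results are cheap borrows.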
+inline std::tuple, c10::MaybeOwned> +expand_outplace(const Tensor& to_expand1, const Tensor& to_expand2) { + if (to_expand1.sizes().equals(to_expand2.sizes())) { + return std::make_tuple( + c10::MaybeOwned::borrowed(to_expand1), + c10::MaybeOwned::borrowed(to_expand2)); + } + + auto expanded_size = + infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes()); + return std::make_tuple( + c10::MaybeOwned::owned(to_expand1.expand(expanded_size)), + c10::MaybeOwned::owned(to_expand2.expand(expanded_size))); +} + +inline std::tuple, c10::MaybeOwned> +expand_outplace(Tensor&& to_expand1, const Tensor& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace(const Tensor& to_expand1, Tensor&& to_expand2) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2) = delete; + +inline std::tuple, c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + const char* api_name) { + check_defined({to_expand1, to_expand2}, api_name); + return expand_outplace(to_expand1, to_expand2); +} + +inline std::tuple, c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; +inline std::tuple, c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + const char* api_name) = delete; + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3) { + if (to_expand1.sizes().equals(to_expand2.sizes()) && + to_expand1.sizes().equals(to_expand3.sizes())) { + return std::make_tuple( + c10::MaybeOwned::borrowed(to_expand1), + c10::MaybeOwned::borrowed(to_expand2), + c10::MaybeOwned::borrowed(to_expand3)); + } + + auto expanded_size12 = + infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes()); + auto expanded_size = + infer_size_dimvector(expanded_size12, to_expand3.sizes()); + return std::make_tuple( + c10::MaybeOwned::owned(to_expand1.expand(expanded_size)), + c10::MaybeOwned::owned(to_expand2.expand(expanded_size)), + c10::MaybeOwned::owned(to_expand3.expand(expanded_size))); +} + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + Tensor&& to_expand3) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace(Tensor&& to_expand1, Tensor&& 
to_expand2, Tensor&& to_expand3) = + delete; + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3, + const char* api_name) { + check_defined({to_expand1, to_expand2, to_expand3}, api_name); + return expand_outplace(to_expand1, to_expand2, to_expand3); +} + +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + const Tensor& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + const Tensor& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + const Tensor& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + const Tensor& to_expand1, + Tensor&& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; +inline std::tuple< + c10::MaybeOwned, + c10::MaybeOwned, + c10::MaybeOwned> +expand_outplace( + Tensor&& to_expand1, + Tensor&& to_expand2, + Tensor&& to_expand3, + const char* api_name) = delete; + +inline c10::MaybeOwned expand_size( + const Tensor& to_expand, + IntArrayRef sizes) { + if (to_expand.sizes().equals(sizes)) { + return c10::MaybeOwned::borrowed(to_expand); + } + + return c10::MaybeOwned::owned(to_expand.expand(sizes)); +} + +inline c10::MaybeOwned expand_size( + Tensor&& to_expand, + IntArrayRef sizes) = delete; + +inline c10::MaybeOwned expand_size( + const Tensor& to_expand, + IntArrayRef sizes, + const char* api_name) { + check_defined({to_expand}, api_name); + return expand_size(to_expand, sizes); +} + +inline c10::MaybeOwned expand_size( + Tensor&& to_expand, + IntArrayRef sizes, + const char* api_name) = delete; + +inline std::vector expand_outplace(TensorList to_expand) { + // expands a list of Tensors; ignores undefined (null) tensors + bool first = true; + DimVector sizes; + for (const auto i : c10::irange(to_expand.size())) { + if (!to_expand[i].defined()) { + continue; + } else if (first) { + sizes = to_expand[i].sizes(); + first = false; + } else { + sizes = infer_size_dimvector(sizes, to_expand[i].sizes()); + } + } + + std::vector result(to_expand.size()); + for (const auto i : c10::irange(to_expand.size())) { + if (!to_expand[i].defined()) { + continue; + } else if (to_expand[i].sizes().equals(sizes)) { + result[i] = to_expand[i]; + } else { + result[i] = to_expand[i].expand(sizes); + } + } + return result; +} + +static inline Tensor sum_to( + Tensor tensor, + const c10::SymIntArrayRef shape, + bool always_return_non_view = false) { + if (shape.size() == 0) { + return tensor.sum(); + } + + auto sizes = tensor.sym_sizes(); + c10::SmallVector reduce_dims; + const int64_t leading_dims = sizes.size() - shape.size(); + for (const auto i : c10::irange(leading_dims)) { + 
reduce_dims.push_back(i); + } + for (int64_t i = leading_dims; i < static_cast(sizes.size()); ++i) { + if (shape[i - leading_dims] == 1 && sizes[i] != 1) { + reduce_dims.push_back(i); + } + } + + if (!reduce_dims.empty()) { + tensor = tensor.sum(reduce_dims, /*keepdim=*/true); + } + + if (always_return_non_view) { + // This is only actually used by the functionalization pass. + // We want to be able to guarantee that this function doesn't return a view + // of the input. + return leading_dims > 0 ? at::view_copy_symint(tensor, shape) + : tensor.clone(); + } else { + return leading_dims > 0 ? tensor.view_symint(shape) : tensor; + } +} + +// Sums `tensor` repeatedly to produce a tensor of shape `shape`. +// Precondition: is_expandable_to(shape, tensor.sizes()) must be true +static inline Tensor sum_to( + Tensor tensor, + const IntArrayRef shape, + bool always_return_non_view = false) { + auto sym_size = c10::SymIntArrayRef( + reinterpret_cast(shape.data()), shape.size()); + return sum_to(tensor, sym_size, always_return_non_view); +} + +static inline bool is_expandable_to( + SymIntArrayRef shape, + c10::SymIntArrayRef desired) { + size_t ndim = shape.size(); + size_t target_dim = desired.size(); + if (ndim > target_dim) { + return false; + } + for (const auto i : c10::irange(ndim)) { + auto size = shape[ndim - i - 1]; + auto target = desired[target_dim - i - 1]; + if (size != target && size != 1) { + return false; + } + } + return true; +} + +static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) { + auto sym_shape = c10::SymIntArrayRef( + reinterpret_cast(shape.data()), shape.size()); + auto sym_desired = c10::SymIntArrayRef( + reinterpret_cast(desired.data()), desired.size()); + return is_expandable_to(sym_shape, sym_desired); +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/Formatting.h b/voice_bridge/torch/include/ATen/Formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..392e2a27b0130c7ba55621d6ac1d6fd4e989db02 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Formatting.h @@ -0,0 +1 @@ +#include diff --git a/voice_bridge/torch/include/ATen/FuncTorchTLS.h b/voice_bridge/torch/include/ATen/FuncTorchTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..9242a1d177a75c625ac0adc981a0bfdb6d4ff153 --- /dev/null +++ b/voice_bridge/torch/include/ATen/FuncTorchTLS.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include + +namespace at { +namespace functorch { + +// NOTE [functorch TLS in pytorch/pytorch] +// +// functorch lives out-of-tree. However, it has some TLS that needs to be +// propagated. The solution for that is we store a pointer to the TLS +// inside pytorch/pytorch and extend FuncTorchTLSBase inside functorch to +// include whatever functorch needs. +// +// We need to store a pointer due to the indirection: +// inside functorch, we will create a subclass of FuncTorchTLSBase called +// FuncTorchTLSImpl that actually contains metadata, like the DynamicLayerStack. +// FuncTorchTLSBase doesn't have any metadata because it hasn't been defined +// yet. +// +// Here in pytorch/pytorch, we will pass around FuncTorchTLSBase*, but inside +// functorch, we will assign a FuncTorchTLSImpl* to the FuncTorchTLSBase*. +// We can't directly pass around FuncTorchTLSBase (without a pointer) because +// FuncTorchTLSImpl does not fit inside a FuncTorchTLSBase by virtue of having +// more elements.
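+//
+// An illustrative sketch of the indirection described above (everything here
+// other than FuncTorchTLSBase and the deepcopy() signature is hypothetical):
+//
+//   // In functorch:
+//   struct FuncTorchTLSImpl : public at::functorch::FuncTorchTLSBase {
+//     DynamicLayerStack dynamicLayerStack; // the actual metadata lives here
+//     std::unique_ptr<FuncTorchTLSBase> deepcopy() const override {
+//       return std::make_unique<FuncTorchTLSImpl>(*this);
+//     }
+//     // ... remaining overrides ...
+//   };
+//
+//   // pytorch/pytorch only ever stores and passes the base-class pointer.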
+struct TORCH_API FuncTorchTLSBase { + virtual ~FuncTorchTLSBase() = default; + virtual std::unique_ptr deepcopy() const = 0; + + // functorch doesn't always work with autograd.Function. + // This is a hook to get into functorch -- functorch will determine + // if it should raise an error message + virtual int64_t checkSupportsAutogradFunction() const = 0; + virtual void checkSupportsInplaceRequiresGrad() const = 0; + virtual void checkSupportsRetainGrad() const = 0; +}; + +// returns deepcopy of the functorch tls +TORCH_API std::unique_ptr getCopyOfFuncTorchTLS(); + +// sets the functorch tls. always does a deep copy. +TORCH_API void setFuncTorchTLS( + const std::shared_ptr& state); + +// get a mutable reference to the functorch tls +TORCH_API std::unique_ptr& functorchTLSAccessor(); + +} // namespace functorch +} // namespace at diff --git a/voice_bridge/torch/include/ATen/FunctionalStorageImpl.h b/voice_bridge/torch/include/ATen/FunctionalStorageImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..6caeac2737fd0c3394eb8e74a102171c9924e91b --- /dev/null +++ b/voice_bridge/torch/include/ATen/FunctionalStorageImpl.h @@ -0,0 +1,123 @@ +#pragma once + +#include + +namespace at { +namespace functionalization { + +// See Note [Functionalization Pass In Core] + +// ViewMeta is a class used by the functionalization pass to navigate between +// a base tensor and a view tensor. +// For example, if I call `b = a.view1(...)` +// the functionalization pass will generate and store a ViewMeta on b that looks +// like: +// +// ViewMeta( +// [](const Tensor& base, int64_t mutated_view_idx) { +// return base.view1(...); +// }, +// [](const at::Tensor& base, const at::Tensor& mutated_view, +// int64_t mutated_view_idx) -> at::Tensor { +// return at::functionalization::impl::view1_inverse(base, mutated_view, +// ...); +// } +// +// The forward_fn lambda describes how to replay view1 on a tensor. +// +// The reverse_fn lambda describes how, given a tensor that is already a view, +// to get the corresponding base tensor. See Note [Functionalization Pass: +// View Inverses] for details. +struct ViewMeta { + ViewMeta( + std::function forward, + std::function reverse, + int64_t out_idx = 0) + : forward_fn(forward), reverse_fn(reverse), out_index(out_idx) {} + + std::function forward_fn; + std::function reverse_fn; + // See Note [out_idx in ViewMeta] + int64_t out_index; + + // Returns a copy of the current ViewMeta, if out_idx matches the current + // out_index. Otherwise, returns a new ViewMeta with the same forward/reverse + // functions, but a new out index. + ViewMeta to_out_idx(int64_t out_idx); +}; + +// Alias represents the state shared by (potentially multiple) views of the same +// tensor. For example, in the following code: +// +// b = a.view1(...) +// c = b.view2(...) +// b.add_(1) +// --> alias.add_update(b, {view1_meta}) +// +// The call to add_(1) will result in a call to alias.add_update(b, +// {view1_meta}), queueing up the mutation from b onto the alias. Later, suppose +// c is used in an expression (e.g. you try to print c, or pass it to an +// operator). Doing so will involve "syncing" c. First we apply any pending +// updates to the alias, and then we regenerate c by replaying its views off of +// the updated alias.
E.g: +// +// print(str(c)) +// --> c.sync_() +// --> alias.apply_updates() // after this, the alias will be updated to +// reflect the mutation to b +class Alias { + public: + struct Update { + const at::Tensor new_val; + const std::vector<ViewMeta> view_metas; + }; + explicit Alias(const at::Tensor& base); + const at::Tensor& base() const; + size_t generation() const { + return generation_; + } + void add_update( + const at::Tensor& updated_val, + const std::vector<ViewMeta>& metas); + bool apply_updates(); + + private: + // NB: base_ should always point to a tensor BELOW the current + // functionalization layer. This is mainly to avoid reference cycles. e.g. + // given `b = a.view(...)` Both a.storage_ and b.storage_ are a + // FunctionalStorageImpl containing an Alias, which contains a Tensor `base_`. In + // this case (where a and b are FunctionalTensorWrapper's), base_ should point + // not to a, but to a's unwrapped value, a.value_` See Note + // [Functionalization: Alias Removal] for a diagram that shows this visually. + at::Tensor base_; + std::vector<Update> updates_; + // generation_ gets incremented every time a mutation is queued onto the + // alias. It is used to determine if a given tensor is "up to date", or if it + // needs to be regenerated from the alias. + size_t generation_ = 0; +}; + +// FunctionalStorageImpl is a subclass of StorageImpl used by the +// functionalization pass. It has no underlying data (similar to meta storage). +// It also knows how to reflect mutations to tensors in the absence of a valid +// data pointer. It does this by separately storing an Alias object, which knows +// how to reflect mutations that may have happened to views of the original +// tensor. +struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl { + explicit FunctionalStorageImpl(const Tensor& value); + + void add_update( + const Tensor& updated_val, + const std::vector<ViewMeta>& view_metas); + bool apply_updates(); + const Tensor& base(); + size_t generation() const; + + ~FunctionalStorageImpl() override = default; + + private: + at::functionalization::Alias alias_; +}; + +} // namespace functionalization +} // namespace at diff --git a/voice_bridge/torch/include/ATen/FunctionalTensorWrapper.h b/voice_bridge/torch/include/ATen/FunctionalTensorWrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..b98f31e26d6eb33cfe11ac59822d2fd6904a6bdf --- /dev/null +++ b/voice_bridge/torch/include/ATen/FunctionalTensorWrapper.h @@ -0,0 +1,307 @@ + +#pragma once + +#include <ATen/ArrayRef.h> +#include <ATen/FunctionalStorageImpl.h> +#include <ATen/core/IListRef.h> +#include <ATen/core/List.h> +#include <ATen/core/boxing/BoxedKernel.h> +#include <ATen/core/boxing/impl/boxing.h> +#include <ATen/core/dispatch/Dispatcher.h> + +#include <c10/core/DispatchKey.h> + +namespace at { + +// Note [Functionalization Pass In Core] +// The Functionalization pass is used to remove aliasing from a pytorch program. +// +// This is useful for backends that don't support aliasing, like XLA and Vulkan. +// It's also necessary in order to remove mutation from a program, which is +// needed in Functorch. +// +// Consider this program: +// a = torch.ones(...) +// b = a.view(...) +// b.add_(1) +// +// In this program, b is meant to alias with a due to the use of view(). At the +// end of the program, both a and b are full of 2's. However, backends that +// don't support aliasing aren't able to correctly implement the view() +// operator. Instead, they can opt into the Functionalization pass, which will +// sit between the user and the backend, and provide the necessary aliasing +// logic.
+// +// The functionalization pass will turn the above program into a slightly +// different program that has the same semantics, transparently to the user, +// that backends like XLA/Vulkan are able to implement a = torch.ones(...) b = +// a.view_copy(...) # view() replaced with view_copy(). Backends like +// XLA/Vulkan can implement this! b.add_(1) a.add_(1) # Our functionalization +// pass machinery knows that a and b are aliased - it applies b's mutation to a +// too. +// +// So, how does the functionalization pass keep track of which tensors are +// aliased? The pass works by wrapping EVERY tensor in the program inside of a +// FunctionalTensorWrapper, which knows about its alias'd tensors. +// +// See Note [Functionalization: Alias Removal] for details on the aliasing +// machinery. See Note [Functionalization: Mutation Removal] for details on +// mutation removal. +struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl { + explicit FunctionalTensorWrapper(const Tensor& value); + // Additional constructor to create a FunctionalTensorWrapper directly from an + // underlying tensor that was created from a view. For example, the code b = + // a.view1() will generate a constructor call to FunctionalTensorWrapper(b, a, + // view1_meta) + explicit FunctionalTensorWrapper( + const Tensor& view_value, + const FunctionalTensorWrapper* base, + functionalization::ViewMeta meta); + + // Get the underlying, actual tensor, that doesn't know anything about + // functionalization. + const Tensor& value() const { + return value_; + }; + // The concept of "level" is only ever important to functorch; it's exposed + // here as more of a hook for functorch to use. + int64_t level() const { + return level_; + }; + void set_level(int64_t level) { + level_ = level; + } + + // Sync's the underlying tensor with its alias, if it's out of date. This + // involves two steps: 1) Apply any pending updates/mutations to the alias 2) + // Replay the views (if any) to regenerate the current tensor off of the + // updated alias. + void sync_(); + // Performs step (1) of the sync. This is its own public API because it's + // needed by view_inplace ops like transpose_. See Note [Functionalization + // Pass - Inplace View Ops] + void regenerate_from_base(); + // Performs step (2) of the sync. This is its own public API because it's + // needed by functorch. functorch wants to make sure that all input tensors to + // a functionalized program have been properly synced so it can properly + // propagate mutations to inputs. It can't just call sync_(), because the + // FunctionalTensorWrapper will look like it has no aliases and sync_ will be + // a noop. We use the reference count on storage_ to determine if the wrapper + // is aliased, and by the time functorch is ready to propagate updates to + // inputs, any intermediate views of the input created by the program will + // have been deallocated. This function also returns whether or not the base + // actually had any updates to apply. + bool apply_updates(); + // Takes the current state of value_ and snapshots it, sending it as a pending + // update to the alias. + void commit_update(); + // When any tensor is mutated, the tensor increments its alias's "generation". + // Separately, each tensor maintains its own "generation" counter, which is + // used to determine if it's up-to-date with its alias. The act of syncing a + // tensor will set a tensor's generation equal to its alias's generation. 
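Before the wrapper's declarations continue, the generation bookkeeping just described can be made concrete with a small standalone sketch (simplified, hypothetical types; the real logic lives in FunctionalStorageImpl's Alias):

```cpp
#include <cstddef>
#include <iostream>

// A view is "up to date" iff it has observed every mutation queued on
// its alias; syncing copies the alias's generation onto the view.
struct AliasState {
  std::size_t generation = 0;
  void add_update() { ++generation; }  // a mutation was queued
};

struct ViewState {
  AliasState* alias;
  std::size_t generation = 0;

  bool is_up_to_date() const { return generation == alias->generation; }
  void sync() {
    // (apply pending updates, then replay views -- elided here)
    generation = alias->generation;
  }
};

int main() {
  AliasState alias;
  ViewState view{&alias};
  alias.add_update();                         // e.g. b.add_(1) elsewhere
  std::cout << view.is_up_to_date() << "\n";  // 0: stale
  view.sync();
  std::cout << view.is_up_to_date() << "\n";  // 1: fresh
}
```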
+ bool is_up_to_date() const; + // Every FunctionalTensorWrapper contains a vector of ViewMeta objects + // describing the series of view ops that ran to generate the current tensor + // from the base tensor. This method is used by inplace-view ops like + // transpose_. It appends a ViewMeta to the existing stack, and refreshes the + // tensor by replaying the views off of the alias. + void mutate_view_meta(at::functionalization::ViewMeta meta); + + // The functionalization pass can be used to remove mutations. + // It does so by replacing any mutation op with its corresponding + // out-of-place op, followed by a call to replace_(). e.g: + // + // a.add_(1) + // + // will turn into: + // + // tmp = a.add(1) + // a.replace_(tmp) + // + // replace_() swaps out the wrapped tensor, value_, with tmp. + void replace_(const Tensor& other); + + // See Note[resize_() in functionalization pass] + void maybe_replace_storage(const Tensor& other); + + c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override; + + c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override; + + ~FunctionalTensorWrapper() override = default; + + // FunctionalTensorWrapper overrides all custom size/stride functions, + // so that if the inner tensor has a custom implementation + // we make sure to call that implementation. + at::IntArrayRef sizes_custom() const override; + at::IntArrayRef strides_custom() const override; + int64_t dim_custom() const override; + int64_t numel_custom() const override; + bool is_contiguous_custom(at::MemoryFormat memory_format) const override; + c10::SymIntArrayRef sym_sizes_custom() const override; + c10::SymIntArrayRef sym_strides_custom() const override; + + private: + const char* tensorimpl_type_name() const override; + void set_constructor_metadata(); + functionalization::FunctionalStorageImpl* functional_storage_impl() const; + + // This is used to re-implement shallow_copy_and_detach for + // FunctionalTensorWrapper. The implementation is identical, but we just need + // to return a subclass instead of a plain TensorImpl. + // TODO: maybe it's possible to arrange for that to happen automatically + // without an override here? + template <typename VariableVersion> + c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core( + VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + // Note that value is not taken by reference: internally, the wrapper will + // change the value tensor that it points to over time. + Tensor value_; + int64_t level_; + + size_t generation_ = 0; + std::vector<at::functionalization::ViewMeta> view_metas_; +}; + +// Utility functions for the functionalization pass.
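The replace_() contract above is the core of mutation removal: every in-place op becomes its out-of-place counterpart plus a swap of the wrapped value. A toy standalone version of that rewrite (plain vectors instead of tensors; nothing here is the real ATen API):

```cpp
#include <iostream>
#include <utility>
#include <vector>

struct Value {
  std::vector<int> data;
};

// Out-of-place op: returns a fresh Value, never mutates its input.
Value add(const Value& v, int k) {
  Value out = v;
  for (auto& x : out.data) x += k;
  return out;
}

// Wrapper in the spirit of FunctionalTensorWrapper: mutation becomes an
// out-of-place call followed by swapping the wrapped value.
struct Wrapper {
  Value value;
  void add_(int k) { replace_(add(value, k)); }  // "a.add_(1)" rewritten
  void replace_(Value other) { value = std::move(other); }
};

int main() {
  Wrapper a{{{1, 2, 3}}};
  a.add_(1);  // internally: tmp = add(a.value, 1); a.replace_(tmp)
  std::cout << a.value.data[0] << "\n";  // prints 2
}
```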
+ +namespace functionalization { +namespace impl { + +TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper( + const Tensor& tensor) { + auto functional_impl = + static_cast<FunctionalTensorWrapper*>(tensor.unsafeGetTensorImpl()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_impl != nullptr); + return functional_impl; +} + +TORCH_API bool isFunctionalTensor(const at::Tensor& tensor); +TORCH_API bool isFunctionalTensor(const c10::optional<Tensor>& t); +TORCH_API bool isFunctionalTensor( + const c10::List<c10::optional<Tensor>>& t_list); +TORCH_API bool isFunctionalTensor(ITensorListRef list); + +TORCH_API Tensor to_functional_tensor(const Tensor& tensor); +TORCH_API c10::optional<Tensor> to_functional_tensor( + const c10::optional<Tensor>& tensor); +TORCH_API c10::List<c10::optional<Tensor>> to_functional_tensor( + const c10::List<c10::optional<Tensor>>& t_list); +TORCH_API std::vector<Tensor> to_functional_tensor(ITensorListRef t_list); + +TORCH_API Tensor +from_functional_tensor(const Tensor& tensor, bool assert_functional = true); +TORCH_API c10::optional<Tensor> from_functional_tensor( + const c10::optional<Tensor>& t, + bool assert_functional = true); +TORCH_API c10::List<c10::optional<Tensor>> from_functional_tensor( + const c10::List<c10::optional<Tensor>>& t_list); +TORCH_API std::vector<Tensor> from_functional_tensor(ITensorListRef t_list); + +TORCH_API void sync(const at::Tensor& t); +TORCH_API void sync(const c10::optional<Tensor>& t); +TORCH_API void sync(const c10::List<c10::optional<Tensor>> t_list); +TORCH_API void sync(ITensorListRef t_list); + +TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other); +TORCH_API void replace_( + const ITensorListRef functional_tensor, + ITensorListRef other); + +TORCH_API void commit_update(const Tensor& functional_tensor); +TORCH_API void commit_update(ITensorListRef functional_tensor); + +Tensor create_functional_tensor_with_view_meta( + const Tensor& view_to_wrap, + const Tensor& base, + functionalization::ViewMeta meta, + int64_t out_idx = 0); +std::vector<Tensor> create_functional_tensor_with_view_meta( + ITensorListRef view_to_wrap, + const Tensor& base, + functionalization::ViewMeta meta); + +void mutate_view_meta(const Tensor& self, functionalization::ViewMeta meta); + +void set_sizes_strides_offset(const Tensor& out, const Tensor& meta_out); +void set_sizes_strides_offset( + const std::vector<Tensor>& outs, + const std::vector<Tensor>& meta_outs); + +// ~~~~~ TLS used in functionalization ~~~~~ + +TORCH_API bool getFunctionalizationReapplyViewsTLS(); +TORCH_API void setFunctionalizationReapplyViewsTLS(bool reapply_views); + +class TORCH_API FunctionalizationReapplyViewsGuard { + public: + FunctionalizationReapplyViewsGuard(bool reapply_views) { + prev_ = getFunctionalizationReapplyViewsTLS(); + setFunctionalizationReapplyViewsTLS(reapply_views); + } + + ~FunctionalizationReapplyViewsGuard() { + setFunctionalizationReapplyViewsTLS(prev_); + } + + FunctionalizationReapplyViewsGuard( + const FunctionalizationReapplyViewsGuard&) = delete; + FunctionalizationReapplyViewsGuard operator=( + const FunctionalizationReapplyViewsGuard&) = delete; + FunctionalizationReapplyViewsGuard(FunctionalizationReapplyViewsGuard&&) = + delete; + FunctionalizationReapplyViewsGuard operator=( + FunctionalizationReapplyViewsGuard&&) = delete; + + private: + bool prev_; +}; + +} // namespace impl + +// Helper function to call an out-of-place composite aten kernel that may use +// mutations / views internally, and functionalize them.
+TORCH_API void functionalize_op_helper( + const c10::OperatorHandle& op, + torch::jit::Stack* stack); + +template <class Op, bool symint, class... Args> +struct _functionalize_aten_op final {}; + +template <class Op, bool symint, class ReturnType, class... ParameterTypes> +struct _functionalize_aten_op<Op, symint, ReturnType(ParameterTypes...)> final { + static ReturnType call( + typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) { + using FuncType = ReturnType( + typename c10::maybe_keep_symint<symint, ParameterTypes>::type...); + auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow( + (const char*)Op::name, (const char*)Op::overload_name) + .typed<FuncType>(); + + return c10::impl::BoxedKernelWrapper<FuncType>::call( + c10::BoxedKernel::makeFromFunction<functionalize_op_helper>(), + op, + // BoxedKernelWrapper knows to ignore this keyset argument, + // because functionalize_op_helper doesn't take in a DispatchKeySet + c10::DispatchKeySet(), + args...); + } +}; + +template <class Op> +using functionalize_aten_op = + _functionalize_aten_op<Op, false, typename Op::schema>; + +template <class Op> +using functionalize_aten_op_symint = + _functionalize_aten_op<Op, true, typename Op::schema>; + +} // namespace functionalization +} // namespace at diff --git a/voice_bridge/torch/include/ATen/Functions.h b/voice_bridge/torch/include/ATen/Functions.h new file mode 100644 index 0000000000000000000000000000000000000000..2d56017a4a109a654c7508d3453213dbaaf29c86 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Functions.h @@ -0,0 +1,1355 @@ +#pragma once + +// @generated by torchgen/gen.py from Functions.h + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from <ATen/ops/{my_operator}.h> and \ + see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS] +// +// In ATen, certain generated header files include the definitions of +// every single operator in PyTorch. Unfortunately this means every +// time an operator signature is updated or changed in +// native_functions.yaml, you (and every other PyTorch developer) need +// to recompile every source file that includes any of these headers. +// +// To break up these header dependencies, and improve incremental +// build times for all PyTorch developers, these headers are split +// into per-operator headers in the `ATen/ops` folder. This limits +// incremental builds to only changes to methods of `Tensor`, or files +// that use the specific operator being changed. With `at::sum` as an +// example, you should include +// +// <ATen/ops/sum.h> // instead of ATen/Functions.h +// <ATen/ops/sum_native.h> // instead of ATen/NativeFunctions.h +// <ATen/ops/sum_ops.h> // instead of ATen/Operators.h +// <ATen/ops/sum_cpu_dispatch.h> // instead of ATen/CPUFunctions.h +// +// However, even if you're careful to use this in your own code, +// `Functions.h` might be included indirectly through another header +// without you realising. To avoid this, you can add +// +// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS +// +// to the top of your source file. This way any time the non-specific +// headers are included, the compiler will error out. +// +// Also, be aware that `ops` are not available in all build +// configurations (namely fb-internal) so you must guard these +// includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g.
+// +// #ifndef AT_PER_OPERATOR_HEADERS +// #include +// #else +// #include +// #endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { + + + +// Special C++ only overloads for std()-like functions (See gh-40287) +// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef +// So, for example std(0) would select the std(unbiased=False) overload +TORCH_API inline Tensor var(const Tensor& self, int dim) { + return at::var(self, IntArrayRef{dim}); +} +TORCH_API inline std::tuple var_mean(const Tensor& self, int dim) { + return at::var_mean(self, IntArrayRef{dim}); +} +TORCH_API inline Tensor std(const Tensor& self, int dim) { + return at::std(self, IntArrayRef{dim}); +} +TORCH_API inline std::tuple std_mean(const Tensor& self, int dim) { + return at::std_mean(self, IntArrayRef{dim}); +} + +inline int64_t numel(const Tensor& tensor) { + return tensor.numel(); +} + +inline int64_t size(const Tensor& tensor, int64_t dim) { + return tensor.size(dim); +} + +inline int64_t stride(const Tensor& tensor, int64_t dim) { + return tensor.stride(dim); +} + +inline bool is_complex(const Tensor& tensor) { + return tensor.is_complex(); +} + +inline bool is_floating_point(const Tensor& tensor) { + return tensor.is_floating_point(); +} + +inline bool is_signed(const Tensor& tensor) { + return tensor.is_signed(); +} + +inline bool is_inference(const Tensor& tensor) { + return tensor.is_inference(); +} + +inline bool _is_zerotensor(const Tensor& tensor) { + return tensor._is_zerotensor(); +} + +inline bool is_conj(const Tensor& tensor) { + return tensor.is_conj(); +} + +inline Tensor conj(const Tensor& tensor) { + return tensor.conj(); +} + +inline bool is_neg(const Tensor& tensor) { + return tensor.is_neg(); +} + +} diff --git a/voice_bridge/torch/include/ATen/Generator.h b/voice_bridge/torch/include/ATen/Generator.h new file mode 100644 index 
0000000000000000000000000000000000000000..48c25e141dcb8c0264ca9435352889c7a250f74d --- /dev/null +++ b/voice_bridge/torch/include/ATen/Generator.h @@ -0,0 +1,2 @@ +#pragma once +#include <ATen/core/Generator.h> diff --git a/voice_bridge/torch/include/ATen/InferSize.h b/voice_bridge/torch/include/ATen/InferSize.h new file mode 100644 index 0000000000000000000000000000000000000000..594b87373a209ab2f6542c8619673de18e65057a --- /dev/null +++ b/voice_bridge/torch/include/ATen/InferSize.h @@ -0,0 +1,87 @@ +#pragma once + +#include <ATen/DimVector.h> +#include <c10/core/ScalarType.h> +#include <c10/core/SymIntArrayRef.h> +#include <c10/util/DimVector.h> +#include <c10/util/Optional.h> +#include <sstream> +#include <vector> + +namespace at { + +// Infers the size of a dim with size -1, if it exists. Also checks that new +// shape is compatible with the number of elements. +// +// templated to handle std::vector<int64_t> and DimVector use cases, see +// below +// +template <typename InputArrayRef, typename NumelType, typename ResultVec> +inline void infer_size_impl( + InputArrayRef shape, + NumelType numel, + ResultVec& res) { + NumelType newsize = 1; + // N.B. this is an index, not a sym dim! + auto infer_dim = c10::optional<int64_t>(); + for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) { + if (shape[dim] == -1) { + if (infer_dim) { + throw std::runtime_error("only one dimension can be inferred"); + } + infer_dim = dim; + } else if (shape[dim] >= 0) { + newsize *= shape[dim]; + } else { + AT_ERROR("invalid shape dimension ", shape[dim]); + } + } + + if (numel == newsize || (infer_dim && newsize > 0 && numel % newsize == 0)) { + if (infer_dim) { + // We have a degree of freedom here to select the dimension size; follow + // NumPy semantics and just bail. However, a nice error message is needed + // because users often use `view` as a way to flatten & unflatten + // dimensions and will otherwise be confused why + // empty_tensor.view( 0, 0) + // works yet + // empty_tensor.view(-1, 0) + // doesn't. + TORCH_CHECK( + newsize != 0, + "cannot reshape tensor of 0 elements into shape ", + shape, + " because the unspecified dimension size -1 can be any " + "value and is ambiguous"); + res[*infer_dim] = numel / newsize; + } + return; + } + + std::ostringstream ss; + ss << "shape '" << shape << "' is invalid for input of size " << numel; + throw std::runtime_error(ss.str()); +} + +inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) { + auto res = shape.vec(); + infer_size_impl(shape, numel, res); + return res; +} + +inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) { + auto res = at::DimVector(shape); + infer_size_impl(shape, numel, res); + return res; +} + +inline at::SymDimVector infer_size_dv( + c10::SymIntArrayRef shape, + c10::SymInt numel) { + auto res = at::SymDimVector(shape); + infer_size_impl<c10::SymIntArrayRef, c10::SymInt>( + shape, numel, res); + return res; +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/InitialTensorOptions.h b/voice_bridge/torch/include/ATen/InitialTensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..d6914552eb0df70b18077c6ef10a55149790b5d6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/InitialTensorOptions.h @@ -0,0 +1,15 @@ +#pragma once + +#include <c10/core/TensorOptions.h> + +namespace at { + +// Represents the initial TensorOptions, before the "defaults" are ever changed. +// This is designed to be used in library code, where the explicit devices, +// dtypes, etc. are known. NOTE: this is not a stable API.
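The -1 inference in InferSize.h above is the machinery behind calls like `t.view(-1, 2)`: the -1 slot is solved from the total element count. A simplified standalone re-derivation of the same arithmetic (int64-only, no SymInt, purely illustrative):

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

// Simplified version of the -1 inference performed by infer_size_impl.
std::vector<int64_t> infer_size(std::vector<int64_t> shape, int64_t numel) {
  int64_t newsize = 1;   // product of all explicitly given dims
  int64_t infer_dim = -1;
  for (int64_t d = 0; d < static_cast<int64_t>(shape.size()); ++d) {
    if (shape[d] == -1) {
      if (infer_dim != -1)
        throw std::runtime_error("only one dimension can be inferred");
      infer_dim = d;
    } else {
      newsize *= shape[d];
    }
  }
  if (infer_dim != -1) {
    // The -1 slot must divide the element count evenly.
    if (newsize == 0 || numel % newsize != 0)
      throw std::runtime_error("shape is invalid for input size");
    shape[infer_dim] = numel / newsize;
  } else if (newsize != numel) {
    throw std::runtime_error("shape is invalid for input size");
  }
  return shape;
}

int main() {
  auto s = infer_size({-1, 2}, 6);  // a 6-element tensor viewed as (-1, 2)
  std::cout << s[0] << "x" << s[1] << "\n";  // prints 3x2
}
```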
+inline TensorOptions initialTensorOptions() { + return TensorOptions(kCPU).dtype(kFloat).layout(kStrided).requires_grad( + false); +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/Layout.h b/voice_bridge/torch/include/ATen/Layout.h new file mode 100644 index 0000000000000000000000000000000000000000..ea71e2b469bcf02365c78ebfba1b1d0362b6e531 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Layout.h @@ -0,0 +1,2 @@ +#pragma once +#include <c10/core/Layout.h> diff --git a/voice_bridge/torch/include/ATen/LinalgBackend.h b/voice_bridge/torch/include/ATen/LinalgBackend.h new file mode 100644 index 0000000000000000000000000000000000000000..4617afd0b72c7ce286e61a4d1abe2cc89743024c --- /dev/null +++ b/voice_bridge/torch/include/ATen/LinalgBackend.h @@ -0,0 +1,31 @@ +#pragma once + +#include <c10/util/Exception.h> + +#include <ostream> +#include <string> + +namespace at { + +enum class LinalgBackend : int8_t { Default, Cusolver, Magma }; + +inline std::string LinalgBackendToString(at::LinalgBackend backend) { + switch (backend) { + case LinalgBackend::Default: + return "at::LinalgBackend::Default"; + case LinalgBackend::Cusolver: + return "at::LinalgBackend::Cusolver"; + case LinalgBackend::Magma: + return "at::LinalgBackend::Magma"; + default: + TORCH_CHECK(false, "Unknown linalg backend"); + } +} + +inline std::ostream& operator<<( + std::ostream& stream, + at::LinalgBackend backend) { + return stream << LinalgBackendToString(backend); +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/MapAllocator.h b/voice_bridge/torch/include/ATen/MapAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..7f602935cba1f8881aeec388fb6970569ecfd8ee --- /dev/null +++ b/voice_bridge/torch/include/ATen/MapAllocator.h @@ -0,0 +1,133 @@ +#pragma once + +#include <c10/core/Allocator.h> + +namespace at { + +enum MappedAllocatorModes { + ALLOCATOR_MAPPED_SHARED = 1, + ALLOCATOR_MAPPED_SHAREDMEM = 2, + ALLOCATOR_MAPPED_EXCLUSIVE = 4, + ALLOCATOR_MAPPED_NOCREATE = 8, + ALLOCATOR_MAPPED_KEEPFD = 16, + ALLOCATOR_MAPPED_FROMFD = 32, + ALLOCATOR_MAPPED_UNLINK = 64 +}; + +// Sentinel value/type to help distinguish the file descriptor constructor from +// the non-file descriptor constructor +enum WithFd { WITH_FD }; + +TORCH_API std::string NewProcessWideShmHandle(); + +class TORCH_API MapAllocator { + public: + MapAllocator(std::string filename, int flags, size_t size); + MapAllocator(WithFd, std::string filename, int fd, int flags, size_t size); + MapAllocator(const MapAllocator&) = delete; + MapAllocator& operator=(const MapAllocator&) = delete; + MapAllocator(MapAllocator&&) = delete; + MapAllocator& operator=(MapAllocator&&) = delete; + + const char* filename() const { + return filename_.c_str(); + } + int fd() const { +#ifdef _WIN32 + TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows"); +#else + return fd_; +#endif + } + ptrdiff_t size() const { + return size_; + } + // Return a pointer to the actual data for this allocator + // (in the case of the refcounted allocator, this is offset + // from the base pointer.) + virtual void* data() const { + return base_ptr_; + } + + static MapAllocator* fromDataPtr(const at::DataPtr&); + static at::DataPtr makeDataPtr( + std::string filename, + int flags, + size_t size, + size_t* actual_size_out); + static at::DataPtr makeDataPtr( + WithFd, + const char* filename, + int fd, + int flags, + size_t size, + size_t* actual_size_out); + + // Closes the data. Helps us avoid destructor shenanigans + virtual void close(); + + // This is very dangerous.
You have to redefine this destructor for each + // subclass + virtual ~MapAllocator(); + + protected: + bool closed_ = false; + std::string filename_; + int flags_ = 0; + ptrdiff_t size_; /* mapped size */ +#ifdef _WIN32 + void* handle_; + void* event_; + std::string eventname_; +#else + int fd_ = -1; +#endif + void* base_ptr_ = nullptr; +}; + +// Base-from-member idiom +struct TORCH_API RefcountedMapAllocatorArgCheck { + RefcountedMapAllocatorArgCheck(int flags); +}; + +class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck, + public MapAllocator { + public: + RefcountedMapAllocator(const char* filename, int flags, size_t size); + RefcountedMapAllocator( + WithFd, + const char* filename, + int fd, + int flags, + size_t size); + + static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&); + static at::DataPtr makeDataPtr( + const char* filename, + int flags, + size_t size, + size_t* actual_size_out); + static at::DataPtr makeDataPtr( + WithFd, + const char* filename, + int fd, + int flags, + size_t size, + size_t* actual_size_out); + + void* data() const override; + + void incref(); + int decref(); + void close() override; + + virtual ~RefcountedMapAllocator() { + close(); + } + + protected: + void checkFlags(); + void initializeAlloc(); +}; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/MatrixRef.h b/voice_bridge/torch/include/ATen/MatrixRef.h new file mode 100644 index 0000000000000000000000000000000000000000..ba693ab7d5809b030ea5e55e8124d9a97ec5751e --- /dev/null +++ b/voice_bridge/torch/include/ATen/MatrixRef.h @@ -0,0 +1,109 @@ +#pragma once +#include +#include + +#include + +namespace at { +/// MatrixRef - Like an ArrayRef, but with an extra recorded stride so that +/// we can easily view it as a multidimensional array. +/// +/// Like ArrayRef, this class does not own the underlying data, it is expected +/// to be used in situations where the data resides in some other buffer. +/// +/// This is intended to be trivially copyable, so it should be passed by +/// value. +/// +/// For now, 2D only (so the copies are actually cheap, without having +/// to write a SmallVector class) and contiguous only (so we can +/// return non-strided ArrayRef on index). +/// +/// P.S. dimension 0 indexes rows, dimension 1 indexes columns +template <typename T> +class MatrixRef { + public: + typedef size_t size_type; + + private: + /// Underlying ArrayRef + ArrayRef<T> arr; + + /// Stride of dim 0 (outer dimension) + size_type stride0; + + // Stride of dim 1 is assumed to be 1 + + public: + /// Construct an empty MatrixRef. + /*implicit*/ MatrixRef() : arr(nullptr), stride0(0) {} + + /// Construct a MatrixRef from an ArrayRef and outer stride. + /*implicit*/ MatrixRef(ArrayRef<T> arr, size_type stride0) + : arr(arr), stride0(stride0) { + TORCH_CHECK( + arr.size() % stride0 == 0, + "MatrixRef: ArrayRef size ", + arr.size(), + " not divisible by stride ", + stride0) + } + + /// @} + /// @name Simple Operations + /// @{ + + /// empty - Check if the matrix is empty. + bool empty() const { + return arr.empty(); + } + + const T* data() const { + return arr.data(); + } + + /// size - Get size a dimension + size_t size(size_t dim) const { + if (dim == 0) { + return arr.size() / stride0; + } else if (dim == 1) { + return stride0; + } else { + TORCH_CHECK( + 0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1"); + } + } + + size_t numel() const { + return arr.size(); + } + + /// equals - Check for element-wise equality.
+ bool equals(MatrixRef RHS) const { + return stride0 == RHS.stride0 && arr.equals(RHS.arr); + } + + /// @} + /// @name Operator Overloads + /// @{ + ArrayRef<T> operator[](size_t Index) const { + return arr.slice(Index * stride0, stride0); + } + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template <typename U> + typename std::enable_if<std::is_same<U, T>::value, MatrixRef<T>>::type& + operator=(U&& Temporary) = delete; + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template <typename U> + typename std::enable_if<std::is_same<U, T>::value, MatrixRef<T>>::type& + operator=(std::initializer_list<U>) = delete; +}; + +} // end namespace at diff --git a/voice_bridge/torch/include/ATen/MemoryOverlap.h b/voice_bridge/torch/include/ATen/MemoryOverlap.h new file mode 100644 index 0000000000000000000000000000000000000000..afc06ae11e5116f1e331fde0f62277bf7ab219c7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/MemoryOverlap.h @@ -0,0 +1,42 @@ +#pragma once + +#include <c10/macros/Export.h> + +namespace c10 { +struct TensorImpl; +} + +namespace at { +class TensorBase; + +// MemOverlap: Whether or not there is memory overlap +// +// No: Absolutely no memory overlap +// Yes: Absolutely yes memory overlap +// TooHard: There might be memory overlap, but it was too expensive to compute. +// +// NB: Please update the python test for these if you renumber them. +enum class MemOverlap { No, Yes, TooHard }; + +enum class MemOverlapStatus { Full, Partial, No, TooHard }; + +TORCH_API MemOverlap has_internal_overlap(const TensorBase& t); +TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t); + +TORCH_API void assert_no_internal_overlap(const TensorBase& t); +TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t); + +TORCH_API MemOverlapStatus +get_overlap_status(const TensorBase& a, const TensorBase& b); +TORCH_API MemOverlapStatus +get_overlap_status(c10::TensorImpl* a, c10::TensorImpl* b); + +TORCH_API void assert_no_partial_overlap( + const TensorBase& a, + const TensorBase& b); +void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b); + +TORCH_API void assert_no_overlap(const TensorBase& a, const TensorBase& b); +TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/MetaFunctions.h b/voice_bridge/torch/include/ATen/MetaFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..4e1d24af30086427cb4e7ebebadc4830e5c7ce6e --- /dev/null +++ b/voice_bridge/torch/include/ATen/MetaFunctions.h @@ -0,0 +1,29 @@ +#include <ATen/core/TensorBody.h> + +// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch] +// Code introduced to avoid cyclic dependency in static dispatch is no longer +// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place, +// to Operators.cpp for supporting multiple backends with multiple kernels. +// +// Note [Avoiding Include Cycles In Static Dispatch] +// In order to avoid #include cycles in the static dispatch build, we've carefully split out +// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h. +// +// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
+// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods +// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all +// directly inlined into TensorBody.h. +// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API, +// which include functions that have defaultable optional arguments. +// That requires knowing the full Tensor class definition. +// +// We break the cycle by doing the following: +// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h +// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h, +// - CPUFunctions_inl.h includes everything else +// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class, +// and then it includes CPUFunctions_inl.h. +// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly. +// - This also means that in the static dispatch build, CPUFunctions.h only needs to +// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h. +#include <ATen/MetaFunctions_inl.h> diff --git a/voice_bridge/torch/include/ATen/MetaFunctions_inl.h b/voice_bridge/torch/include/ATen/MetaFunctions_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..4a3e60ddda1d04463ebabda091ea52e2a0805269 --- /dev/null +++ b/voice_bridge/torch/include/ATen/MetaFunctions_inl.h @@ -0,0 +1,321 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunctions_inl.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + <ATen/ops/{my_operator}_meta_dispatch.h>. \ + See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + diff --git a/voice_bridge/torch/include/ATen/MethodOperators.h b/voice_bridge/torch/include/ATen/MethodOperators.h new file mode 100644 index 0000000000000000000000000000000000000000..7328d3b66881b9e14f521e67eb261f1830300347 --- /dev/null +++ b/voice_bridge/torch/include/ATen/MethodOperators.h @@ -0,0 +1,435 @@ +#pragma once + +// @generated by torchgen/gen.py from MethodOperators.h + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled 
every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace _ops { + +} // namespace _ops +} // namespace at diff --git a/voice_bridge/torch/include/ATen/NamedTensor.h b/voice_bridge/torch/include/ATen/NamedTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..a7606b0a668a43800b89755af1371551909b23d5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/NamedTensor.h @@ -0,0 +1 @@ +#include diff --git a/voice_bridge/torch/include/ATen/NamedTensorUtils.h b/voice_bridge/torch/include/ATen/NamedTensorUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..c9ff27c2d1b214a31de5920e47caf37b67f7c6a6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/NamedTensorUtils.h @@ -0,0 +1,211 @@ +#pragma once +#include +#include + +#include +#include +#include + +namespace at { + +using NameVector = SmallVector; + +inline bool has_names(ITensorListRef tensors) { + return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) { + return t.has_names(); + }); +} + +// Converts dim to an positional index. Errors if `dim` cannot be used to +// refer to any dimension of tensor. +TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim); +TORCH_API std::vector dimnames_to_positions( + const Tensor& tensor, + DimnameList dims); + +// Unifies two DimnameList to produce a third. This is useful for implementing +// the named inference rule for binary broadcasting operations like add. +// +// There are three main constraints: +// 1) Check matching: Names must match positionally from the right. +// 2) Check misaligned: If a name `n` is in `names`, then it must appear at +// the same index from the right in other. +// 3) The output names are obtained by unifying the names individually from the +// right. +TORCH_API std::vector unify_from_right( + DimnameList names, + DimnameList other, + const char* action = "broadcast"); + +[[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) { + TORCH_CHECK( + false, + op_name, + ": You passed a dimname (string) to this op in place of a dimension " + "index but it does not yet support this behavior. Please pass a dimension " + "index to work around this."); +} + +// [NOTE] Writing name inference rules +// +// Operators that support named tensors are either composed of operations that +// support named tensors or implement some name inference rule. An op that +// implements its own name inference rule generally looks like the following: +// +// Tensor op(...) 
+//   perform_shape_checks(...);
+//   # (1)
+//   auto maybe_outnames = compute_outnames(...);
+//   auto result = [&]() {
+//     NoNamesGuard guard;
+//     return op_impl(...);
+//   }();
+//   # (2)
+//   propagate_names_if_nonempty(result, maybe_outnames);
+//
+// Each op has (1) a compute outnames step and (2) a propagate names step.
+//
+// compute_outnames is responsible for checking that input names match and
+// determining what the output names should be. It returns either:
+// - {} (if the input tensors are all unnamed)
+// - non-empty outnames.
+//
+// propagate_names_if_nonempty propagates the outnames if they exist to the
+// result tensors.
+//
+// The {} case is an optimization; if the user does not use named tensors they
+// pay no perf cost for it.
+
+namespace namedinference {
+
+const Tensor& propagate_names_if_present_and_nonempty(
+    const Tensor& result,
+    c10::optional<DimnameList> maybe_names,
+    bool validate_names = false);
+// Propagates `names` to `result` if `names` is not empty.
+// `names` can be empty; see [NOTE] Writing name inference rules
+// If `names` is not empty, `names.size()` should equal `result.dim()`.
+// When in doubt, use this overload instead of the others.
+TORCH_API const Tensor& propagate_names_if_nonempty(
+    const Tensor& result,
+    DimnameList maybe_names,
+    bool validate_names = false);
+
+// Propagates `names` to `result`. Only use this if we are certain that there
+// are names to propagate (that names is not empty).
+TORCH_API const Tensor& propagate_names(
+    const Tensor& result,
+    DimnameList names,
+    bool validate_names = false);
+
+// Propagates all names from src to result.
+TORCH_API void propagate_names(const Tensor& result, const Tensor& src);
+
+// Propagates all names except for those at the excluded_idxs.
+TORCH_API void propagate_names_except(
+    const Tensor& result,
+    const Tensor& src,
+    IntArrayRef excluded_idxs);
+
+// Used for reduction ops that have a `keepdim` arg.
+TORCH_API void propagate_names_for_reduction(
+    const Tensor& result,
+    const Tensor& src,
+    IntArrayRef excluded_idxs,
+    bool keepdim);
+
+TORCH_API void propagate_names_for_expand(
+    const Tensor& result,
+    const Tensor& self);
+
+TORCH_API std::vector<Dimname> compute_cat_outnames(
+    const MaterializedITensorListRef& tensors);
+
+TORCH_API std::vector<Dimname> compute_broadcast_outnames(
+    const Tensor& self,
+    const Tensor& other);
+
+TORCH_API std::vector<Dimname> broadcast_to_outnames(
+    const Tensor& tensor,
+    const Tensor& reference_tensor,
+    const char* op_name);
+
+TORCH_API std::vector<Dimname> compute_matmul_outnames(
+    const Tensor& self,
+    const Tensor& other);
+
+TORCH_API std::vector<Dimname> compute_cdist_outnames(
+    const Tensor& self,
+    const Tensor& other);
+
+TORCH_API std::vector<Dimname> compute_bmm_outnames(
+    const Tensor& result,
+    const Tensor& self,
+    const Tensor& other);
+
+TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
+
+std::vector<Dimname> compute_diagonal_outnames(
+    const Tensor& tensor,
+    int64_t dim1,
+    int64_t dim2);
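+
+// [editor's example] An illustrative sketch (not part of the original patch)
+// of how the declarations above compose for a binary op, following
+// [NOTE] Writing name inference rules. `add_like` is a hypothetical helper:
+//
+//   Tensor add_like(const Tensor& a, const Tensor& b) {
+//     auto outnames = at::unify_from_right(a.names(), b.names(), "add");
+//     auto result = [&]() {
+//       NoNamesGuard guard;  // run the unnamed kernel
+//       return a.add(b);
+//     }();
+//     propagate_names_if_nonempty(result, outnames);
+//     return result;
+//   }
+//
+// Unnamed inputs yield empty outnames, so the propagate step is a no-op and
+// unnamed callers pay no cost.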
+
+// TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.
+
+TORCH_API TensorImpl* propagate_names_if_nonempty(
+    TensorImpl* result,
+    DimnameList maybe_names,
+    bool validate_names = false);
+
+TORCH_API TensorImpl* propagate_names(
+    TensorImpl* result,
+    DimnameList names,
+    bool validate_names = false);
+
+TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src);
+
+TORCH_API inline void propagate_names(
+    const TensorBase& result,
+    DimnameList names,
+    bool validate_names = false) {
+  propagate_names(result.unsafeGetTensorImpl(), names, validate_names);
+}
+
+TORCH_API inline void propagate_names_if_nonempty(
+    const TensorBase& result,
+    DimnameList names,
+    bool validate_names = false) {
+  propagate_names_if_nonempty(
+      result.unsafeGetTensorImpl(), names, validate_names);
+}
+
+TORCH_API inline void propagate_names(
+    const TensorBase& result,
+    const TensorBase& src) {
+  propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl());
+}
+
+// result = m1 @ m2 + bias
+TORCH_API std::vector<Dimname> propagate_names_for_addmm(
+    const Tensor& m1,
+    const Tensor& m2,
+    const Tensor& bias);
+
+TORCH_API std::vector<Dimname> propagate_names_for_addmv(
+    const Tensor& mat,
+    const Tensor& vec,
+    const Tensor& bias);
+
+TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
+
+TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
+    const Tensor& result,
+    const Tensor& self,
+    const Tensor& other,
+    const Tensor& bias);
+
+TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
+
+} // namespace namedinference
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/NativeFunctions.h b/voice_bridge/torch/include/ATen/NativeFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac95c41573dc770c739470ea1a4bac9d657d4229
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/NativeFunctions.h
@@ -0,0 +1,1245 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunctions.h
+
+#ifdef TORCH_ASSERT_NO_OPERATORS
+#error This change adds a dependency on native_functions.yaml, \
+  meaning the file will need to be re-compiled every time an operator \
+  is changed or added. Consider if your change would be better placed in \
+  another file, or if a more specific header might achieve the same goal. \
+  See NOTE: [Tensor vs. TensorBase]
+#endif
+
+#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on all pytorch operators, meaning the \
+  file will need to be re-compiled every time an operator is changed or added. \
+  Consider including a specific operator from \
+  <ATen/ops/{my_operator}_native.h> and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
+#endif
+
+// [editor's note: ten core ATen/c10 includes plus one generated
+// <ATen/ops/*_native.h> header per operator followed here; the <...> include
+// targets were lost in extraction and are elided.]
+
diff --git a/voice_bridge/torch/include/ATen/NativeMetaFunctions.h b/voice_bridge/torch/include/ATen/NativeMetaFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed440051b4e684c4d8aa3923b2768e25ea909d3a
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/NativeMetaFunctions.h
@@ -0,0 +1,1231 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunctions.h
+
+// [editor's note: four core includes plus one generated <ATen/ops/*_meta.h>
+// header per operator followed here; the include targets are elided.]
+
+namespace at {
+
+namespace meta {
+
+} // namespace meta
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/NestedTensorImpl.h b/voice_bridge/torch/include/ATen/NestedTensorImpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..278df3c0d20382534283d2938d0a85e8f3df3246
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/NestedTensorImpl.h
@@ -0,0 +1,267 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+
+struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
+  explicit NestedTensorImpl(
+      Storage storage,
+      c10::DispatchKeySet key_set,
+      const caffe2::TypeMeta data_type,
+      at::Tensor nested_size_tensor,
+      at::Tensor nested_stride_tensor,
+      std::vector<int64_t>&& offsets);
+
+  explicit NestedTensorImpl(
+      at::Tensor buffer,
+      at::Tensor nested_size_tensor,
+      at::Tensor nested_stride_tensor,
+      std::vector<int64_t>&& offsets);
+  // Assumes contiguity; `nested_stride_tensor` and `offsets`
+  // can be inferred from `nested_size_tensor`.
+  explicit NestedTensorImpl(at::Tensor buffer, at::Tensor nested_size_tensor);
+
+  // This constructor is used for creating view tensors from nested tensors
+  explicit NestedTensorImpl(
+      c10::TensorImpl::ImplType impl_type,
+      const at::Tensor& base_tensor,
+      at::Tensor nested_size_tensor,
+      at::Tensor nested_stride_tensor,
+      std::vector<int64_t>&& offsets);
+
+  // TODO: don't expose private implementation details like this; in
+  // particular, resizing this tensor will mess up our dim() and
+  // callers cannot fix it.
+  const Tensor& get_nested_size_tensor() const {
+    return nested_size_tensor_;
+  }
+  // TODO: don't expose private implementation details like this
+  const Tensor& get_nested_stride_tensor() const {
+    return nested_stride_tensor_;
+  }
+  const std::vector<int64_t>& get_offsets() const {
+    return offsets_;
+  }
+  // Returns nullopt if the ith dimension is irregular. The ith dimension
+  // of a NestedTensor is regular if the unbound tensors match in
+  // size at the (i-1)th dimension.
+  c10::optional<int64_t> opt_size(int64_t d) const {
+    d = at::maybe_wrap_dim(d, dim(), false);
+    if (opt_sizes_[d] == -1) {
+      return c10::nullopt;
+    }
+    return opt_sizes_[d];
+  }
+
+  int64_t size(int64_t d) const {
+    c10::optional<int64_t> optional_size = this->opt_size(d);
+    TORCH_CHECK(
+        optional_size.has_value(),
+        "Given dimension ",
+        d,
+        " is irregular and does not have a size.");
+    return *optional_size;
+  }
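+
+  // [editor's example] Illustration (not part of the original patch) of the
+  // opt_size()/size() semantics above, for a nested tensor `nt` holding two
+  // components of sizes [2, 3] and [2, 5]:
+  //
+  //   nt.dim()       == 3
+  //   nt.opt_size(0) == 2             // nesting dimension: two components
+  //   nt.opt_size(1) == 2             // regular: both components have size 2
+  //   nt.opt_size(2) == c10::nullopt  // irregular: sizes 3 vs. 5 differ
+  //   nt.size(2)                      // errors: dimension 2 is irregular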
+  /**
+   * Return a view of the nested tensor as a 1 dimensional contiguous tensor.
+   *
+   * The buffer tensor created by this function shares the same storage_impl
+   * as the original nested tensor, and therefore can be seen as a view.
+   *
+   * @return A newly constructed view tensor
+   */
+  at::Tensor get_buffer() const {
+    auto buffer_key_set_ = generate_buffer_key_set();
+    const auto buffer_size = get_buffer_size();
+    auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
+        c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
+    buffer_tensor_impl->set_sizes_contiguous(c10::makeArrayRef(buffer_size));
+    return Tensor(buffer_tensor_impl);
+  }
+
+  int64_t get_buffer_size() const {
+    return storage_.nbytes() / data_type_.itemsize();
+  }
+
+ protected:
+  const char* tensorimpl_type_name() const override;
+
+  // TODO: numel_custom and is_contiguous_custom can be profitably overridden
+  // with real implementations
+  int64_t numel_custom() const override;
+  c10::SymInt sym_numel_custom() const override;
+  bool is_contiguous_custom(MemoryFormat) const override;
+  int64_t size_custom(int64_t d) const override {
+    return this->size(d);
+  }
+  c10::SymInt sym_size_custom(int64_t d) const override {
+    return c10::SymInt{this->size(d)};
+  }
+  IntArrayRef sizes_custom() const override;
+  c10::SymIntArrayRef sym_sizes_custom() const override;
+  IntArrayRef strides_custom() const override;
+  c10::SymIntArrayRef sym_strides_custom() const override;
+
+  // this one is real
+  int64_t dim_custom() const override;
+
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) const override;
+
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override;
+
+  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
+    copy_tensor_metadata(
+        /*src_impl=*/impl.get(),
+        /*dest_impl=*/this,
+        /*version_counter=*/version_counter(),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
+  }
+
+ private:
+  // Must be called after any changes to our dim() to sync the state
+  // to TensorImpl.
+  void refresh_dim();
+
+  const at::Tensor nested_size_tensor_, nested_stride_tensor_;
+  // The starting positions of the underlying tensors in the contiguous
+  // buffer, i.e. the buffer memory offsets of the underlying tensors.
+  // The reason to keep this metadata is that, without a strong enough
+  // constraint, it cannot be derived from `nested_size_tensor_`
+  // and `nested_stride_tensor_`:
+  // 1. when the buffer has blanks, e.g. [tensor1, blank, tensor2]
+  //    (this can happen e.g. after slicing a nested tensor)
+  // 2. when multiple tensors share the same memory
+  // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2]
+  // A constraint strong enough to make it derivable would be:
+  // 1. every underlying tensor is contiguous in memory
+  //    && the tensors are nested in ascending order
+  std::vector<int64_t> offsets_;
+  // NOTE: -1 here means the size is missing
+  // TODO: maybe we can remove this metadata since
+  // we can compute it from `nested_size_tensor_`
+  std::vector<int64_t> opt_sizes_;
+
+  template <typename VariableVersion>
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
+      VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const;
+
+  /**
+   * Generates a non-nested key_set from a nested tensor.
+   *
+   * For many nested tensor kernel implementations a buffer tensor
+   * is generated and redispatched to a non-nested kernel; this function
+   * generates the key set used by that buffer tensor.
+   *
+   * @return Appropriate key set for a non-nested tensor
+   */
+  inline c10::DispatchKeySet generate_buffer_key_set() const {
+    auto buffer_key_set = this->key_set();
+    const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset);
+    // Remove nested tensor specific keys
+    buffer_key_set = buffer_key_set -
+        c10::DispatchKeySet{
+            c10::DispatchKey::NestedTensor,
+            c10::DispatchKey::AutogradNestedTensor};
+
+    // Add dense tensor specific keys
+    buffer_key_set =
+        buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense};
+    buffer_key_set = Autograd
+        ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set
+        : buffer_key_set;
+
+    return buffer_key_set;
+  }
+};
+
+inline NestedTensorImpl* get_nested_tensor_impl_or_null(
+    const at::Tensor& tensor) {
+  if (tensor.is_nested()) {
+    return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
+  }
+  return nullptr;
+}
+
+inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) {
+  TORCH_CHECK(
+      tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor.");
+  return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
+}
+
+inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
+  int64_t ntensors = nt->size(0);
+  if (ntensors == 0) {
+    return true;
+  }
+  const Tensor &sizemat = nt->get_nested_size_tensor(),
+               &stridemat = nt->get_nested_stride_tensor();
+  const auto& offsets = nt->get_offsets();
+  int64_t orig_dim = sizemat.size(1);
+  // nesting scalars
+  if (orig_dim == 0) {
+    // each scalar must be contiguous;
+    // check whether there is blank memory between underlying scalars
+    for (int64_t i = 0; i < ntensors; i++) {
+      if (offsets[i] != i) {
+        return false;
+      }
+    }
+  }
+  // nesting tensors
+  else {
+    // check whether any underlying tensor is noncontiguous
+    const int64_t *sizemat_ptr = sizemat.data_ptr<int64_t>(),
+                  *stridemat_ptr = stridemat.data_ptr<int64_t>();
+    for (int64_t i = 0; i < ntensors; i++) {
+      if (stridemat_ptr[orig_dim - 1] != 1) {
+        return false;
+      }
+      int64_t product = sizemat_ptr[orig_dim - 1];
+      for (int64_t j = orig_dim - 2; j >= 0; j--) {
+        if (stridemat_ptr[j] != product) {
+          return false;
+        }
+        product *= sizemat_ptr[j];
+      }
+      sizemat_ptr += orig_dim;
+      stridemat_ptr += orig_dim;
+    }
+    // check whether there is blank memory between underlying tensors
+    if (offsets[0] != 0) {
+      return false;
+    }
+    sizemat_ptr = sizemat.data_ptr<int64_t>();
+    stridemat_ptr = stridemat.data_ptr<int64_t>();
+    for (int64_t i = 1; i < ntensors; i++) {
+      if (offsets[i] != offsets[i - 1] + *sizemat_ptr * *stridemat_ptr) {
+        return false;
+      }
+      sizemat_ptr += orig_dim;
+      stridemat_ptr += orig_dim;
+    }
+  }
+  // everything is fine
+  return true;
+}
+
+inline const at::Tensor& get_nested_size_tensor(const at::Tensor& tensor) {
+  return get_nested_tensor_impl(tensor)->get_nested_size_tensor();
+}
+
+} // namespace native
+} // namespace at
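// [editor's example] A self-contained sketch (not part of the original patch)
// of the contiguity invariant enforced by nested_tensor_impl_is_contiguous
// above, restated on plain vectors: every component must be row-major
// contiguous, offsets must start at 0, and each component must begin exactly
// where the previous one ends (no blank memory in between).

#include <cstdint>
#include <vector>

bool components_are_contiguous(
    const std::vector<std::vector<int64_t>>& sizes,   // per-component sizes
    const std::vector<std::vector<int64_t>>& strides, // per-component strides
    const std::vector<int64_t>& offsets) {            // per-component buffer offsets
  int64_t expected_offset = 0;
  for (size_t i = 0; i < sizes.size(); i++) {
    if (offsets[i] != expected_offset) {
      return false; // blank memory between components
    }
    int64_t expected_stride = 1;
    for (int64_t j = static_cast<int64_t>(sizes[i].size()) - 1; j >= 0; j--) {
      if (strides[i][j] != expected_stride) {
        return false; // component is not row-major contiguous
      }
      expected_stride *= sizes[i][j];
    }
    expected_offset += expected_stride; // expected_stride == numel of component i
  }
  return true;
}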
diff --git a/voice_bridge/torch/include/ATen/NumericUtils.h b/voice_bridge/torch/include/ATen/NumericUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..816cc4e8a44b08c00bca279756e16fa4af042645
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/NumericUtils.h
@@ -0,0 +1,148 @@
+#pragma once
+
+#ifdef __HIPCC__
+#include
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+
+// std::isnan isn't performant to use on integral types; it will
+// (uselessly) convert to floating point and then do the test.
+// The overloads below avoid that.
+
+template <
+    typename T,
+    typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+inline C10_HOST_DEVICE bool _isnan(T /*val*/) {
+  return false;
+}
+
+template <
+    typename T,
+    typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+#if defined(__CUDACC__) || defined(__HIPCC__)
+  return ::isnan(val);
+#else
+  return std::isnan(val);
+#endif
+}
+
+template <
+    typename T,
+    typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
+inline bool _isnan(T val) {
+  return std::isnan(val.real()) || std::isnan(val.imag());
+}
+
+template <
+    typename T,
+    typename std::enable_if<std::is_same<T, at::Half>::value, int>::type = 0>
+inline C10_HOST_DEVICE bool _isnan(T val) {
+  return at::_isnan(static_cast<float>(val));
+}
+
+template <
+    typename T,
+    typename std::enable_if<std::is_same<T, at::BFloat16>::value, int>::type =
+        0>
+inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
+  return at::_isnan(static_cast<float>(val));
+}
+
+inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) {
+  return at::_isnan(static_cast<float>(val));
+}
+
+// std::isinf isn't performant to use on integral types; it will
+// (uselessly) convert to floating point and then do the test.
+// The overloads below avoid that.
+
+template <
+    typename T,
+    typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+inline C10_HOST_DEVICE bool _isinf(T /*val*/) {
+  return false;
+}
+
+template <
+    typename T,
+    typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
+inline C10_HOST_DEVICE bool _isinf(T val) {
+#if defined(__CUDACC__) || defined(__HIPCC__)
+  return ::isinf(val);
+#else
+  return std::isinf(val);
+#endif
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::Half val) {
+  return at::_isinf(static_cast<float>(val));
+}
+
+inline C10_HOST_DEVICE bool _isinf(at::BFloat16 val) {
+  return at::_isinf(static_cast<float>(val));
+}
+
+template <typename T>
+C10_HOST_DEVICE inline T exp(T x) {
+  static_assert(
+      !std::is_same<T, double>::value,
+      "this template must be used with float or less precise type");
+#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
+  // use __expf fast approximation for peak bandwidth
+  return __expf(x);
+#else
+  return ::exp(x);
+#endif
+}
+
+template <>
+C10_HOST_DEVICE inline double exp(double x) {
+  return ::exp(x);
+}
+
+template <typename T>
+C10_HOST_DEVICE inline T log(T x) {
+  static_assert(
+      !std::is_same<T, double>::value,
+      "this template must be used with float or less precise type");
+#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
+  // use __logf fast approximation for peak bandwidth
+  return __logf(x);
+#else
+  return ::log(x);
+#endif
+}
+
+template <>
+C10_HOST_DEVICE inline double log(double x) {
+  return ::log(x);
+}
+
+template <typename T>
+C10_HOST_DEVICE inline T tan(T x) {
+  static_assert(
+      !std::is_same<T, double>::value,
+      "this template must be used with float or less precise type");
+#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__)
+  // use __tanf fast approximation for peak bandwidth
+  return __tanf(x);
+#else
+  return ::tan(x);
+#endif
+}
+
+template <>
+C10_HOST_DEVICE inline double tan(double x) {
+  return ::tan(x);
+}
+
+} // namespace at
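// [editor's example] Minimal usage sketch (not part of the original patch)
// for the helpers above; the integral overloads resolve to a compile-time
// `false` with no float conversion.

#include <ATen/NumericUtils.h>
#include <cassert>
#include <limits>

void numeric_utils_demo() {
  assert(!at::_isnan(int64_t{42})); // integral overload: always false
  assert(at::_isnan(std::numeric_limits<double>::quiet_NaN()));
  assert(!at::_isinf(int64_t{42})); // integral overload: always false
  assert(at::_isinf(std::numeric_limits<float>::infinity()));
}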
diff --git a/voice_bridge/torch/include/ATen/OpMathType.h b/voice_bridge/torch/include/ATen/OpMathType.h
new file mode 100644
index 0000000000000000000000000000000000000000..f08e420692569ba584fc216a3212c88f361d53a2
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/OpMathType.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace at {
+
+// For FP16 or BFloat16 inputs, ops should perform internal math in FP32.
+template <typename scalar_t>
+struct OpMathType {
+  using type = scalar_t;
+};
+template <>
+struct OpMathType<at::Half> {
+  using type = float;
+};
+template <>
+struct OpMathType<at::BFloat16> {
+  using type = float;
+};
+template <>
+struct OpMathType<c10::complex<Half>> {
+  using type = c10::complex<float>;
+};
+
+template <typename scalar_t>
+using opmath_type = typename OpMathType<scalar_t>::type;
+
+namespace {
+
+inline c10::ScalarType toOpMathType(const c10::ScalarType type) {
+  switch (type) {
+#define DEFINE_CASE(scalar_t, TypeNum) \
+  case ScalarType::TypeNum:            \
+    return CppTypeToScalarType<at::opmath_type<scalar_t>>::value;
+
+    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
+#undef DEFINE_CASE
+
+    default:
+      TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
+  }
+}
+
+} // namespace
+
+} // namespace at
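// [editor's example] The mapping above can be checked at compile time
// (not part of the original patch): reduced-precision floating types
// accumulate in float, everything else keeps its own type.

#include <ATen/OpMathType.h>
#include <type_traits>

static_assert(std::is_same<at::opmath_type<float>, float>::value, "");
static_assert(std::is_same<at::opmath_type<double>, double>::value, "");
static_assert(std::is_same<at::opmath_type<at::Half>, float>::value, "");
static_assert(std::is_same<at::opmath_type<at::BFloat16>, float>::value, "");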
diff --git a/voice_bridge/torch/include/ATen/OpaqueTensorImpl.h b/voice_bridge/torch/include/ATen/OpaqueTensorImpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6c6413815bbdb9fa5b88285a4b12e8659eb121a
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/OpaqueTensorImpl.h
@@ -0,0 +1,186 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace at {
+
+// An "Opaque" TensorImpl -- there are no strides and (for now)
+// even data() is not supported (thus no pointer arithmetic).
+
+// NOTE: We could allow data() in the future, but would have to ensure pointer
+// arithmetic code is properly guarded.
+//
+// NOTE: This does not support resize_ (and other metadata-changing ops)
+// because of `shallow_copy_and_detach`. We would need to define an interface
+// to "shallow copy" in order to add support.
+
+template <typename OpaqueHandle>
+struct TORCH_API OpaqueTensorImpl : public TensorImpl {
+  // public constructor for now...
+  OpaqueTensorImpl(
+      at::DispatchKeySet key_set,
+      const caffe2::TypeMeta data_type,
+      c10::Device device,
+      OpaqueHandle opaque_handle,
+      c10::IntArrayRef sizes,
+      bool is_non_overlapping_and_dense = true)
+      : TensorImpl(key_set, data_type, device),
+        opaque_handle_(std::move(opaque_handle)) {
+    set_storage_access_should_throw();
+    set_custom_sizes_strides(SizesStridesPolicy::CustomStrides);
+    sizes_and_strides_.set_sizes(sizes);
+    refresh_numel();
+    is_non_overlapping_and_dense_ = is_non_overlapping_and_dense;
+  }
+
+  // Destructor doesn't call release_resources because it's
+  // unnecessary; don't forget to change that if needed!
+  void release_resources() override {
+    TensorImpl::release_resources();
+    opaque_handle_ = {};
+  }
+
+  void set_size(int64_t dim, int64_t new_size) override {
+    AT_ERROR("opaque tensors do not have set_size");
+  }
+
+  void set_stride(int64_t dim, int64_t new_stride) override {
+    AT_ERROR("opaque tensors do not have set_stride");
+  }
+
+  void set_storage_offset(int64_t storage_offset) override {
+    AT_ERROR("opaque tensors do not have set_storage_offset");
+  }
+
+#ifdef DEBUG
+  bool has_storage() const override {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        !storage_, "OpaqueTensorImpl assumes that storage_ is never set");
+    return false;
+  }
+#endif
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
+        key_set(),
+        dtype(),
+        device(),
+        opaque_handle_,
+        sizes_and_strides_.sizes_arrayref());
+    copy_tensor_metadata(
+        /*src_opaque_impl=*/this,
+        /*dest_opaque_impl=*/impl.get(),
+        /*version_counter=*/version_counter,
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
+        key_set(),
+        dtype(),
+        device(),
+        opaque_handle_,
+        sizes_and_strides_.sizes_arrayref());
+    copy_tensor_metadata(
+        /*src_opaque_impl=*/this,
+        /*dest_opaque_impl=*/impl.get(),
+        /*version_counter=*/std::move(version_counter),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+  /**
+   * Shallow-copies data from another TensorImpl into this TensorImpl.
+   *
+   * For why this function doesn't check this TensorImpl's
+   * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
+    AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
+    auto opaque_impl =
+        static_cast<const OpaqueTensorImpl<OpaqueHandle>*>(impl.get());
+    copy_tensor_metadata(
+        /*src_impl=*/opaque_impl,
+        /*dest_impl=*/this,
+        /*version_counter=*/version_counter(),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
+    refresh_numel();
+  }
+
+  const OpaqueHandle& opaque_handle() const {
+    return opaque_handle_;
+  }
+
+  OpaqueHandle& unsafe_opaque_handle() {
+    return opaque_handle_;
+  }
+
+ protected:
+  /**
+   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
+   * storage_offset) from one TensorImpl to another TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`, see
+   * NOTE [ TensorImpl Shallow-Copying ].
+   */
+  static void copy_tensor_metadata(
+      const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
+      OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) {
+    TensorImpl::copy_tensor_metadata(
+        src_opaque_impl,
+        dest_opaque_impl,
+        version_counter,
+        allow_tensor_metadata_change);
+
+    // OpaqueTensorImpl-specific fields.
+    dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
+  }
+
+  static void copy_tensor_metadata(
+      const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
+      OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) {
+    TensorImpl::copy_tensor_metadata(
+        src_opaque_impl,
+        dest_opaque_impl,
+        std::move(version_counter),
+        allow_tensor_metadata_change);
+
+    // OpaqueTensorImpl-specific fields.
+    dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
+  }
+
+ private:
+  const char* tensorimpl_type_name() const override {
+    return "OpaqueTensorImpl";
+  }
+
+  OpaqueHandle opaque_handle_;
+};
+
+} // namespace at
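// [editor's example] A hedged sketch (not part of the original patch) of how
// a backend might instantiate OpaqueTensorImpl. `MyBackendHandle` and the
// PrivateUse1 dispatch key choice are hypothetical illustrations.

#include <ATen/OpaqueTensorImpl.h>

struct MyBackendHandle {
  void* device_buffer = nullptr; // backend-specific payload
};

auto make_opaque_impl() {
  // Sizes/dtype/device are visible to ATen; data() and strides() throw,
  // as described in the NOTEs above.
  return c10::make_intrusive<at::OpaqueTensorImpl<MyBackendHandle>>(
      c10::DispatchKeySet(c10::DispatchKey::PrivateUse1),
      caffe2::TypeMeta::Make<float>(),
      c10::Device(c10::DeviceType::PrivateUse1, 0),
      MyBackendHandle{},
      /*sizes=*/std::vector<int64_t>{2, 3});
}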
+ dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_; + } + + private: + const char* tensorimpl_type_name() const override { + return "OpaqueTensorImpl"; + } + + OpaqueHandle opaque_handle_; +}; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/Operators.h b/voice_bridge/torch/include/ATen/Operators.h new file mode 100644 index 0000000000000000000000000000000000000000..4287bcb0936bc93f5e268af377ad870cc343bd06 --- /dev/null +++ b/voice_bridge/torch/include/ATen/Operators.h @@ -0,0 +1,1286 @@ +#pragma once + +// @generated by torchgen/gen.py from Operators.h + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on all pytorch operators, meaning the \ + file will need to be re-compiled every time an operator is changed or added. \ + Consider including a specific operator from \ + and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS]. +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
[A further run of "#include" directives with extraction-stripped targets omitted, as above.]
+
+// Extension writers: do you write wrapper functions? Are you frustrated with
+// resolving overloads of operators? Are you frustrated with dealing with
+// pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
+// further, this is the utility for you.
+//
+// Given an operator schema: aten::op.overload(...
+//
+// Use ATEN_FN2(op, overload) to get a *function* version of the operator
+// that is guaranteed to not be overloaded. This means that you can safely
+// decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
+//
+// Given an operator schema without an overload name: aten::op(...
+//
+// Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
+//
+// There is some interesting behavior for out= operations.
+// ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
+// that is, the order of arguments is exactly what it looks like in the schema.
+
+#define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call
+#define ATEN_FN(op_name) at::_ops::op_name::call
+
+// Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
+// metadata about a given aten operator.
+// Notable data on the class includes:
+// - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
+// - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
+// - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
+// - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
+
+#define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload
+#define ATEN_OP(op_name) at::_ops::op_name
+
+// WARNING: Please do not call any of the ops in the _ops namespace directly.
+// Use the ATEN_FN macros. We do not guarantee stability of the naming
+// scheme for the functions in at::_ops
+
+// See Note [The ATen Operators API] for details of the at::_ops namespace
+
+namespace at {
+namespace _ops {
+
+} // namespace _ops
+} // namespace at
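As an editorial illustration of the macros documented above (not part of the vendored header; it assumes the <ATen/Operators.h> added by this diff is on the include path, and add_twice is a hypothetical helper):

#include <ATen/Operators.h>
#include <type_traits>

// ATEN_FN2(add, Tensor) names exactly one function (at::_ops::add_Tensor::call),
// so unlike the overloaded at::add, taking its address is unambiguous and
// decltype works. The type matches ATEN_OP2(add, Tensor)::schema quoted above.
using add_fn = decltype(&ATEN_FN2(add, Tensor));
static_assert(
    std::is_same<
        add_fn,
        at::Tensor (*)(const at::Tensor&, const at::Tensor&, const at::Scalar&)>::value,
    "faithful function pointer to aten::add.Tensor");

// Hypothetical helper: computes (a + b) + b through the stable function pointer.
inline at::Tensor add_twice(const at::Tensor& a, const at::Tensor& b) {
  add_fn f = &ATEN_FN2(add, Tensor);
  return f(f(a, b, /*alpha=*/1), b, /*alpha=*/1);
}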
diff --git a/voice_bridge/torch/include/ATen/PTThreadPool.h b/voice_bridge/torch/include/ATen/PTThreadPool.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c910dfb97dce44748c054b457b400b13b1b9fda
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/PTThreadPool.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <ATen/Parallel.h>
+#include <c10/core/thread_pool.h>
+
+namespace at {
+
+class TORCH_API PTThreadPool : public c10::ThreadPool {
+ public:
+  explicit PTThreadPool(int pool_size, int numa_node_id = -1)
+      : c10::ThreadPool(pool_size, numa_node_id, []() {
+          c10::setThreadName("PTThreadPool");
+          at::init_num_threads();
+        }) {}
+};
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/PadNd.h b/voice_bridge/torch/include/ATen/PadNd.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c0d67e9d5d3fdf24036ce7b811841690538879d
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/PadNd.h
@@ -0,0 +1,26 @@
+#pragma once
+
+namespace at {
+
+enum class padding_mode {
+  reflect,
+  replicate,
+  circular,
+  constant,
+};
+
+static inline c10::string_view padding_mode_string(padding_mode m) {
+  switch (m) {
+    case padding_mode::reflect:
+      return "reflect";
+    case padding_mode::replicate:
+      return "replicate";
+    case padding_mode::circular:
+      return "circular";
+    case padding_mode::constant:
+      return "constant";
+  }
+  TORCH_CHECK(false, "Invalid padding mode (", static_cast<int64_t>(m), ")");
+}
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/Parallel-inl.h b/voice_bridge/torch/include/ATen/Parallel-inl.h
new file mode 100644
index 0000000000000000000000000000000000000000..62f287fc33c42a549b63ed480ecd839675db11bf
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/Parallel-inl.h
@@ -0,0 +1,83 @@
+#pragma once
+
+#include <c10/util/Exception.h>
+#include <c10/util/SmallVector.h>
+
+namespace at {
+
+template <class F>
+inline void parallel_for(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const F& f) {
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(grain_size >= 0);
+  if (begin >= end) {
+    return;
+  }
+
+#ifdef INTRA_OP_PARALLEL
+  at::internal::lazy_init_num_threads();
+  const auto numiter = end - begin;
+  const bool use_parallel =
+      (numiter > grain_size && numiter > 1 && !at::in_parallel_region() &&
+       at::get_num_threads() > 1);
+  if (!use_parallel) {
+    internal::ThreadIdGuard tid_guard(0);
+    f(begin, end);
+    return;
+  }
+
+  internal::invoke_parallel(begin, end, grain_size, f);
+#else
+  internal::ThreadIdGuard tid_guard(0);
+  f(begin, end);
+#endif
+}
+
+template <class scalar_t, class F, class SF>
+inline scalar_t parallel_reduce(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const scalar_t ident,
+    const F& f,
+    const SF& sf) {
+  TORCH_CHECK(grain_size >= 0);
+  if (begin >= end) {
+    return ident;
+  }
+
+#ifdef INTRA_OP_PARALLEL
+  at::internal::lazy_init_num_threads();
+  const auto max_threads = at::get_num_threads();
+  const bool use_parallel =
+      ((end - begin) > grain_size && !at::in_parallel_region() &&
+       max_threads > 1);
+  if (!use_parallel) {
+    internal::ThreadIdGuard tid_guard(0);
+    return f(begin, end, ident);
+  }
+
+  c10::SmallVector<scalar_t, 64> results(max_threads, ident);
+  internal::invoke_parallel(
+      begin,
+      end,
+      grain_size,
+      [&](const int64_t my_begin, const int64_t my_end) {
+        const auto tid = at::get_thread_num();
+        results[tid] = f(my_begin, my_end, ident);
+      });
+
+  scalar_t result = ident;
+  for (auto partial_result : results) {
+    result = sf(result, partial_result);
+  }
+  return result;
+#else
+  internal::ThreadIdGuard tid_guard(0);
+  return f(begin, end, ident);
+#endif
+}
+
+} // namespace at
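A minimal usage sketch for the parallel_for defined above (editorial, not part of the diff; square_all is a hypothetical helper). It respects the header's warning by touching only a raw pointer inside the parallel body:

#include <ATen/Parallel.h>

// Square n floats in place. Per the dispatch logic above, the body runs
// sequentially when n <= grain_size, when already inside a parallel region,
// or when only one thread is configured; otherwise internal::invoke_parallel
// splits [0, n) into chunks of at least grain_size elements.
inline void square_all(float* data, int64_t n) {
  at::parallel_for(0, n, /*grain_size=*/2048, [data](int64_t begin, int64_t end) {
    for (int64_t i = begin; i < end; ++i) {
      data[i] *= data[i];
    }
  });
}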
diff --git a/voice_bridge/torch/include/ATen/Parallel.h b/voice_bridge/torch/include/ATen/Parallel.h
new file mode 100644
index 0000000000000000000000000000000000000000..4693997624e98d3c515fac6ee94d42966c45b703
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/Parallel.h
@@ -0,0 +1,160 @@
+#pragma once
+#include <ATen/Config.h>
+#include <c10/macros/Macros.h>
+#include <functional>
+#include <string>
+
+namespace at {
+
+inline int64_t divup(int64_t x, int64_t y) {
+  return (x + y - 1) / y;
+}
+
+// Called during new thread initialization
+TORCH_API void init_num_threads();
+
+// Sets the number of threads to be used in parallel region
+TORCH_API void set_num_threads(int);
+
+// Returns the maximum number of threads that may be used in a parallel region
+TORCH_API int get_num_threads();
+
+// Returns the current thread number (starting from 0)
+// in the current parallel region, or 0 in the sequential region
+TORCH_API int get_thread_num();
+
+// Checks whether the code runs in parallel region
+TORCH_API bool in_parallel_region();
+
+namespace internal {
+
+// Initialise num_threads lazily at first parallel call
+inline TORCH_API void lazy_init_num_threads() {
+  thread_local bool init = false;
+  if (C10_UNLIKELY(!init)) {
+    at::init_num_threads();
+    init = true;
+  }
+}
+
+TORCH_API void set_thread_num(int);
+
+class TORCH_API ThreadIdGuard {
+ public:
+  ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
+    set_thread_num(new_id);
+  }
+
+  ~ThreadIdGuard() {
+    set_thread_num(old_id_);
+  }
+
+ private:
+  int old_id_;
+};
+
+} // namespace internal
+
+/*
+parallel_for
+
+begin: index at which to start applying user function
+
+end: index at which to stop applying user function
+
+grain_size: number of elements per chunk. impacts the degree of parallelization
+
+f: user function applied in parallel to the chunks, signature:
+  void f(int64_t begin, int64_t end)
+
+Warning: parallel_for does NOT copy thread local
+states from the current thread to the worker threads.
+This means for example that Tensor operations CANNOT be used in the
+body of your function, only data pointers.
+*/
+template <class F>
+inline void parallel_for(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const F& f);
+
+/*
+parallel_reduce
+
+begin: index at which to start applying reduction
+
+end: index at which to stop applying reduction
+
+grain_size: number of elements per chunk. impacts number of elements in
+intermediate results tensor and degree of parallelization.
+
+ident: identity for binary combination function sf. sf(ident, x) needs to return
+x.
+
+f: function for reduction over a chunk. f needs to be of signature scalar_t
+f(int64_t partial_begin, int64_t partial_end, scalar_t identity)
+
+sf: function to combine two partial results. sf needs to be of signature
+scalar_t sf(scalar_t x, scalar_t y)
+
+For example, you might have a tensor of 10000 entries and want to sum together
+all the elements. Parallel_reduce with a grain_size of 2500 will then allocate
+an intermediate result tensor with 4 elements. Then it will execute the function
+"f" you provide and pass the beginning and end index of these chunks, so
+0-2499, 2500-4999, etc. and the combination identity. It will then write out
+the result from each of these chunks into the intermediate result tensor. After
+that it'll reduce the partial results from each chunk into a single number using
+the combination function sf and the identity ident. For a total summation this
+would be "+" and 0 respectively. This is similar to tbb's approach [1], where
+you need to provide a function to accumulate a subrange, a function to combine
+two partial results and an identity.
+
+Warning: parallel_reduce does NOT copy thread local
+states from the current thread to the worker threads.
+This means for example that Tensor operations CANNOT be used in the
+body of your function, only data pointers.
+
+[1] https://software.intel.com/en-us/node/506154
+*/
+template <class scalar_t, class F, class SF>
+inline scalar_t parallel_reduce(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const scalar_t ident,
+    const F& f,
+    const SF& sf);
+
+// Returns a detailed string describing parallelization settings
+TORCH_API std::string get_parallel_info();
+
+// Sets number of threads used for inter-op parallelism
+TORCH_API void set_num_interop_threads(int);
+
+// Returns the number of threads used for inter-op parallelism
+TORCH_API int get_num_interop_threads();
+
+// Launches inter-op parallel task
+TORCH_API void launch(std::function<void()> func);
+namespace internal {
+void launch_no_thread_state(std::function<void()> fn);
+} // namespace internal
+
+// Launches intra-op parallel task
+TORCH_API void intraop_launch(std::function<void()> func);
+
+// Returns number of intra-op threads used by default
+TORCH_API int intraop_default_num_threads();
+
+} // namespace at
+
+#if AT_PARALLEL_OPENMP
+#include <ATen/ParallelOpenMP.h> // IWYU pragma: keep
+#elif AT_PARALLEL_NATIVE
+#include <ATen/ParallelNative.h> // IWYU pragma: keep
+#elif AT_PARALLEL_NATIVE_TBB
+#include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep
+#endif
+
+#include <ATen/Parallel-inl.h> // IWYU pragma: keep
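To make the parallel_reduce contract above concrete (an editorial sketch, not part of the diff; sum_all is a hypothetical helper): ident is 0.0, f accumulates one chunk sequentially, and sf combines two partial results, exactly as in the 10000-entry summation walkthrough.

#include <ATen/Parallel.h>

// Sum n floats into a double; scalar_t is deduced as double from ident.
inline double sum_all(const float* data, int64_t n) {
  return at::parallel_reduce(
      0, n, /*grain_size=*/2500, /*ident=*/0.0,
      // f: sequential reduction over one chunk, seeded with the identity.
      [data](int64_t begin, int64_t end, double acc) {
        for (int64_t i = begin; i < end; ++i) {
          acc += data[i];
        }
        return acc;
      },
      // sf: combines two partial results; "+" with identity 0, as described.
      [](double a, double b) { return a + b; });
}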
diff --git a/voice_bridge/torch/include/ATen/ParallelFuture.h b/voice_bridge/torch/include/ATen/ParallelFuture.h
new file mode 100644
index 0000000000000000000000000000000000000000..042cd92da19345d7523671ca75da7279d13062a9
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ParallelFuture.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <ATen/core/ivalue.h>
+#include <c10/macros/Macros.h>
+#include <functional>
+
+namespace at {
+
+// Launches intra-op parallel task, returns a future
+TORCH_API c10::intrusive_ptr<c10::ivalue::Future> intraop_launch_future(
+    std::function<void()> func);
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ParallelNative.h b/voice_bridge/torch/include/ATen/ParallelNative.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f083d6025453df1915509f9734d9a70f9761b49
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ParallelNative.h
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <algorithm>
+#include <cstddef>
+#include <exception>
+
+#include <c10/util/Exception.h>
+
+#define INTRA_OP_PARALLEL
+
+namespace at {
+namespace internal {
+
+TORCH_API void invoke_parallel(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const std::function<void(int64_t, int64_t)>& f);
+
+} // namespace internal
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ParallelNativeTBB.h b/voice_bridge/torch/include/ATen/ParallelNativeTBB.h
new file mode 100644
index 0000000000000000000000000000000000000000..246e57d74f379104f47de9d7d29ff717db4b0c27
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ParallelNativeTBB.h
@@ -0,0 +1,54 @@
+#pragma once
+
+#include <atomic>
+#include <cstddef>
+#include <exception>
+
+#include <c10/util/Exception.h>
+
+#ifdef _WIN32
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#endif
+#include <tbb/tbb.h>
+
+#define INTRA_OP_PARALLEL
+
+namespace at {
+namespace internal {
+
+template <typename F>
+inline void invoke_parallel(
+    const int64_t begin,
+    const int64_t end,
+    const int64_t grain_size,
+    const F& f) {
+  // Choose number of tasks based on grain size and number of threads.
+  int64_t chunk_size = divup((end - begin), get_num_threads());
+  // Make sure each task is at least grain_size size.
+  chunk_size = std::max(grain_size, chunk_size);
+
+  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
+  std::exception_ptr eptr;
+  tbb::parallel_for(
+      tbb::blocked_range<int64_t>(begin, end, chunk_size),
+      [&eptr, &err_flag, f](const tbb::blocked_range<int64_t>& r) {
+        try {
+          internal::ThreadIdGuard tid_guard(
+              tbb::this_task_arena::current_thread_index());
+          f(r.begin(), r.end());
+        } catch (...) {
+          if (!err_flag.test_and_set()) {
+            eptr = std::current_exception();
+          }
+        }
+      },
+      tbb::static_partitioner{});
+  if (eptr) {
+    std::rethrow_exception(eptr);
+  }
+}
+
+} // namespace internal
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ParallelOpenMP.h b/voice_bridge/torch/include/ATen/ParallelOpenMP.h
new file mode 100644
index 0000000000000000000000000000000000000000..f1d0e7fb0a89e240a9ed11ebe33e84f1d873d928
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ParallelOpenMP.h
@@ -0,0 +1,57 @@
+#pragma once
+
+#include <atomic>
+#include <cstddef>
+#include <exception>
+
+#ifdef _OPENMP
+#define INTRA_OP_PARALLEL
+
+#include <omp.h>
+#endif
+
+namespace at {
+
+#ifdef _OPENMP
+namespace internal {
+template <typename F>
+inline void invoke_parallel(
+    int64_t begin,
+    int64_t end,
+    int64_t grain_size,
+    const F& f) {
+  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
+  std::exception_ptr eptr;
+
+#pragma omp parallel
+  {
+    // choose number of tasks based on grain size and number of threads
+    // can't use num_threads clause due to bugs in GOMP's thread pool (See
+    // #32008)
+    int64_t num_threads = omp_get_num_threads();
+    if (grain_size > 0) {
+      num_threads = std::min(num_threads, divup((end - begin), grain_size));
+    }
+
+    int64_t tid = omp_get_thread_num();
+    int64_t chunk_size = divup((end - begin), num_threads);
+    int64_t begin_tid = begin + tid * chunk_size;
+    if (begin_tid < end) {
+      try {
+        internal::ThreadIdGuard tid_guard(tid);
+        f(begin_tid, std::min(end, chunk_size + begin_tid));
+      } catch (...) {
+        if (!err_flag.test_and_set()) {
+          eptr = std::current_exception();
+        }
+      }
+    }
+  }
+  if (eptr) {
+    std::rethrow_exception(eptr);
+  }
+}
+} // namespace internal
+#endif // _OPENMP
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/PythonTorchFunctionTLS.h b/voice_bridge/torch/include/ATen/PythonTorchFunctionTLS.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef283164246d3bfb5c2f76cdce07b15a2182a736
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/PythonTorchFunctionTLS.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <c10/core/SafePyObject.h>
+#include <c10/macros/Macros.h>
+
+namespace at {
+namespace impl {
+
+struct TORCH_API PythonTorchFunctionTLS {
+  static void set_disabled(bool);
+  static bool is_disabled();
+
+  static void set_mode(std::shared_ptr<c10::SafePyObject>);
+  static const std::shared_ptr<c10::SafePyObject>& get_mode();
+  static void swap_mode(std::shared_ptr<c10::SafePyObject>&);
+
+  static void push_onto_stack(std::shared_ptr<c10::SafePyObject> mode);
+  static const std::shared_ptr<c10::SafePyObject> pop_stack();
+  static const std::shared_ptr<c10::SafePyObject>& get_stack_at(int64_t idx);
+  static int64_t stack_len();
+
+  static const PythonTorchFunctionTLS& get_state();
+  static void set_state(const PythonTorchFunctionTLS& state);
+
+ private:
+  // The mode TLS is split into
+  //   - disabled_, which says whether or not to disable all torch function
+  //     modes
+  //   - mode_, which is the C++ mode, that can only be the mode handling mode
+  //     or null
+  //   - stack_, which is a vector of modes representing the stack of user
+  //     defined modes
+  bool disabled_;
+  std::shared_ptr<c10::SafePyObject> mode_ = nullptr;
+  std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
+};
+
+TORCH_API bool function_mode_enabled();
+
+} // namespace impl
+} // namespace at
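The two invoke_parallel implementations above (TBB and OpenMP) share a first-exception-wins idiom: workers race on a std::atomic_flag, only the winner stores its std::exception_ptr, and the calling thread rethrows after the region ends. A standalone sketch of that idiom (editorial, not from the diff; run_workers is hypothetical, with plain std::thread standing in for the worker pool):

#include <atomic>
#include <exception>
#include <thread>
#include <vector>

// Run body(0..n-1) on n threads; rethrow the first exception on the caller.
template <class F>
void run_workers(int n, const F& body) {
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
  std::vector<std::thread> workers;
  for (int i = 0; i < n; ++i) {
    workers.emplace_back([&body, &err_flag, &eptr, i] {
      try {
        body(i);
      } catch (...) {
        // test_and_set returns the previous value, so only the first failing
        // worker sees false and gets to publish its exception_ptr.
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    });
  }
  for (auto& t : workers) {
    t.join();  // all workers are done before eptr is read
  }
  if (eptr) {
    std::rethrow_exception(eptr);
  }
}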
diff --git a/voice_bridge/torch/include/ATen/RedispatchFunctions.h b/voice_bridge/torch/include/ATen/RedispatchFunctions.h
new file mode 100644
index 0000000000000000000000000000000000000000..f32902f9b9015ab66d024f55f7f97246a3fe0013
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/RedispatchFunctions.h
@@ -0,0 +1,22101 @@
+#pragma once
+
+// @generated by torchgen/gen.py from RedispatchFunctions.h
+
+#ifdef TORCH_ASSERT_ONLY_METHOD_OPERATORS
+#error This change adds a dependency on all pytorch operators, meaning the \
+  file will need to be re-compiled every time an operator is changed or added. \
+  Consider using the at::_ops::{name}::redispatch() interface by including \
+  the specific operator from <ATen/ops/{my_operator}_ops.h>
+#endif
+
+#include <c10/core/Scalar.h>
+#include <ATen/Tensor.h>
+#include <c10/core/Storage.h>
+#include <ATen/core/Generator.h>
+#include <c10/util/Deprecated.h>
+#include <ATen/DeviceGuard.h>
+#include <c10/core/TensorOptions.h>
+#include <ATen/core/Reduction.h>
+#include <c10/util/Optional.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/Context.h>
+#include <ATen/TracerMode.h>
+#include <ATen/Operators.h>
+
+namespace at {
+
+namespace redispatch {
+
+    // aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Byte(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Byte::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Char(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Char::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Double(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Double::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Float(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Float::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Int::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Long(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Long::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Short(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Short::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
+    inline at::Tensor _cast_Half(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
+        return at::_ops::_cast_Half::redispatch(dispatchKeySet, self, non_blocking);
+    }
+
+    // aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
+    inline void __dispatch__backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false) {
+        return at::_ops::_backward::redispatch(dispatchKeySet, self, inputs, gradient, retain_graph, create_graph);
+    }
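The #error message above points at the per-operator alternative; a sketch of what that usage looks like (editorial, not part of the diff; it assumes a PyTorch build that ships per-operator headers such as <ATen/ops/abs_ops.h>, and abs_below is a hypothetical helper):

#include <ATen/ops/abs_ops.h>
#include <c10/core/DispatchKeySet.h>

// Re-invoke aten::abs below the given key set, the same way the
// at::redispatch wrappers in this header forward to at::_ops::*::redispatch,
// but depending on only one operator instead of all of them.
inline at::Tensor abs_below(c10::DispatchKeySet ks, const at::Tensor& self) {
  return at::_ops::abs::redispatch(ks, self);
}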
+    // aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
+    inline void __dispatch_set_data(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & new_data) {
+        return at::_ops::set_data::redispatch(dispatchKeySet, self, new_data);
+    }
+
+    // aten::data(Tensor self) -> Tensor
+    inline at::Tensor __dispatch_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::data::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_leaf(Tensor self) -> bool
+    inline bool __dispatch_is_leaf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_leaf::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::output_nr(Tensor self) -> int
+    inline int64_t __dispatch_output_nr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::output_nr::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_version(Tensor self) -> int
+    inline int64_t __dispatch__version(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_version::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
+    inline at::Tensor & __dispatch_requires_grad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool requires_grad=true) {
+        return at::_ops::requires_grad_::redispatch(dispatchKeySet, self, requires_grad);
+    }
+
+    // aten::retain_grad(Tensor(a!) self) -> ()
+    inline void __dispatch_retain_grad(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::retain_grad::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::retains_grad(Tensor self) -> bool
+    inline bool __dispatch_retains_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::retains_grad::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
+    inline at::Tensor _fw_primal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {
+        return at::_ops::_fw_primal::redispatch(dispatchKeySet, self, level);
+    }
+
+    // aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
+    inline at::Tensor _make_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
+        return at::_ops::_make_dual::redispatch(dispatchKeySet, primal, tangent, level);
+    }
+
+    // aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
+    inline ::std::tuple<at::Tensor,at::Tensor> _unpack_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level) {
+        return at::_ops::_unpack_dual::redispatch(dispatchKeySet, dual, level);
+    }
+
+    // aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
+    inline at::Tensor _new_zeros_with_same_feature_meta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
+        return at::_ops::_new_zeros_with_same_feature_meta::redispatch(dispatchKeySet, self, other, self_num_batch_dims);
+    }
+
+    // aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
+    inline bool _has_same_storage_numel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::_has_same_storage_numel::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
+ inline at::Tensor & rename_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional names) { + return at::_ops::rename_::redispatch(dispatchKeySet, self, names); + } + + // aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a) + inline at::Tensor rename(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional names) { + return at::_ops::rename::redispatch(dispatchKeySet, self, names); + } + + // aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a) + inline at::Tensor align_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) { + return at::_ops::align_to::redispatch(dispatchKeySet, self, names); + } + + // aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a) + inline at::Tensor align_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) { + return at::_ops::align_to_ellipsis_idx::redispatch(dispatchKeySet, self, order, ellipsis_idx); + } + + // aten::align_as(Tensor self, Tensor other) -> Tensor + inline at::Tensor align_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::align_as::redispatch(dispatchKeySet, self, other); + } + + // aten::align_tensors(Tensor[] tensors) -> Tensor[] + inline ::std::vector align_tensors(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::align_tensors::redispatch(dispatchKeySet, tensors); + } + + // aten::_assert_async(Tensor self) -> () + inline void _assert_async(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_assert_async::redispatch(dispatchKeySet, self); + } + + // aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? 
dtype=None) -> () + inline void _assert_tensor_metadata(c10::DispatchKeySet dispatchKeySet, const at::Tensor & a, at::OptionalIntArrayRef size=c10::nullopt, at::OptionalIntArrayRef stride=c10::nullopt, c10::optional dtype=c10::nullopt) { + return at::_ops::_assert_tensor_metadata::redispatch(dispatchKeySet, a, size, stride, dtype); + } + + // aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a) + inline at::Tensor refine_names(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) { + return at::_ops::refine_names::redispatch(dispatchKeySet, self, names); + } + + // aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool + inline bool _use_cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank) { + return at::_ops::_use_cudnn_ctc_loss::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank); + } + + // aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool + inline bool _use_cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank) { + return at::_ops::_use_cudnn_ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank); + } + + // aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + inline ::std::tuple _cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + return at::_ops::_cudnn_ctc_loss::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + + // aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + inline ::std::tuple _cudnn_ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + return at::_ops::_cudnn_ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); + } + + // aten::_use_cudnn_rnn_flatten_weight() -> bool + inline bool _use_cudnn_rnn_flatten_weight(c10::DispatchKeySet dispatchKeySet) { + return at::_ops::_use_cudnn_rnn_flatten_weight::redispatch(dispatchKeySet); + } + + // aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor + inline at::Tensor _cudnn_rnn_flatten_weight(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::redispatch(dispatchKeySet, weight_arr, weight_stride0, 
input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } + + // aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor + inline at::Tensor _cudnn_rnn_flatten_weight_symint(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional); + } + + // aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _cudnn_rnn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state) { + return at::_ops::_cudnn_rnn::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRef(batch_sizes), dropout_state); + } + + // aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple _cudnn_rnn_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const at::Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state) { + return at::_ops::_cudnn_rnn::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); + } + + // aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + inline ::std::tuple> _cudnn_rnn_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + return at::_ops::_cudnn_rnn_backward::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRef(batch_sizes), dropout_state, reserve, output_mask); + } + + // aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + inline ::std::tuple> _cudnn_rnn_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional & cx, const at::Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const at::Tensor & reserve, ::std::array output_mask) { + return at::_ops::_cudnn_rnn_backward::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); + } + + // aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor _cudnn_init_dropout_state(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) { + return at::_ops::_cudnn_init_dropout_state::redispatch(dispatchKeySet, dropout, train, dropout_seed, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor _cudnn_init_dropout_state(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_cudnn_init_dropout_state::redispatch(dispatchKeySet, dropout, train, dropout_seed, dtype, layout, device, pin_memory); + } + + // aten::_debug_has_internal_overlap(Tensor self) -> int + inline int64_t _debug_has_internal_overlap(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_debug_has_internal_overlap::redispatch(dispatchKeySet, self); + } + + // aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor) + inline ::std::tuple _fused_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional generator=c10::nullopt) { + return at::_ops::_fused_dropout::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor + inline at::Tensor _masked_scale(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale) { + return at::_ops::_masked_scale::redispatch(dispatchKeySet, self, mask, scale); + } + + // aten::native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor) + inline ::std::tuple native_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, c10::optional train) { + return at::_ops::native_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor + inline at::Tensor native_dropout_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale) { + return at::_ops::native_dropout_backward::redispatch(dispatchKeySet, grad_output, mask, scale); + } + + // aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor) + inline ::std::tuple _sobol_engine_draw(c10::DispatchKeySet dispatchKeySet, const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional dtype) { + return at::_ops::_sobol_engine_draw::redispatch(dispatchKeySet, quasi, n, sobolstate, dimension, num_generated, dtype); + } + + // aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!) + inline at::Tensor & _sobol_engine_ff_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) { + return at::_ops::_sobol_engine_ff_::redispatch(dispatchKeySet, self, n, sobolstate, dimension, num_generated); + } + + // aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!) + inline at::Tensor & _sobol_engine_scramble_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & ltm, int64_t dimension) { + return at::_ops::_sobol_engine_scramble_::redispatch(dispatchKeySet, self, ltm, dimension); + } + + // aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!) 
+ inline at::Tensor & _sobol_engine_initialize_state_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dimension) { + return at::_ops::_sobol_engine_initialize_state_::redispatch(dispatchKeySet, self, dimension); + } + + // aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor + inline at::Tensor _reshape_from_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & shape) { + return at::_ops::_reshape_from_tensor::redispatch(dispatchKeySet, self, shape); + } + + // aten::_shape_as_tensor(Tensor self) -> Tensor + inline at::Tensor _shape_as_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_shape_as_tensor::redispatch(dispatchKeySet, self); + } + + // aten::dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::feature_dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor feature_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::feature_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & feature_dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::feature_dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor alpha_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::alpha_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & alpha_dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::alpha_dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor + inline at::Tensor feature_alpha_dropout(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, bool train) { + return at::_ops::feature_alpha_dropout::redispatch(dispatchKeySet, input, p, train); + } + + // aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + inline at::Tensor & feature_alpha_dropout_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, bool train) { + return at::_ops::feature_alpha_dropout_::redispatch(dispatchKeySet, self, p, train); + } + + // aten::abs(Tensor self) -> Tensor + inline at::Tensor abs(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::abs::redispatch(dispatchKeySet, self); + } + + // aten::abs_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & abs_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::abs_::redispatch(dispatchKeySet, self); + } + + // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & abs_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & abs_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::absolute(Tensor self) -> Tensor + inline at::Tensor absolute(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::absolute::redispatch(dispatchKeySet, self); + } + + // aten::absolute_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & absolute_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::absolute_::redispatch(dispatchKeySet, self); + } + + // aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & absolute_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::absolute_out::redispatch(dispatchKeySet, self, out); + } + + // aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & absolute_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::absolute_out::redispatch(dispatchKeySet, self, out); + } + + // aten::angle(Tensor self) -> Tensor + inline at::Tensor angle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::angle::redispatch(dispatchKeySet, self); + } + + // aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & angle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::angle_out::redispatch(dispatchKeySet, self, out); + } + + // aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & angle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::angle_out::redispatch(dispatchKeySet, self, out); + } + + // aten::view_as_real(Tensor(a) self) -> Tensor(a) + inline at::Tensor view_as_real(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::view_as_real::redispatch(dispatchKeySet, self); + } + + // aten::view_as_complex(Tensor(a) self) -> Tensor(a) + inline at::Tensor view_as_complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::view_as_complex::redispatch(dispatchKeySet, self); + } + + // aten::sgn(Tensor self) -> Tensor + inline at::Tensor sgn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sgn::redispatch(dispatchKeySet, self); + } + + // aten::sgn_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sgn_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sgn_::redispatch(dispatchKeySet, self); + } + + // aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sgn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sgn_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sgn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sgn_out::redispatch(dispatchKeySet, self, out); + } + + // aten::chalf(Tensor self, *, MemoryFormat? 
memory_format=None) -> Tensor + inline at::Tensor chalf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional memory_format=c10::nullopt) { + return at::_ops::chalf::redispatch(dispatchKeySet, self, memory_format); + } + + // aten::real(Tensor(a) self) -> Tensor(a) + inline at::Tensor real(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::real::redispatch(dispatchKeySet, self); + } + + // aten::imag(Tensor(a) self) -> Tensor(a) + inline at::Tensor imag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::imag::redispatch(dispatchKeySet, self); + } + + // aten::_conj(Tensor(a) self) -> Tensor(a) + inline at::Tensor _conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_conj::redispatch(dispatchKeySet, self); + } + + // aten::conj(Tensor(a) self) -> Tensor(a) + inline at::Tensor __dispatch_conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::conj::redispatch(dispatchKeySet, self); + } + + // aten::_conj_physical(Tensor self) -> Tensor + inline at::Tensor _conj_physical(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_conj_physical::redispatch(dispatchKeySet, self); + } + + // aten::conj_physical(Tensor self) -> Tensor + inline at::Tensor conj_physical(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::conj_physical::redispatch(dispatchKeySet, self); + } + + // aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conj_physical_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::conj_physical_out::redispatch(dispatchKeySet, self, out); + } + + // aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conj_physical_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::conj_physical_out::redispatch(dispatchKeySet, self, out); + } + + // aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & conj_physical_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::conj_physical_::redispatch(dispatchKeySet, self); + } + + // aten::resolve_conj(Tensor(a) self) -> Tensor(a) + inline at::Tensor resolve_conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::resolve_conj::redispatch(dispatchKeySet, self); + } + + // aten::resolve_neg(Tensor(a) self) -> Tensor(a) + inline at::Tensor resolve_neg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::resolve_neg::redispatch(dispatchKeySet, self); + } + + // aten::_neg_view(Tensor(a) self) -> Tensor(a) + inline at::Tensor _neg_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_neg_view::redispatch(dispatchKeySet, self); + } + + // aten::acos(Tensor self) -> Tensor + inline at::Tensor acos(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::acos::redispatch(dispatchKeySet, self); + } + + // aten::acos_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & acos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::acos_::redispatch(dispatchKeySet, self); + } + + // aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & acos_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & acos_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arccos(Tensor self) -> Tensor + inline at::Tensor arccos(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arccos::redispatch(dispatchKeySet, self); + } + + // aten::arccos_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arccos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arccos_::redispatch(dispatchKeySet, self); + } + + // aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arccos_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arccos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arccos_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arccos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor + inline at::Tensor avg_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true) { + return at::_ops::avg_pool1d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad); + } + + // aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool1d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) + inline ::std::tuple adaptive_max_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool1d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::add_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor _add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & _add_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::_add_relu_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor _add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & _add_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu__Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add__Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addmv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmv::redispatch(dispatchKeySet, self, mat, vec, beta, alpha); + } + + // aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
+ inline at::Tensor & addmv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmv_::redispatch(dispatchKeySet, self, mat, vec, beta, alpha); + } + + // aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addmv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmv_out::redispatch(dispatchKeySet, self, mat, vec, beta, alpha, out); + } + + // aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addmv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addmv_out::redispatch(dispatchKeySet, self, mat, vec, beta, alpha, out); + } + + // aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addr::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha); + } + + // aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & addr_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addr_::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha); + } + + // aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addr_out::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha, out); + } + + // aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & addr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addr_out::redispatch(dispatchKeySet, self, vec1, vec2, beta, alpha, out); + } + + // aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor + inline at::Tensor affine_grid_generator(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator::redispatch(dispatchKeySet, theta, size, align_corners); + } + + // aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor + inline at::Tensor affine_grid_generator_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_backward::redispatch(dispatchKeySet, grad, size, align_corners); + } + + // aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + inline at::Tensor all(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::all_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::all_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) { + return at::_ops::all_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + inline at::Tensor all(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::all_dimname::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::all_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) 
+    inline at::Tensor & all_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::all_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool
+    inline bool allclose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) {
+        return at::_ops::allclose::redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan);
+    }
+
+    // aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor
+    inline at::Tensor any(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::any_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & any_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::any_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & any_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::any_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
+    inline at::Tensor any(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::any_dimname::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & any_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::any_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & any_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::any_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::TensorOptions options={}) {
+        return at::_ops::arange::redispatch(dispatchKeySet, end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::arange::redispatch(dispatchKeySet, end, dtype, layout, device, pin_memory);
+    }
+
+    // aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}) {
+        return at::_ops::arange_start::redispatch(dispatchKeySet, start, end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::arange_start::redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory);
+    }
+
+    // aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options={}) {
+        return at::_ops::arange_start_step::redispatch(dispatchKeySet, start, end, step, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor arange(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::arange_start_step::redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory);
+    }
+
+    // aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & arange_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & end) {
+        return at::_ops::arange_out::redispatch(dispatchKeySet, end, out);
+    }
+
+    // aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & arange_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & end, at::Tensor & out) {
+        return at::_ops::arange_out::redispatch(dispatchKeySet, end, out);
+    }
+
+    // aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & arange_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
+        return at::_ops::arange_start_out::redispatch(dispatchKeySet, start, end, step, out);
+    }
+
+    // aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & arange_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
+        return at::_ops::arange_start_out::redispatch(dispatchKeySet, start, end, step, out);
+    }
+
+    // aten::_dim_arange(Tensor like, int dim) -> Tensor
+    inline at::Tensor _dim_arange(c10::DispatchKeySet dispatchKeySet, const at::Tensor & like, int64_t dim) {
+        return at::_ops::_dim_arange::redispatch(dispatchKeySet, like, dim);
+    }
+
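+    // Illustrative sketch -- not part of the generated header. Factory ops such
+    // as arange come in two flavours: a convenience overload taking
+    // at::TensorOptions (which the bodies above unpack into
+    // dtype/layout/device/pin_memory), and a schema-faithful overload taking
+    // the four unpacked c10::optional arguments. The two calls below should be
+    // equivalent, assuming a DispatchKeySet `ks` is in scope:
+    //
+    //   at::Tensor a = at::redispatch::arange(ks, 10, at::TensorOptions().dtype(at::kFloat));
+    //   at::Tensor b = at::redispatch::arange(ks, 10, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);
+
+    // aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor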
+    inline at::Tensor argmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::argmax::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & argmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::argmax_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & argmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::argmax_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor
+    inline at::Tensor argmin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::argmin::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & argmin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::argmin_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & argmin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::argmin_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::acosh(Tensor self) -> Tensor
+    inline at::Tensor acosh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::acosh::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::acosh_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & acosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::acosh_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & acosh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::acosh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & acosh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::acosh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::arccosh(Tensor self) -> Tensor
+    inline at::Tensor arccosh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::arccosh::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::arccosh_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & arccosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::arccosh_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & arccosh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arccosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arccosh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arccosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::asinh(Tensor self) -> Tensor + inline at::Tensor asinh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::asinh::redispatch(dispatchKeySet, self); + } + + // aten::asinh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & asinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::asinh_::redispatch(dispatchKeySet, self); + } + + // aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & asinh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::asinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & asinh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::asinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsinh(Tensor self) -> Tensor + inline at::Tensor arcsinh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arcsinh::redispatch(dispatchKeySet, self); + } + + // aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arcsinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arcsinh_::redispatch(dispatchKeySet, self); + } + + // aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsinh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arcsinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsinh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arcsinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atanh(Tensor self) -> Tensor + inline at::Tensor atanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::atanh::redispatch(dispatchKeySet, self); + } + + // aten::atanh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & atanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::atanh_::redispatch(dispatchKeySet, self); + } + + // aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::atanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::atanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arctanh(Tensor self) -> Tensor + inline at::Tensor arctanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arctanh::redispatch(dispatchKeySet, self); + } + + // aten::arctanh_(Tensor(a!) self) -> Tensor(a!) 
+    inline at::Tensor & arctanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::arctanh_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & arctanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::arctanh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & arctanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::arctanh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
+    inline at::Tensor as_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+    }
+
+    // aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)
+    inline at::Tensor as_strided_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided::redispatch(dispatchKeySet, self, size, stride, storage_offset);
+    }
+
+    // aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
+    inline const at::Tensor & as_strided_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+    }
+
+    // aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)
+    inline const at::Tensor & as_strided__symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_::redispatch(dispatchKeySet, self, size, stride, storage_offset);
+    }
+
+    // aten::asin(Tensor self) -> Tensor
+    inline at::Tensor asin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::asin::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::asin_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & asin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::asin_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & asin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::asin_out::redispatch(dispatchKeySet, self, out);
+    }
+
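+    // Illustrative sketch -- not part of the generated header. Ops whose schema
+    // uses SymInt get two entry points: the plain overload takes concrete
+    // at::IntArrayRef values and lifts them with c10::fromIntArrayRef (as the
+    // as_strided bodies above show), while the *_symint overload passes
+    // c10::SymIntArrayRef through untouched. Assuming a DispatchKeySet `ks`
+    // and a tensor `base` are in scope:
+    //
+    //   std::vector<int64_t> sz{2, 3}, st{3, 1};
+    //   at::Tensor v = at::redispatch::as_strided(ks, base, sz, st);
+    //   at::Tensor w = at::redispatch::as_strided_symint(
+    //       ks, base, c10::fromIntArrayRef(sz), c10::fromIntArrayRef(st));
+
+    // aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)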
+ inline at::Tensor & asin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::asin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsin(Tensor self) -> Tensor + inline at::Tensor arcsin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arcsin::redispatch(dispatchKeySet, self); + } + + // aten::arcsin_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arcsin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arcsin_::redispatch(dispatchKeySet, self); + } + + // aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arcsin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arcsin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::arcsin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atan(Tensor self) -> Tensor + inline at::Tensor atan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::atan::redispatch(dispatchKeySet, self); + } + + // aten::atan_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & atan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::atan_::redispatch(dispatchKeySet, self); + } + + // aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arctan(Tensor self) -> Tensor + inline at::Tensor arctan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::arctan::redispatch(dispatchKeySet, self); + } + + // aten::arctan_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & arctan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::arctan_::redispatch(dispatchKeySet, self); + } + + // aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::arctan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+    inline at::Tensor & arctan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::arctan_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::atleast_1d(Tensor self) -> Tensor
+    inline at::Tensor atleast_1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::atleast_1d::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]
+    inline ::std::vector<at::Tensor> atleast_1d(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::atleast_1d_Sequence::redispatch(dispatchKeySet, tensors);
+    }
+
+    // aten::atleast_2d(Tensor self) -> Tensor
+    inline at::Tensor atleast_2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::atleast_2d::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]
+    inline ::std::vector<at::Tensor> atleast_2d(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::atleast_2d_Sequence::redispatch(dispatchKeySet, tensors);
+    }
+
+    // aten::atleast_3d(Tensor self) -> Tensor
+    inline at::Tensor atleast_3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::atleast_3d::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]
+    inline ::std::vector<at::Tensor> atleast_3d(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::atleast_3d_Sequence::redispatch(dispatchKeySet, tensors);
+    }
+
+    // aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+    inline at::Tensor baddbmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+        return at::_ops::baddbmm::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
+    }
+
+    // aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
+    inline at::Tensor & baddbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+        return at::_ops::baddbmm_::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha);
+    }
+
+    // aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & baddbmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+        return at::_ops::baddbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out);
+    }
+
+    // aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & baddbmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
+        return at::_ops::baddbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out);
+    }
+
+    // aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) {
+        return at::_ops::bartlett_window::redispatch(dispatchKeySet, window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::bartlett_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
+    }
+
+    // aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) {
+        return at::_ops::bartlett_window_periodic::redispatch(dispatchKeySet, window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor bartlett_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::bartlett_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
+    }
+
+    // aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor
+    inline at::Tensor batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
+        return at::_ops::batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
+    }
+
+    // aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor
+    inline at::Tensor quantized_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
+        return at::_ops::quantized_batch_norm::redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point);
+    }
+
+    // aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,int64_t> _batch_norm_impl_index(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
+        return at::_ops::_batch_norm_impl_index::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
+    }
+
+    // aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(c10::DispatchKeySet dispatchKeySet, int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
+        return at::_ops::_batch_norm_impl_index_backward::redispatch(dispatchKeySet, impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
+    }
+
+    // aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor
+    inline at::Tensor bernoulli(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::bernoulli::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bernoulli_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::bernoulli_out::redispatch(dispatchKeySet, self, generator, out);
+    }
+
+    // aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bernoulli_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::bernoulli_out::redispatch(dispatchKeySet, self, generator, out);
+    }
+
+    // aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)
+    inline at::Tensor & bernoulli_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::bernoulli__Tensor::redispatch(dispatchKeySet, self, p, generator);
+    }
+
+    // aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)
+    inline at::Tensor & bernoulli_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p=0.5, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::bernoulli__float::redispatch(dispatchKeySet, self, p, generator);
+    }
+
+    // aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor
+    inline at::Tensor bernoulli(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::bernoulli_p::redispatch(dispatchKeySet, self, p, generator);
+    }
+
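+    // Illustrative sketch -- not part of the generated header. Note the naming
+    // convention for out-variants throughout this header: `*_out` takes the
+    // output tensor first and keeps the schema defaults, while `*_outf`
+    // mirrors the schema argument order with `out` last and no defaults.
+    // Assuming `ks` and a tensor `self` are in scope:
+    //
+    //   at::Tensor out = at::empty_like(self);
+    //   at::redispatch::bernoulli_out(ks, out, self);                 // out-first, generator defaulted
+    //   at::redispatch::bernoulli_outf(ks, self, c10::nullopt, out);  // schema order, everything explicit
+
+    // aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor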
+    inline at::Tensor bilinear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
+        return at::_ops::bilinear::redispatch(dispatchKeySet, input1, input2, weight, bias);
+    }
+
+    // aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
+    inline at::Tensor binary_cross_entropy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::binary_cross_entropy::redispatch(dispatchKeySet, self, target, weight, reduction);
+    }
+
+    // aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & binary_cross_entropy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::binary_cross_entropy_out::redispatch(dispatchKeySet, self, target, weight, reduction, out);
+    }
+
+    // aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & binary_cross_entropy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
+        return at::_ops::binary_cross_entropy_out::redispatch(dispatchKeySet, self, target, weight, reduction, out);
+    }
+
+    // aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
+    inline at::Tensor binary_cross_entropy_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::binary_cross_entropy_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction);
+    }
+
+    // aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & binary_cross_entropy_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::binary_cross_entropy_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, grad_input);
+    }
+
+    // aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & binary_cross_entropy_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
+        return at::_ops::binary_cross_entropy_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, grad_input);
+    }
+
+    // aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor
+    inline at::Tensor binary_cross_entropy_with_logits(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::binary_cross_entropy_with_logits::redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction);
+    }
+
+    // aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor
+    inline at::Tensor bincount(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0) {
+        return at::_ops::bincount::redispatch(dispatchKeySet, self, weights, minlength);
+    }
+
+    // aten::bitwise_not(Tensor self) -> Tensor
+    inline at::Tensor bitwise_not(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::bitwise_not::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & bitwise_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::bitwise_not_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_not_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::bitwise_not_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_not_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::bitwise_not_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & copysign_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::copysign_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & copysign_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::copysign_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor copysign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::copysign_Tensor::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+    inline at::Tensor & copysign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::copysign__Tensor::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
+    inline at::Tensor copysign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::copysign_Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+    inline at::Tensor & copysign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::copysign__Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & copysign_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::copysign_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copysign_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::copysign_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_not(Tensor self) -> Tensor + inline at::Tensor logical_not(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::logical_not::redispatch(dispatchKeySet, self); + } + + // aten::logical_not_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & logical_not_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::logical_not_::redispatch(dispatchKeySet, self); + } + + // aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_not_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::logical_not_out::redispatch(dispatchKeySet, self, out); + } + + // aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_not_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::logical_not_out::redispatch(dispatchKeySet, self, out); + } + + // aten::logical_xor(Tensor self, Tensor other) -> Tensor + inline at::Tensor logical_xor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & logical_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor_::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_xor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & logical_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::logical_xor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::logical_and(Tensor self, Tensor other) -> Tensor + inline at::Tensor logical_and(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_and::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & logical_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::logical_and_::redispatch(dispatchKeySet, self, other); + } + + // aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+    inline at::Tensor & logical_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logical_and_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logical_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::logical_and_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::logical_or(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor logical_or(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logical_or::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+    inline at::Tensor & logical_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logical_or_::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logical_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logical_or_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logical_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::logical_or_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) {
+        return at::_ops::blackman_window::redispatch(dispatchKeySet, window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::blackman_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
+    }
+
+    // aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) {
+        return at::_ops::blackman_window_periodic::redispatch(dispatchKeySet, window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor blackman_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::blackman_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
+    }
+
+    // aten::bmm(Tensor self, Tensor mat2) -> Tensor
+    inline at::Tensor bmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
+        return at::_ops::bmm::redispatch(dispatchKeySet, self, mat2);
+    }
+
+    // aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
+        return at::_ops::bmm_out::redispatch(dispatchKeySet, self, mat2, out);
+    }
+
+    // aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
+        return at::_ops::bmm_out::redispatch(dispatchKeySet, self, mat2, out);
+    }
+
+    // aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]
+    inline ::std::vector<at::Tensor> broadcast_tensors(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::broadcast_tensors::redispatch(dispatchKeySet, tensors);
+    }
+
+    // aten::broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
+    inline at::Tensor broadcast_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::broadcast_to::redispatch(dispatchKeySet, self, size);
+    }
+
+    // aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)
+    inline at::Tensor _sparse_broadcast_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::_sparse_broadcast_to::redispatch(dispatchKeySet, self, size);
+    }
+
+    // aten::cat(Tensor[] tensors, int dim=0) -> Tensor
+    inline at::Tensor cat(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim=0) {
+        return at::_ops::cat::redispatch(dispatchKeySet, tensors, dim);
+    }
+
+    // aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::ITensorListRef & tensors, int64_t dim=0) {
+        return at::_ops::cat_out::redispatch(dispatchKeySet, tensors, dim, out);
+    }
+
+    // aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cat_outf(c10::DispatchKeySet dispatchKeySet, const at::ITensorListRef & tensors, int64_t dim, at::Tensor & out) {
+        return at::_ops::cat_out::redispatch(dispatchKeySet, tensors, dim, out);
+    }
+
+    // aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor
+    inline at::Tensor cat(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) {
+        return at::_ops::cat_names::redispatch(dispatchKeySet, tensors, dim);
+    }
+
+    // aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, at::Dimname dim) {
+        return at::_ops::cat_names_out::redispatch(dispatchKeySet, tensors, dim, out);
+    }
+
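+    // Illustrative sketch -- not part of the generated header. The plain cat
+    // overloads take at::ITensorListRef, which binds to ordinary tensor lists,
+    // while the Dimname variants take at::TensorList. Assuming `ks`, `a`, and
+    // `b` are in scope:
+    //
+    //   std::vector<at::Tensor> ts{a, b};
+    //   at::Tensor c = at::redispatch::cat(ks, ts, /*dim=*/0);
+
+    // aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)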
+ inline at::Tensor & cat_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::cat_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor concat(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::concat_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor + inline at::Tensor concat(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concat_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::concat_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor concatenate(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concatenate::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concatenate_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concatenate_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concatenate_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::concatenate_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor + inline at::Tensor concatenate(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concatenate_names::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & concatenate_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concatenate_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & concatenate_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::concatenate_names_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::block_diag(Tensor[] tensors) -> Tensor + inline at::Tensor block_diag(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::block_diag::redispatch(dispatchKeySet, tensors); + } + + // aten::ceil(Tensor self) -> Tensor + inline at::Tensor ceil(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::ceil::redispatch(dispatchKeySet, self); + } + + // aten::ceil_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & ceil_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::ceil_::redispatch(dispatchKeySet, self); + } + + // aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ceil_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::ceil_out::redispatch(dispatchKeySet, self, out); + } + + // aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ceil_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::ceil_out::redispatch(dispatchKeySet, self, out); + } + + // aten::chain_matmul(Tensor[] matrices) -> Tensor + inline at::Tensor chain_matmul(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices) { + return at::_ops::chain_matmul::redispatch(dispatchKeySet, matrices); + } + + // aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & chain_matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList matrices) { + return at::_ops::chain_matmul_out::redispatch(dispatchKeySet, matrices, out); + } + + // aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & chain_matmul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList matrices, at::Tensor & out) {
+   return at::_ops::chain_matmul_out::redispatch(dispatchKeySet, matrices, out);
+ }
+
+ // aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]
+ inline ::std::vector<at::Tensor> unsafe_chunk(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim=0) {
+   return at::_ops::unsafe_chunk::redispatch(dispatchKeySet, self, chunks, dim);
+ }
+
+ // aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]
+ inline ::std::vector<at::Tensor> chunk(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t chunks, int64_t dim=0) {
+   return at::_ops::chunk::redispatch(dispatchKeySet, self, chunks, dim);
+ }
+
+ // aten::tensor_split.sections(Tensor(a -> *) self, int sections, int dim=0) -> Tensor(a)[]
+ inline ::std::vector<at::Tensor> tensor_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections, int64_t dim=0) {
+   return at::_ops::tensor_split_sections::redispatch(dispatchKeySet, self, sections, dim);
+ }
+
+ // aten::tensor_split.indices(Tensor(a -> *) self, int[] indices, int dim=0) -> Tensor(a)[]
+ inline ::std::vector<at::Tensor> tensor_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) {
+   return at::_ops::tensor_split_indices::redispatch(dispatchKeySet, self, indices, dim);
+ }
+
+ // aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]
+ inline ::std::vector<at::Tensor> tensor_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim=0) {
+   return at::_ops::tensor_split_tensor_indices_or_sections::redispatch(dispatchKeySet, self, tensor_indices_or_sections, dim);
+ }
+
+ // aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
+ inline at::Tensor clamp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
+   return at::_ops::clamp::redispatch(dispatchKeySet, self, min, max);
+ }
+
+ // aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
+ inline at::Tensor clamp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
+   return at::_ops::clamp_Tensor::redispatch(dispatchKeySet, self, min, max);
+ }
+
+ // aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
+ inline at::Tensor & clamp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
+   return at::_ops::clamp_::redispatch(dispatchKeySet, self, min, max);
+ }
+
+ // aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
+ inline at::Tensor & clamp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
+   return at::_ops::clamp__Tensor::redispatch(dispatchKeySet, self, min, max);
+ }
+
+ // aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clamp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
+   return at::_ops::clamp_out::redispatch(dispatchKeySet, self, min, max, out);
+ }
+
+ // aten::clamp.out(Tensor self, Scalar? min=None, Scalar?
max=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clamp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
+   return at::_ops::clamp_out::redispatch(dispatchKeySet, self, min, max, out);
+ }
+
+ // aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clamp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
+   return at::_ops::clamp_Tensor_out::redispatch(dispatchKeySet, self, min, max, out);
+ }
+
+ // aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clamp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
+   return at::_ops::clamp_Tensor_out::redispatch(dispatchKeySet, self, min, max, out);
+ }
+
+ // aten::clamp_max(Tensor self, Scalar max) -> Tensor
+ inline at::Tensor clamp_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max) {
+   return at::_ops::clamp_max::redispatch(dispatchKeySet, self, max);
+ }
+
+ // aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor
+ inline at::Tensor clamp_max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max) {
+   return at::_ops::clamp_max_Tensor::redispatch(dispatchKeySet, self, max);
+ }
+
+ // aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)
+ inline at::Tensor & clamp_max_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & max) {
+   return at::_ops::clamp_max_::redispatch(dispatchKeySet, self, max);
+ }
+
+ // aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)
+ inline at::Tensor & clamp_max_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & max) {
+   return at::_ops::clamp_max__Tensor::redispatch(dispatchKeySet, self, max);
+ }
+
+ // aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clamp_max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & max) {
+   return at::_ops::clamp_max_out::redispatch(dispatchKeySet, self, max, out);
+ }
+
+ // aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clamp_max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & max, at::Tensor & out) {
+   return at::_ops::clamp_max_out::redispatch(dispatchKeySet, self, max, out);
+ }
+
+ // aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clamp_max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & max) {
+   return at::_ops::clamp_max_Tensor_out::redispatch(dispatchKeySet, self, max, out);
+ }
+
+ // aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
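The chunk and tensor_split overloads declared a little above differ when the dimension does not divide evenly: chunk may return fewer or ragged pieces, while tensor_split always returns exactly the requested number of sections. A sketch against the public API (illustration only, not part of the generated header):

    #include <ATen/ATen.h>
    #include <vector>

    int main() {
      at::Tensor t = at::arange(7);

      // 7 elements in 3 chunks -> sizes 3, 3, 1.
      std::vector<at::Tensor> c = at::chunk(t, /*chunks=*/3, /*dim=*/0);

      // tensor_split gives exactly 3 pieces -> sizes 3, 2, 2.
      std::vector<at::Tensor> s = at::tensor_split(t, /*sections=*/3, /*dim=*/0);

      return (c.size() == 3 && s.size() == 3) ? 0 : 1;  // expected: 0
    }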
+ inline at::Tensor & clamp_max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & max, at::Tensor & out) { + return at::_ops::clamp_max_Tensor_out::redispatch(dispatchKeySet, self, max, out); + } + + // aten::clamp_min(Tensor self, Scalar min) -> Tensor + inline at::Tensor clamp_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min) { + return at::_ops::clamp_min::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor + inline at::Tensor clamp_min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min) { + return at::_ops::clamp_min_Tensor::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) + inline at::Tensor & clamp_min_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min) { + return at::_ops::clamp_min_::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) + inline at::Tensor & clamp_min_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & min) { + return at::_ops::clamp_min__Tensor::redispatch(dispatchKeySet, self, min); + } + + // aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & min) { + return at::_ops::clamp_min_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min, at::Tensor & out) { + return at::_ops::clamp_min_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & min) { + return at::_ops::clamp_min_Tensor_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clamp_min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & min, at::Tensor & out) { + return at::_ops::clamp_min_Tensor_out::redispatch(dispatchKeySet, self, min, out); + } + + // aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + inline at::Tensor clip(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clip::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor + inline at::Tensor clip(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clip_Tensor::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) + inline at::Tensor & clip_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clip_::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) 
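clamp takes either Scalar? or Tensor? bounds, and either bound may be omitted; clip/clip_ (declared next) are operator-level aliases of clamp/clamp_. A sketch against the public API (illustration only, not part of the generated header):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::arange(-2.0, 3.0);  // [-2, -1, 0, 1, 2]

      at::Tensor a = at::clamp(x, /*min=*/-1.0, /*max=*/1.0);  // [-1, -1, 0, 1, 1]
      at::Tensor b = at::clamp(x, /*min=*/0.0);                // upper bound left open
      at::Tensor c = at::clamp(x, /*min=*/at::zeros({5}));     // elementwise Tensor bound

      at::Tensor d = at::clip(x, -1.0, 1.0);                   // alias of clamp
      return a.equal(d) ? 0 : 1;  // expected: 0
    }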
+ inline at::Tensor & clip_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clip__Tensor::redispatch(dispatchKeySet, self, min, max); + } + + // aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & min, const c10::optional & max=c10::nullopt) { + return at::_ops::clip_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out) { + return at::_ops::clip_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional & min={}, const c10::optional & max={}) { + return at::_ops::clip_Tensor_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clip_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & min, const c10::optional & max, at::Tensor & out) { + return at::_ops::clip_Tensor_out::redispatch(dispatchKeySet, self, min, max, out); + } + + // aten::cudnn_is_acceptable(Tensor self) -> bool + inline bool cudnn_is_acceptable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::cudnn_is_acceptable::redispatch(dispatchKeySet, self); + } + + // aten::complex(Tensor real, Tensor imag) -> Tensor + inline at::Tensor complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag) { + return at::_ops::complex::redispatch(dispatchKeySet, real, imag); + } + + // aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & complex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & real, const at::Tensor & imag) { + return at::_ops::complex_out::redispatch(dispatchKeySet, real, imag, out); + } + + // aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & complex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & real, const at::Tensor & imag, at::Tensor & out) { + return at::_ops::complex_out::redispatch(dispatchKeySet, real, imag, out); + } + + // aten::polar(Tensor abs, Tensor angle) -> Tensor + inline at::Tensor polar(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle) { + return at::_ops::polar::redispatch(dispatchKeySet, abs, angle); + } + + // aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & polar_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & abs, const at::Tensor & angle) { + return at::_ops::polar_out::redispatch(dispatchKeySet, abs, angle, out); + } + + // aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) 
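complex and polar build a complex tensor from two real tensors, interpreting them as cartesian (real, imag) or polar (abs, angle) components respectively. A sketch against the public API (illustration only, not part of the generated header):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor re = at::ones({2});   // floating-point inputs are required
      at::Tensor im = at::zeros({2});

      at::Tensor z1 = at::complex(re, im);                  // 1 + 0i
      at::Tensor z2 = at::polar(/*abs=*/re, /*angle=*/im);  // r=1, theta=0 -> 1 + 0i

      return z1.equal(z2) ? 0 : 1;  // expected: 0
    }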
+ inline at::Tensor & polar_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & abs, const at::Tensor & angle, at::Tensor & out) {
+   return at::_ops::polar_out::redispatch(dispatchKeySet, abs, angle, out);
+ }
+
+ // aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor
+ inline at::Tensor constant_pad_nd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) {
+   return at::_ops::constant_pad_nd::redispatch(dispatchKeySet, self, pad, value);
+ }
+
+ // aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)
+ inline at::Tensor __dispatch_contiguous(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::MemoryFormat memory_format=MemoryFormat::Contiguous) {
+   return at::_ops::contiguous::redispatch(dispatchKeySet, self, memory_format);
+ }
+
+ // aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor
+ inline at::Tensor convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) {
+   return at::_ops::convolution::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+ }
+
+ // aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+   return at::_ops::convolution_backward::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRef(*bias_sizes)) : c10::nullopt, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+ }
+
+ // aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) {
+   return at::_ops::convolution_backward::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
+ }
+
+ // aten::convolution_overrideable(Tensor input, Tensor weight, Tensor?
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor + inline at::Tensor convolution_overrideable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); + } + + // aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + inline ::std::tuple convolution_backward_overrideable(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask) { + return at::_ops::convolution_backward_overrideable::redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + + // aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + inline at::Tensor _convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { + return at::_ops::_convolution::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32); + } + + // aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + inline at::Tensor _convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) { + return at::_ops::_convolution_deprecated::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); + } + + // aten::_convolution_mode(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor + inline at::Tensor _convolution_mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_convolution_mode::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple _convolution_double_backward(c10::DispatchKeySet dispatchKeySet, const c10::optional & ggI, const c10::optional & ggW, const c10::optional & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array output_mask) { + return at::_ops::_convolution_double_backward::redispatch(dispatchKeySet, ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask); + } + + // aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor + inline at::Tensor conv1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv1d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor + inline at::Tensor conv2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv2d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor + inline at::Tensor conv3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv3d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding="valid", int[1] dilation=1, int groups=1) -> Tensor + inline at::Tensor conv1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv1d_padding::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, str padding="valid", int[2] dilation=1, int groups=1) -> Tensor + inline at::Tensor conv2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv2d_padding::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding="valid", int[3] dilation=1, int groups=1) -> Tensor + inline at::Tensor conv3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::conv3d_padding::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, groups); + } + + // aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor + inline at::Tensor conv_tbc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) { + return at::_ops::conv_tbc::redispatch(dispatchKeySet, self, weight, bias, pad); + } + + // aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) + inline ::std::tuple conv_tbc_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) { + return at::_ops::conv_tbc_backward::redispatch(dispatchKeySet, self, input, weight, bias, pad); + } + + // aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor + inline at::Tensor conv_transpose1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) { + return at::_ops::conv_transpose1d::redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation); + } + + // aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor + inline at::Tensor conv_transpose2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) { + return at::_ops::conv_transpose2d_input::redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation); + } + + // aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor + inline at::Tensor conv_transpose3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, int64_t groups=1, at::IntArrayRef dilation=1) { + return at::_ops::conv_transpose3d_input::redispatch(dispatchKeySet, input, weight, bias, stride, padding, output_padding, groups, dilation); + } + + // aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor + inline at::Tensor copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + inline at::Tensor & copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor + inline at::Tensor _copy_from(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) { + return at::_ops::_copy_from::redispatch(dispatchKeySet, self, dst, non_blocking); + } + + // aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor + inline at::Tensor _copy_from_and_resize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst) { + return at::_ops::_copy_from_and_resize::redispatch(dispatchKeySet, self, dst); + } + + // aten::cos(Tensor self) -> Tensor + inline at::Tensor cos(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::cos::redispatch(dispatchKeySet, self); + } + + // aten::cos_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & cos_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::cos_::redispatch(dispatchKeySet, self); + } + + // aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cos_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cos_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cosh(Tensor self) -> Tensor + inline at::Tensor cosh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::cosh::redispatch(dispatchKeySet, self); + } + + // aten::cosh_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & cosh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::cosh_::redispatch(dispatchKeySet, self); + } + + // aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cosh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
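For the convolution family above, the Tensor? bias argument may be passed as an empty optional, and the int[] stride/padding arguments accept a single value applied to every spatial dim. A shape sketch with the public conv2d wrapper (illustration only, not part of the generated header):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor x = at::randn({1, 3, 8, 8});   // NCHW input
      at::Tensor w = at::randn({16, 3, 3, 3});  // OIHW weight

      // stride 1, padding 1 with a 3x3 kernel keeps the spatial size.
      at::Tensor y = at::conv2d(x, w, /*bias=*/{}, /*stride=*/1, /*padding=*/1);

      return y.sizes() == at::IntArrayRef({1, 16, 8, 8}) ? 0 : 1;  // expected: 0
    }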
+ inline at::Tensor & cosh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + inline at::Tensor cosine_embedding_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean) { + return at::_ops::cosine_embedding_loss::redispatch(dispatchKeySet, input1, input2, target, margin, reduction); + } + + // aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor + inline at::Tensor count_nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::count_nonzero_dim_IntList::redispatch(dispatchKeySet, self, dim); + } + + // aten::count_nonzero(Tensor self, int? dim=None) -> Tensor + inline at::Tensor count_nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dim=c10::nullopt) { + return at::_ops::count_nonzero::redispatch(dispatchKeySet, self, dim); + } + + // aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor + inline at::Tensor cov(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t correction=1, const c10::optional & fweights={}, const c10::optional & aweights={}) { + return at::_ops::cov::redispatch(dispatchKeySet, self, correction, fweights, aweights); + } + + // aten::corrcoef(Tensor self) -> Tensor + inline at::Tensor corrcoef(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::corrcoef::redispatch(dispatchKeySet, self); + } + + // aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid + inline at::Tensor cudnn_affine_grid_generator(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) { + return at::_ops::cudnn_affine_grid_generator::redispatch(dispatchKeySet, theta, N, C, H, W); + } + + // aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta + inline at::Tensor cudnn_affine_grid_generator_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) { + return at::_ops::cudnn_affine_grid_generator_backward::redispatch(dispatchKeySet, grad, N, C, H, W); + } + + // aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple cudnn_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::cudnn_batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); + } + + // aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor) + inline ::std::tuple cudnn_batch_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const at::Tensor & reserveSpace) { + return at::_ops::cudnn_batch_norm_backward::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace); + } + + // aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + inline at::Tensor cudnn_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution::redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + } + + // aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor + inline at::Tensor cudnn_convolution_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) { + return at::_ops::cudnn_convolution_transpose::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32); + } + + // aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor + inline at::Tensor _mps_convolution_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_mps_convolution_transpose::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups); + } + + // aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor) + inline ::std::tuple mps_convolution_transpose_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_transpose_backward::redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask); + } + + // aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + inline at::Tensor cudnn_convolution_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::cudnn_convolution_relu::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups); + } + + // aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + inline at::Tensor cudnn_convolution_add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::cudnn_convolution_add_relu::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups); + } + + // aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output + inline at::Tensor cudnn_grid_sampler(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid) { + return at::_ops::cudnn_grid_sampler::redispatch(dispatchKeySet, self, grid); + } + + // aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) + inline ::std::tuple cudnn_grid_sampler_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) { + return at::_ops::cudnn_grid_sampler_backward::redispatch(dispatchKeySet, self, grid, grad_output); + } + + // aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) + inline ::std::tuple cummax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::cummax::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) { + return at::_ops::cummax_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummax_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + inline ::std::tuple cummax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummax_dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummax_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) 
indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummax_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + inline void _cummax_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) { + return at::_ops::_cummax_helper::redispatch(dispatchKeySet, self, values, indices, dim); + } + + // aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) + inline ::std::tuple cummin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::cummin::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim) { + return at::_ops::cummin_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummin_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) + inline ::std::tuple cummin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummin_dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim) { + return at::_ops::cummin_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple cummin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::cummin_dimname_out::redispatch(dispatchKeySet, self, dim, values, indices); + } + + // aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> () + inline void _cummin_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) { + return at::_ops::_cummin_helper::redispatch(dispatchKeySet, self, values, indices, dim); + } + + // aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor + inline at::Tensor cummaxmin_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) { + return at::_ops::cummaxmin_backward::redispatch(dispatchKeySet, grad, input, indices, dim); + } + + // aten::cumprod(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor
+ inline at::Tensor cumprod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumprod::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
+ inline at::Tensor & cumprod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumprod_::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumprod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumprod_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumprod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+   return at::_ops::cumprod_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor cumprod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumprod_dimname::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
+ inline at::Tensor & cumprod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumprod__dimname::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumprod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumprod_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumprod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+   return at::_ops::cumprod_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor
+ inline at::Tensor cumprod_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
+   return at::_ops::cumprod_backward::redispatch(dispatchKeySet, grad, input, dim, output);
+ }
+
+ // aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor cumsum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumsum::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
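The ScalarType? dtype argument on cumprod/cumsum lets the scan accumulate in a wider type than the input. A sketch against the public API (illustration only, not part of the generated header):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::ones({4});  // float32

      // Accumulate in float64 regardless of the input dtype.
      at::Tensor s = at::cumsum(t, /*dim=*/0, /*dtype=*/at::kDouble);  // [1, 2, 3, 4]

      return s.scalar_type() == at::kDouble ? 0 : 1;  // expected: 0
    }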
+ inline at::Tensor & cumsum_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumsum_::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumsum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumsum_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumsum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+   return at::_ops::cumsum_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor cumsum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumsum_dimname::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
+ inline at::Tensor & cumsum_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumsum__dimname::redispatch(dispatchKeySet, self, dim, dtype);
+ }
+
+ // aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cumsum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::cumsum_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+ }
+
+ // aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
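Ops such as cummax/cummin above return (values, indices) as a ::std::tuple of two Tensors, which unpacks cleanly with C++17 structured bindings. A sketch against the public API (illustration only, not part of the generated header):

    #include <ATen/ATen.h>

    int main() {
      at::Tensor t = at::arange(6).reshape({2, 3});

      // Running maximum along dim 1, plus the index where each max occurred.
      auto [values, indices] = at::cummax(t, /*dim=*/1);

      return values.sizes() == t.sizes() ? 0 : 1;  // expected: 0
    }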
+ inline at::Tensor & cumsum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional dtype, at::Tensor & out) { + return at::_ops::cumsum_dimname_out::redispatch(dispatchKeySet, self, dim, dtype, out); + } + + // aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor + inline at::Tensor cumulative_trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) { + return at::_ops::cumulative_trapezoid_x::redispatch(dispatchKeySet, y, x, dim); + } + + // aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor + inline at::Tensor cumulative_trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) { + return at::_ops::cumulative_trapezoid_dx::redispatch(dispatchKeySet, y, dx, dim); + } + + // aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + inline at::Tensor ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false) { + return at::_ops::ctc_loss_IntList::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); + } + + // aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + inline at::Tensor ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, int64_t reduction=at::Reduction::Mean, bool zero_infinity=false) { + return at::_ops::ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); + } + + // aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + inline ::std::tuple _ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) { + return at::_ops::_ctc_loss::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); + } + + // aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + inline ::std::tuple _ctc_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false) { + return at::_ops::_ctc_loss_Tensor::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); + } + + // aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor + inline at::Tensor _ctc_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, 
at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) { + return at::_ops::_ctc_loss_backward::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); + } + + // aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor + inline at::Tensor _ctc_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) { + return at::_ops::_ctc_loss_backward_Tensor::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); + } + + // aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor + inline at::Tensor diag_embed(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) { + return at::_ops::diag_embed::redispatch(dispatchKeySet, self, offset, dim1, dim2); + } + + // aten::diagflat(Tensor self, int offset=0) -> Tensor + inline at::Tensor diagflat(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0) { + return at::_ops::diagflat::redispatch(dispatchKeySet, self, offset); + } + + // aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) + inline at::Tensor diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) { + return at::_ops::diagonal::redispatch(dispatchKeySet, self, offset, dim1, dim2); + } + + // aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a) + inline at::Tensor linalg_diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) { + return at::_ops::linalg_diagonal::redispatch(dispatchKeySet, A, offset, dim1, dim2); + } + + // aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) + inline at::Tensor diagonal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) { + return at::_ops::diagonal_Dimname::redispatch(dispatchKeySet, self, outdim, dim1, dim2, offset); + } + + // aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor + inline at::Tensor diagonal_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + return at::_ops::diagonal_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), offset, dim1, dim2); + } + + // aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor + inline at::Tensor diagonal_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) { + return at::_ops::diagonal_backward::redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2); + } + + // 
aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) + inline at::Tensor & fill_diagonal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & fill_value, bool wrap=false) { + return at::_ops::fill_diagonal_::redispatch(dispatchKeySet, self, fill_value, wrap); + } + + // aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor + inline at::Tensor diff(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional & prepend={}, const c10::optional & append={}) { + return at::_ops::diff::redispatch(dispatchKeySet, self, n, dim, prepend, append); + } + + // aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diff_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional & prepend={}, const c10::optional & append={}) { + return at::_ops::diff_out::redispatch(dispatchKeySet, self, n, dim, prepend, append, out); + } + + // aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diff_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append, at::Tensor & out) { + return at::_ops::diff_out::redispatch(dispatchKeySet, self, n, dim, prepend, append, out); + } + + // aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & spacing=c10::nullopt, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_scalarint::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_scalararray::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_array::redispatch(dispatchKeySet, self, dim, edge_order); + } + + // aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef spacing, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_scalarrayint::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] + inline ::std::vector gradient(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ArrayRef spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_scalarrayarray::redispatch(dispatchKeySet, self, spacing, dim, edge_order); + } + + // aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? 
+ + // aten::div.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::div_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::div__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::div_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::div_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::div_Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::div__Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::div_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out); + } + + // aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) { + return at::_ops::div_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out); + } + + // aten::div.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::div_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::div__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + inline at::Tensor div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::div_Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) + inline at::Tensor & div_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::div__Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + }
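The Tensor_mode/Scalar_mode overloads take the schema's str? rounding_mode as a c10::optional<c10::string_view>. A hedged sketch of what that selects, via the public at::div:

#include <ATen/ATen.h>

void div_rounding_example() {
  at::Tensor a = at::tensor({7.0});
  at::Tensor b = at::tensor({2.0});
  at::Tensor q  = at::div(a, b);           // true division -> 3.5
  at::Tensor qt = at::div(a, b, "trunc");  // round toward zero -> 3.0
  at::Tensor qf = at::div(a, b, "floor");  // round toward -inf -> 3.0
}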
+ + // aten::divide.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::divide_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::divide__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::divide.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::divide_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::divide__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor + inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::divide_Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) + inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::divide__Tensor_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::divide_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out); + } + + // aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) { + return at::_ops::divide_out_mode::redispatch(dispatchKeySet, self, other, rounding_mode, out); + } + + // aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor + inline at::Tensor divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::divide_Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) + inline at::Tensor & divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) { + return at::_ops::divide__Scalar_mode::redispatch(dispatchKeySet, self, other, rounding_mode); + } + + // aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor true_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::true_divide_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & true_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::true_divide__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & true_divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::true_divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & true_divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::true_divide_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor true_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::true_divide_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & true_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::true_divide__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::dot(Tensor self, Tensor tensor) -> Tensor + inline at::Tensor dot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) { + return at::_ops::dot::redispatch(dispatchKeySet, self, tensor); + } + + // aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & dot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor) { + return at::_ops::dot_out::redispatch(dispatchKeySet, self, tensor, out); + } + + // aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor, at::Tensor & out) { + return at::_ops::dot_out::redispatch(dispatchKeySet, self, tensor, out); + } + + // aten::vdot(Tensor self, Tensor other) -> Tensor + inline at::Tensor vdot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::vdot::redispatch(dispatchKeySet, self, other); + } + + // aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & vdot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::vdot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & vdot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::vdot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor + inline at::Tensor einsum(c10::DispatchKeySet dispatchKeySet, c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path=c10::nullopt) { + return at::_ops::einsum::redispatch(dispatchKeySet, equation, tensors, path); + }
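dot and vdot are 1-D inner products (vdot conjugates its first argument for complex inputs), while einsum takes a schema string and a TensorList. A hedged sketch via the public API (example function name is illustrative):

#include <ATen/ATen.h>

void dot_einsum_example() {
  at::Tensor u = at::rand({4});
  at::Tensor w = at::rand({4});
  at::Tensor s = at::dot(u, w);                  // 0-dim result tensor
  at::Tensor mm = at::einsum("ij,jk->ik",
      {at::rand({2, 3}), at::rand({3, 5})});     // matrix product, shape [2, 5]
}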
+ + // aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor + inline at::Tensor embedding(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) { + return at::_ops::embedding::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse); + } + + // aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor + inline at::Tensor embedding_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) { + return at::_ops::embedding_backward::redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse); + } + + // aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor + inline at::Tensor embedding_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) { + return at::_ops::embedding_backward::redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse); + } + + // aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + inline at::Tensor embedding_dense_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); + } + + // aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + inline at::Tensor embedding_dense_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_dense_backward::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); + } + + // aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) + inline at::Tensor & embedding_renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) { + return at::_ops::embedding_renorm_::redispatch(dispatchKeySet, self, indices, max_norm, norm_type); + } + + // aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + inline at::Tensor embedding_sparse_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { + return at::_ops::embedding_sparse_backward::redispatch(dispatchKeySet, grad, indices, num_weights, padding_idx, scale_grad_by_freq); + } + + // aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_forward_only::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + + // aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _rowwise_prune(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) { + return at::_ops::_rowwise_prune::redispatch(dispatchKeySet, weight, mask, compressed_indices_dtype); + } + + // aten::row_stack(Tensor[] tensors) -> Tensor + inline at::Tensor row_stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::row_stack::redispatch(dispatchKeySet, tensors); + } + + // aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & row_stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::row_stack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & row_stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::row_stack_out::redispatch(dispatchKeySet, tensors, out); + }
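embedding is a plain row lookup into a weight matrix; the backward variants above are what autograd redispatches to. A hedged sketch via the public API:

#include <ATen/ATen.h>

void embedding_example() {
  at::Tensor weight  = at::rand({10, 4});                 // 10 embeddings of dim 4
  at::Tensor indices = at::tensor({1, 3, 3}, at::kLong);
  at::Tensor rows    = at::embedding(weight, indices);    // shape [3, 4]
}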
+ + // aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false) { + return at::_ops::embedding_bag::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset); + } + + // aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) { + return at::_ops::embedding_bag_padding_idx::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + + // aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx); + } + + // aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_backward::redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_sparse_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_sparse_backward::redispatch(dispatchKeySet, grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_dense_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_dense_backward::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx); + } + + // aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor + inline at::Tensor _embedding_bag_per_sample_weights_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) { + return at::_ops::_embedding_bag_per_sample_weights_backward::redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx); + }
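embedding_bag reduces each bag of lookups (mode 0 sums) and returns a 4-tuple whose first element is the per-bag result; offsets marks where each bag starts. A hedged sketch via the public API:

#include <ATen/ATen.h>
#include <tuple>

void embedding_bag_example() {
  at::Tensor weight  = at::rand({10, 4});
  at::Tensor indices = at::tensor({1, 2, 4, 5}, at::kLong);
  at::Tensor offsets = at::tensor({0, 2}, at::kLong);   // two bags: {1,2} and {4,5}
  auto result = at::embedding_bag(weight, indices, offsets);
  at::Tensor bags = std::get<0>(result);                // shape [2, 4]
}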
+ + // aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_names::redispatch(dispatchKeySet, size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::empty_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::empty_memory_format::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, memory_format); + }
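Note the pattern the generator emits for every factory op: one overload packs dtype/layout/device/pin_memory into an at::TensorOptions, the other takes the four optionals unpacked exactly as in the schema. A hedged sketch of the packed form, via the public at::empty:

#include <ATen/ATen.h>

void empty_options_example() {
  at::Tensor t1 = at::empty({2, 3}, at::TensorOptions().dtype(at::kFloat));
  at::Tensor t2 = at::empty({2, 3}, at::kHalf);  // a ScalarType also converts to TensorOptions
}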
+ + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), dtype, layout, device, pin_memory); + } + + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor + inline at::Tensor new_empty_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_empty::redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), dtype, layout, device, pin_memory); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_empty_strided_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_empty_strided::redispatch(dispatchKeySet, self, size, stride, dtype, layout, device, pin_memory); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor + inline at::Tensor new_full(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_full(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), fill_value, dtype, layout, device, pin_memory); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_full_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_full_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_full::redispatch(dispatchKeySet, self, size, fill_value, dtype, layout, device, pin_memory); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_zeros(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_zeros(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), dtype, layout, device, pin_memory); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor + inline at::Tensor new_zeros_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_zeros_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_zeros::redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), dtype, layout, device, pin_memory); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor new_ones_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::new_ones::redispatch(dispatchKeySet, self, size, dtype, layout, device, pin_memory); + } + + // aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat?
memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_affine_quantized::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_empty_affine_quantized::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory, scale, zero_point, memory_format); + } + + // aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_per_channel_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) { + return at::_ops::_empty_per_channel_affine_quantized::redispatch(dispatchKeySet, size, scales, zero_points, axis, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor + inline at::Tensor _empty_per_channel_affine_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_empty_per_channel_affine_quantized::redispatch(dispatchKeySet, size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format); + } + + // aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) + inline const at::Tensor & resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::resize_::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), memory_format); + } + + // aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+ inline const at::Tensor & resize__symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::resize_::redispatch(dispatchKeySet, self, size, memory_format); + } + + // aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!) + inline const at::Tensor & _resize_output_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device) { + return at::_ops::_resize_output_::redispatch(dispatchKeySet, self, size, device); + } + + // aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_quantized::redispatch(dispatchKeySet, size, qtensor, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_quantized(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::empty_quantized::redispatch(dispatchKeySet, size, qtensor, dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), memory_format, out); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { + return at::_ops::empty_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), memory_format, out); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_out::redispatch(dispatchKeySet, size, memory_format, out); + } + + // aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & empty_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { + return at::_ops::empty_out::redispatch(dispatchKeySet, size, memory_format, out); + }
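The generator's naming convention is visible throughout: the _out variant takes out as the first argument with trailing defaults, while the _outf variant keeps the schema's order with out last and no defaults. A hedged sketch of both spellings against the public at:: API (behavior assumed: empty.out resizes the destination):

#include <ATen/ATen.h>

void out_variant_example() {
  at::Tensor out = at::empty({0});
  at::empty_out(out, {2, 3});              // out-first spelling; resizes out to [2, 3]
  at::empty_outf({2, 3}, c10::nullopt, out); // schema-order spelling, out last
}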
+ + // aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::empty_like::redispatch(dispatchKeySet, self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor empty_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::empty_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), dtype, layout, device, pin_memory); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor empty_strided_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::empty_strided::redispatch(dispatchKeySet, size, stride, dtype, layout, device, pin_memory); + } + + // aten::erf(Tensor self) -> Tensor + inline at::Tensor erf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::erf::redispatch(dispatchKeySet, self); + } + + // aten::erf_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & erf_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::erf_::redispatch(dispatchKeySet, self); + } + + // aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & erf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erfc(Tensor self) -> Tensor + inline at::Tensor erfc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::erfc::redispatch(dispatchKeySet, self); + } + + // aten::erfc_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & erfc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::erfc_::redispatch(dispatchKeySet, self); + } + + // aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp(Tensor self) -> Tensor + inline at::Tensor exp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::exp::redispatch(dispatchKeySet, self); + } + + // aten::exp_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & exp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::exp_::redispatch(dispatchKeySet, self); + } + + // aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp2(Tensor self) -> Tensor + inline at::Tensor exp2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::exp2::redispatch(dispatchKeySet, self); + } + + // aten::exp2_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & exp2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::exp2_::redispatch(dispatchKeySet, self); + } + + // aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & exp2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::expm1(Tensor self) -> Tensor + inline at::Tensor expm1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::expm1::redispatch(dispatchKeySet, self); + } + + // aten::expm1_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & expm1_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::expm1_::redispatch(dispatchKeySet, self); + } + + // aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & expm1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & expm1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::expm1_out::redispatch(dispatchKeySet, self, out); + }
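Each elementwise unary op above follows the same generated triple: a functional form, a trailing-underscore in-place form, and an out= form. A hedged sketch of the three spellings via the public API:

#include <ATen/ATen.h>

void unary_op_example() {
  at::Tensor x = at::rand({4});
  at::Tensor y = at::exp(x);       // functional: returns a new tensor
  x.exp_();                        // in-place method: mutates x
  at::Tensor out = at::empty({4});
  at::exp_out(out, x);             // out= variant: writes into out
}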
+ + // aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) + inline at::Tensor expand(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) { + return at::_ops::expand::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), implicit); + } + + // aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) + inline at::Tensor expand_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) { + return at::_ops::expand::redispatch(dispatchKeySet, self, size, implicit); + } + + // aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a) + inline at::Tensor expand_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::expand_as::redispatch(dispatchKeySet, self, other); + } + + // aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, at::TensorOptions options={}) { + return at::_ops::eye::redispatch(dispatchKeySet, n, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::eye::redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory); + } + + // aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, int64_t m, at::TensorOptions options={}) { + return at::_ops::eye_m::redispatch(dispatchKeySet, n, m, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor eye(c10::DispatchKeySet dispatchKeySet, int64_t n, int64_t m, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::eye_m::redispatch(dispatchKeySet, n, m, dtype, layout, device, pin_memory); + } + + // aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n) { + return at::_ops::eye_out::redispatch(dispatchKeySet, n, out); + } + + // aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, at::Tensor & out) { + return at::_ops::eye_out::redispatch(dispatchKeySet, n, out); + } + + // aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, int64_t m) { + return at::_ops::eye_m_out::redispatch(dispatchKeySet, n, m, out); + } + + // aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eye_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, int64_t m, at::Tensor & out) { + return at::_ops::eye_m_out::redispatch(dispatchKeySet, n, m, out); + }
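expand returns a broadcasting view (note the Tensor(a) alias annotation: no copy, stride 0 along expanded dims), while eye is a plain factory. A hedged sketch via the public API:

#include <ATen/ATen.h>

void expand_eye_example() {
  at::Tensor id = at::eye(3);              // 3x3 identity matrix
  at::Tensor col = at::rand({3, 1});
  at::Tensor wide = col.expand({3, 4});    // view; no data is copied
}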
+ + // aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim=0, int64_t end_dim=-1) { + return at::_ops::flatten_using_ints::redispatch(dispatchKeySet, self, start_dim, end_dim); + } + + // aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) { + return at::_ops::flatten_named_out_dim::redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim); + } + + // aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) { + return at::_ops::flatten_using_names::redispatch(dispatchKeySet, self, start_dim, end_dim, out_dim); + } + + // aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) + inline at::Tensor flatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) { + return at::_ops::flatten_DimnameList::redispatch(dispatchKeySet, self, dims, out_dim); + } + + // aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a) + inline at::Tensor unflatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::IntArrayRef sizes) { + return at::_ops::unflatten_int::redispatch(dispatchKeySet, self, dim, sizes); + } + + // aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) + inline at::Tensor unflatten(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) { + return at::_ops::unflatten_Dimname::redispatch(dispatchKeySet, self, dim, sizes, names); + } + + // aten::fill.Scalar(Tensor self, Scalar value) -> Tensor + inline at::Tensor fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill_Scalar::redispatch(dispatchKeySet, self, value); + } + + // aten::fill.Tensor(Tensor self, Tensor value) -> Tensor + inline at::Tensor fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill_Tensor::redispatch(dispatchKeySet, self, value); + } + + // aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) + inline at::Tensor & fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & value) { + return at::_ops::fill__Scalar::redispatch(dispatchKeySet, self, value); + } + + // aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) + inline at::Tensor & fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value) { + return at::_ops::fill__Tensor::redispatch(dispatchKeySet, self, value); + }
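flatten collapses a contiguous range of dims into one; unflatten splits a dim back out into a given shape. A hedged sketch (the unflatten call is shown as the Tensor method, since the free-function spelling varies across PyTorch versions):

#include <ATen/ATen.h>

void flatten_unflatten_example() {
  at::Tensor t = at::rand({2, 3, 4});
  at::Tensor flat = at::flatten(t, /*start_dim=*/1);   // shape [2, 12]
  at::Tensor back = flat.unflatten(1, {3, 4});         // shape [2, 3, 4]
}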
+    inline at::Tensor & fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & value) {
+        return at::_ops::fill__Tensor::redispatch(dispatchKeySet, self, value);
+    }
+
+    // aten::floor(Tensor self) -> Tensor
+    inline at::Tensor floor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::floor::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::floor_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & floor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::floor_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & floor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::floor_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & floor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::floor_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::floor_divide(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor floor_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::floor_divide::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+    inline at::Tensor & floor_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::floor_divide__Tensor::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & floor_divide_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::floor_divide_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & floor_divide_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::floor_divide_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
+    inline at::Tensor floor_divide(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::floor_divide_Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+    inline at::Tensor & floor_divide_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::floor_divide__Scalar::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::frac(Tensor self) -> Tensor
+    inline at::Tensor frac(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::frac::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::frac_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & frac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::frac_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & frac_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::frac_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & frac_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::frac_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+        return at::_ops::full_names::redispatch(dispatchKeySet, size, fill_value, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::full_names::redispatch(dispatchKeySet, size, fill_value, names, dtype, layout, device, pin_memory);
+    }
+
+    // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+        return at::_ops::full::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor full(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::full::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), fill_value, dtype, layout, device, pin_memory);
+    }
+
+    // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor full_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+        return at::_ops::full::redispatch(dispatchKeySet, size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor full_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::full::redispatch(dispatchKeySet, size, fill_value, dtype, layout, device, pin_memory);
+    }
+
+    // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
+        return at::_ops::full_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), fill_value, out);
+    }
+
+    // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+        return at::_ops::full_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), fill_value, out);
+    }
+
+    // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+        return at::_ops::full_out::redispatch(dispatchKeySet, size, fill_value, out);
+    }
+
+    // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+        return at::_ops::full_out::redispatch(dispatchKeySet, size, fill_value, out);
+    }
+
+    // aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor full_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::full_like::redispatch(dispatchKeySet, self, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+    }
+
+    // aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor full_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+        return at::_ops::full_like::redispatch(dispatchKeySet, self, fill_value, dtype, layout, device, pin_memory, memory_format);
+    }
+
+    // aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor from_file(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0, at::TensorOptions options={}) {
+        return at::_ops::from_file::redispatch(dispatchKeySet, filename, shared, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor from_file(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::from_file::redispatch(dispatchKeySet, filename, shared, size, dtype, layout, device, pin_memory);
+    }
+
+    // aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & gcd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::gcd_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & gcd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::gcd_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::gcd(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor gcd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::gcd::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+    inline at::Tensor & gcd_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::gcd_::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lcm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::lcm_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lcm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::lcm_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::lcm(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor lcm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::lcm::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+    inline at::Tensor & lcm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::lcm_::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
+    inline at::Tensor grid_sampler(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return at::_ops::grid_sampler::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
+    }
+
+    // aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
+    inline at::Tensor grid_sampler_2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return at::_ops::grid_sampler_2d::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
+    }
+
+    // aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
+        return at::_ops::grid_sampler_2d_backward::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
+    }
+
+    // aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
+    inline at::Tensor _grid_sampler_2d_cpu_fallback(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return at::_ops::_grid_sampler_2d_cpu_fallback::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
+    }
+
+    // aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return at::_ops::_grid_sampler_2d_cpu_fallback_backward::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
+    }
+
+    // aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor
+    inline at::Tensor grid_sampler_3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return at::_ops::grid_sampler_3d::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners);
+    }
+
+    // aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
+        return at::_ops::grid_sampler_3d_backward::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
+    }
+
+    // aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) {
+        return at::_ops::hann_window::redispatch(dispatchKeySet, window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::hann_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
+    }
+
+    // aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) {
+        return at::_ops::hann_window_periodic::redispatch(dispatchKeySet, window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hann_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::hann_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
+    }
+
+    // aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) {
+        return at::_ops::hamming_window::redispatch(dispatchKeySet, window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::hamming_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
+    }
+
+    // aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) {
+        return at::_ops::hamming_window_periodic::redispatch(dispatchKeySet, window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::hamming_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
+    }
+
+    // aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::TensorOptions options={}) {
+        return at::_ops::hamming_window_periodic_alpha::redispatch(dispatchKeySet, window_length, periodic, alpha, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::hamming_window_periodic_alpha::redispatch(dispatchKeySet, window_length, periodic, alpha, dtype, layout, device, pin_memory);
+    }
+
+    // aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options={}) {
+        return at::_ops::hamming_window_periodic_alpha_beta::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor hamming_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::hamming_window_periodic_alpha_beta::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, dtype, layout, device, pin_memory);
+    }
+
+    // aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::TensorOptions options={}) {
+        return at::_ops::kaiser_window::redispatch(dispatchKeySet, window_length, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::kaiser_window::redispatch(dispatchKeySet, window_length, dtype, layout, device, pin_memory);
+    }
+
+    // aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::TensorOptions options={}) {
+        return at::_ops::kaiser_window_periodic::redispatch(dispatchKeySet, window_length, periodic, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::kaiser_window_periodic::redispatch(dispatchKeySet, window_length, periodic, dtype, layout, device, pin_memory);
+    }
+
+    // aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) {
+        return at::_ops::kaiser_window_beta::redispatch(dispatchKeySet, window_length, periodic, beta, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor kaiser_window(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::kaiser_window_beta::redispatch(dispatchKeySet, window_length, periodic, beta, dtype, layout, device, pin_memory);
+    }
+
+    // aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor
+    inline at::Tensor hinge_embedding_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::hinge_embedding_loss::redispatch(dispatchKeySet, self, target, margin, reduction);
+    }
+
+    // aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor
+    inline at::Tensor group_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enabled=true) {
+        return at::_ops::group_norm::redispatch(dispatchKeySet, input, num_groups, weight, bias, eps, cudnn_enabled);
+    }
+
+    // aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
+        return at::_ops::native_group_norm::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps);
+    }
+
+    // aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
+        return at::_ops::native_group_norm::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps);
+    }
+
+    // aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
+        return at::_ops::native_group_norm_backward::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
+    }
+
+    // aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
+        return at::_ops::native_group_norm_backward::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
+    }
+
+    // aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor
+    inline at::Tensor _fft_r2c(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
+        return at::_ops::_fft_r2c::redispatch(dispatchKeySet, self, dim, normalization, onesided);
+    }
+
+    // aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fft_r2c_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
+        return at::_ops::_fft_r2c_out::redispatch(dispatchKeySet, self, dim, normalization, onesided, out);
+    }
+
+    // aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fft_r2c_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out) {
+        return at::_ops::_fft_r2c_out::redispatch(dispatchKeySet, self, dim, normalization, onesided, out);
+    }
+
+    // aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor
+    inline at::Tensor _fft_c2r(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
+        return at::_ops::_fft_c2r::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size);
+    }
+
+    // aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fft_c2r_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size) {
+        return at::_ops::_fft_c2r_out::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out);
+    }
+
+    // aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fft_c2r_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, int64_t last_dim_size, at::Tensor & out) {
+        return at::_ops::_fft_c2r_out::redispatch(dispatchKeySet, self, dim, normalization, last_dim_size, out);
+    }
+
+    // aten::_fft_c2c(Tensor self, int[] dim, int normalization, bool forward) -> Tensor
+    inline at::Tensor _fft_c2c(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
+        return at::_ops::_fft_c2c::redispatch(dispatchKeySet, self, dim, normalization, forward);
+    }
+
+    // aten::_fft_c2c.out(Tensor self, int[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fft_c2c_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward) {
+        return at::_ops::_fft_c2c_out::redispatch(dispatchKeySet, self, dim, normalization, forward, out);
+    }
+
+    // aten::_fft_c2c.out(Tensor self, int[] dim, int normalization, bool forward, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fft_c2c_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out) {
+        return at::_ops::_fft_c2c_out::redispatch(dispatchKeySet, self, dim, normalization, forward, out);
+    }
+
+    // aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()
+    inline void _validate_compressed_sparse_indices(c10::DispatchKeySet dispatchKeySet, bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
+        return at::_ops::_validate_compressed_sparse_indices::redispatch(dispatchKeySet, is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
+    }
+
+    // aten::_cufft_get_plan_cache_size(int device_index) -> int
+    inline int64_t _cufft_get_plan_cache_size(c10::DispatchKeySet dispatchKeySet, int64_t device_index) {
+        return at::_ops::_cufft_get_plan_cache_size::redispatch(dispatchKeySet, device_index);
+    }
+
+    // aten::_cufft_get_plan_cache_max_size(int device_index) -> int
+    inline int64_t _cufft_get_plan_cache_max_size(c10::DispatchKeySet dispatchKeySet, int64_t device_index) {
+        return at::_ops::_cufft_get_plan_cache_max_size::redispatch(dispatchKeySet, device_index);
+    }
+
+    // aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()
+    inline void _cufft_set_plan_cache_max_size(c10::DispatchKeySet dispatchKeySet, int64_t device_index, int64_t max_size) {
+        return at::_ops::_cufft_set_plan_cache_max_size::redispatch(dispatchKeySet, device_index, max_size);
+    }
+
+    // aten::_cufft_clear_plan_cache(int device_index) -> ()
+    inline void _cufft_clear_plan_cache(c10::DispatchKeySet dispatchKeySet, int64_t device_index) {
+        return at::_ops::_cufft_clear_plan_cache::redispatch(dispatchKeySet, device_index);
+    }
+
+    // aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
+    inline at::Tensor index(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
+        return at::_ops::index_Tensor::redispatch(dispatchKeySet, self, indices);
+    }
+
+    // aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
+        return at::_ops::index_Tensor_out::redispatch(dispatchKeySet, self, indices, out);
+    }
+
+    // aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, at::Tensor & out) {
+        return at::_ops::index_Tensor_out::redispatch(dispatchKeySet, self, indices, out);
+    }
+
+    // aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
+        return at::_ops::index_copy_out::redispatch(dispatchKeySet, self, dim, index, source, out);
+    }
+
+    // aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, at::Tensor & out) {
+        return at::_ops::index_copy_out::redispatch(dispatchKeySet, self, dim, index, source, out);
+    }
+
+    // aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
+    inline at::Tensor & index_copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
+        return at::_ops::index_copy_::redispatch(dispatchKeySet, self, dim, index, source);
+    }
+
+    // aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
+    inline at::Tensor index_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
+        return at::_ops::index_copy::redispatch(dispatchKeySet, self, dim, index, source);
+    }
+
+    // aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
+    inline at::Tensor & index_copy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
+        return at::_ops::index_copy__dimname::redispatch(dispatchKeySet, self, dim, index, source);
+    }
+
+    // aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
+    inline at::Tensor index_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
+        return at::_ops::index_copy_dimname::redispatch(dispatchKeySet, self, dim, index, source);
+    }
+
+    // aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
+    inline at::Tensor & index_put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
+        return at::_ops::index_put_::redispatch(dispatchKeySet, self, indices, values, accumulate);
+    }
+
+    // aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
+    inline at::Tensor index_put(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
+        return at::_ops::index_put::redispatch(dispatchKeySet, self, indices, values, accumulate);
+    }
+
+    // aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)
+    inline at::Tensor & _index_put_impl_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+        return at::_ops::_index_put_impl_::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe);
+    }
+
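    // [Editor's note] The `Tensor?[] indices` argument in the index/index_put
    // entries above lowers to c10::List<c10::optional<at::Tensor>> in C++, so a
    // skipped dimension is passed as an empty optional rather than omitted. A
    // hypothetical sketch (tensor names invented; the at::redispatch namespace
    // and a DispatchKeySet `ks` assumed as in upstream PyTorch):
    //
    //   c10::List<c10::optional<at::Tensor>> indices;
    //   indices.push_back(c10::nullopt);                    // leave dim 0 unindexed
    //   indices.push_back(c10::optional<at::Tensor>(idx));  // index dim 1 with `idx`
    //   at::redispatch::index_put_(ks, self, indices, values, /*accumulate=*/false);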
+    // aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor
+    inline at::Tensor instance_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
+        return at::_ops::instance_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
+    }
+
+    // aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
+    inline at::Tensor isclose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) {
+        return at::_ops::isclose::redispatch(dispatchKeySet, self, other, rtol, atol, equal_nan);
+    }
+
+    // aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
+        return at::_ops::isin_Tensor_Tensor_out::redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert, out);
+    }
+
+    // aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
+        return at::_ops::isin_Tensor_Tensor_out::redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert, out);
+    }
+
+    // aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
+    inline at::Tensor isin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
+        return at::_ops::isin_Tensor_Tensor::redispatch(dispatchKeySet, elements, test_elements, assume_unique, invert);
+    }
+
+    // aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) {
+        return at::_ops::isin_Tensor_Scalar_out::redispatch(dispatchKeySet, elements, test_element, assume_unique, invert, out);
+    }
+
+    // aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert, at::Tensor & out) {
+        return at::_ops::isin_Tensor_Scalar_out::redispatch(dispatchKeySet, elements, test_element, assume_unique, invert, out);
+    }
+
+    // aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor
+    inline at::Tensor isin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique=false, bool invert=false) {
+        return at::_ops::isin_Tensor_Scalar::redispatch(dispatchKeySet, elements, test_element, assume_unique, invert);
+    }
+
+    // aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
+        return at::_ops::isin_Scalar_Tensor_out::redispatch(dispatchKeySet, element, test_elements, assume_unique, invert, out);
+    }
+
+    // aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isin_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert, at::Tensor & out) {
+        return at::_ops::isin_Scalar_Tensor_out::redispatch(dispatchKeySet, element, test_elements, assume_unique, invert, out);
+    }
+
+    // aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
+    inline at::Tensor isin(c10::DispatchKeySet dispatchKeySet, const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique=false, bool invert=false) {
+        return at::_ops::isin_Scalar_Tensor::redispatch(dispatchKeySet, element, test_elements, assume_unique, invert);
+    }
+
+    // aten::isnan(Tensor self) -> Tensor
+    inline at::Tensor isnan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::isnan::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_distributed(Tensor self) -> bool
+    inline bool is_distributed(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_distributed::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_floating_point(Tensor self) -> bool
+    inline bool __dispatch_is_floating_point(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_floating_point::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_complex(Tensor self) -> bool
+    inline bool __dispatch_is_complex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_complex::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_conj(Tensor self) -> bool
+    inline bool __dispatch_is_conj(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_conj::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_is_zerotensor(Tensor self) -> bool
+    inline bool __dispatch__is_zerotensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_is_zerotensor::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_neg(Tensor self) -> bool
+    inline bool __dispatch_is_neg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_neg::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::isreal(Tensor self) -> Tensor
+    inline at::Tensor isreal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::isreal::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_nonzero(Tensor self) -> bool
+    inline bool is_nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_nonzero::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_same_size(Tensor self, Tensor other) -> bool
+    inline bool is_same_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::is_same_size::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::is_signed(Tensor self) -> bool
+    inline bool __dispatch_is_signed(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_signed::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_inference(Tensor self) -> bool
+    inline bool __dispatch_is_inference(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_inference::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor
+    inline at::Tensor kl_div(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false) {
+        return at::_ops::kl_div::redispatch(dispatchKeySet, self, target, reduction, log_target);
+    }
+
+    // aten::kron(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor kron(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::kron::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & kron_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::kron_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & kron_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::kron_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> kthvalue(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
+        return at::_ops::kthvalue::redispatch(dispatchKeySet, self, k, dim, keepdim);
+    }
+
+    // aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false) {
+        return at::_ops::kthvalue_values::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
+    }
+
+    // aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+        return at::_ops::kthvalue_values::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
+    }
+
+    // aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> kthvalue(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::kthvalue_dimname::redispatch(dispatchKeySet, self, k, dim, keepdim);
+    }
+
+    // aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::kthvalue_dimname_out::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
+    }
+
+    // aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+        return at::_ops::kthvalue_dimname_out::redispatch(dispatchKeySet, self, k, dim, keepdim, values, indices);
+    }
+
+    // aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
+    inline at::Tensor layer_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
+        return at::_ops::layer_norm::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, cudnn_enable);
+    }
+
+    // aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
+        return at::_ops::native_layer_norm::redispatch(dispatchKeySet, input, c10::fromIntArrayRef(normalized_shape), weight, bias, eps);
+    }
+
+    // aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
+        return at::_ops::native_layer_norm::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps);
+    }
+
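    // [Editor's note] For ops whose schema takes SymInt[] (native_layer_norm
    // above, expand/full earlier), the generator emits a plain-int wrapper that
    // converts through c10::fromIntArrayRef before redispatching, plus a
    // `_symint` twin that forwards c10::SymIntArrayRef untouched so symbolic
    // shapes survive tracing. A hypothetical call, assuming the at::redispatch
    // namespace and a DispatchKeySet `ks` as in upstream PyTorch:
    //
    //   auto outs = at::redispatch::native_layer_norm(ks, input, {768}, weight, bias, 1e-5);
    //   at::Tensor y = std::get<0>(outs);  // (output, mean, rstd) tuple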
+    // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+        return at::_ops::native_layer_norm_backward::redispatch(dispatchKeySet, grad_out, input, c10::fromIntArrayRef(normalized_shape), mean, rstd, weight, bias, output_mask);
+    }
+
+    // aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+        return at::_ops::native_layer_norm_backward::redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
+    }
+
+    // aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
+    inline at::Tensor nan_to_num(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) {
+        return at::_ops::nan_to_num::redispatch(dispatchKeySet, self, nan, posinf, neginf);
+    }
+
+    // aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
+    inline at::Tensor & nan_to_num_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) {
+        return at::_ops::nan_to_num_::redispatch(dispatchKeySet, self, nan, posinf, neginf);
+    }
+
+    // aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nan_to_num_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<double> nan=c10::nullopt, c10::optional<double> posinf=c10::nullopt, c10::optional<double> neginf=c10::nullopt) {
+        return at::_ops::nan_to_num_out::redispatch(dispatchKeySet, self, nan, posinf, neginf, out);
+    }
+
+    // aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nan_to_num_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, at::Tensor & out) {
+        return at::_ops::nan_to_num_out::redispatch(dispatchKeySet, self, nan, posinf, neginf, out);
+    }
+
+    // aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor
+    inline at::Tensor linear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
+        return at::_ops::linear::redispatch(dispatchKeySet, input, weight, bias);
+    }
+
+    // aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
+        return at::_ops::linear_backward::redispatch(dispatchKeySet, self, grad_output, weight, output_mask);
+    }
+
+    // aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linear_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
+        return at::_ops::linear_out::redispatch(dispatchKeySet, input, weight, bias, out);
+    }
+
+    // aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
+        return at::_ops::linear_out::redispatch(dispatchKeySet, input, weight, bias, out);
+    }
+
+    // aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor
+    inline at::Tensor mkldnn_linear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
+        return at::_ops::mkldnn_linear::redispatch(dispatchKeySet, self, weight, bias);
+    }
+
+    // aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor
+    inline at::Tensor mkldnn_linear_backward_input(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
+        return at::_ops::mkldnn_linear_backward_input::redispatch(dispatchKeySet, input_size, grad_output, weight);
+    }
+
+    // aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
+        return at::_ops::mkldnn_linear_backward_weights::redispatch(dispatchKeySet, grad_output, input, weight, bias_defined);
+    }
+
+    // aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
+        return at::_ops::mkldnn_linear_backward::redispatch(dispatchKeySet, self, grad_output, weight, output_mask);
+    }
+
+    // aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
at::_ops::fbgemm_linear_int8_weight_fp32_activation::redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + + // aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + inline at::Tensor fbgemm_linear_int8_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_int8_weight::redispatch(dispatchKeySet, input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); + } + + // aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) + inline ::std::tuple fbgemm_linear_quantize_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) { + return at::_ops::fbgemm_linear_quantize_weight::redispatch(dispatchKeySet, input); + } + + // aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor + inline at::Tensor fbgemm_pack_gemm_matrix_fp16(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) { + return at::_ops::fbgemm_pack_gemm_matrix_fp16::redispatch(dispatchKeySet, input); + } + + // aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + inline at::Tensor fbgemm_linear_fp16_weight_fp32_activation(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::redispatch(dispatchKeySet, input, packed_weight, bias); + } + + // aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + inline at::Tensor fbgemm_linear_fp16_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) { + return at::_ops::fbgemm_linear_fp16_weight::redispatch(dispatchKeySet, input, packed_weight, bias); + } + + // aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor + inline at::Tensor fbgemm_pack_quantized_matrix(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input) { + return at::_ops::fbgemm_pack_quantized_matrix::redispatch(dispatchKeySet, input); + } + + // aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor + inline at::Tensor fbgemm_pack_quantized_matrix(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t K, int64_t N) { + return at::_ops::fbgemm_pack_quantized_matrix_KN::redispatch(dispatchKeySet, input, K, N); + } + + // aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor ldexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & ldexp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::ldexp_::redispatch(dispatchKeySet, self, other); + } + + // aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
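A short usage sketch (editorial, not part of the committed file): every wrapper in this header takes an explicit c10::DispatchKeySet and forwards to the matching at::_ops::<op>::redispatch entry, so a kernel that has already consumed one dispatch key can continue dispatch below it. This assumes the stock PyTorch layout where the generated wrappers live in the at::redispatch namespace and c10::after_autograd_keyset is available.

    #include <ATen/ATen.h>
    #include <ATen/RedispatchFunctions.h>

    // Clamp NaN/Inf while letting dispatch continue below the autograd keys.
    at::Tensor sanitize(c10::DispatchKeySet ks, const at::Tensor & t) {
      return at::redispatch::nan_to_num(ks & c10::after_autograd_keyset, t,
                                        /*nan=*/0.0, /*posinf=*/c10::nullopt,
                                        /*neginf=*/c10::nullopt);
    }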
+    // aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & ldexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::ldexp_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & ldexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::ldexp_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) {
+        return at::_ops::linspace::redispatch(dispatchKeySet, start, end, steps, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor linspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::linspace::redispatch(dispatchKeySet, start, end, steps, dtype, layout, device, pin_memory);
+    }
+
+    // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) {
+        return at::_ops::linspace_out::redispatch(dispatchKeySet, start, end, steps, out);
+    }
+
+    // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
+        return at::_ops::linspace_out::redispatch(dispatchKeySet, start, end, steps, out);
+    }
+
+    // aten::log(Tensor self) -> Tensor
+    inline at::Tensor log(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::log::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::log_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & log_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::log_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::log_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::log_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::log10(Tensor self) -> Tensor
+    inline at::Tensor log10(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::log10::redispatch(dispatchKeySet, self);
+    }
+
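A sketch of the two linspace entry points above (editorial; assumes the at::redispatch namespace of stock PyTorch). The at::TensorOptions overload unpacks into the same four optionals that the exploded overload takes directly, and both end at the same at::_ops::linspace op.

    #include <ATen/RedispatchFunctions.h>

    at::Tensor make_grid(c10::DispatchKeySet ks) {
      // TensorOptions form: options are unpacked via optTypeMetaToScalarType etc.
      at::Tensor a = at::redispatch::linspace(ks, 0.0, 1.0, 50,
                                              at::TensorOptions().dtype(at::kFloat));
      // Exploded form: pass dtype/layout/device/pin_memory individually.
      at::Tensor b = at::redispatch::linspace(ks, 0.0, 1.0, 50, at::kFloat,
                                              c10::nullopt, c10::nullopt, c10::nullopt);
      return b;  // a and b are produced by the same underlying op
    }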
+    // aten::log10_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & log10_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::log10_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log10_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::log10_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log10_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::log10_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::log1p(Tensor self) -> Tensor
+    inline at::Tensor log1p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::log1p::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::log1p_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & log1p_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::log1p_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log1p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::log1p_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log1p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::log1p_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::log2(Tensor self) -> Tensor
+    inline at::Tensor log2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::log2::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::log2_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & log2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::log2_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::log2_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::log2_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logaddexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logaddexp_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logaddexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::logaddexp_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::logaddexp(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor logaddexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logaddexp::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logaddexp2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logaddexp2_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logaddexp2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::logaddexp2_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::logaddexp2(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor logaddexp2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::logaddexp2::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::xlogy_Tensor::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor
+    inline at::Tensor xlogy(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::xlogy_Scalar_Self::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
+    inline at::Tensor xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::xlogy_Scalar_Other::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+    inline at::Tensor & xlogy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::xlogy__Tensor::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
+    inline at::Tensor & xlogy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::xlogy__Scalar_Other::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::xlogy_OutTensor::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::xlogy_OutTensor::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::xlogy_OutScalar_Self::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::xlogy_OutScalar_Self::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::xlogy_OutScalar_Other::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+        return at::_ops::xlogy_OutScalar_Other::redispatch(dispatchKeySet, self, other, out);
+    }
+
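The xlogy family above shows how schema overloads (Tensor, Scalar_Self, Scalar_Other) collapse onto one C++ name and are resolved by argument types. A minimal sketch (editorial, assuming the at::redispatch namespace):

    #include <ATen/RedispatchFunctions.h>

    void xlogy_overloads(c10::DispatchKeySet ks, const at::Tensor & x, const at::Tensor & y) {
      at::Tensor a = at::redispatch::xlogy(ks, x, y);    // xlogy.Tensor
      at::Tensor b = at::redispatch::xlogy(ks, 2.0, y);  // xlogy.Scalar_Self
      at::Tensor c = at::redispatch::xlogy(ks, x, 2.0);  // xlogy.Scalar_Other
      (void)a; (void)b; (void)c;
    }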
+    // aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0, at::TensorOptions options={}) {
+        return at::_ops::logspace::redispatch(dispatchKeySet, start, end, steps, base, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor logspace(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::logspace::redispatch(dispatchKeySet, start, end, steps, base, dtype, layout, device, pin_memory);
+    }
+
+    // aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logspace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base=10.0) {
+        return at::_ops::logspace_out::redispatch(dispatchKeySet, start, end, steps, base, out);
+    }
+
+    // aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logspace_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, int64_t steps, double base, at::Tensor & out) {
+        return at::_ops::logspace_out::redispatch(dispatchKeySet, start, end, steps, base, out);
+    }
+
+    // aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::log_softmax_int::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::log_softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+    }
+
+    // aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::log_softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+    }
+
+    // aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::log_softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+    inline at::Tensor _log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
+        return at::_ops::_log_softmax::redispatch(dispatchKeySet, self, dim, half_to_float);
+    }
+
+    // aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _log_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
+        return at::_ops::_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out);
+    }
+
+    // aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _log_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
+        return at::_ops::_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out);
+    }
+
+    // aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
+    inline at::Tensor _log_softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
+        return at::_ops::_log_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype);
+    }
+
+    // aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _log_softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
+        return at::_ops::_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, out);
+    }
+
+    // aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _log_softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
+        return at::_ops::_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, out);
+    }
+
+    // aten::_logcumsumexp(Tensor self, int dim) -> Tensor
+    inline at::Tensor _logcumsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+        return at::_ops::_logcumsumexp::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _logcumsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) {
+        return at::_ops::_logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _logcumsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
+        return at::_ops::_logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
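Each `.out` schema in this header generates two wrappers that hit the same op: `_out` puts the output tensor first and keeps trailing defaults usable, while `_outf` preserves the schema's argument order with the output last and no defaults. A sketch (editorial, assuming the at::redispatch namespace):

    #include <ATen/RedispatchFunctions.h>

    void fill_out(c10::DispatchKeySet ks, const at::Tensor & x, at::Tensor & out) {
      // out-first variant; dtype defaults to c10::nullopt and can be omitted.
      at::redispatch::log_softmax_out(ks, out, x, /*dim=*/-1);
      // schema-order variant; every argument must be spelled out.
      at::redispatch::log_softmax_outf(ks, x, /*dim=*/-1, c10::nullopt, out);
    }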
+    // aten::logcumsumexp(Tensor self, int dim) -> Tensor
+    inline at::Tensor logcumsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+        return at::_ops::logcumsumexp::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logcumsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) {
+        return at::_ops::logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logcumsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
+        return at::_ops::logcumsumexp_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
+    inline at::Tensor logcumsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
+        return at::_ops::logcumsumexp_dimname::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logcumsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim) {
+        return at::_ops::logcumsumexp_dimname_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logcumsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, at::Tensor & out) {
+        return at::_ops::logcumsumexp_dimname_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+    inline at::Tensor logsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::logsumexp::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
+    inline at::Tensor logsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
+        return at::_ops::logsumexp_names::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false) {
+        return at::_ops::logsumexp_names_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::logsumexp_names_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor
+    inline at::Tensor margin_ranking_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::margin_ranking_loss::redispatch(dispatchKeySet, input1, input2, target, margin, reduction);
+    }
+
+    // aten::matmul(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor matmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::matmul::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> matmul_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
+        return at::_ops::matmul_backward::redispatch(dispatchKeySet, grad, self, other, mask);
+    }
+
+    // aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::matmul_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & matmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::matmul_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::matrix_power(Tensor self, int n) -> Tensor
+    inline at::Tensor matrix_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) {
+        return at::_ops::matrix_power::redispatch(dispatchKeySet, self, n);
+    }
+
+    // aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & matrix_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t n) {
+        return at::_ops::matrix_power_out::redispatch(dispatchKeySet, self, n, out);
+    }
+
+    // aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & matrix_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
+        return at::_ops::matrix_power_out::redispatch(dispatchKeySet, self, n, out);
+    }
+
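The margin_ranking_loss wrapper above takes `reduction` as a plain int64_t defaulted to at::Reduction::Mean; in stock ATen the values are spelled at::Reduction::None / Mean / Sum. A sketch (editorial, assuming the at::redispatch namespace):

    #include <ATen/RedispatchFunctions.h>

    at::Tensor ranking_loss_sum(c10::DispatchKeySet ks, const at::Tensor & x1,
                                const at::Tensor & x2, const at::Tensor & target) {
      // Override both defaulted trailing arguments: margin and reduction.
      return at::redispatch::margin_ranking_loss(ks, x1, x2, target,
                                                 /*margin=*/0.5, at::Reduction::Sum);
    }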
+    // aten::matrix_exp(Tensor self) -> Tensor
+    inline at::Tensor matrix_exp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::matrix_exp::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
+    inline at::Tensor matrix_exp_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad) {
+        return at::_ops::matrix_exp_backward::redispatch(dispatchKeySet, self, grad);
+    }
+
+    // aten::_aminmax(Tensor self) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_aminmax::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _aminmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::_aminmax_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
+    inline ::std::tuple<at::Tensor,at::Tensor> aminmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::aminmax::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & min, at::Tensor & max, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) {
+        return at::_ops::aminmax_out::redispatch(dispatchKeySet, self, dim, keepdim, min, max);
+    }
+
+    // aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max) {
+        return at::_ops::aminmax_out::redispatch(dispatchKeySet, self, dim, keepdim, min, max);
+    }
+
+    // aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor
+    inline at::Tensor _compute_linear_combination(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients) {
+        return at::_ops::_compute_linear_combination::redispatch(dispatchKeySet, input, coefficients);
+    }
+
+    // aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _compute_linear_combination_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & coefficients) {
+        return at::_ops::_compute_linear_combination_out::redispatch(dispatchKeySet, input, coefficients, out);
+    }
+
+    // aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _compute_linear_combination_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out) {
+        return at::_ops::_compute_linear_combination_out::redispatch(dispatchKeySet, input, coefficients, out);
+    }
+
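The tuple-returning reductions such as aminmax above (and max.dim below) come back as ::std::tuple values, which unpack cleanly with structured bindings. A sketch (editorial, assuming the at::redispatch namespace and C++17):

    #include <ATen/RedispatchFunctions.h>

    void minmax_demo(c10::DispatchKeySet ks, const at::Tensor & t) {
      // aminmax returns ::std::tuple<at::Tensor,at::Tensor> as (min, max).
      auto [mn, mx] = at::redispatch::aminmax(ks, t, /*dim=*/0, /*keepdim=*/false);
      (void)mn; (void)mx;
    }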
+    // aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::max_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+        return at::_ops::max_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
+        return at::_ops::max_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+    inline ::std::tuple<at::Tensor,at::Tensor> max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::max_names_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & max, at::Tensor & max_values, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+        return at::_ops::max_names_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & max, at::Tensor & max_values) {
+        return at::_ops::max_names_dim_max::redispatch(dispatchKeySet, self, dim, keepdim, max, max_values);
+    }
+
+    // aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, int[] sizes, bool keepdim) -> Tensor
+    inline at::Tensor value_selecting_reduction_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+        return at::_ops::value_selecting_reduction_backward::redispatch(dispatchKeySet, grad, dim, indices, sizes, keepdim);
+    }
+
+    // aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
+    inline at::Tensor amax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
+        return at::_ops::amax::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & amax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) {
+        return at::_ops::amax_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & amax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::amax_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::max_pool1d_with_indices::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor max_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::max_pool1d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::max_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::_mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor _mps_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::_mps_max_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::mps_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor mps_max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::mps_max_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor mkldnn_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::mkldnn_max_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor mkldnn_max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::mkldnn_max_pool2d_backward::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor mkldnn_max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::mkldnn_max_pool3d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor mkldnn_max_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::mkldnn_max_pool3d_backward::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor quantized_max_pool1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::quantized_max_pool1d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor quantized_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::quantized_max_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
+    inline at::Tensor max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+        return at::_ops::max_pool3d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode);
+    }
+
+    // aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean::redispatch(dispatchKeySet, self, dtype);
+    }
+
+    // aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_dim::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::mean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_names_dim::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::mean_names_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::mean_names_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor nanmean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::nanmean::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nanmean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::nanmean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nanmean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::nanmean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
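The mean/nanmean wrappers above take `dim` as `int[1]?` (at::OptionalIntArrayRef, so an IntArrayRef or c10::nullopt) and an optional dtype that widens the accumulator. A sketch (editorial, assuming the at::redispatch namespace):

    #include <ATen/RedispatchFunctions.h>

    void reduce_demo(c10::DispatchKeySet ks, const at::Tensor & t) {
      // Reduce over dim 0, accumulating in double.
      at::Tensor m = at::redispatch::mean(ks, t, /*dim=*/at::IntArrayRef{0},
                                          /*keepdim=*/false, at::kDouble);
      // nanmean has the same signature but ignores NaN elements; with the
      // defaults it reduces over every dimension.
      at::Tensor nm = at::redispatch::nanmean(ks, t);
      (void)m; (void)nm;
    }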
+ inline at::Tensor & nanmean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, at::Tensor & out) { + return at::_ops::nanmean_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::median(Tensor self) -> Tensor + inline at::Tensor median(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::median::redispatch(dispatchKeySet, self); + } + + // aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple median(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::median_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple median_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::median_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple median_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::median_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple median(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::median_names_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple median_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::median_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple median_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::median_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::nanmedian(Tensor self) -> Tensor + inline at::Tensor nanmedian(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::nanmedian::redispatch(dispatchKeySet, self); + } + + // aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple nanmedian(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::nanmedian_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) 
values, Tensor(b!) indices) + inline ::std::tuple nanmedian_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::nanmedian_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple nanmedian_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::nanmedian_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple nanmedian(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::nanmedian_names_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple nanmedian_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::nanmedian_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple nanmedian_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) { + return at::_ops::nanmedian_names_dim_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices); + } + + // aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::min_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, int64_t dim, bool keepdim=false) { + return at::_ops::min_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices); + } + + // aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) 
indices) + inline ::std::tuple min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices); + } + + // aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + inline ::std::tuple min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & min, at::Tensor & min_indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) { + return at::_ops::min_names_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices); + } + + // aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & min, at::Tensor & min_indices) { + return at::_ops::min_names_dim_min::redispatch(dispatchKeySet, self, dim, keepdim, min, min_indices); + } + + // aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + inline at::Tensor amin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) { + return at::_ops::amin::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & amin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false) { + return at::_ops::amin_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & amin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::amin_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::_mps_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor + inline at::Tensor _mps_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::_mps_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups); + } + + // aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + inline ::std::tuple mps_convolution_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask) { + return at::_ops::mps_convolution_backward::redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask); + } + + // aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor + inline at::Tensor mkldnn_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::mkldnn_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups); + } + + // aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) + inline ::std::tuple miopen_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon) { + return at::_ops::miopen_batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); + } + + // aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) + inline ::std::tuple miopen_batch_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon) { + return at::_ops::miopen_batch_norm_backward::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon); + } + + // aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + inline at::Tensor miopen_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } + + // aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + inline at::Tensor miopen_convolution_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_convolution_transpose::redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); + } + + // aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + inline at::Tensor miopen_depthwise_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } + + // aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + inline at::Tensor miopen_convolution_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::miopen_convolution_relu::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups); + } + + // aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor + inline at::Tensor miopen_convolution_add_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional & alpha, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) { + return at::_ops::miopen_convolution_add_relu::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups); + } + + // aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
+  // aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
+    return at::_ops::miopen_rnn::redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
+  }
+
+  // aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
+  inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+    return at::_ops::miopen_rnn_backward::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
+  }
+
+  // aten::mm(Tensor self, Tensor mat2) -> Tensor
+  inline at::Tensor mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
+    return at::_ops::mm::redispatch(dispatchKeySet, self, mat2);
+  }
+
+  // aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat2) {
+    return at::_ops::mm_out::redispatch(dispatchKeySet, self, mat2, out);
+  }
+
+  // aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
+    return at::_ops::mm_out::redispatch(dispatchKeySet, self, mat2, out);
+  }
+
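// Note on a pattern repeated throughout this header: every out-variant op is
// emitted twice -- `_out` takes the result tensor first (C++ convention),
// `_outf` takes it last (schema order) -- and both forward to the same
// at::_ops entry. A minimal sketch of calling a redispatch wrapper from inside
// a kernel, assuming (as in upstream PyTorch) these wrappers live in
// at::redispatch and the keyset is masked before re-entering the dispatcher:
inline at::Tensor & my_mm_out_kernel(c10::DispatchKeySet ks, const at::Tensor & self, const at::Tensor & mat2, at::Tensor & out) {
  // Hypothetical: drop Autograd and everything above it, then redispatch below it.
  return at::redispatch::mm_out(ks & c10::after_autograd_keyset, out, self, mat2);
}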
+  // aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor
+  inline at::Tensor _sparse_mm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sparse, const at::Tensor & dense) {
+    return at::_ops::_sparse_mm::redispatch(dispatchKeySet, sparse, dense);
+  }
+
+  // aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor
+  inline at::Tensor _sparse_sparse_matmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::_sparse_sparse_matmul::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::_sparse_mask_helper(Tensor t, Tensor mask_indices) -> Tensor
+  inline at::Tensor _sparse_mask_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask_indices) {
+    return at::_ops::_sparse_mask_helper::redispatch(dispatchKeySet, t, mask_indices);
+  }
+
+  // aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+  inline ::std::tuple<at::Tensor,at::Tensor> mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::mode::redispatch(dispatchKeySet, self, dim, keepdim);
+  }
+
+  // aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  inline ::std::tuple<at::Tensor &,at::Tensor &> mode_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim=-1, bool keepdim=false) {
+    return at::_ops::mode_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+  }
+
+  // aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  inline ::std::tuple<at::Tensor &,at::Tensor &> mode_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::mode_values::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+  }
+
+  // aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+  inline ::std::tuple<at::Tensor,at::Tensor> mode(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::mode_dimname::redispatch(dispatchKeySet, self, dim, keepdim);
+  }
+
+  // aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  inline ::std::tuple<at::Tensor &,at::Tensor &> mode_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool keepdim=false) {
+    return at::_ops::mode_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+  }
+
+  // aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
+  inline ::std::tuple<at::Tensor &,at::Tensor &> mode_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices) {
+    return at::_ops::mode_dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, values, indices);
+  }
+
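// The mode overloads above return a (values, indices) pair. A small usage
// sketch against the public API (the redispatch wrappers take the same
// arguments after the leading DispatchKeySet):
inline void mode_example() {
  at::Tensor t = at::tensor({1, 2, 2, 3});
  at::Tensor values, indices;
  std::tie(values, indices) = at::mode(t, /*dim=*/-1, /*keepdim=*/false);
  // values holds the most frequent element (2), indices one of its positions.
}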
+  // aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
+  inline at::Tensor mul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::mul_Tensor::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  inline at::Tensor & mul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::mul__Tensor::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::mul_out::redispatch(dispatchKeySet, self, other, out);
+  }
+
+  // aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::mul_out::redispatch(dispatchKeySet, self, other, out);
+  }
+
+  // aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
+  inline at::Tensor mul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::mul_Scalar::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+  inline at::Tensor & mul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::mul__Scalar::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
+  inline at::Tensor multiply(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::multiply_Tensor::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+  inline at::Tensor & multiply_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::multiply__Tensor::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & multiply_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::multiply_out::redispatch(dispatchKeySet, self, other, out);
+  }
+
+  // aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & multiply_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::multiply_out::redispatch(dispatchKeySet, self, other, out);
+  }
+
+  // aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
+  inline at::Tensor multiply(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::multiply_Scalar::redispatch(dispatchKeySet, self, other);
+  }
+
+  // aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+  inline at::Tensor & multiply_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::multiply__Scalar::redispatch(dispatchKeySet, self, other);
+  }
+
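// multiply/multiply_ above are schema-level aliases of mul/mul_: same
// semantics, separate at::_ops entries (multiply_Tensor vs. mul_Tensor).
// Quick equivalence sketch against the public API:
inline bool multiply_matches_mul(const at::Tensor & a, const at::Tensor & b) {
  return at::equal(at::multiply(a, b), at::mul(a, b));
}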
+  // aten::mv(Tensor self, Tensor vec) -> Tensor
+  inline at::Tensor mv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec) {
+    return at::_ops::mv::redispatch(dispatchKeySet, self, vec);
+  }
+
+  // aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec) {
+    return at::_ops::mv_out::redispatch(dispatchKeySet, self, vec, out);
+  }
+
+  // aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec, at::Tensor & out) {
+    return at::_ops::mv_out::redispatch(dispatchKeySet, self, vec, out);
+  }
+
+  // aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mvlgamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t p) {
+    return at::_ops::mvlgamma_out::redispatch(dispatchKeySet, self, p, out);
+  }
+
+  // aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & mvlgamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) {
+    return at::_ops::mvlgamma_out::redispatch(dispatchKeySet, self, p, out);
+  }
+
+  // aten::mvlgamma(Tensor self, int p) -> Tensor
+  inline at::Tensor mvlgamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) {
+    return at::_ops::mvlgamma::redispatch(dispatchKeySet, self, p);
+  }
+
+  // aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
+  inline at::Tensor & mvlgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t p) {
+    return at::_ops::mvlgamma_::redispatch(dispatchKeySet, self, p);
+  }
+
+  // aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+  inline at::Tensor narrow_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
+    return at::_ops::narrow_copy::redispatch(dispatchKeySet, self, dim, start, length);
+  }
+
+  // aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+  inline at::Tensor narrow_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+    return at::_ops::narrow_copy::redispatch(dispatchKeySet, self, dim, start, length);
+  }
+
+  // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & narrow_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
+    return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out);
+  }
+
+  // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & narrow_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out) {
+    return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out);
+  }
+
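// narrow_copy above is emitted twice: an int64_t overload for concrete sizes
// and a *_symint overload whose c10::SymInt arguments may be symbolic (e.g.
// under symbolic shape tracing); both target the same aten::narrow_copy op.
// Hedged sketch, assuming the symint entry point is exposed publicly:
inline at::Tensor narrow_copy_both_ways(const at::Tensor & t) {
  at::Tensor a = at::narrow_copy(t, /*dim=*/0, /*start=*/0, /*length=*/1);
  at::Tensor b = at::narrow_copy_symint(t, 0, c10::SymInt(0), c10::SymInt(1));
  return at::equal(a, b) ? a : b;  // identical for concrete sizes
}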
+  // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & narrow_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
+    return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out);
+  }
+
+  // aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & narrow_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, at::Tensor & out) {
+    return at::_ops::narrow_copy_out::redispatch(dispatchKeySet, self, dim, start, length, out);
+  }
+
+  // aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)
+  inline at::Tensor narrow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t start, int64_t length) {
+    return at::_ops::narrow::redispatch(dispatchKeySet, self, dim, start, length);
+  }
+
+  // aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a)
+  inline at::Tensor narrow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & start, int64_t length) {
+    return at::_ops::narrow_Tensor::redispatch(dispatchKeySet, self, dim, start, length);
+  }
+
+  // aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
+    return at::_ops::native_batch_norm::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps);
+  }
+
+  // aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+  inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
+    return at::_ops::native_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
+  }
+
+  // aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+  inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, at::Tensor & out, at::Tensor & save_mean, at::Tensor & save_invstd) {
+    return at::_ops::native_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd);
+  }
+
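// For reference, native_batch_norm above returns (output, save_mean,
// save_invstd), where per channel invstd = 1 / sqrt(var + eps) and
// y = (x - mean) * invstd * weight + bias. Scalar sketch (assumes <cmath>):
inline double batch_norm_scalar(double x, double mean, double var, double eps, double weight, double bias) {
  double invstd = 1.0 / std::sqrt(var + eps);
  return (x - mean) * invstd * weight + bias;
}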
+  // aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps) {
+    return at::_ops::batch_norm_stats::redispatch(dispatchKeySet, input, eps);
+  }
+
+  // aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor
+  inline at::Tensor batch_norm_elemt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
+    return at::_ops::batch_norm_elemt::redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps);
+  }
+
+  // aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & batch_norm_elemt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
+    return at::_ops::batch_norm_elemt_out::redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps, out);
+  }
+
+  // aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & batch_norm_elemt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps, at::Tensor & out) {
+    return at::_ops::batch_norm_elemt_out::redispatch(dispatchKeySet, input, weight, bias, mean, invstd, eps, out);
+  }
+
+  // aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
+    return at::_ops::batch_norm_gather_stats::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count);
+  }
+
+  // aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
+    return at::_ops::batch_norm_gather_stats_with_counts::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts);
+  }
+
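// Conceptual sketch (plain C++, no ATen; assumes <vector> and <cmath>) of what
// batch_norm_gather_stats_with_counts above does: fold per-replica
// (mean, invstd, count) triples into global statistics.
struct ReplicaStats { double mean; double invstd; double count; };
inline ReplicaStats combine_replica_stats(const std::vector<ReplicaStats> & rs, double eps) {
  double n = 0, sum = 0, sumsq = 0;
  for (const auto & s : rs) {
    double var = 1.0 / (s.invstd * s.invstd) - eps;  // undo invstd = 1/sqrt(var+eps)
    n += s.count;
    sum += s.mean * s.count;
    sumsq += (var + s.mean * s.mean) * s.count;      // count * E[x^2]
  }
  double mean = sum / n;
  double var = sumsq / n - mean * mean;              // biased (population) variance
  return {mean, 1.0 / std::sqrt(var + eps), n};
}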
+  // aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
+    return at::_ops::native_batch_norm_backward::redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
+  }
+
+  // aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
+    return at::_ops::batch_norm_backward_reduce::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
+  }
+
+  // aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor
+  inline at::Tensor batch_norm_backward_elemt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
+    return at::_ops::batch_norm_backward_elemt::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count);
+  }
+
+  // aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)
+  inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
+    return at::_ops::batch_norm_update_stats::redispatch(dispatchKeySet, input, running_mean, running_var, momentum);
+  }
+
+  // aten::is_vulkan_available() -> bool
+  inline bool is_vulkan_available(c10::DispatchKeySet dispatchKeySet) {
+    return at::_ops::is_vulkan_available::redispatch(dispatchKeySet);
+  }
+
+  // aten::_nnpack_available() -> bool
+  inline bool _nnpack_available(c10::DispatchKeySet dispatchKeySet) {
+    return at::_ops::_nnpack_available::redispatch(dispatchKeySet);
+  }
+
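// batch_norm_update_stats above folds the current batch statistics into the
// running buffers with the usual exponential moving average,
//   running = (1 - momentum) * running + momentum * batch_stat.
// Scalar sketch:
inline double update_running_stat(double running, double batch_stat, double momentum) {
  return (1.0 - momentum) * running + momentum * batch_stat;
}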
+  // aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1) -> Tensor
+  inline at::Tensor _nnpack_spatial_convolution(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
+    return at::_ops::_nnpack_spatial_convolution::redispatch(dispatchKeySet, input, weight, bias, padding, stride);
+  }
+
+  // aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+    return at::_ops::ones_names::redispatch(dispatchKeySet, size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::ones_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
+  }
+
+  // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::ones::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor ones(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::ones::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), dtype, layout, device, pin_memory);
+  }
+
+  // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor ones_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::ones::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor ones_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::ones::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
+  }
+
+  // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & ones_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) {
+    return at::_ops::ones_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), out);
+  }
+
+  // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & ones_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::ones_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), out);
+  }
+
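// Every factory above comes in two equivalent flavors: one taking a packed
// at::TensorOptions (unpacked via optTypeMetaToScalarType and the *_opt
// accessors) and one taking the four optionals explicitly. Sketch against the
// public API:
inline void ones_overloads_example() {
  at::Tensor a = at::ones({2, 3}, at::TensorOptions().dtype(at::kFloat));
  at::Tensor b = at::ones({2, 3}, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);
  // a and b are constructed identically.
}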
+  // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & ones_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size) {
+    return at::_ops::ones_out::redispatch(dispatchKeySet, size, out);
+  }
+
+  // aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & ones_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::ones_out::redispatch(dispatchKeySet, size, out);
+  }
+
+  // aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor ones_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::ones_like::redispatch(dispatchKeySet, self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+
+  // aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor ones_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::ones_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
+  }
+
+  // aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor
+  inline at::Tensor pairwise_distance(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false) {
+    return at::_ops::pairwise_distance::redispatch(dispatchKeySet, x1, x2, p, eps, keepdim);
+  }
+
+  // aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor
+  inline at::Tensor cdist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p=2, c10::optional<int64_t> compute_mode=c10::nullopt) {
+    return at::_ops::cdist::redispatch(dispatchKeySet, x1, x2, p, compute_mode);
+  }
+
+  // aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor
+  inline at::Tensor _euclidean_dist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2) {
+    return at::_ops::_euclidean_dist::redispatch(dispatchKeySet, x1, x2);
+  }
+
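// Relationship between the distance ops above: for a single (n, d) input x,
// pdist(x, p) is the condensed form of cdist(x, x, p) -- the n*(n-1)/2
// strictly-upper-triangular entries, row by row. Sketch:
inline void pdist_vs_cdist(const at::Tensor & x) {  // x: (n, d), floating point
  at::Tensor full = at::cdist(x, x);                // (n, n), zero diagonal
  at::Tensor condensed = at::pdist(x);              // (n*(n-1)/2,)
  (void)full; (void)condensed;
}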
+  // aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor
+  inline at::Tensor _cdist_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
+    return at::_ops::_cdist_forward::redispatch(dispatchKeySet, x1, x2, p, compute_mode);
+  }
+
+  // aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor
+  inline at::Tensor _cdist_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
+    return at::_ops::_cdist_backward::redispatch(dispatchKeySet, grad, x1, x2, p, cdist);
+  }
+
+  // aten::pdist(Tensor self, float p=2) -> Tensor
+  inline at::Tensor pdist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p=2) {
+    return at::_ops::pdist::redispatch(dispatchKeySet, self, p);
+  }
+
+  // aten::_pdist_forward(Tensor self, float p=2) -> Tensor
+  inline at::Tensor _pdist_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p=2) {
+    return at::_ops::_pdist_forward::redispatch(dispatchKeySet, self, p);
+  }
+
+  // aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor
+  inline at::Tensor _pdist_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
+    return at::_ops::_pdist_backward::redispatch(dispatchKeySet, grad, self, p, pdist);
+  }
+
+  // aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor
+  inline at::Tensor cosine_similarity(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, int64_t dim=1, double eps=1e-08) {
+    return at::_ops::cosine_similarity::redispatch(dispatchKeySet, x1, x2, dim, eps);
+  }
+
+  // aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
+  inline at::Tensor permute(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
+    return at::_ops::permute::redispatch(dispatchKeySet, self, dims);
+  }
+
+  // aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
+  inline at::Tensor movedim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
+    return at::_ops::movedim_intlist::redispatch(dispatchKeySet, self, source, destination);
+  }
+
+  // aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
+  inline at::Tensor movedim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) {
+    return at::_ops::movedim_int::redispatch(dispatchKeySet, self, source, destination);
+  }
+
+  // aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
+  inline at::Tensor moveaxis(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
+    return at::_ops::moveaxis_intlist::redispatch(dispatchKeySet, self, source, destination);
+  }
+
+  // aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
+  inline at::Tensor moveaxis(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination) {
+    return at::_ops::moveaxis_int::redispatch(dispatchKeySet, self, source, destination);
+  }
+
+  // aten::numpy_T(Tensor(a) self) -> Tensor(a)
+  inline at::Tensor numpy_T(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+    return at::_ops::numpy_T::redispatch(dispatchKeySet, self);
+  }
+
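// movedim/moveaxis above are the same op under two names (moveaxis is the
// NumPy-style alias). Quick shape sketch with the public API:
inline void movedim_example() {
  at::Tensor t = at::zeros({2, 3, 4});
  at::Tensor u = at::movedim(t, /*source=*/0, /*destination=*/2);  // (3, 4, 2)
  at::Tensor v = t.permute({1, 2, 0});                             // same result
  (void)u; (void)v;
}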
+  // aten::matrix_H(Tensor(a) self) -> Tensor(a)
+  inline at::Tensor matrix_H(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+    return at::_ops::matrix_H::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::mT(Tensor(a) self) -> Tensor(a)
+  inline at::Tensor mT(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+    return at::_ops::mT::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::mH(Tensor(a) self) -> Tensor(a)
+  inline at::Tensor mH(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+    return at::_ops::mH::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::adjoint(Tensor(a) self) -> Tensor(a)
+  inline at::Tensor adjoint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+    return at::_ops::adjoint::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor
+  inline at::Tensor pixel_shuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor) {
+    return at::_ops::pixel_shuffle::redispatch(dispatchKeySet, self, upscale_factor);
+  }
+
+  // aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor
+  inline at::Tensor pixel_unshuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor) {
+    return at::_ops::pixel_unshuffle::redispatch(dispatchKeySet, self, downscale_factor);
+  }
+
+  // aten::channel_shuffle(Tensor self, int groups) -> Tensor
+  inline at::Tensor channel_shuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups) {
+    return at::_ops::channel_shuffle::redispatch(dispatchKeySet, self, groups);
+  }
+
+  // aten::native_channel_shuffle(Tensor self, int groups) -> Tensor
+  inline at::Tensor native_channel_shuffle(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups) {
+    return at::_ops::native_channel_shuffle::redispatch(dispatchKeySet, self, groups);
+  }
+
+  // aten::is_pinned(Tensor self, Device? device=None) -> bool
+  inline bool is_pinned(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt) {
+    return at::_ops::is_pinned::redispatch(dispatchKeySet, self, device);
+  }
+
+  // aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
+  inline at::Tensor pin_memory(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt) {
+    return at::_ops::pin_memory::redispatch(dispatchKeySet, self, device);
+  }
+
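// Conventions for the views above: numpy_T reverses all dimensions, mT swaps
// only the last two, mH additionally conjugates (mH == adjoint), and
// pixel_shuffle rearranges (*, C*r^2, H, W) into (*, C, H*r, W*r) for an
// upscale factor r. Shape sketch with the public method API:
inline void matrix_view_example() {
  at::Tensor t = at::zeros({5, 2, 3});
  at::Tensor a = t.mT();       // (5, 3, 2)
  at::Tensor b = t.adjoint();  // same as t.mH(); equals mT() for real dtypes
  (void)a; (void)b;
}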
+  // aten::_pin_memory(Tensor self, Device? device=None) -> Tensor
+  inline at::Tensor _pin_memory(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt) {
+    return at::_ops::_pin_memory::redispatch(dispatchKeySet, self, device);
+  }
+
+  // aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
+  inline at::Tensor pinverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond=1e-15) {
+    return at::_ops::pinverse::redispatch(dispatchKeySet, self, rcond);
+  }
+
+  // aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor
+  inline at::Tensor poisson_nll_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
+    return at::_ops::poisson_nll_loss::redispatch(dispatchKeySet, input, target, log_input, full, eps, reduction);
+  }
+
+  // aten::rad2deg(Tensor self) -> Tensor
+  inline at::Tensor rad2deg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+    return at::_ops::rad2deg::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
+  inline at::Tensor & rad2deg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+    return at::_ops::rad2deg_::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & rad2deg_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::rad2deg_out::redispatch(dispatchKeySet, self, out);
+  }
+
+  // aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & rad2deg_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::rad2deg_out::redispatch(dispatchKeySet, self, out);
+  }
+
+  // aten::deg2rad(Tensor self) -> Tensor
+  inline at::Tensor deg2rad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+    return at::_ops::deg2rad::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
+  inline at::Tensor & deg2rad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+    return at::_ops::deg2rad_::redispatch(dispatchKeySet, self);
+  }
+
+  // aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & deg2rad_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::deg2rad_out::redispatch(dispatchKeySet, self, out);
+  }
+
+  // aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & deg2rad_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::deg2rad_out::redispatch(dispatchKeySet, self, out);
+  }
+
+  // aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor scalar_tensor(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::TensorOptions options={}) {
+    return at::_ops::scalar_tensor::redispatch(dispatchKeySet, s, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
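// rad2deg/deg2rad above are the elementwise conversions
//   deg = rad * 180 / pi,   rad = deg * pi / 180.
// Scalar sketch:
inline double rad2deg_scalar(double rad) {
  constexpr double kPi = 3.141592653589793;
  return rad * 180.0 / kPi;
}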
+  // aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor scalar_tensor(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::scalar_tensor::redispatch(dispatchKeySet, s, dtype, layout, device, pin_memory);
+  }
+
+  // aten::rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+    return at::_ops::rand_names::redispatch(dispatchKeySet, size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::rand_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
+  }
+
+  // aten::rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+    return at::_ops::rand_generator_with_names::redispatch(dispatchKeySet, size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::rand_generator_with_names::redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory);
+  }
+
+  // aten::rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::rand::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::rand::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
+  }
+
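// The generator overloads above thread an explicit RNG through sampling; the
// plain overloads fall back to the global default generator. Sketch with the
// public API, assuming the usual CPU generator factory is available:
inline at::Tensor reproducible_rand() {
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  return at::rand({2, 2}, gen);  // same values on every call
}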
+  // aten::rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
+    return at::_ops::rand_generator::redispatch(dispatchKeySet, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor rand(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::rand_generator::redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory);
+  }
+
+  // aten::rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) {
+    return at::_ops::rand_out::redispatch(dispatchKeySet, size, out);
+  }
+
+  // aten::rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::rand_out::redispatch(dispatchKeySet, size, out);
+  }
+
+  // aten::rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
+    return at::_ops::rand_generator_out::redispatch(dispatchKeySet, size, generator, out);
+  }
+
+  // aten::rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::rand_generator_out::redispatch(dispatchKeySet, size, generator, out);
+  }
+
+  // aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor rand_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::rand_like::redispatch(dispatchKeySet, self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+
+  // aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor rand_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::rand_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
+  }
+
+  // aten::randint(int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
+    return at::_ops::randint::redispatch(dispatchKeySet, high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::randint(int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randint::redispatch(dispatchKeySet, high, size, dtype, layout, device, pin_memory);
+  }
+
+  // aten::randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+    return at::_ops::randint_generator::redispatch(dispatchKeySet, high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randint_generator::redispatch(dispatchKeySet, high, size, generator, dtype, layout, device, pin_memory);
+  }
+
+  // aten::randint.low(int low, int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options=at::kLong) {
+    return at::_ops::randint_low::redispatch(dispatchKeySet, low, high, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::randint.low(int low, int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randint_low::redispatch(dispatchKeySet, low, high, size, dtype, layout, device, pin_memory);
+  }
+
+  // aten::randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+    return at::_ops::randint_low_generator::redispatch(dispatchKeySet, low, high, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
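// randint above samples uniformly from a half-open range: [0, high) for the
// high-only overloads and [low, high) for the .low overloads, with dtype
// defaulting to long (at::kLong) rather than the global default. Sketch:
inline at::Tensor dice_rolls() {
  return at::randint(/*low=*/1, /*high=*/7, {10});  // values in 1..6, int64
}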
+  // aten::randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randint(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randint_low_generator::redispatch(dispatchKeySet, low, high, size, generator, dtype, layout, device, pin_memory);
+  }
+
+  // aten::randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t high, at::IntArrayRef size) {
+    return at::_ops::randint_out::redispatch(dispatchKeySet, high, size, out);
+  }
+
+  // aten::randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::randint_out::redispatch(dispatchKeySet, high, size, out);
+  }
+
+  // aten::randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
+    return at::_ops::randint_generator_out::redispatch(dispatchKeySet, high, size, generator, out);
+  }
+
+  // aten::randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::randint_generator_out::redispatch(dispatchKeySet, high, size, generator, out);
+  }
+
+  // aten::randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size) {
+    return at::_ops::randint_low_out::redispatch(dispatchKeySet, low, high, size, out);
+  }
+
+  // aten::randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::randint_low_out::redispatch(dispatchKeySet, low, high, size, out);
+  }
+
+  // aten::randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator) {
+    return at::_ops::randint_low_generator_out::redispatch(dispatchKeySet, low, high, size, generator, out);
+  }
+
+  // aten::randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randint_outf(c10::DispatchKeySet dispatchKeySet, int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
+    return at::_ops::randint_low_generator_out::redispatch(dispatchKeySet, low, high, size, generator, out);
+  }
+
+  // aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like::redispatch(dispatchKeySet, self, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+
+  // aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::randint_like::redispatch(dispatchKeySet, self, high, dtype, layout, device, pin_memory, memory_format);
+  }
+
+  // aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::randint_like_low_dtype::redispatch(dispatchKeySet, self, low, high, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+
+  // aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+  inline at::Tensor randint_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::randint_like_low_dtype::redispatch(dispatchKeySet, self, low, high, dtype, layout, device, pin_memory, memory_format);
+  }
+
+  // aten::randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::randn::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randn::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
+  }
+
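// The *_like factories above read shape and (unless overridden) dtype, layout
// and device from self; randn draws from the standard normal N(0, 1). Sketch,
// assuming a floating-point self:
inline at::Tensor noisy_copy(const at::Tensor & self) {
  return self + at::randn_like(self);  // noise with the same shape/options as self
}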
+  // aten::randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options={}) {
+    return at::_ops::randn_generator::redispatch(dispatchKeySet, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randn_generator::redispatch(dispatchKeySet, size, generator, dtype, layout, device, pin_memory);
+  }
+
+  // aten::randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+    return at::_ops::randn_names::redispatch(dispatchKeySet, size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randn_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
+  }
+
+  // aten::randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+    return at::_ops::randn_generator_with_names::redispatch(dispatchKeySet, size, generator, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+  }
+
+  // aten::randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+  inline at::Tensor randn(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::randn_generator_with_names::redispatch(dispatchKeySet, size, generator, names, dtype, layout, device, pin_memory);
+  }
+
+  // aten::randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) {
+    return at::_ops::randn_out::redispatch(dispatchKeySet, size, out);
+  }
+
+  // aten::randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)
+  inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::randn_out::redispatch(dispatchKeySet, size, out);
+  }
+
+    // aten::randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator) {
+        return at::_ops::randn_generator_out::redispatch(dispatchKeySet, size, generator, out);
+    }
+
+    // aten::randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::randn_generator_out::redispatch(dispatchKeySet, size, generator, out);
+    }
+
+    // aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor randn_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::randn_like::redispatch(dispatchKeySet, self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+    }
+
+    // aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor randn_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+        return at::_ops::randn_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
+    }
+
+    // aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, at::TensorOptions options=at::kLong) {
+        return at::_ops::randperm::redispatch(dispatchKeySet, n, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::randperm::redispatch(dispatchKeySet, n, dtype, layout, device, pin_memory);
+    }
+
+    // aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong) {
+        return at::_ops::randperm_generator::redispatch(dispatchKeySet, n, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor randperm(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::randperm_generator::redispatch(dispatchKeySet, n, generator, dtype, layout, device, pin_memory);
+    }
+    // aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & randperm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n) {
+        return at::_ops::randperm_out::redispatch(dispatchKeySet, n, out);
+    }
+
+    // aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & randperm_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, at::Tensor & out) {
+        return at::_ops::randperm_out::redispatch(dispatchKeySet, n, out);
+    }
+
+    // aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & randperm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, c10::optional<at::Generator> generator) {
+        return at::_ops::randperm_generator_out::redispatch(dispatchKeySet, n, generator, out);
+    }
+
+    // aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & randperm_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::randperm_generator_out::redispatch(dispatchKeySet, n, generator, out);
+    }
+
+    // aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step=1, at::TensorOptions options={}) {
+        return at::_ops::range_step::redispatch(dispatchKeySet, start, end, step, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::range_step::redispatch(dispatchKeySet, start, end, step, dtype, layout, device, pin_memory);
+    }
+
+    // aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::TensorOptions options={}) {
+        return at::_ops::range::redispatch(dispatchKeySet, start, end, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor range(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::range::redispatch(dispatchKeySet, start, end, dtype, layout, device, pin_memory);
+    }
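    // [Editorial sketch] randperm defaults its TensorOptions to at::kLong,
    // mirroring `dtype=long` in the schema, while range's step defaults to 1.
    // Hypothetical usage (helper name and FULL key set are illustrative):
    inline at::Tensor example_randperm_range() {
        c10::DispatchKeySet full(c10::DispatchKeySet::FULL);
        at::Tensor perm = randperm(full, 10);        // int64 permutation of 0..9
        at::Tensor r = range(full, 0.0, 1.0, 0.25);  // 0.00, 0.25, ..., 1.00
        return r * static_cast<double>(perm.numel());
    }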
+    // aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & range_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end) {
+        return at::_ops::range_out_::redispatch(dispatchKeySet, start, end, out);
+    }
+
+    // aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & range_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, at::Tensor & out) {
+        return at::_ops::range_out_::redispatch(dispatchKeySet, start, end, out);
+    }
+
+    // aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & range_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step) {
+        return at::_ops::range_out::redispatch(dispatchKeySet, start, end, step, out);
+    }
+
+    // aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & range_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out) {
+        return at::_ops::range_out::redispatch(dispatchKeySet, start, end, step, out);
+    }
+
+    // aten::ravel(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor ravel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::ravel::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::reciprocal(Tensor self) -> Tensor
+    inline at::Tensor reciprocal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::reciprocal::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & reciprocal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::reciprocal_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & reciprocal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::reciprocal_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & reciprocal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::reciprocal_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::neg(Tensor self) -> Tensor
+    inline at::Tensor neg(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::neg::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::neg_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & neg_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::neg_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & neg_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::neg_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & neg_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::neg_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::negative(Tensor self) -> Tensor
+    inline at::Tensor negative(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::negative::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::negative_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & negative_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::negative_::redispatch(dispatchKeySet, self);
+    }
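    // [Editorial sketch] Every `.out` schema yields two spellings: `*_out`
    // takes the destination tensor first, `*_outf` takes it last (schema
    // order); both redispatch to the same _ops entry. Hypothetical usage:
    inline void example_out_variants(at::Tensor & out, const at::Tensor & self) {
        c10::DispatchKeySet full(c10::DispatchKeySet::FULL);
        neg_out(full, out, self);   // destination-first spelling
        neg_outf(full, self, out);  // schema-order spelling, same operator
    }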
+    // aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & negative_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::negative_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & negative_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::negative_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
+    inline at::Tensor repeat(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef repeats) {
+        return at::_ops::repeat::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(repeats));
+    }
+
+    // aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
+    inline at::Tensor repeat_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats) {
+        return at::_ops::repeat::redispatch(dispatchKeySet, self, repeats);
+    }
+
+    // aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor
+    inline at::Tensor repeat_interleave(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional<int64_t> output_size=c10::nullopt) {
+        return at::_ops::repeat_interleave_Tensor::redispatch(dispatchKeySet, repeats, output_size);
+    }
+
+    // aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
+    inline at::Tensor repeat_interleave(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) {
+        return at::_ops::repeat_interleave_self_Tensor::redispatch(dispatchKeySet, self, repeats, dim, output_size);
+    }
+
+    // aten::repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor
+    inline at::Tensor repeat_interleave(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt) {
+        return at::_ops::repeat_interleave_self_int::redispatch(dispatchKeySet, self, repeats, dim, output_size);
+    }
+
+    // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+    inline at::Tensor reshape(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape) {
+        return at::_ops::reshape::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(shape));
+    }
+
+    // aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+    inline at::Tensor reshape_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef shape) {
+        return at::_ops::reshape::redispatch(dispatchKeySet, self, shape);
+    }
+
+    // aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+    inline at::Tensor _reshape_alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+        return at::_ops::_reshape_alias::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride));
+    }
+
+    // aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+    inline at::Tensor _reshape_alias_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+        return at::_ops::_reshape_alias::redispatch(dispatchKeySet, self, size, stride);
+    }
+
+    // aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor
+    inline at::Tensor _mkldnn_reshape(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape) {
+        return at::_ops::_mkldnn_reshape::redispatch(dispatchKeySet, self, shape);
+    }
+
+    // aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
+    inline at::Tensor reshape_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::reshape_as::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::round(Tensor self) -> Tensor
+    inline at::Tensor round(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::round::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::round_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & round_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::round_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & round_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::round_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & round_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::round_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::round.decimals(Tensor self, *, int decimals) -> Tensor
+    inline at::Tensor round(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals) {
+        return at::_ops::round_decimals::redispatch(dispatchKeySet, self, decimals);
+    }
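    // [Editorial sketch] Ops whose schema takes SymInt[] (repeat, reshape,
    // _reshape_alias, ...) come in two forms: a plain at::IntArrayRef
    // overload that widens through c10::fromIntArrayRef, and a *_symint
    // overload forwarding c10::SymIntArrayRef untouched. Hypothetical usage:
    inline at::Tensor example_reshape(const at::Tensor & self) {
        c10::DispatchKeySet full(c10::DispatchKeySet::FULL);
        return reshape(full, self, {-1});  // concrete ints, widened to SymInt
    }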
+    // aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
+    inline at::Tensor & round_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t decimals) {
+        return at::_ops::round__decimals::redispatch(dispatchKeySet, self, decimals);
+    }
+
+    // aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & round_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t decimals) {
+        return at::_ops::round_decimals_out::redispatch(dispatchKeySet, self, decimals, out);
+    }
+
+    // aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & round_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) {
+        return at::_ops::round_decimals_out::redispatch(dispatchKeySet, self, decimals, out);
+    }
+
+    // aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
+    inline at::Tensor rrelu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::rrelu::redispatch(dispatchKeySet, self, lower, upper, training, generator);
+    }
+
+    // aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
+    inline at::Tensor & rrelu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::rrelu_::redispatch(dispatchKeySet, self, lower, upper, training, generator);
+    }
+
+    // aten::relu(Tensor self) -> Tensor
+    inline at::Tensor relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::relu::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::relu_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::relu_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::relu6(Tensor self) -> Tensor
+    inline at::Tensor relu6(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::relu6::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::relu6_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & relu6_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::relu6_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::prelu(Tensor self, Tensor weight) -> Tensor
+    inline at::Tensor prelu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight) {
+        return at::_ops::prelu::redispatch(dispatchKeySet, self, weight);
+    }
+
+    // aten::prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> prelu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
+        return at::_ops::prelu_backward::redispatch(dispatchKeySet, grad_output, self, weight);
+    }
+
+    // aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & gelu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view approximate="none") {
+        return at::_ops::gelu_out::redispatch(dispatchKeySet, self, approximate, out);
+    }
+
+    // aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & gelu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate, at::Tensor & out) {
+        return at::_ops::gelu_out::redispatch(dispatchKeySet, self, approximate, out);
+    }
+
+    // aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)
+    inline at::Tensor & gelu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::string_view approximate="none") {
+        return at::_ops::gelu_::redispatch(dispatchKeySet, self, approximate);
+    }
+
+    // aten::gelu(Tensor self, *, str approximate='none') -> Tensor
+    inline at::Tensor gelu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view approximate="none") {
+        return at::_ops::gelu::redispatch(dispatchKeySet, self, approximate);
+    }
+
+    // aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & gelu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") {
+        return at::_ops::gelu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, approximate, grad_input);
+    }
+
+    // aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & gelu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input) {
+        return at::_ops::gelu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, approximate, grad_input);
+    }
+
+    // aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor
+    inline at::Tensor gelu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none") {
+        return at::_ops::gelu_backward::redispatch(dispatchKeySet, grad_output, self, approximate);
+    }
+
+    // aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor
+    inline at::Tensor infinitely_differentiable_gelu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self) {
+        return at::_ops::infinitely_differentiable_gelu_backward::redispatch(dispatchKeySet, grad, self);
+    }
+
+    // aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & hardshrink_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) {
+        return at::_ops::hardshrink_out::redispatch(dispatchKeySet, self, lambd, out);
+    }
+
+    // aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & hardshrink_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) {
+        return at::_ops::hardshrink_out::redispatch(dispatchKeySet, self, lambd, out);
+    }
+
+    // aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
+    inline at::Tensor hardshrink(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd=0.5) {
+        return at::_ops::hardshrink::redispatch(dispatchKeySet, self, lambd);
+    }
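    // [Editorial sketch] gelu threads a string `approximate` flag ("none" or
    // "tanh") through the functional, in-place, out, and backward variants.
    // Hypothetical usage:
    inline at::Tensor example_gelu(const at::Tensor & self) {
        c10::DispatchKeySet full(c10::DispatchKeySet::FULL);
        return gelu(full, self, "tanh");  // tanh approximation; default is "none"
    }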
+    // aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & hardshrink_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
+        return at::_ops::hardshrink_backward_grad_input::redispatch(dispatchKeySet, grad_out, self, lambd, grad_input);
+    }
+
+    // aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & hardshrink_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) {
+        return at::_ops::hardshrink_backward_grad_input::redispatch(dispatchKeySet, grad_out, self, lambd, grad_input);
+    }
+
+    // aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
+    inline at::Tensor hardshrink_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
+        return at::_ops::hardshrink_backward::redispatch(dispatchKeySet, grad_out, self, lambd);
+    }
+
+    // aten::rsqrt(Tensor self) -> Tensor
+    inline at::Tensor rsqrt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::rsqrt::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & rsqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::rsqrt_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & rsqrt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::rsqrt_out::redispatch(dispatchKeySet, self, out);
+    }
+    // aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & rsqrt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::rsqrt_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
+    inline at::Tensor select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, int64_t index) {
+        return at::_ops::select_Dimname::redispatch(dispatchKeySet, self, dim, index);
+    }
+
+    // aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)
+    inline at::Tensor select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t index) {
+        return at::_ops::select_int::redispatch(dispatchKeySet, self, dim, index);
+    }
+
+    // aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, int index) -> Tensor
+    inline at::Tensor select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
+        return at::_ops::select_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), dim, index);
+    }
+
+    // aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, int index) -> Tensor
+    inline at::Tensor select_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t index) {
+        return at::_ops::select_backward::redispatch(dispatchKeySet, grad_output, input_sizes, dim, index);
+    }
+
+    // aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, int index) -> Tensor
+    inline at::Tensor _nested_select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, int64_t index) {
+        return at::_ops::_nested_select_backward::redispatch(dispatchKeySet, grad_output, self, dim, index);
+    }
+
+    // aten::selu(Tensor self) -> Tensor
+    inline at::Tensor selu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::selu::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::selu_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & selu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::selu_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor
+    inline at::Tensor celu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha=1.0) {
+        return at::_ops::celu::redispatch(dispatchKeySet, self, alpha);
+    }
+
+    // aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)
+    inline at::Tensor & celu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha=1.0) {
+        return at::_ops::celu_::redispatch(dispatchKeySet, self, alpha);
+    }
+
+    // aten::silu(Tensor self) -> Tensor
+    inline at::Tensor silu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::silu::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::silu_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & silu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::silu_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & silu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::silu_out::redispatch(dispatchKeySet, self, out);
+    }
+    // aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & silu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::silu_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & silu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
+        return at::_ops::silu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input);
+    }
+
+    // aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & silu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
+        return at::_ops::silu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input);
+    }
+
+    // aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor
+    inline at::Tensor silu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
+        return at::_ops::silu_backward::redispatch(dispatchKeySet, grad_output, self);
+    }
+
+    // aten::mish(Tensor self) -> Tensor
+    inline at::Tensor mish(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::mish::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::mish_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & mish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::mish_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mish_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::mish_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & mish_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::mish_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor
+    inline at::Tensor mish_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
+        return at::_ops::mish_backward::redispatch(dispatchKeySet, grad_output, self);
+    }
+
+    // aten::sigmoid(Tensor self) -> Tensor
+    inline at::Tensor sigmoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::sigmoid::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & sigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::sigmoid_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sigmoid_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::sigmoid_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sigmoid_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::sigmoid_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::logit(Tensor self, float? eps=None) -> Tensor
+    inline at::Tensor logit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
+        return at::_ops::logit::redispatch(dispatchKeySet, self, eps);
+    }
+
+    // aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
+    inline at::Tensor & logit_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
+        return at::_ops::logit_::redispatch(dispatchKeySet, self, eps);
+    }
+
+    // aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
+        return at::_ops::logit_out::redispatch(dispatchKeySet, self, eps, out);
+    }
+
+    // aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & logit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) {
+        return at::_ops::logit_out::redispatch(dispatchKeySet, self, eps, out);
+    }
+
+    // aten::sin(Tensor self) -> Tensor
+    inline at::Tensor sin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::sin::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sin_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & sin_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::sin_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::sin_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::sin_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::sinc(Tensor self) -> Tensor
+    inline at::Tensor sinc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::sinc::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sinc_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & sinc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::sinc_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sinc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::sinc_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sinc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::sinc_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::sinh(Tensor self) -> Tensor
+    inline at::Tensor sinh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::sinh::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sinh_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & sinh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::sinh_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sinh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::sinh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sinh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::sinh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::detach(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor detach(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::detach::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::detach_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & detach_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::detach_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::size.int(Tensor self, int dim) -> int
+    inline int64_t __dispatch_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+        return at::_ops::size_int::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::size.Dimname(Tensor self, Dimname dim) -> int
+    inline int64_t size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
+        return at::_ops::size_Dimname::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+    inline at::Tensor slice(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+        return at::_ops::slice_Tensor::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+    }
+
+    // aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+    inline at::Tensor slice_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+        return at::_ops::slice_Tensor::redispatch(dispatchKeySet, self, dim, start, end, step);
+    }
+
+    // aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
+    inline at::Tensor slice_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
+        return at::_ops::slice_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), dim, start, end, step);
+    }
+
+    // aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor
+    inline at::Tensor slice_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
+        return at::_ops::slice_backward::redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step);
+    }
+
+    // aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+    inline at::Tensor slice_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+        return at::_ops::slice_scatter::redispatch(dispatchKeySet, self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+    }
+
+    // aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+    inline at::Tensor slice_scatter_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+        return at::_ops::slice_scatter::redispatch(dispatchKeySet, self, src, dim, start, end, step);
+    }
+
+    // aten::select_scatter(Tensor self, Tensor src, int dim, int index) -> Tensor
+    inline at::Tensor select_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
+        return at::_ops::select_scatter::redispatch(dispatchKeySet, self, src, dim, index);
+    }
+
+    // aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
+    inline at::Tensor diagonal_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+        return at::_ops::diagonal_scatter::redispatch(dispatchKeySet, self, src, offset, dim1, dim2);
+    }
+
+    // aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+    inline at::Tensor as_strided_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_scatter::redispatch(dispatchKeySet, self, src, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+    }
+
+    // aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+    inline at::Tensor as_strided_scatter_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_scatter::redispatch(dispatchKeySet, self, src, size, stride, storage_offset);
+    }
+
+    // aten::smm(Tensor self, Tensor mat2) -> Tensor
+    inline at::Tensor smm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2) {
+        return at::_ops::smm::redispatch(dispatchKeySet, self, mat2);
+    }
+
+    // aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::softmax_int::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+    }
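    // [Editorial sketch] The *_scatter ops above are functional counterparts
    // of the matching view ops: they return a copy of `self` with `src`
    // written into the viewed region instead of mutating in place.
    // Hypothetical usage:
    inline at::Tensor example_slice_scatter(const at::Tensor & self, const at::Tensor & src) {
        c10::DispatchKeySet full(c10::DispatchKeySet::FULL);
        // Roughly: auto r = self.clone(); r.slice(0, 0, 2).copy_(src); return r;
        return slice_scatter(full, self, src, /*dim=*/0, /*start=*/0, /*end=*/2);
    }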
+    // aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::softmax_int_out::redispatch(dispatchKeySet, self, dim, dtype, out);
+    }
+
+    // aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+    inline at::Tensor _softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
+        return at::_ops::_softmax::redispatch(dispatchKeySet, self, dim, half_to_float);
+    }
+
+    // aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
+        return at::_ops::_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out);
+    }
+
+    // aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
+        return at::_ops::_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out);
+    }
+
+    // aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
+    inline at::Tensor _softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
+        return at::_ops::_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype);
+    }
+
+    // aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
+        return at::_ops::_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, grad_input);
+    }
+    // aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input) {
+        return at::_ops::_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, input_dtype, grad_input);
+    }
+
+    // aten::unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
+    inline ::std::vector<at::Tensor> unsafe_split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+        return at::_ops::unsafe_split_Tensor::redispatch(dispatchKeySet, self, split_size, dim);
+    }
+
+    // aten::split.Tensor(Tensor(a -> *) self, int split_size, int dim=0) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+        return at::_ops::split_Tensor::redispatch(dispatchKeySet, self, split_size, dim);
+    }
+
+    // aten::split.sizes(Tensor(a -> *) self, int[] split_size, int dim=0) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> split(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_size, int64_t dim=0) {
+        return at::_ops::split_sizes::redispatch(dispatchKeySet, self, split_size, dim);
+    }
+
+    // aten::unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
+    inline ::std::vector<at::Tensor> unsafe_split_with_sizes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+        return at::_ops::unsafe_split_with_sizes::redispatch(dispatchKeySet, self, split_sizes, dim);
+    }
+
+    // aten::split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> split_with_sizes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+        return at::_ops::split_with_sizes::redispatch(dispatchKeySet, self, split_sizes, dim);
+    }
+
+    // aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> hsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
+        return at::_ops::hsplit_int::redispatch(dispatchKeySet, self, sections);
+    }
+
+    // aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> hsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
+        return at::_ops::hsplit_array::redispatch(dispatchKeySet, self, indices);
+    }
+
+    // aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> vsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
+        return at::_ops::vsplit_int::redispatch(dispatchKeySet, self, sections);
+    }
+
+    // aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> vsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
+        return at::_ops::vsplit_array::redispatch(dispatchKeySet, self, indices);
+    }
+
+    // aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> dsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sections) {
+        return at::_ops::dsplit_int::redispatch(dispatchKeySet, self, sections);
+    }
+
+    // aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+    inline ::std::vector<at::Tensor> dsplit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef indices) {
+        return at::_ops::dsplit_array::redispatch(dispatchKeySet, self, indices);
+    }
+
+    // aten::squeeze(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::squeeze::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
+    inline at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+        return at::_ops::squeeze_dim::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
+    inline at::Tensor squeeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) {
+        return at::_ops::squeeze_dimname::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::squeeze_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
+    inline at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {
+        return at::_ops::squeeze__dim::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
+    inline at::Tensor & squeeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim) {
+        return at::_ops::squeeze__dimname::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+    inline at::Tensor sspaddmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+        return at::_ops::sspaddmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha);
+    }
+
+    // aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sspaddmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+        return at::_ops::sspaddmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
+    }
+
+    // aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sspaddmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
+        return at::_ops::sspaddmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out);
+    }
+
+    // aten::stack(Tensor[] tensors, int dim=0) -> Tensor
+    inline at::Tensor stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) {
+        return at::_ops::stack::redispatch(dispatchKeySet, tensors, dim);
+    }
+
+    // aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) {
+        return at::_ops::stack_out::redispatch(dispatchKeySet, tensors, dim, out);
+    }
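    // [Editorial sketch] stack concatenates along a new leading dimension and
    // takes an at::TensorList, which a braced list of Tensors converts to;
    // the split family above returns ::std::vector<at::Tensor> views.
    // Hypothetical usage:
    inline at::Tensor example_stack(const at::Tensor & a, const at::Tensor & b) {
        c10::DispatchKeySet full(c10::DispatchKeySet::FULL);
        return stack(full, {a, b}, /*dim=*/0);  // result shape: [2, ...a.sizes()]
    }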
+ inline at::Tensor & stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::stack_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::_stack(Tensor[] tensors, int dim=0) -> Tensor + inline at::Tensor _stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim=0) { + return at::_ops::_stack::redispatch(dispatchKeySet, tensors, dim); + } + + // aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::_stack_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::_stack_out::redispatch(dispatchKeySet, tensors, dim, out); + } + + // aten::hstack(Tensor[] tensors) -> Tensor + inline at::Tensor hstack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::hstack::redispatch(dispatchKeySet, tensors); + } + + // aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hstack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::hstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hstack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::hstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::vstack(Tensor[] tensors) -> Tensor + inline at::Tensor vstack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::vstack::redispatch(dispatchKeySet, tensors); + } + + // aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & vstack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::vstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & vstack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::vstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::dstack(Tensor[] tensors) -> Tensor + inline at::Tensor dstack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::dstack::redispatch(dispatchKeySet, tensors); + } + + // aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dstack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::dstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dstack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::dstack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? 
return_complex=None) -> Tensor + inline at::Tensor stft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt) { + return at::_ops::stft::redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex); + } + + // aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor + inline at::Tensor stft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt) { + return at::_ops::stft_center::redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex); + } + + // aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor + inline at::Tensor istft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool center=true, bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional length=c10::nullopt, bool return_complex=false) { + return at::_ops::istft::redispatch(dispatchKeySet, self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex); + } + + // aten::stride.int(Tensor self, int dim) -> int + inline int64_t __dispatch_stride(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) { + return at::_ops::stride_int::redispatch(dispatchKeySet, self, dim); + } + + // aten::stride.Dimname(Tensor self, Dimname dim) -> int + inline int64_t stride(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::stride_Dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dtype=c10::nullopt) { + return at::_ops::sum::redispatch(dispatchKeySet, self, dtype); + } + + // aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::sum_dim_IntList::redispatch(dispatchKeySet, self, dim, keepdim, dtype); + } + + // aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) { + return at::_ops::sum_dim_DimnameList::redispatch(dispatchKeySet, self, dim, keepdim, dtype); + } + + // aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
+
+    // aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::sum_IntList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::sum_IntList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::sum_DimnameList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::sum_DimnameList_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
+    inline at::Tensor _nested_sum_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) {
+        return at::_ops::_nested_sum_backward::redispatch(dispatchKeySet, grad, self, dim, keepdim);
+    }
+
+    // aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor nansum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::nansum::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nansum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::nansum_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nansum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::nansum_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::sum_to_size(Tensor self, int[] size) -> Tensor
+    inline at::Tensor sum_to_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::sum_to_size::redispatch(dispatchKeySet, self, size);
+    }
+
+    // aten::sqrt(Tensor self) -> Tensor
+    inline at::Tensor sqrt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::sqrt::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & sqrt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::sqrt_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sqrt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::sqrt_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sqrt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::sqrt_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::square(Tensor self) -> Tensor
+    inline at::Tensor square(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::square::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::square_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & square_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::square_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & square_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::square_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & square_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::square_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::std(Tensor self, bool unbiased=True) -> Tensor
+    inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased=true) {
+        return at::_ops::std::redispatch(dispatchKeySet, self, unbiased);
+    }
+
+    // aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+    inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::std_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
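The _out/_outf pairs that recur throughout this header differ only in argument order: the _out spelling takes the destination tensor first, while _outf keeps the schema order with out trailing. A small sketch against the public at:: surface (illustrative only):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({3});
  at::Tensor out = at::empty_like(x);
  at::sqrt_out(out, x);   // out-first spelling
  at::sqrt_outf(x, out);  // schema-order spelling, same operator underneath
  return 0;
}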
+
+    // aten::std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+    inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::std_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
+
+    // aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased=true) {
+        return at::_ops::std_mean::redispatch(dispatchKeySet, self, unbiased);
+    }
+
+    // aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::std_mean_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
+
+    // aten::std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::std_mean_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
+
+    // aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::std_mean_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
+
+    // aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> std_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::std_mean_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
+
+    // aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::std_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
+        return at::_ops::std_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::std_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
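The unbiased and correction overloads encode the same statistics knob: unbiased=true is Bessel's correction (divide by N-1), which the newer correction schema expresses as an explicit integer. A sketch of the public calls (illustrative; correction is int? in the generation of the API vendored here):

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor x = at::randn({4, 5});
  // Legacy flag: unbiased=true divides by N-1.
  at::Tensor sd_legacy = at::std(x, /*dim=*/{0}, /*unbiased=*/true, /*keepdim=*/false);
  // Explicit correction: 1 reproduces unbiased=true, 0 the population estimate.
  at::Tensor sd_corr = at::std(x, /*dim=*/{0},
                               /*correction=*/c10::optional<int64_t>(1),
                               /*keepdim=*/false);
  // Fused variant computing (std, mean) in one pass.
  at::Tensor sd, mean;
  std::tie(sd, mean) = at::std_mean(x, /*dim=*/{0},
                                    /*correction=*/c10::optional<int64_t>(0));
  return 0;
}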
+
+    // aten::std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
+        return at::_ops::std_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
+
+    // aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+    inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::std_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
+
+    // aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::std_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
+        return at::_ops::std_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+    inline at::Tensor std(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::std_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
+
+    // aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::std_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
+
+    // aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & std_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
+        return at::_ops::std_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
+
+    // aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::prod::redispatch(dispatchKeySet, self, dtype);
+    }
+
+    // aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::prod_dim_int::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::prod_int_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::prod_int_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::prod_dim_Dimname::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::prod_Dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::prod_Dimname_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out);
+    }
+
+    // aten::t(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::t::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::t_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & t_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::t_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::tan(Tensor self) -> Tensor
+    inline at::Tensor tan(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::tan::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::tan_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & tan_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::tan_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::tan_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::tan_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::tanh(Tensor self) -> Tensor
+    inline at::Tensor tanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::tanh::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::tanh_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & tanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::tanh_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::tanh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::tanh_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
+    inline at::Tensor tensordot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
+        return at::_ops::tensordot::redispatch(dispatchKeySet, self, other, dims_self, dims_other);
+    }
+
+    // aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tensordot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
+        return at::_ops::tensordot_out::redispatch(dispatchKeySet, self, other, dims_self, dims_other, out);
+    }
+
+    // aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tensordot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other, at::Tensor & out) {
+        return at::_ops::tensordot_out::redispatch(dispatchKeySet, self, other, dims_self, dims_other, out);
+    }
+
+    // aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
+    inline at::Tensor threshold(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
+        return at::_ops::threshold::redispatch(dispatchKeySet, self, threshold, value);
+    }
+
+    // aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
+    inline at::Tensor & threshold_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
+        return at::_ops::threshold_::redispatch(dispatchKeySet, self, threshold, value);
+    }
+
+    // aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & threshold_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
+        return at::_ops::threshold_out::redispatch(dispatchKeySet, self, threshold, value, out);
+    }
+
+    // aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & threshold_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out) {
+        return at::_ops::threshold_out::redispatch(dispatchKeySet, self, threshold, value, out);
+    }
+
+    // aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & threshold_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
+        return at::_ops::threshold_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, threshold, grad_input);
+    }
+
+    // aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & threshold_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input) {
+        return at::_ops::threshold_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, threshold, grad_input);
+    }
+
+    // aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
+    inline at::Tensor threshold_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
+        return at::_ops::threshold_backward::redispatch(dispatchKeySet, grad_output, self, threshold);
+    }
+
+    // aten::tile(Tensor self, int[] dims) -> Tensor
+    inline at::Tensor tile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
+        return at::_ops::tile::redispatch(dispatchKeySet, self, dims);
+    }
+
+    // aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
+    inline at::Tensor transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+        return at::_ops::transpose_int::redispatch(dispatchKeySet, self, dim0, dim1);
+    }
+
+    // aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
+    inline at::Tensor transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
+        return at::_ops::transpose_Dimname::redispatch(dispatchKeySet, self, dim0, dim1);
+    }
+
+    // aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
+    inline at::Tensor _mkldnn_transpose(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+        return at::_ops::_mkldnn_transpose::redispatch(dispatchKeySet, self, dim0, dim1);
+    }
+
+    // aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+    inline at::Tensor & transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
+        return at::_ops::transpose_::redispatch(dispatchKeySet, self, dim0, dim1);
+    }
+
+    // aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+    inline at::Tensor & _mkldnn_transpose_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) {
+        return at::_ops::_mkldnn_transpose_::redispatch(dispatchKeySet, self, dim0, dim1);
+    }
+
+    // aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
+    inline at::Tensor one_hot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_classes=-1) {
+        return at::_ops::one_hot::redispatch(dispatchKeySet, self, num_classes);
+    }
+
+    // aten::flip(Tensor self, int[] dims) -> Tensor
+    inline at::Tensor flip(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
+        return at::_ops::flip::redispatch(dispatchKeySet, self, dims);
+    }
+
+    // aten::fliplr(Tensor self) -> Tensor
+    inline at::Tensor fliplr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::fliplr::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::flipud(Tensor self) -> Tensor
+    inline at::Tensor flipud(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::flipud::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
+    inline at::Tensor roll(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) {
+        return at::_ops::roll::redispatch(dispatchKeySet, self, shifts, dims);
+    }
+
+    // aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
+    inline at::Tensor rot90(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k=1, at::IntArrayRef dims={0,1}) {
+        return at::_ops::rot90::redispatch(dispatchKeySet, self, k, dims);
+    }
+
+    // aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
+    inline at::Tensor trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
+        return at::_ops::trapezoid_x::redispatch(dispatchKeySet, y, x, dim);
+    }
+
+    // aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor
+    inline at::Tensor trapezoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) {
+        return at::_ops::trapezoid_dx::redispatch(dispatchKeySet, y, dx, dim);
+    }
+
+    // aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
+    inline at::Tensor trapz(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) {
+        return at::_ops::trapz_x::redispatch(dispatchKeySet, y, x, dim);
+    }
+
+    // aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
+    inline at::Tensor trapz(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, double dx=1, int64_t dim=-1) {
+        return at::_ops::trapz_dx::redispatch(dispatchKeySet, y, dx, dim);
+    }
+
+    // aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
+        return at::_ops::_transform_bias_rescale_qkv::redispatch(dispatchKeySet, qkv, qkv_bias, num_heads);
+    }
+
+    // aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor
+    inline at::Tensor _nested_tensor_from_mask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check=true) {
+        return at::_ops::_nested_tensor_from_mask::redispatch(dispatchKeySet, t, mask, mask_check);
+    }
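A brief sketch of the dimension-manipulation operators in this block, using the public at:: entry points (illustrative, not part of the header):

#include <ATen/ATen.h>

int main() {
  at::Tensor m = at::arange(6).reshape({2, 3});
  at::Tensor rows_reversed = at::flip(m, /*dims=*/{0});
  at::Tensor shifted = at::roll(m, /*shifts=*/{1}, /*dims=*/{1});  // cyclic shift
  at::Tensor rotated = at::rot90(m, /*k=*/1, /*dims=*/{0, 1});     // one quarter turn
  return 0;
}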
+
+    // aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool
+    inline bool _nested_tensor_from_mask_left_aligned(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask) {
+        return at::_ops::_nested_tensor_from_mask_left_aligned::redispatch(dispatchKeySet, t, mask);
+    }
+
+    // aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor
+    inline at::Tensor _nested_from_padded(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) {
+        return at::_ops::_nested_from_padded::redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213);
+    }
+
+    // aten::_nested_tensor_size(Tensor self) -> Tensor
+    inline at::Tensor _nested_tensor_size(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_nested_tensor_size::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_nested_tensor_strides(Tensor self) -> Tensor
+    inline at::Tensor _nested_tensor_strides(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_nested_tensor_strides::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_nested_tensor_offsets(Tensor self) -> int[]
+    inline ::std::vector<int64_t> _nested_tensor_offsets(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_nested_tensor_offsets::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor
+    inline at::Tensor _nested_from_padded_and_nested_example(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example) {
+        return at::_ops::_nested_from_padded_and_nested_example::redispatch(dispatchKeySet, padded, nt_example);
+    }
+
+    // aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)
+    inline at::Tensor _nested_view_from_buffer(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
+        return at::_ops::_nested_view_from_buffer::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets);
+    }
+
+    // aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor
+    inline at::Tensor _nested_view_from_buffer_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) {
+        return at::_ops::_nested_view_from_buffer_copy::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets);
+    }
+
+    // aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
+    inline at::Tensor _trilinear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1) {
+        return at::_ops::_trilinear::redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
+    }
+
+    // aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
+    inline at::Tensor triplet_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin=1.0, double p=2, double eps=1e-06, bool swap=false, int64_t reduction=at::Reduction::Mean) {
+        return at::_ops::triplet_margin_loss::redispatch(dispatchKeySet, anchor, positive, negative, margin, p, eps, swap, reduction);
+    }
+
+    // aten::trunc(Tensor self) -> Tensor
+    inline at::Tensor trunc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::trunc::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::trunc_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & trunc_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::trunc_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & trunc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::trunc_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & trunc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::trunc_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::fix(Tensor self) -> Tensor
+    inline at::Tensor fix(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::fix::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::fix_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & fix_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::fix_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fix_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::fix_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fix_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::fix_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::type_as(Tensor self, Tensor other) -> Tensor
+    inline at::Tensor type_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::type_as::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
+    inline bool _has_compatible_shallow_copy_type(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & from) {
+        return at::_ops::_has_compatible_shallow_copy_type::redispatch(dispatchKeySet, self, from);
+    }
+
+    // aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _unique(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted=true, bool return_inverse=false) {
+        return at::_ops::_unique::redispatch(dispatchKeySet, self, sorted, return_inverse);
+    }
+
+    // aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
+        return at::_ops::unique_dim::redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts);
+    }
+
+    // aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional<int64_t> dim=c10::nullopt) {
+        return at::_ops::unique_consecutive::redispatch(dispatchKeySet, self, return_inverse, return_counts, dim);
+    }
+
+    // aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false) {
+        return at::_ops::unique_dim_consecutive::redispatch(dispatchKeySet, self, dim, return_inverse, return_counts);
+    }
+
+    // aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) {
+        return at::_ops::_unique2::redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts);
+    }
+
+    // aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
+    inline at::Tensor _unsafe_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::_unsafe_view::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size));
+    }
+
+    // aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor
+    inline at::Tensor _unsafe_view_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
+        return at::_ops::_unsafe_view::redispatch(dispatchKeySet, self, size);
+    }
+
+    // aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
+    inline at::Tensor unsqueeze(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+        return at::_ops::unsqueeze::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
+    inline at::Tensor & unsqueeze_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim) {
+        return at::_ops::unsqueeze_::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor
+    inline at::Tensor vander(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt, bool increasing=false) {
+        return at::_ops::vander::redispatch(dispatchKeySet, x, N, increasing);
+    }
+
+    // aten::var(Tensor self, bool unbiased=True) -> Tensor
+    inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased=true) {
+        return at::_ops::var::redispatch(dispatchKeySet, self, unbiased);
+    }
+
+    // aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+    inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::var_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
+
+    // aten::var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+    inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::var_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
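The unique* variants differ in whether they sort, deduplicate only consecutive runs, or operate along a dimension. A sketch of the run-deduplicating form (illustrative; the input values are arbitrary):

#include <ATen/ATen.h>
#include <tuple>
#include <vector>

int main() {
  std::vector<int64_t> data = {1, 1, 2, 2, 3, 1};
  at::Tensor v = at::from_blob(data.data(), {6}, at::kLong).clone();
  at::Tensor vals, inverse, counts;
  std::tie(vals, inverse, counts) =
      at::unique_consecutive(v, /*return_inverse=*/true, /*return_counts=*/true);
  // vals = [1, 2, 3, 1], counts = [2, 2, 1, 1]: only adjacent repeats collapse.
  return 0;
}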
+
+    // aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::var_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out) {
+        return at::_ops::var_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::var_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
+
+    // aten::var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
+        return at::_ops::var_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
+
+    // aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+    inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::var_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
+
+    // aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::var_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out) {
+        return at::_ops::var_names_out::redispatch(dispatchKeySet, self, dim, unbiased, keepdim, out);
+    }
+
+    // aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+    inline at::Tensor var(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::var_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
+
+    // aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::var_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
+
+    // aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & var_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out) {
+        return at::_ops::var_correction_names_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out);
+    }
+
+    // aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased=true) {
+        return at::_ops::var_mean::redispatch(dispatchKeySet, self, unbiased);
+    }
+
+    // aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::var_mean_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
+
+    // aten::var_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::var_mean_correction::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
+
+    // aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased=true, bool keepdim=false) {
+        return at::_ops::var_mean_names_dim::redispatch(dispatchKeySet, self, dim, unbiased, keepdim);
+    }
+
+    // aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> var_mean(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim=false) {
+        return at::_ops::var_mean_correction_names::redispatch(dispatchKeySet, self, dim, correction, keepdim);
+    }
+
+    // aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
+    inline at::Tensor view_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::view_as::redispatch(dispatchKeySet, self, other);
+    }
+
+    // aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
+    inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::where_self::redispatch(dispatchKeySet, condition, self, other);
+    }
+
+    // aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & where_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::where_self_out::redispatch(dispatchKeySet, condition, self, other, out);
+    }
+
+    // aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & where_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::where_self_out::redispatch(dispatchKeySet, condition, self, other, out);
+    }
+
+    // aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor
+    inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::where_ScalarSelf::redispatch(dispatchKeySet, condition, self, other);
+    }
+
+    // aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor
+    inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::where_ScalarOther::redispatch(dispatchKeySet, condition, self, other);
+    }
+
+    // aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor
+    inline at::Tensor where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
+        return at::_ops::where_Scalar::redispatch(dispatchKeySet, condition, self, other);
+    }
+
+    // aten::where(Tensor condition) -> Tensor[]
+    inline ::std::vector<at::Tensor> where(c10::DispatchKeySet dispatchKeySet, const at::Tensor & condition) {
+        return at::_ops::where::redispatch(dispatchKeySet, condition);
+    }
+
+    // aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor
+    inline at::Tensor norm_except_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, int64_t pow=2, int64_t dim=0) {
+        return at::_ops::norm_except_dim::redispatch(dispatchKeySet, v, pow, dim);
+    }
+
+    // aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor
+    inline at::Tensor _weight_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) {
+        return at::_ops::_weight_norm::redispatch(dispatchKeySet, v, g, dim);
+    }
+
+    // aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) {
+        return at::_ops::_weight_norm_interface::redispatch(dispatchKeySet, v, g, dim);
+    }
+
+    // aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
+        return at::_ops::_weight_norm_interface_backward::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim);
+    }
+
+    // aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
+        return at::_ops::_weight_norm_differentiable_backward::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim);
+    }
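where is heavily overloaded above: a ternary select over tensors or scalars, plus a single-argument form that returns nonzero coordinates. A sketch against the public API (illustrative):

#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor cond = at::rand({4}) > 0.5;  // boolean mask
  at::Tensor a = at::ones({4});
  at::Tensor b = at::zeros({4});
  at::Tensor mixed = at::where(cond, a, b);       // pick from a where true, else b
  std::vector<at::Tensor> idx = at::where(cond);  // coordinates, one tensor per dim
  return 0;
}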
+
+    // aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+        return at::_ops::zeros_names::redispatch(dispatchKeySet, size, names, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::zeros_names::redispatch(dispatchKeySet, size, names, dtype, layout, device, pin_memory);
+    }
+
+    // aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor _efficientzerotensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) {
+        return at::_ops::_efficientzerotensor::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor _efficientzerotensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::_efficientzerotensor::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
+    }
+
+    // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options={}) {
+        return at::_ops::zeros::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor zeros(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::zeros::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), dtype, layout, device, pin_memory);
+    }
+
+    // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor zeros_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::TensorOptions options={}) {
+        return at::_ops::zeros::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor zeros_symint(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::zeros::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory);
+    }
+
+    // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & zeros_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) {
+        return at::_ops::zeros_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & zeros_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) {
+        return at::_ops::zeros_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & zeros_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size) {
+        return at::_ops::zeros_out::redispatch(dispatchKeySet, size, out);
+    }
+
+    // aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & zeros_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::Tensor & out) {
+        return at::_ops::zeros_out::redispatch(dispatchKeySet, size, out);
+    }
+
+    // aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor zeros_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::zeros_like::redispatch(dispatchKeySet, self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+    }
+
+    // aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor zeros_like(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+        return at::_ops::zeros_like::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, memory_format);
+    }
+
+    // aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
+    inline at::Tensor _standard_gamma_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output) {
+        return at::_ops::_standard_gamma_grad::redispatch(dispatchKeySet, self, output);
+    }
+
+    // aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor
+    inline at::Tensor _standard_gamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::_standard_gamma::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
+    inline at::Tensor _dirichlet_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
+        return at::_ops::_dirichlet_grad::redispatch(dispatchKeySet, x, alpha, total);
+    }
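Each factory above comes in two C++ spellings: one taking a packed at::TensorOptions, one taking the unpacked dtype/layout/device/pin_memory optionals that mirror the schema. A sketch (illustrative):

#include <ATen/ATen.h>

int main() {
  // Packed options object.
  at::Tensor z1 = at::zeros({2, 3}, at::TensorOptions().dtype(at::kHalf));
  // Unpacked optionals, matching the schema argument-for-argument.
  at::Tensor z2 = at::zeros({2, 3}, at::kHalf, c10::nullopt, c10::nullopt, c10::nullopt);
  // zeros_like inherits shape and, by default, the options of its input.
  at::Tensor z3 = at::zeros_like(z1);
  return 0;
}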
+
+    // aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor
+    inline at::Tensor _sample_dirichlet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::_sample_dirichlet::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::poisson(Tensor self, Generator? generator=None) -> Tensor
+    inline at::Tensor poisson(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::poisson::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor
+    inline at::Tensor binomial(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::binomial::redispatch(dispatchKeySet, count, prob, generator);
+    }
+
+    // aten::native_norm(Tensor self, Scalar p=2) -> Tensor
+    inline at::Tensor native_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p=2) {
+        return at::_ops::native_norm::redispatch(dispatchKeySet, self, p);
+    }
+
+    // aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor
+    inline at::Tensor native_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
+        return at::_ops::native_norm_ScalarOpt_dim_dtype::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
+    }
+
+    // aten::_sparse_sum(Tensor self) -> Tensor
+    inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_sparse_sum::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor
+    inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {
+        return at::_ops::_sparse_sum_dtype::redispatch(dispatchKeySet, self, dtype);
+    }
+
+    // aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor
+    inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim) {
+        return at::_ops::_sparse_sum_dim::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor
+    inline at::Tensor _sparse_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
+        return at::_ops::_sparse_sum_dim_dtype::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor
+    inline at::Tensor _sparse_sum_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
+        return at::_ops::_sparse_sum_backward::redispatch(dispatchKeySet, grad, self, dim);
+    }
+
+    // aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor _sparse_csr_sum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::_sparse_csr_sum_dim_dtype::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor _sparse_csr_prod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::_sparse_csr_prod_dim_dtype::redispatch(dispatchKeySet, self, dim, keepdim, dtype);
+    }
+
+    // aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor _sparse_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::_sparse_softmax_int::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor _sparse_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::_sparse_softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+    inline at::Tensor _sparse_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
+        return at::_ops::_sparse_softmax::redispatch(dispatchKeySet, self, dim, half_to_float);
+    }
+
+    // aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
+    inline at::Tensor _sparse_softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
+        return at::_ops::_sparse_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, self);
+    }
+
+    // aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor _sparse_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::_sparse_log_softmax_int::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor _sparse_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::_sparse_log_softmax_Dimname::redispatch(dispatchKeySet, self, dim, dtype);
+    }
+
+    // aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+    inline at::Tensor _sparse_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float) {
+        return at::_ops::_sparse_log_softmax::redispatch(dispatchKeySet, self, dim, half_to_float);
+    }
+
+    // aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor
+    inline at::Tensor _sparse_log_softmax_backward_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
+        return at::_ops::_sparse_log_softmax_backward_data::redispatch(dispatchKeySet, grad_output, output, dim, self);
+    }
+
+    // aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor
+    inline at::Tensor _spdiags(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout=c10::nullopt) {
+        return at::_ops::_spdiags::redispatch(dispatchKeySet, diagonals, offsets, shape, layout);
+    }
+    inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
+        return at::_ops::norm_ScalarOpt_dtype::redispatch(dispatchKeySet, self, p, dtype);
+    }
+
+    // aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
+    inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p=2) {
+        return at::_ops::norm_Scalar::redispatch(dispatchKeySet, self, p);
+    }
+
+    // aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+    inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
+        return at::_ops::norm_ScalarOpt_dim_dtype::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
+    }
+
+    // aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
+    inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::norm_ScalarOpt_dim::redispatch(dispatchKeySet, self, p, dim, keepdim);
+    }
+
+    // aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
+        return at::_ops::norm_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
+    }
+
+    // aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
+        return at::_ops::norm_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
+    }
+
+    // aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::norm_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out);
+    }
+
+    // aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::norm_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out);
+    }
+
+    // aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+    inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
+        return at::_ops::norm_names_ScalarOpt_dim_dtype::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype);
+    }
+
+    // aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
+    inline at::Tensor norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false) {
+        return at::_ops::norm_names_ScalarOpt_dim::redispatch(dispatchKeySet, self, p, dim, keepdim);
+    }
+
+    // aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
+        return at::_ops::norm_names_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
+    }
+
+    // aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out) {
+        return at::_ops::norm_names_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out);
+    }
+
+    // aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false) {
+        return at::_ops::norm_names_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out);
+    }
+
+    // aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::norm_names_out::redispatch(dispatchKeySet, self, p, dim, keepdim, out);
+    }
+
+    // aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
+    inline ::std::tuple<at::Tensor,at::Tensor> frexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::frexp_Tensor::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & mantissa, at::Tensor & exponent, const at::Tensor & self) {
+        return at::_ops::frexp_Tensor_out::redispatch(dispatchKeySet, self, mantissa, exponent);
+    }
+
+    // aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> frexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent) {
+        return at::_ops::frexp_Tensor_out::redispatch(dispatchKeySet, self, mantissa, exponent);
+    }
+
+    // aten::frobenius_norm(Tensor self) -> Tensor
+    inline at::Tensor frobenius_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::frobenius_norm::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+    inline at::Tensor frobenius_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::frobenius_norm_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & frobenius_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::frobenius_norm_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & frobenius_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::frobenius_norm_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor
+    inline at::Tensor nuclear_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim=false) {
+        return at::_ops::nuclear_norm::redispatch(dispatchKeySet, self, keepdim);
+    }
+
+    // aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nuclear_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool keepdim=false) {
+        return at::_ops::nuclear_norm_out::redispatch(dispatchKeySet, self, keepdim, out);
+    }
+
+    // aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nuclear_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool keepdim, at::Tensor & out) {
+        return at::_ops::nuclear_norm_out::redispatch(dispatchKeySet, self, keepdim, out);
+    }
+
+    // aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor
+    inline at::Tensor nuclear_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::nuclear_norm_dim::redispatch(dispatchKeySet, self, dim, keepdim);
+    }
+
+    // aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nuclear_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) {
+        return at::_ops::nuclear_norm_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
+
+    // aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & nuclear_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) {
+        return at::_ops::nuclear_norm_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out);
+    }
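+
+    // Illustrative sketch (not part of the generated header): each out=
+    // schema above is emitted twice, as an `_out` wrapper taking `out` first
+    // with defaulted trailing arguments, and an `_outf` wrapper keeping the
+    // schema's own order with `out` last and no defaults. The functional
+    // forms are reached through the public at:: API; the demo name below is
+    // hypothetical and assumes a standard libtorch build.
+    inline bool demo_frexp_roundtrip() {
+        at::Tensor x = at::randn({3, 3});
+        // frexp splits x into mantissa and exponent with
+        // x == mantissa * 2^exponent; ldexp recombines them.
+        ::std::tuple<at::Tensor,at::Tensor> parts = at::frexp(x);
+        at::Tensor rebuilt = at::ldexp(::std::get<0>(parts), ::std::get<1>(parts));
+        return at::allclose(rebuilt, x);
+    }
+
+    // aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor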
+    inline at::Tensor clone(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::clone::redispatch(dispatchKeySet, self, memory_format);
+    }
+
+    // aten::positive(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor positive(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::positive::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+    inline const at::Tensor & resize_as_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::resize_as_::redispatch(dispatchKeySet, self, the_template, memory_format);
+    }
+
+    // aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
+    inline const at::Tensor & resize_as_sparse_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) {
+        return at::_ops::resize_as_sparse_::redispatch(dispatchKeySet, self, the_template);
+    }
+
+    // aten::zero_(Tensor(a!) self) -> Tensor(a!)
+    inline at::Tensor & zero_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+        return at::_ops::zero_::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+        return at::_ops::sub_out::redispatch(dispatchKeySet, self, other, alpha, out);
+    }
+
+    // aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & sub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
+        return at::_ops::sub_out::redispatch(dispatchKeySet, self, other, alpha, out);
+    }
+
+    // aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+    inline at::Tensor sub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+        return at::_ops::sub_Tensor::redispatch(dispatchKeySet, self, other, alpha);
+    }
+
+    // aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+    inline at::Tensor & sub_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+        return at::_ops::sub__Tensor::redispatch(dispatchKeySet, self, other, alpha);
+    }
+
+    // aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+    inline at::Tensor sub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+        return at::_ops::sub_Scalar::redispatch(dispatchKeySet, self, other, alpha);
+    }
+
+    // aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+    inline at::Tensor & sub_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+        return at::_ops::sub__Scalar::redispatch(dispatchKeySet, self, other, alpha);
+    }
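+
+    // Illustrative sketch (not part of the generated header): sub computes
+    // self - alpha * other, the trailing-underscore variant mutates self in
+    // place, and the out= wrappers follow the `_out`/`_outf` convention noted
+    // earlier. Hypothetical demo name, standard at:: API assumed.
+    inline at::Tensor demo_sub_alpha() {
+        at::Tensor a = at::ones({2, 2});
+        at::Tensor b = at::full({2, 2}, 3.0f);
+        at::Tensor c = at::sub(a, b, /*alpha=*/2);   // 1 - 2*3 = -5 everywhere
+        at::Tensor out = at::empty({2, 2});
+        at::sub_out(out, a, b, /*alpha=*/2);         // same result, written into out
+        return c;
+    }
+
+    // aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)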
+ inline at::Tensor & subtract_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::subtract_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & subtract_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::subtract_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor subtract(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::subtract_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & subtract_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::subtract__Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor subtract(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::subtract_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & subtract_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::subtract__Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + inline at::Tensor rsub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Tensor::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & heaviside_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside_out::redispatch(dispatchKeySet, self, values, out); + } + + // aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & heaviside_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values, at::Tensor & out) { + return at::_ops::heaviside_out::redispatch(dispatchKeySet, self, values, out); + } + + // aten::heaviside(Tensor self, Tensor values) -> Tensor + inline at::Tensor heaviside(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside::redispatch(dispatchKeySet, self, values); + } + + // aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) 
+ inline at::Tensor & heaviside_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & values) { + return at::_ops::heaviside_::redispatch(dispatchKeySet, self, values); + } + + // aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + inline at::Tensor rsub(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Scalar::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor _sparse_addmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::_sparse_addmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_sampled_addmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sparse_sampled_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_sampled_addmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sparse_sampled_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor sparse_sampled_addmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::sparse_sampled_addmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
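+
+    // Illustrative sketch (not part of the generated header): addmm computes
+    // beta * self + alpha * (mat1 @ mat2); sparse_sampled_addmm above does the
+    // same but only at the nonzero locations of a sparse CSR `self`.
+    // Hypothetical demo name, standard at:: API assumed.
+    inline bool demo_addmm() {
+        at::Tensor bias = at::zeros({2, 2});
+        at::Tensor m1 = at::randn({2, 3});
+        at::Tensor m2 = at::randn({3, 2});
+        at::Tensor y = at::addmm(bias, m1, m2, /*beta=*/1, /*alpha=*/1);
+        return at::allclose(y, at::mm(m1, m2));  // beta * 0 contributes nothing
+    }
+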
+ inline at::Tensor & addmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & addmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addmm_::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha); + } + + // aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _addmm_activation_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) { + return at::_ops::_addmm_activation_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu, out); + } + + // aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _addmm_activation_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out) { + return at::_ops::_addmm_activation_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu, out); + } + + // aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor + inline at::Tensor _addmm_activation(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) { + return at::_ops::_addmm_activation::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, use_gelu); + } + + // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=False) -> Tensor
+    inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::sparse_compressed_tensor_comp_plain_value_size::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
+    }
+
+    // aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+    inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
+        return at::_ops::sparse_csr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+    inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::sparse_csr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
+    }
+
+    // aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+    inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
+        return at::_ops::sparse_csc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+    }
+
+    // aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+    inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+        return at::_ops::sparse_csc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
+    }
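+
+    // Illustrative sketch (not part of the generated header): a CSR tensor is
+    // built from crow_indices (length rows+1) plus col_indices and values
+    // (length nnz). The example below encodes [[1, 0], [0, 2]]; the demo name
+    // is hypothetical and the standard at:: factory is assumed.
+    inline at::Tensor demo_sparse_csr() {
+        at::Tensor crow = at::tensor({0, 1, 2}, at::kLong);
+        at::Tensor col = at::tensor({0, 1}, at::kLong);
+        at::Tensor vals = at::tensor({1.0f, 2.0f}, at::kFloat);
+        return at::sparse_csr_tensor(crow, col, vals, {2, 2},
+                                     at::TensorOptions().dtype(at::kFloat).layout(at::kSparseCsr));
+    }
+
+    // aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?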
pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_bsr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsr_tensor_crow_col_value_size::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_bsc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsc_tensor_ccol_row_value_size::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_compressed_tensor_comp_plain_value::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor sparse_compressed_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_compressed_tensor_comp_plain_value::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_csr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_csc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_csc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_bsr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsr_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsr_tensor_crow_col_value::redispatch(dispatchKeySet, crow_indices, col_indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_bsc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_bsc_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_bsc_tensor_ccol_row_value::redispatch(dispatchKeySet, ccol_indices, row_indices, values, dtype, layout, device, pin_memory); + } + + // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_compressed_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_compressed_tensor_unsafe::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor _sparse_compressed_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_compressed_tensor_unsafe::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_csr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_csr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_csr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_csr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_csc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_csc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_csc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_csc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_bsr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsr_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_bsr_tensor_unsafe::redispatch(dispatchKeySet, crow_indices, col_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_bsc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_bsc_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_bsc_tensor_unsafe::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_coo_tensor_size::redispatch(dispatchKeySet, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_coo_tensor_size::redispatch(dispatchKeySet, size, dtype, layout, device, pin_memory); + } + + // aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options={}) { + return at::_ops::sparse_coo_tensor_indices::redispatch(dispatchKeySet, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_coo_tensor_indices::redispatch(dispatchKeySet, indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::sparse_coo_tensor_indices_size::redispatch(dispatchKeySet, indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor sparse_coo_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_coo_tensor_indices_size::redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::_sparse_coo_tensor_unsafe::redispatch(dispatchKeySet, indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + inline at::Tensor _sparse_coo_tensor_unsafe(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_coo_tensor_unsafe::redispatch(dispatchKeySet, indices, values, size, dtype, layout, device, pin_memory); + } + + // aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> () + inline void _validate_sparse_coo_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_coo_tensor_args::redispatch(dispatchKeySet, indices, values, size); + } + + // aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> () + inline void _validate_sparse_compressed_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) { + return at::_ops::_validate_sparse_compressed_tensor_args::redispatch(dispatchKeySet, compressed_indices, plain_indices, values, size, layout); + } + + // aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_csr_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_csr_tensor_args::redispatch(dispatchKeySet, crow_indices, col_indices, values, size); + } + + // aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_csc_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_csc_tensor_args::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size); + } + + // aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_bsr_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_bsr_tensor_args::redispatch(dispatchKeySet, crow_indices, col_indices, values, size); + } + + // aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> () + inline void _validate_sparse_bsc_tensor_args(c10::DispatchKeySet dispatchKeySet, const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) { + return at::_ops::_validate_sparse_bsc_tensor_args::redispatch(dispatchKeySet, ccol_indices, row_indices, values, size); + } + + // aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::_sparse_coo_tensor_with_dims::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_coo_tensor_with_dims::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, dtype, layout, device, pin_memory); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory); + } + + // aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + inline const at::Tensor & sparse_resize_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim); + } + + // aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) 
+    inline const at::Tensor & sparse_resize_and_clear_(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
+        return at::_ops::sparse_resize_and_clear_::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim);
+    }
+
+    // aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
+    inline at::Tensor sparse_mask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) {
+        return at::_ops::sparse_mask::redispatch(dispatchKeySet, self, mask);
+    }
+
+    // aten::_to_cpu(Tensor[] tensors) -> Tensor[]
+    inline ::std::vector<at::Tensor> _to_cpu(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::_to_cpu::redispatch(dispatchKeySet, tensors);
+    }
+
+    // aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor to_dense(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::to_dense::redispatch(dispatchKeySet, self, dtype);
+    }
+
+    // aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor _to_dense(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::_to_dense::redispatch(dispatchKeySet, self, dtype);
+    }
+
+    // aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor
+    inline at::Tensor to_dense_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input) {
+        return at::_ops::to_dense_backward::redispatch(dispatchKeySet, grad, input);
+    }
+
+    // aten::sparse_dim(Tensor self) -> int
+    inline int64_t sparse_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::sparse_dim::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_dimI(Tensor self) -> int
+    inline int64_t _dimI(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_dimI::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::dense_dim(Tensor self) -> int
+    inline int64_t dense_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::dense_dim::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_dimV(Tensor self) -> int
+    inline int64_t _dimV(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_dimV::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_nnz(Tensor self) -> int
+    inline int64_t _nnz(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_nnz::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::coalesce(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor coalesce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::coalesce::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_coalesce(Tensor self) -> Tensor
+    inline at::Tensor _coalesce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_coalesce::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::is_coalesced(Tensor self) -> bool
+    inline bool is_coalesced(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::is_coalesced::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_indices(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor _indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_indices::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_values(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor _values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return
at::_ops::_values::redispatch(dispatchKeySet, self); + } + + // aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) + inline at::Tensor & _coalesced_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced) { + return at::_ops::_coalesced_::redispatch(dispatchKeySet, self, coalesced); + } + + // aten::indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::indices::redispatch(dispatchKeySet, self); + } + + // aten::values(Tensor(a) self) -> Tensor(a) + inline at::Tensor values(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::values::redispatch(dispatchKeySet, self); + } + + // aten::crow_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor crow_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::crow_indices::redispatch(dispatchKeySet, self); + } + + // aten::col_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor col_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::col_indices::redispatch(dispatchKeySet, self); + } + + // aten::ccol_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor ccol_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::ccol_indices::redispatch(dispatchKeySet, self); + } + + // aten::row_indices(Tensor(a) self) -> Tensor(a) + inline at::Tensor row_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::row_indices::redispatch(dispatchKeySet, self); + } + + // aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hspmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm_out::redispatch(dispatchKeySet, mat1, mat2, out); + } + + // aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hspmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2, at::Tensor & out) { + return at::_ops::hspmm_out::redispatch(dispatchKeySet, mat1, mat2, out); + } + + // aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor + inline at::Tensor hspmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mat1, const at::Tensor & mat2) { + return at::_ops::hspmm::redispatch(dispatchKeySet, mat1, mat2); + } + + // aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) 
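+
+    // Illustrative sketch (not part of the generated header): COO tensors may
+    // hold duplicate indices until coalesce() sums them; indices()/values()
+    // require a coalesced tensor, while _indices()/_values() skip that check.
+    // Hypothetical demo name, standard at:: API assumed.
+    inline at::Tensor demo_coalesce() {
+        at::Tensor indices = at::zeros({2, 2}, at::kLong);   // both entries at (0, 0)
+        at::Tensor values = at::tensor({1.0f, 2.0f}, at::kFloat);
+        at::Tensor sp = at::sparse_coo_tensor(indices, values, {2, 2});
+        at::Tensor c = sp.coalesce();   // one nonzero left, value 3.0
+        return c.values();
+    }
+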
+ inline at::Tensor & copy_sparse_to_sparse_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_sparse_to_sparse_::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] + inline ::std::vector<at::Tensor> unbind(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0) { + return at::_ops::unbind_int::redispatch(dispatchKeySet, self, dim); + } + + // aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] + inline ::std::vector<at::Tensor> unbind(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim) { + return at::_ops::unbind_Dimname::redispatch(dispatchKeySet, self, dim); + } + + // aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor + inline at::Tensor to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim) { + return at::_ops::to_sparse_sparse_dim::redispatch(dispatchKeySet, self, sparse_dim); + } + + // aten::to_sparse(Tensor self) -> Tensor + inline at::Tensor to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::to_sparse::redispatch(dispatchKeySet, self); + } + + // aten::to_sparse_csr(Tensor self) -> Tensor + inline at::Tensor to_sparse_csr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::to_sparse_csr::redispatch(dispatchKeySet, self); + } + + // aten::to_sparse_csc(Tensor self) -> Tensor + inline at::Tensor to_sparse_csc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::to_sparse_csc::redispatch(dispatchKeySet, self); + } + + // aten::to_sparse_bsr(Tensor self, int[2] blocksize) -> Tensor + inline at::Tensor to_sparse_bsr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize) { + return at::_ops::to_sparse_bsr::redispatch(dispatchKeySet, self, blocksize); + } + + // aten::to_sparse_bsc(Tensor self, int[2] blocksize) -> Tensor + inline at::Tensor to_sparse_bsc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize) { + return at::_ops::to_sparse_bsc::redispatch(dispatchKeySet, self, blocksize); + } + + // aten::to_mkldnn(Tensor self, ScalarType?
dtype=None) -> Tensor + inline at::Tensor to_mkldnn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::to_mkldnn::redispatch(dispatchKeySet, self, dtype); + } + + // aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor + inline at::Tensor mkldnn_reorder_conv2d_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::mkldnn_reorder_conv2d_weight::redispatch(dispatchKeySet, self, padding, stride, dilation, groups); + } + + // aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor + inline at::Tensor mkldnn_reorder_conv3d_weight(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::mkldnn_reorder_conv3d_weight::redispatch(dispatchKeySet, self, padding, stride, dilation, groups); + } + + // aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor + inline at::Tensor to_mkldnn_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input) { + return at::_ops::to_mkldnn_backward::redispatch(dispatchKeySet, grad, input); + } + + // aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor + inline at::Tensor quantize_per_tensor_dynamic(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) { + return at::_ops::quantize_per_tensor_dynamic::redispatch(dispatchKeySet, self, dtype, reduce_range); + } + + // aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor + inline at::Tensor quantize_per_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor::redispatch(dispatchKeySet, self, scale, zero_point, dtype); + } + + // aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor + inline at::Tensor quantize_per_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams::redispatch(dispatchKeySet, self, scale, zero_point, dtype); + } + + // aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] + inline ::std::vector<at::Tensor> quantize_per_tensor(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors::redispatch(dispatchKeySet, tensors, scales, zero_points, dtype); + } + + // aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor + inline at::Tensor quantize_per_channel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) { + return at::_ops::quantize_per_channel::redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype); + } + + // aten::dequantize.self(Tensor self) -> Tensor + inline at::Tensor dequantize(c10::DispatchKeySet
dispatchKeySet, const at::Tensor & self) { + return at::_ops::dequantize_self::redispatch(dispatchKeySet, self); + } + + // aten::dequantize.tensors(Tensor[] tensors) -> Tensor[] + inline ::std::vector<at::Tensor> dequantize(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::dequantize_tensors::redispatch(dispatchKeySet, tensors); + } + + // aten::q_scale(Tensor self) -> float + inline double q_scale(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_scale::redispatch(dispatchKeySet, self); + } + + // aten::q_zero_point(Tensor self) -> int + inline int64_t q_zero_point(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_zero_point::redispatch(dispatchKeySet, self); + } + + // aten::q_per_channel_scales(Tensor self) -> Tensor + inline at::Tensor q_per_channel_scales(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_per_channel_scales::redispatch(dispatchKeySet, self); + } + + // aten::q_per_channel_zero_points(Tensor self) -> Tensor + inline at::Tensor q_per_channel_zero_points(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_per_channel_zero_points::redispatch(dispatchKeySet, self); + } + + // aten::q_per_channel_axis(Tensor self) -> int + inline int64_t q_per_channel_axis(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::q_per_channel_axis::redispatch(dispatchKeySet, self); + } + + // aten::int_repr(Tensor self) -> Tensor + inline at::Tensor int_repr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::int_repr::redispatch(dispatchKeySet, self); + } + + // aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor + inline at::Tensor _make_per_tensor_quantized_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point) { + return at::_ops::_make_per_tensor_quantized_tensor::redispatch(dispatchKeySet, self, scale, zero_point); + } + + // aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor + inline at::Tensor _make_per_channel_quantized_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) { + return at::_ops::_make_per_channel_quantized_tensor::redispatch(dispatchKeySet, self, scale, zero_point, axis); + } + + // aten::qscheme(Tensor self) -> QScheme + inline at::QScheme qscheme(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::qscheme::redispatch(dispatchKeySet, self); + } + + // aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor + inline at::Tensor fake_quantize_per_tensor_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max); + } + + // aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor + inline at::Tensor fake_quantize_per_tensor_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) { + return
at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max); + } + + // aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max); + } + + // aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max) -> (Tensor output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) { + return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max); + } + + // aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + inline at::Tensor fake_quantize_per_tensor_affine_cachemask_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::redispatch(dispatchKeySet, grad, mask); + } + + // aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + inline at::Tensor _fake_quantize_learnable_per_tensor_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor); + } + + // aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::redispatch(dispatchKeySet, grad, self, scale, zero_point, quant_min, quant_max, grad_factor); + } + + // aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + inline at::Tensor fake_quantize_per_channel_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_channel_affine::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max); + } + + // aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor
output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_channel_affine_cachemask::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max); + } + + // aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor + inline at::Tensor fake_quantize_per_channel_affine_cachemask_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & mask) { + return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::redispatch(dispatchKeySet, grad, mask); + } + + // aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor + inline at::Tensor _fake_quantize_learnable_per_channel_affine(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_channel_affine::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor); + } + + // aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) { + return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::redispatch(dispatchKeySet, grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor); + } + + // aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor + inline at::Tensor fused_moving_avg_obs_fake_quant(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::fused_moving_avg_obs_fake_quant::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + } + + // aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!)
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) + inline ::std::tuple<at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) { + return at::_ops::_fused_moving_avg_obs_fq_helper::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant); + } + + // aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int) + inline ::std::tuple<double,int64_t> _choose_qparams_per_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool reduce_range=false) { + return at::_ops::_choose_qparams_per_tensor::redispatch(dispatchKeySet, self, reduce_range); + } + + // aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor + inline at::Tensor _saturate_weight_to_fp16(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight) { + return at::_ops::_saturate_weight_to_fp16::redispatch(dispatchKeySet, weight); + } + + // aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) { + return at::_ops::choose_qparams_optimized::redispatch(dispatchKeySet, input, numel, n_bins, ratio, bit_width); + } + + // aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a) + inline at::Tensor _autocast_to_reduced_precision(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) { + return at::_ops::_autocast_to_reduced_precision::redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype); + } + + // aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a) + inline at::Tensor _autocast_to_full_precision(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) { + return at::_ops::_autocast_to_full_precision::redispatch(dispatchKeySet, self, cuda_enabled, cpu_enabled); + } + + // aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor _to_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::_to_copy::redispatch(dispatchKeySet, self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat?
memory_format=None) -> Tensor + inline at::Tensor _to_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::_to_copy::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, memory_format); + } + + // aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_dtype_layout::redispatch(dispatchKeySet, self, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, copy, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); + } + + // aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) { + return at::_ops::to_dtype_layout::redispatch(dispatchKeySet, self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format); + } + + // aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_device::redispatch(dispatchKeySet, self, device, dtype, non_blocking, copy, memory_format); + } + + // aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_dtype::redispatch(dispatchKeySet, self, dtype, non_blocking, copy, memory_format); + } + + // aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat?
memory_format=None) -> Tensor(a) + inline at::Tensor to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, bool non_blocking=false, bool copy=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::to_other::redispatch(dispatchKeySet, self, other, non_blocking, copy, memory_format); + } + + // aten::meshgrid(Tensor[] tensors) -> Tensor[] + inline ::std::vector<at::Tensor> meshgrid(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::meshgrid::redispatch(dispatchKeySet, tensors); + } + + // aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[] + inline ::std::vector<at::Tensor> meshgrid(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, c10::string_view indexing) { + return at::_ops::meshgrid_indexing::redispatch(dispatchKeySet, tensors, indexing); + } + + // aten::cartesian_prod(Tensor[] tensors) -> Tensor + inline at::Tensor cartesian_prod(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::cartesian_prod::redispatch(dispatchKeySet, tensors); + } + + // aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor + inline at::Tensor combinations(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t r=2, bool with_replacement=false) { + return at::_ops::combinations::redispatch(dispatchKeySet, self, r, with_replacement); + } + + // aten::item(Tensor self) -> Scalar + inline at::Scalar item(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::item::redispatch(dispatchKeySet, self); + } + + // aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Tensor & other) { + return at::_ops::result_type_Tensor::redispatch(dispatchKeySet, tensor, other); + } + + // aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Tensor & tensor, const at::Scalar & other) { + return at::_ops::result_type_Scalar::redispatch(dispatchKeySet, tensor, other); + } + + // aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar, const at::Tensor & tensor) { + return at::_ops::result_type_Scalar_Tensor::redispatch(dispatchKeySet, scalar, tensor); + } + + // aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType + inline at::ScalarType result_type(c10::DispatchKeySet dispatchKeySet, const at::Scalar & scalar1, const at::Scalar & scalar2) { + return at::_ops::result_type_Scalar_Scalar::redispatch(dispatchKeySet, scalar1, scalar2); + } + + // aten::can_cast(ScalarType from, ScalarType to) -> bool + inline bool can_cast(c10::DispatchKeySet dispatchKeySet, at::ScalarType from, at::ScalarType to) { + return at::_ops::can_cast::redispatch(dispatchKeySet, from, to); + } + + // aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType + inline at::ScalarType promote_types(c10::DispatchKeySet dispatchKeySet, at::ScalarType type1, at::ScalarType type2) { + return at::_ops::promote_types::redispatch(dispatchKeySet, type1, type2); + } + + // aten::_local_scalar_dense(Tensor self) -> Scalar + inline at::Scalar _local_scalar_dense(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::_local_scalar_dense::redispatch(dispatchKeySet, self); + } + + // aten::_lstm_mps(Tensor
input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::_lstm_mps::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[]) + inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_mps_backward::redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) { + return at::_ops::_thnn_fused_lstm_cell::redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias); + } + + // aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_lstm_cell_backward_impl::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias); + } + + // aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_lstm_cell_backward::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias); + } + + // aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor?
hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) { + return at::_ops::_thnn_differentiable_lstm_cell_backward::redispatch(dispatchKeySet, grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy); + } + + // aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) { + return at::_ops::_thnn_fused_gru_cell::redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias); + } + + // aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) { + return at::_ops::_thnn_fused_gru_cell_backward::redispatch(dispatchKeySet, grad_hy, workspace, has_bias); + } + + // aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) { + return at::_ops::_thnn_differentiable_gru_cell_backward::redispatch(dispatchKeySet, grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias); + } + + // aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::lstm_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::lstm_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor>
gru(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::gru_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> gru(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::gru_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_tanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::rnn_tanh_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_tanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::rnn_tanh_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::rnn_relu_input::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); + } + + // aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> rnn_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { + return at::_ops::rnn_relu_data::redispatch(dispatchKeySet, data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); + } + + // aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor?
b_hh=None) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> lstm_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::lstm_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + inline at::Tensor gru_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::gru_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + inline at::Tensor rnn_tanh_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::rnn_tanh_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor + inline at::Tensor rnn_relu_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::rnn_relu_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh); + } + + // aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_lstm_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + inline at::Tensor quantized_gru_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_gru_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh,
b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + inline at::Tensor quantized_rnn_relu_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_rnn_relu_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + inline at::Tensor quantized_rnn_tanh_cell(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) { + return at::_ops::quantized_rnn_tanh_cell::redispatch(dispatchKeySet, input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); + } + + // aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) { + return at::_ops::_pack_padded_sequence::redispatch(dispatchKeySet, input, lengths, batch_first); + } + + // aten::_pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor + inline at::Tensor _pack_padded_sequence_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) { + return at::_ops::_pack_padded_sequence_backward::redispatch(dispatchKeySet, grad, input_size, batch_sizes, batch_first); + } + + // aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) { + return at::_ops::_pad_packed_sequence::redispatch(dispatchKeySet, data, batch_sizes, batch_first, padding_value, total_length); + } + + // aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
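+ // [editorial aside] Sketch of how the two packing wrappers above pair up; the
+ // variable names (ks, input, lengths, total_length) are illustrative, not
+ // from the source:
+ //
+ //   // pack: (padded input, lengths) -> (packed data, batch_sizes)
+ //   auto packed = _pack_padded_sequence(ks, input, lengths, /*batch_first=*/true);
+ //   // unpack: inverse transform, re-padded out to total_length
+ //   auto padded = _pad_packed_sequence(ks, std::get<0>(packed), std::get<1>(packed),
+ //                                      /*batch_first=*/true, /*padding_value=*/0, total_length);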
+ inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source) { + return at::_ops::set__source_Storage::redispatch(dispatchKeySet, self, source); + } + + // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) { + return at::_ops::set__source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride)); + } + + // aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set__symint(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) { + return at::_ops::set__source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, size, stride); + } + + // aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) { + return at::_ops::set__source_Tensor_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride)); + } + + // aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) + inline at::Tensor & set__symint(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) { + return at::_ops::set__source_Tensor_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, size, stride); + } + + // aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) + inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source) { + return at::_ops::set__source_Tensor::redispatch(dispatchKeySet, self, source); + } + + // aten::set_(Tensor(a!) self) -> Tensor(a!) 
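+ // [editorial aside] The set_/set__symint pairs above show the SymInt bridging
+ // pattern used throughout this header: the concrete-int overload widens its
+ // sizes/strides with c10::fromIntArrayRef, and both overloads land on the same
+ // at::_ops entry. A hedged sketch; the helper name is invented for illustration:
+ //
+ //   at::Tensor & rebind_storage(c10::DispatchKeySet ks, at::Tensor & t, at::Storage src) {
+ //     // concrete ints here; widened to SymInt before the redispatch call
+ //     return set_(ks, t, src, /*storage_offset=*/0, /*size=*/{4, 4}, /*stride=*/{4, 1});
+ //   }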
+ inline at::Tensor & set_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::set_::redispatch(dispatchKeySet, self); + } + + // aten::lift(Tensor self) -> Tensor + inline at::Tensor lift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::lift::redispatch(dispatchKeySet, self); + } + + // aten::lift_fresh(Tensor(a) self) -> Tensor(a) + inline at::Tensor lift_fresh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::lift_fresh::redispatch(dispatchKeySet, self); + } + + // aten::lift_fresh_copy(Tensor self) -> Tensor + inline at::Tensor lift_fresh_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::lift_fresh_copy::redispatch(dispatchKeySet, self); + } + + // aten::is_set_to(Tensor self, Tensor tensor) -> bool + inline bool is_set_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor) { + return at::_ops::is_set_to::redispatch(dispatchKeySet, self, tensor); + } + + // aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) + inline at::Tensor & masked_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + return at::_ops::masked_fill__Scalar::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor + inline at::Tensor masked_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) { + return at::_ops::masked_fill_Scalar::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) + inline at::Tensor & masked_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + return at::_ops::masked_fill__Tensor::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor + inline at::Tensor masked_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) { + return at::_ops::masked_fill_Tensor::redispatch(dispatchKeySet, self, mask, value); + } + + // aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) + inline at::Tensor & masked_scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + return at::_ops::masked_scatter_::redispatch(dispatchKeySet, self, mask, source); + } + + // aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor + inline at::Tensor masked_scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) { + return at::_ops::masked_scatter::redispatch(dispatchKeySet, self, mask, source); + } + + // aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor + inline at::Tensor _masked_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt) { + return at::_ops::_masked_softmax::redispatch(dispatchKeySet, self, mask, dim, mask_type); + } + + // aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int?
dim=None) -> Tensor + inline at::Tensor _masked_softmax_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::_masked_softmax_backward::redispatch(dispatchKeySet, grad_output, output, mask, dim); + } + + // aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) + inline at::Tensor view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::view::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size)); + } + + // aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) + inline at::Tensor view_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::view::redispatch(dispatchKeySet, self, size); + } + + // aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) + inline at::Tensor view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) { + return at::_ops::view_dtype::redispatch(dispatchKeySet, self, dtype); + } + + // aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) + inline at::Tensor & put_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) { + return at::_ops::put_::redispatch(dispatchKeySet, self, index, source, accumulate); + } + + // aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor + inline at::Tensor put(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) { + return at::_ops::put::redispatch(dispatchKeySet, self, index, source, accumulate); + } + + // aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_out::redispatch(dispatchKeySet, self, dim, index, source, alpha, out); + } + + // aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::index_add_out::redispatch(dispatchKeySet, self, dim, index, source, alpha, out); + } + + // aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
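+ // [editorial aside] Overload resolution sketch for the three view entries
+ // above (values illustrative; `t` is any tensor):
+ //
+ //   view(ks, t, {2, 8});                // SymInt[] schema via IntArrayRef,
+ //                                       // widened by c10::fromIntArrayRef
+ //   view_symint(ks, t, t.sym_sizes());  // already-symbolic sizes pass through
+ //   view(ks, t, at::kFloat);            // view.dtype: reinterpret element type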
+ inline at::Tensor & index_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_::redispatch(dispatchKeySet, self, dim, index, source, alpha); + } + + // aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + inline at::Tensor index_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add::redispatch(dispatchKeySet, self, dim, index, source, alpha); + } + + // aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor + inline at::Tensor index_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) { + return at::_ops::index_add_dimname::redispatch(dispatchKeySet, self, dim, index, source, alpha); + } + + // aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce_out::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self, out); + } + + // aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::index_reduce_out::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self, out); + } + + // aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!) + inline at::Tensor & index_reduce_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce_::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self); + } + + // aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor + inline at::Tensor index_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) { + return at::_ops::index_reduce::redispatch(dispatchKeySet, self, dim, index, source, reduce, include_self); + } + + // aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) 
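+ // [editorial aside] index_add appears here in the dispatcher's usual triple:
+ // functional (index_add), in-place (index_add_, trailing underscore, mutates
+ // self), and out-variant (index_add_out/index_add_outf, just above). A short
+ // sketch with illustrative locals:
+ //
+ //   auto fresh = index_add(ks, self, 0, idx, src);  // allocates the result
+ //   index_add_(ks, self, 0, idx, src);              // writes into self
+ //   index_add_out(ks, out, self, 0, idx, src);      // writes into out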
+ inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill__int_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_int_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) + inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill__int_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_int_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) + inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill__Dimname_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) + inline at::Tensor & index_fill_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill__Dimname_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::index_fill_Dimname_Scalar::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor + inline at::Tensor index_fill(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) { + return at::_ops::index_fill_Dimname_Tensor::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_src::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) 
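+ // [editorial aside] The mapping from schema name to at::_ops symbol in these
+ // bodies is mechanical: the '.' before the overload name becomes '_', so
+ // in-place ops (which already end in '_') get a double underscore. From the
+ // entries above:
+ //
+ //   aten::index_fill_.int_Scalar    -> at::_ops::index_fill__int_Scalar
+ //   aten::index_fill.Dimname_Tensor -> at::_ops::index_fill_Dimname_Tensor
+ //   aten::scatter.src               -> at::_ops::scatter_src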
+ inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter__src::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_src_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) { + return at::_ops::scatter_src_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter_value::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter__value::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter_value_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) { + return at::_ops::scatter_value_out::redispatch(dispatchKeySet, self, dim, index, value, out); + } + + // aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + return at::_ops::scatter_reduce::redispatch(dispatchKeySet, self, dim, index, src, reduce); + } + + // aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) + inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + return at::_ops::scatter__reduce::redispatch(dispatchKeySet, self, dim, index, src, reduce); + } + + // aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) { + return at::_ops::scatter_reduce_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, out); + } + + // aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, at::Tensor & out) { + return at::_ops::scatter_reduce_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, out); + } + + // aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter_value_reduce::redispatch(dispatchKeySet, self, dim, index, value, reduce); + } + + // aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) + inline at::Tensor & scatter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter__value_reduce::redispatch(dispatchKeySet, self, dim, index, value, reduce); + } + + // aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) { + return at::_ops::scatter_value_reduce_out::redispatch(dispatchKeySet, self, dim, index, value, reduce, out); + } + + // aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce, at::Tensor & out) { + return at::_ops::scatter_value_reduce_out::redispatch(dispatchKeySet, self, dim, index, value, reduce, out); + } + + // aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_dimname_src::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + inline at::Tensor scatter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) { + return at::_ops::scatter_dimname_value::redispatch(dispatchKeySet, self, dim, index, value); + } + + // aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add::redispatch(dispatchKeySet, self, dim, index, src); + }
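// [Editor's sketch: not part of the generated header. The redispatch
// wrappers above sit one dispatch hop below the public at:: API; typical
// caller-side code goes through the public entry points instead. A minimal,
// illustrative scatter_add example (function name is hypothetical; assumes
// <ATen/ATen.h> and <vector>):
inline void example_scatter_add() {
  at::Tensor self  = at::zeros({4});
  at::Tensor index = at::tensor(std::vector<int64_t>{0, 1, 1, 3});
  at::Tensor src   = at::ones({4});
  // out[index[i]] += src[i] along dim 0, so out == [1, 2, 0, 1]
  at::Tensor out = at::scatter_add(self, /*dim=*/0, index, src);
  self.scatter_add_(/*dim=*/0, index, src);  // in-place variant
}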
+ + // aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + inline at::Tensor & scatter_add_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add_::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, at::Tensor & out) { + return at::_ops::scatter_add_out::redispatch(dispatchKeySet, self, dim, index, src, out); + } + + // aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + inline at::Tensor scatter_add(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) { + return at::_ops::scatter_add_dimname::redispatch(dispatchKeySet, self, dim, index, src); + } + + // aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor + inline at::Tensor scatter_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self); + } + + // aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!) + inline at::Tensor & scatter_reduce_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce__two::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self); + } + + // aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) { + return at::_ops::scatter_reduce_two_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self, out); + } + + // aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & scatter_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, at::Tensor & out) { + return at::_ops::scatter_reduce_two_out::redispatch(dispatchKeySet, self, dim, index, src, reduce, include_self, out); + } + + // aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+ inline at::Tensor & eq_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::eq__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & eq_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::eq__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_and_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_and_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_and_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_and_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_and(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_and_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_and(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_and_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_and(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_and_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & bitwise_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_and__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+ inline at::Tensor & bitwise_and_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_and__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __and__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__and___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __and__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__and___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __iand__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__iand___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __iand__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__iand___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_or_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_or_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_or_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_or_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_or(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_or_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_or(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_or_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_or(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_or_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ inline at::Tensor & bitwise_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_or__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_or_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_or__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __or__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__or___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __or__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__or___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __ior__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__ior___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __ior__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__ior___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_xor_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_xor_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_xor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bitwise_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_xor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_xor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_xor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_xor(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_xor_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_xor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_xor_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & bitwise_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_xor__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_xor_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_xor__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __xor__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__xor___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __xor__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__xor___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __ixor__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__ixor___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __ixor__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__ixor___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __lshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__lshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __lshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__lshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ inline at::Tensor & __ilshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__ilshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __ilshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__ilshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_left_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_left_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_left_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_left_shift_Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_left_shift__Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_left_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_left_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bitwise_left_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_left_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_left_shift(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_left_shift_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor __rshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::__rshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor __rshift__(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::__rshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & __irshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::__irshift___Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & __irshift__(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::__irshift___Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor bitwise_right_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::bitwise_right_shift_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor bitwise_right_shift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_right_shift_Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+ inline at::Tensor & bitwise_right_shift_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_right_shift__Tensor_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::bitwise_right_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bitwise_right_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::bitwise_right_shift_Tensor_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor bitwise_right_shift(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::bitwise_right_shift_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + inline at::Tensor & tril_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal=0) { + return at::_ops::tril_::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + inline at::Tensor & triu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t diagonal=0) { + return at::_ops::triu_::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::digamma_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & digamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::digamma_::redispatch(dispatchKeySet, self); + } + + // aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) + inline at::Tensor & lerp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp__Scalar::redispatch(dispatchKeySet, self, end, weight); + } + + // aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) + inline at::Tensor & lerp_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp__Tensor::redispatch(dispatchKeySet, self, end, weight); + } + + // aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + inline at::Tensor & addbmm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm_::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha); + } + + // aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & addbmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out); + } + + // aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addbmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addbmm_out::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha, out); + } + + // aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + inline at::Tensor addbmm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm::redispatch(dispatchKeySet, self, batch1, batch2, beta, alpha); + } + + // aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::random__from::redispatch(dispatchKeySet, self, from, to, generator); + } + + // aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::random__to::redispatch(dispatchKeySet, self, to, generator); + } + + // aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & random_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::random_::redispatch(dispatchKeySet, self, generator); + } + + // aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & uniform_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::uniform_::redispatch(dispatchKeySet, self, from, to, generator); + } + + // aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & cauchy_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::cauchy_::redispatch(dispatchKeySet, self, median, sigma, generator); + } + + // aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & log_normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::log_normal_::redispatch(dispatchKeySet, self, mean, std, generator); + }
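// [Editor's sketch: not part of the generated header. The in-place RNG
// wrappers above back the corresponding at::Tensor methods; passing no
// generator (c10::nullopt) falls back to the global default generator.
// A minimal, illustrative example (function name is hypothetical; assumes
// <ATen/ATen.h>):
inline void example_inplace_rng() {
  at::Tensor t = at::empty({2, 3});
  t.uniform_(0.0, 1.0);     // U(0, 1)
  t.log_normal_(1.0, 2.0);  // exp(N(mean=1, std=2))
  t.random_(0, 10);         // integral values drawn uniformly from [0, 10)
}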
+ + // aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & exponential_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::exponential_::redispatch(dispatchKeySet, self, lambd, generator); + } + + // aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & geometric_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::geometric_::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diag_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::diag_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & diag_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { + return at::_ops::diag_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::diag(Tensor self, int diagonal=0) -> Tensor + inline at::Tensor diag(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::diag::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::diag_backward(Tensor grad, SymInt[] input_sizes, int diagonal) -> Tensor + inline at::Tensor diag_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef input_sizes, int64_t diagonal) { + return at::_ops::diag_backward::redispatch(dispatchKeySet, grad, c10::fromIntArrayRef(input_sizes), diagonal); + } + + // aten::diag_backward(Tensor grad, SymInt[] input_sizes, int diagonal) -> Tensor + inline at::Tensor diag_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, c10::SymIntArrayRef input_sizes, int64_t diagonal) { + return at::_ops::diag_backward::redispatch(dispatchKeySet, grad, input_sizes, diagonal); + } + + // aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cross_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::cross_out::redispatch(dispatchKeySet, self, other, dim, out); + } + + // aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cross_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim, at::Tensor & out) { + return at::_ops::cross_out::redispatch(dispatchKeySet, self, other, dim, out); + } + + // aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor + inline at::Tensor cross(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::cross::redispatch(dispatchKeySet, self, other, dim); + } + + // aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & triu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::triu_out::redispatch(dispatchKeySet, self, diagonal, out); + }
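// [Editor's sketch: not part of the generated header. diag/triu/tril all take
// a `diagonal` offset: 0 is the main diagonal, positive values lie above it,
// negative values below. Illustrative public-API usage (function name is
// hypothetical; assumes <ATen/ATen.h>):
inline void example_triangular_ops() {
  at::Tensor m = at::arange(16, at::kFloat).reshape({4, 4});
  at::Tensor upper = at::triu(m);                   // zeros strictly below the main diagonal
  at::Tensor lower = at::tril(m, /*diagonal=*/-1);  // keeps only entries strictly below it
  at::Tensor d     = at::diag(m);                   // 1-D main diagonal of a 2-D input
}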
+ + // aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & triu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { + return at::_ops::triu_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::triu(Tensor self, int diagonal=0) -> Tensor + inline at::Tensor triu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::triu::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tril_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::tril_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & tril_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal, at::Tensor & out) { + return at::_ops::tril_out::redispatch(dispatchKeySet, self, diagonal, out); + } + + // aten::tril(Tensor self, int diagonal=0) -> Tensor + inline at::Tensor tril(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t diagonal=0) { + return at::_ops::tril::redispatch(dispatchKeySet, self, diagonal); + } + + // aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor tril_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) { + return at::_ops::tril_indices::redispatch(dispatchKeySet, row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor tril_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::tril_indices::redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory); + } + + // aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor triu_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset=0, at::TensorOptions options=at::kLong) { + return at::_ops::triu_indices::redispatch(dispatchKeySet, row, col, offset, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + }
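// [Editor's sketch: not part of the generated header. Codegen emits two C++
// overloads for factory ops like tril_indices/triu_indices: one packing
// dtype/layout/device/pin_memory into at::TensorOptions (defaulting to
// at::kLong, per the schema) and one taking the four optional fields
// unpacked; both redispatch to the same op. Illustrative public-API usage
// (function name is hypothetical; assumes <ATen/ATen.h>):
inline void example_tril_indices() {
  // 2 x N Long tensor: row 0 holds row indices, row 1 holds column indices
  // of the lower triangle of a 4 x 4 matrix.
  at::Tensor idx = at::tril_indices(4, 4);
  at::Tensor idx_explicit = at::tril_indices(4, 4, /*offset=*/0,
                                             at::TensorOptions().dtype(at::kLong));
}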
+ + // aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor triu_indices(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::triu_indices::redispatch(dispatchKeySet, row, col, offset, dtype, layout, device, pin_memory); + } + + // aten::trace(Tensor self) -> Tensor + inline at::Tensor trace(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::trace::redispatch(dispatchKeySet, self); + } + + // aten::trace_backward(Tensor grad, int[] sizes) -> Tensor + inline at::Tensor trace_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef sizes) { + return at::_ops::trace_backward::redispatch(dispatchKeySet, grad, sizes); + } + + // aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ne_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor ne(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ne_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ne_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ne.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor ne(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & ne_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::ne__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & ne_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::ne__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & not_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::not_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + }
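// [Editor's sketch: not part of the generated header. ne/not_equal, and the
// eq/ge/le/gt/lt family that follows, are elementwise comparisons returning
// Bool tensors; not_equal is an alias of ne. Illustrative public-API usage
// (function name is hypothetical; assumes <ATen/ATen.h>):
inline void example_elementwise_compare() {
  at::Tensor x = at::arange(5);                    // [0, 1, 2, 3, 4], int64
  at::Tensor mask = at::ne(x, 2);                  // Bool: [1, 1, 0, 1, 1]
  bool all_eq = at::eq(x, x).all().item<bool>();   // true
}
+ + // aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)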
+ inline at::Tensor & not_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::not_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor not_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::not_equal_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & not_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::not_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & not_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::not_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor not_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::not_equal_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & not_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::not_equal__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & not_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::not_equal__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::eq_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eq_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::eq_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor eq(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::eq_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & eq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::eq_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & eq_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::eq_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::eq.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor eq(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::eq_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ge_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor ge(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ge_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ge_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::ge.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor ge(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & ge_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & ge_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & greater_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::greater_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor greater_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::greater_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor greater_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & greater_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_equal__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & greater_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_equal__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & le_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & le_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::le_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor le(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::le_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & le_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & le_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::le_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::le.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor le(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::le_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & le_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::le__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & le_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::le__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::less_equal_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor less_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_equal_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_equal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::less_equal_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor less_equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & less_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_equal__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & less_equal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_equal__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & gt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::gt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor gt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::gt.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor gt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & gt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::gt__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & gt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::gt__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::greater_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor greater(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & greater_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & greater_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::greater_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::greater.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor greater(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & greater_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::greater__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & greater_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::greater__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::lt_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor lt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::lt_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::lt.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor lt(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & lt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::lt__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & lt_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::lt__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & less_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::less_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor less(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::less_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & less_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::less_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::less.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor less(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::less_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & less_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::less__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & less_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::less__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & take_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & index) { + return at::_ops::take_out::redispatch(dispatchKeySet, self, index, out); + } + + // aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & take_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out) { + return at::_ops::take_out::redispatch(dispatchKeySet, self, index, out); + } + + // aten::take(Tensor self, Tensor index) -> Tensor + inline at::Tensor take(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index) { + return at::_ops::take::redispatch(dispatchKeySet, self, index); + } + + // aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & take_along_dim_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::take_along_dim_out::redispatch(dispatchKeySet, self, indices, dim, out); + } + + // aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & take_along_dim_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out) { + return at::_ops::take_along_dim_out::redispatch(dispatchKeySet, self, indices, dim, out); + } + + // aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor + inline at::Tensor take_along_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim=c10::nullopt) { + return at::_ops::take_along_dim::redispatch(dispatchKeySet, self, indices, dim); + } + + // aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select(Tensor self, int dim, Tensor index) -> Tensor + inline at::Tensor index_select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & index_select_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, at::Tensor & out) { + return at::_ops::index_select_dimname_out::redispatch(dispatchKeySet, self, dim, index, out); + } + + // aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor + inline at::Tensor index_select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index) { + return at::_ops::index_select_dimname::redispatch(dispatchKeySet, self, dim, index); + } + + // aten::index_select_backward(Tensor grad, int[] self_sizes, int dim, Tensor index) -> Tensor + inline at::Tensor index_select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index) { + return at::_ops::index_select_backward::redispatch(dispatchKeySet, grad, self_sizes, dim, index); + } + + // aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & masked_select_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & masked_select_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { + return at::_ops::masked_select_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::masked_select(Tensor self, Tensor mask) -> Tensor + inline at::Tensor masked_select(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::masked_select::redispatch(dispatchKeySet, self, mask); + } + + // aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor + inline at::Tensor masked_select_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) { + return at::_ops::masked_select_backward::redispatch(dispatchKeySet, grad, input, mask); + } + + // aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nonzero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::nonzero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nonzero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::nonzero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::nonzero(Tensor self) -> Tensor + inline at::Tensor nonzero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::nonzero::redispatch(dispatchKeySet, self); + } + + // aten::nonzero_numpy(Tensor self) -> Tensor[] + inline ::std::vector<at::Tensor> nonzero_numpy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::nonzero_numpy::redispatch(dispatchKeySet, self); + } + + // aten::argwhere(Tensor self) -> Tensor + inline at::Tensor argwhere(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::argwhere::redispatch(dispatchKeySet, self); + } + + // aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gather_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & gather_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor + inline at::Tensor gather(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather::redispatch(dispatchKeySet, self, dim, index, sparse_grad); + } + + // aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor + inline at::Tensor gather_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) { + return at::_ops::gather_backward::redispatch(dispatchKeySet, grad, self, dim, index, sparse_grad); + } + + // aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gather_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & gather_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out) { + return at::_ops::gather_dimname_out::redispatch(dispatchKeySet, self, dim, index, sparse_grad, out); + } + + // aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor + inline at::Tensor gather(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) { + return at::_ops::gather_dimname::redispatch(dispatchKeySet, self, dim, index, sparse_grad); + } + + // aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor + inline at::Tensor _gather_sparse_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) { + return at::_ops::_gather_sparse_backward::redispatch(dispatchKeySet, self, dim, index, grad); + } + + // aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addcmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcmul_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & addcmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) { + return at::_ops::addcmul_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + inline at::Tensor addcmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcmul::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + inline at::Tensor & addcmul_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcmul_::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addcdiv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcdiv_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & addcdiv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) { + return at::_ops::addcdiv_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + inline at::Tensor addcdiv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcdiv::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + inline at::Tensor & addcdiv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcdiv_::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, float label_smoothing=0.0) -> Tensor + inline at::Tensor cross_entropy_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100, double label_smoothing=0.0) { + return at::_ops::cross_entropy_loss::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, label_smoothing); + } + + // aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!)
cloned_coefficient) + inline ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & X, at::Tensor & M, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) { + return at::_ops::triangular_solve_X::redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular, X, M); + } + + // aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) + inline ::std::tuple<at::Tensor &,at::Tensor &> triangular_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular, at::Tensor & X, at::Tensor & M) { + return at::_ops::triangular_solve_X::redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular, X, M); + } + + // aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) + inline ::std::tuple<at::Tensor,at::Tensor> triangular_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) { + return at::_ops::triangular_solve::redispatch(dispatchKeySet, self, A, upper, transpose, unitriangular); + } + + // aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> () + inline void _linalg_check_errors(c10::DispatchKeySet dispatchKeySet, const at::Tensor & info, c10::string_view api_name, bool is_matrix) { + return at::_ops::_linalg_check_errors::redispatch(dispatchKeySet, info, api_name, is_matrix); + } + + // aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_solve_triangular_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) { + return at::_ops::linalg_solve_triangular_out::redispatch(dispatchKeySet, self, B, upper, left, unitriangular, out); + } + + // aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & linalg_solve_triangular_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out) { + return at::_ops::linalg_solve_triangular_out::redispatch(dispatchKeySet, self, B, upper, left, unitriangular, out); + } + + // aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor + inline at::Tensor linalg_solve_triangular(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left=true, bool unitriangular=false) { + return at::_ops::linalg_solve_triangular::redispatch(dispatchKeySet, self, B, upper, left, unitriangular); + } + + // aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor + inline at::Tensor linalg_vander(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt) { + return at::_ops::linalg_vander::redispatch(dispatchKeySet, x, N); + } + + // aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!)
eigenvectors) + inline ::std::tuple<at::Tensor &,at::Tensor &> symeig_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & e, at::Tensor & V, const at::Tensor & self, bool eigenvectors=false, bool upper=true) { + return at::_ops::symeig_e::redispatch(dispatchKeySet, self, eigenvectors, upper, e, V); + } + + // aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + inline ::std::tuple<at::Tensor &,at::Tensor &> symeig_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool eigenvectors, bool upper, at::Tensor & e, at::Tensor & V) { + return at::_ops::symeig_e::redispatch(dispatchKeySet, self, eigenvectors, upper, e, V); + } + + // aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) + inline ::std::tuple<at::Tensor,at::Tensor> symeig(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool eigenvectors=false, bool upper=true) { + return at::_ops::symeig::redispatch(dispatchKeySet, self, eigenvectors, upper); + } + + // aten::_symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> _symeig_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool eigenvectors, bool upper) { + return at::_ops::_symeig_helper::redispatch(dispatchKeySet, self, eigenvectors, upper); + } + + // aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & U, at::Tensor & S, at::Tensor & V, const at::Tensor & self, bool some=true, bool compute_uv=true) { + return at::_ops::svd_U::redispatch(dispatchKeySet, self, some, compute_uv, U, S, V); + } + + // aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> svd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V) { + return at::_ops::svd_U::redispatch(dispatchKeySet, self, some, compute_uv, U, S, V); + } + + // aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some=true, bool compute_uv=true) { + return at::_ops::svd::redispatch(dispatchKeySet, self, some, compute_uv); + } + + // aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) + inline at::Tensor swapaxes(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t axis0, int64_t axis1) { + return at::_ops::swapaxes::redispatch(dispatchKeySet, self, axis0, axis1); + } + + // aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) + inline at::Tensor & swapaxes_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t axis0, int64_t axis1) { + return at::_ops::swapaxes_::redispatch(dispatchKeySet, self, axis0, axis1); + } + + // aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) + inline at::Tensor swapdims(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::swapdims::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+ inline at::Tensor & swapdims_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t dim0, int64_t dim1) { + return at::_ops::swapdims_::redispatch(dispatchKeySet, self, dim0, dim1); + } + + // aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cholesky_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool upper=false) { + return at::_ops::cholesky_out::redispatch(dispatchKeySet, self, upper, out); + } + + // aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cholesky_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) { + return at::_ops::cholesky_out::redispatch(dispatchKeySet, self, upper, out); + } + + // aten::cholesky(Tensor self, bool upper=False) -> Tensor + inline at::Tensor cholesky(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false) { + return at::_ops::cholesky::redispatch(dispatchKeySet, self, upper); + } + + // aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cholesky_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, bool upper=false) { + return at::_ops::cholesky_solve_out::redispatch(dispatchKeySet, self, input2, upper, out); + } + + // aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cholesky_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) { + return at::_ops::cholesky_solve_out::redispatch(dispatchKeySet, self, input2, upper, out); + } + + // aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor + inline at::Tensor cholesky_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, bool upper=false) { + return at::_ops::cholesky_solve::redispatch(dispatchKeySet, self, input2, upper); + } + + // aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor + inline at::Tensor _cholesky_solve_helper(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper) { + return at::_ops::_cholesky_solve_helper::redispatch(dispatchKeySet, self, A, upper); + } + + // aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor + inline at::Tensor cholesky_inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false) { + return at::_ops::cholesky_inverse::redispatch(dispatchKeySet, self, upper); + } + + // aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cholesky_inverse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool upper=false) { + return at::_ops::cholesky_inverse_out::redispatch(dispatchKeySet, self, upper, out); + } + + // aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & cholesky_inverse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) { + return at::_ops::cholesky_inverse_out::redispatch(dispatchKeySet, self, upper, out); + } + + // aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) 
R) + inline ::std::tuple<at::Tensor &,at::Tensor &> qr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & Q, at::Tensor & R, const at::Tensor & self, bool some=true) { + return at::_ops::qr_Q::redispatch(dispatchKeySet, self, some, Q, R); + } + + // aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + inline ::std::tuple<at::Tensor &,at::Tensor &> qr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, at::Tensor & Q, at::Tensor & R) { + return at::_ops::qr_Q::redispatch(dispatchKeySet, self, some, Q, R); + } + + // aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) + inline ::std::tuple<at::Tensor,at::Tensor> qr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some=true) { + return at::_ops::qr::redispatch(dispatchKeySet, self, some); + } + + // aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) + inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & a, at::Tensor & tau, const at::Tensor & self) { + return at::_ops::geqrf_a::redispatch(dispatchKeySet, self, a, tau); + } + + // aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) + inline ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & a, at::Tensor & tau) { + return at::_ops::geqrf_a::redispatch(dispatchKeySet, self, a, tau); + } + + // aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) + inline ::std::tuple<at::Tensor,at::Tensor> geqrf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::geqrf::redispatch(dispatchKeySet, self); + } + + // aten::orgqr(Tensor self, Tensor input2) -> Tensor + inline at::Tensor orgqr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2) { + return at::_ops::orgqr::redispatch(dispatchKeySet, self, input2); + } + + // aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & orgqr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & input2) { + return at::_ops::orgqr_out::redispatch(dispatchKeySet, self, input2, out); + } + + // aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & orgqr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) { + return at::_ops::orgqr_out::redispatch(dispatchKeySet, self, input2, out); + } + + // aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & ormqr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) { + return at::_ops::ormqr_out::redispatch(dispatchKeySet, self, input2, input3, left, transpose, out); + } + + // aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & ormqr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out) { + return at::_ops::ormqr_out::redispatch(dispatchKeySet, self, input2, input3, left, transpose, out); + } + + // aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor + inline at::Tensor ormqr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) { + return at::_ops::ormqr::redispatch(dispatchKeySet, self, input2, input3, left, transpose); + } + + // aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool pivot=true, bool check_errors=true) { + return at::_ops::_lu_with_info::redispatch(dispatchKeySet, self, pivot, check_errors); + } + + // aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lu_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) { + return at::_ops::lu_solve_out::redispatch(dispatchKeySet, self, LU_data, LU_pivots, out); + } + + // aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lu_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) { + return at::_ops::lu_solve_out::redispatch(dispatchKeySet, self, LU_data, LU_pivots, out); + } + + // aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor + inline at::Tensor lu_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) { + return at::_ops::lu_solve::redispatch(dispatchKeySet, self, LU_data, LU_pivots); + } + + // aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U) + inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) { + return at::_ops::lu_unpack::redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots); + } + + // aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data=true, bool unpack_pivots=true) { + return at::_ops::lu_unpack_out::redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U); + } + + // aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!)
U) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> lu_unpack_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, at::Tensor & P, at::Tensor & L, at::Tensor & U) { + return at::_ops::lu_unpack_out::redispatch(dispatchKeySet, LU_data, LU_pivots, unpack_data, unpack_pivots, P, L, U); + } + + // aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & multinomial_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::multinomial_out::redispatch(dispatchKeySet, self, num_samples, replacement, generator, out); + } + + // aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & multinomial_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::multinomial_out::redispatch(dispatchKeySet, self, num_samples, replacement, generator, out); + } + + // aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor + inline at::Tensor multinomial(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t num_samples, bool replacement=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::multinomial::redispatch(dispatchKeySet, self, num_samples, replacement, generator); + } + + // aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lgamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::lgamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lgamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::lgamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::lgamma_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & lgamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::lgamma_::redispatch(dispatchKeySet, self); + } + + // aten::lgamma(Tensor self) -> Tensor + inline at::Tensor lgamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::lgamma::redispatch(dispatchKeySet, self); + } + + // aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & digamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::digamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & digamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::digamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::digamma(Tensor self) -> Tensor + inline at::Tensor digamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::digamma::redispatch(dispatchKeySet, self); + } + + // aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & polygamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, const at::Tensor & self) { + return at::_ops::polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & polygamma_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) { + return at::_ops::polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::polygamma(int n, Tensor self) -> Tensor + inline at::Tensor polygamma(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) { + return at::_ops::polygamma::redispatch(dispatchKeySet, n, self); + } + + // aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) + inline at::Tensor & polygamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t n) { + return at::_ops::polygamma_::redispatch(dispatchKeySet, self, n); + } + + // aten::erfinv(Tensor self) -> Tensor + inline at::Tensor erfinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::erfinv::redispatch(dispatchKeySet, self); + } + + // aten::erfinv_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & erfinv_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::erfinv_::redispatch(dispatchKeySet, self); + } + + // aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & erfinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::i0(Tensor self) -> Tensor + inline at::Tensor i0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::i0::redispatch(dispatchKeySet, self); + } + + // aten::i0_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & i0_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::i0_::redispatch(dispatchKeySet, self); + } + + // aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & i0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & i0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sign(Tensor self) -> Tensor + inline at::Tensor sign(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::sign::redispatch(dispatchKeySet, self); + } + + // aten::sign_(Tensor(a!) self) -> Tensor(a!) + inline at::Tensor & sign_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::sign_::redispatch(dispatchKeySet, self); + } + + // aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sign_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::sign_out::redispatch(dispatchKeySet, self, out); + } + + // aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & sign_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::sign_out::redispatch(dispatchKeySet, self, out); + } + + // aten::signbit(Tensor self) -> Tensor + inline at::Tensor signbit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::signbit::redispatch(dispatchKeySet, self); + } + + // aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & signbit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::signbit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & signbit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::signbit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor + inline at::Tensor dist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p=2) { + return at::_ops::dist::redispatch(dispatchKeySet, self, other, p); + } + + // aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::atan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & atan2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::atan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & atan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::atan2_::redispatch(dispatchKeySet, self, other); + } + + // aten::atan2(Tensor self, Tensor other) -> Tensor + inline at::Tensor atan2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::atan2::redispatch(dispatchKeySet, self, other); + } + + // aten::arctan2(Tensor self, Tensor other) -> Tensor + inline at::Tensor arctan2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::arctan2::redispatch(dispatchKeySet, self, other); + } + + // aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctan2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::arctan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & arctan2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::arctan2_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & arctan2_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::arctan2_::redispatch(dispatchKeySet, self, other); + } + + // aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & lerp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp_Scalar_out::redispatch(dispatchKeySet, self, end, weight, out); + } + + // aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lerp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out) { + return at::_ops::lerp_Scalar_out::redispatch(dispatchKeySet, self, end, weight, out); + } + + // aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lerp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp_Tensor_out::redispatch(dispatchKeySet, self, end, weight, out); + } + + // aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & lerp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out) { + return at::_ops::lerp_Tensor_out::redispatch(dispatchKeySet, self, end, weight, out); + } + + // aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor + inline at::Tensor lerp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) { + return at::_ops::lerp_Scalar::redispatch(dispatchKeySet, self, end, weight); + } + + // aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor + inline at::Tensor lerp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) { + return at::_ops::lerp_Tensor::redispatch(dispatchKeySet, self, end, weight); + } + + // aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & histc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc_out::redispatch(dispatchKeySet, self, bins, min, max, out); + } + + // aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & histc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max, at::Tensor & out) { + return at::_ops::histc_out::redispatch(dispatchKeySet, self, bins, min, max, out); + } + + // aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor + inline at::Tensor histc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) { + return at::_ops::histc::redispatch(dispatchKeySet, self, bins, min, max); + } + + // aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) 
bin_edges) + inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::histogram_bins_tensor_out::redispatch(dispatchKeySet, self, bins, weight, density, hist, bin_edges); + } + + // aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) + inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) { + return at::_ops::histogram_bins_tensor_out::redispatch(dispatchKeySet, self, bins, weight, density, hist, bin_edges); + } + + // aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) + inline ::std::tuple<at::Tensor,at::Tensor> histogram(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::histogram_bins_tensor::redispatch(dispatchKeySet, self, bins, weight, density); + } + + // aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) + inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::histogram_bin_ct_out::redispatch(dispatchKeySet, self, bins, range, weight, density, hist, bin_edges); + } + + // aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges) + inline ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges) { + return at::_ops::histogram_bin_ct_out::redispatch(dispatchKeySet, self, bins, range, weight, density, hist, bin_edges); + } + + // aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) + inline ::std::tuple<at::Tensor,at::Tensor> histogram(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::histogram_bin_ct::redispatch(dispatchKeySet, self, bins, range, weight, density); + } + + // aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[] + inline ::std::vector<at::Tensor> _histogramdd_bin_edges(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::_histogramdd_bin_edges::redispatch(dispatchKeySet, self, bins, range, weight, density); + } + + // aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor?
weight=None, bool density=False) -> Tensor + inline at::Tensor _histogramdd_from_bin_cts(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::_histogramdd_from_bin_cts::redispatch(dispatchKeySet, self, bins, range, weight, density); + } + + // aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False) -> Tensor + inline at::Tensor _histogramdd_from_bin_tensors(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::_histogramdd_from_bin_tensors::redispatch(dispatchKeySet, self, bins, weight, density); + } + + // aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) + inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::histogramdd::redispatch(dispatchKeySet, self, bins, range, weight, density); + } + + // aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) + inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::histogramdd_int_bins::redispatch(dispatchKeySet, self, bins, range, weight, density); + } + + // aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges) + inline ::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) { + return at::_ops::histogramdd_TensorList_bins::redispatch(dispatchKeySet, self, bins, range, weight, density); + } + + // aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::fmod_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::fmod_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor fmod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::fmod_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & fmod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::fmod__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fmod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmod_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor fmod(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & fmod_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hypot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hypot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::hypot_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::hypot(Tensor self, Tensor other) -> Tensor + inline at::Tensor hypot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot::redispatch(dispatchKeySet, self, other); + } + + // aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & hypot_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::hypot_::redispatch(dispatchKeySet, self, other); + } + + // aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & igamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & igamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igamma_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igamma(Tensor self, Tensor other) -> Tensor + inline at::Tensor igamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma::redispatch(dispatchKeySet, self, other); + } + + // aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & igamma_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::igamma_::redispatch(dispatchKeySet, self, other); + } + + // aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & igammac_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & igammac_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::igammac_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::igammac(Tensor self, Tensor other) -> Tensor + inline at::Tensor igammac(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac::redispatch(dispatchKeySet, self, other); + } + + // aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & igammac_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::igammac_::redispatch(dispatchKeySet, self, other); + } + + // aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nextafter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::nextafter_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nextafter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::nextafter_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::nextafter(Tensor self, Tensor other) -> Tensor + inline at::Tensor nextafter(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::nextafter::redispatch(dispatchKeySet, self, other); + } + + // aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & nextafter_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::nextafter_::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::remainder_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::remainder_Scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor remainder(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::remainder_Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + inline at::Tensor & remainder_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other) { + return at::_ops::remainder__Scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & remainder_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::remainder_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & remainder_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::remainder_Tensor_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor + inline at::Tensor remainder(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::remainder_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + inline at::Tensor & remainder_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other) { + return at::_ops::remainder__Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor + inline at::Tensor remainder(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::remainder_Scalar_Tensor::redispatch(dispatchKeySet, self, other); + } + + // aten::min(Tensor self) -> Tensor + inline at::Tensor min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::min::redispatch(dispatchKeySet, self); + } + + // aten::fmin(Tensor self, Tensor other) -> Tensor + inline at::Tensor fmin(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmin::redispatch(dispatchKeySet, self, other); + } + + // aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmin_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmin_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmin_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmin_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::max(Tensor self) -> Tensor + inline at::Tensor max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::max::redispatch(dispatchKeySet, self); + } + + // aten::fmax(Tensor self, Tensor other) -> Tensor + inline at::Tensor fmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmax::redispatch(dispatchKeySet, self, other); + } + + // aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & fmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmax_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & fmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmax_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::maximum(Tensor self, Tensor other) -> Tensor + inline at::Tensor maximum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum::redispatch(dispatchKeySet, self, other); + } + + // aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & maximum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::maximum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & maximum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::maximum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::max.other(Tensor self, Tensor other) -> Tensor + inline at::Tensor max(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_other::redispatch(dispatchKeySet, self, other); + } + + // aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::max_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::max_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::minimum(Tensor self, Tensor other) -> Tensor + inline at::Tensor minimum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::minimum::redispatch(dispatchKeySet, self, other); + } + + // aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & minimum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::minimum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & minimum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::minimum_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & min_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & min_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::min_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::min.other(Tensor self, Tensor other) -> Tensor + inline at::Tensor min(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::min_other::redispatch(dispatchKeySet, self, other); + } + + // aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor quantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor quantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::quantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::quantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + }
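// Usage sketch (illustrative, via the public at:: wrappers): quantile interpolates
// order statistics; with dim unset it reduces over the whole tensor, and per the
// schema above interpolation selects how values between data points are computed
// ("linear", "lower", "higher", "nearest", or "midpoint").
#include <ATen/ATen.h>

void quantile_demo() {
  at::Tensor x = at::randn({4, 8});
  at::Tensor median = at::quantile(x, 0.5);             // single global quantile
  at::Tensor per_row = at::quantile(x, 0.9, /*dim=*/1,  // one value per row
                                    /*keepdim=*/false, "nearest");
}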
+ + // aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor nanquantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::nanquantile_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor + inline at::Tensor nanquantile(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile_scalar::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation); + } + + // aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double q, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") { + return at::_ops::nanquantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + } + + // aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & nanquantile_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation, at::Tensor & out) { + return at::_ops::nanquantile_scalar_out::redispatch(dispatchKeySet, self, q, dim, keepdim, interpolation, out); + }
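// Usage sketch: nanquantile has the same shape as quantile above but treats NaN
// entries as if they were masked out of self. Illustrative, assuming a tensor
// that contains NaNs:
#include <ATen/ATen.h>
#include <limits>
#include <vector>

void nanquantile_demo() {
  std::vector<double> vals{1.0, 2.0, std::numeric_limits<double>::quiet_NaN(), 4.0};
  at::Tensor x = at::tensor(vals);
  at::Tensor q = at::nanquantile(x, 0.5);  // median of {1, 2, 4}, NaN ignored
}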
+ + // aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t dim=-1, bool descending=false) { + return at::_ops::sort_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false) { + return at::_ops::sort_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1, bool descending=false) { + return at::_ops::sort::redispatch(dispatchKeySet, self, dim, descending); + } + + // aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, int64_t dim=-1, bool descending=false) { + return at::_ops::sort_stable::redispatch(dispatchKeySet, self, stable, dim, descending); + }
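// Usage sketch: sort returns a (values, indices) pair, and the stable overload
// above pins the order of equal elements. Illustrative call through the public
// wrappers:
#include <ATen/ATen.h>

void sort_demo() {
  at::Tensor x = at::randint(0, 10, {16});
  auto [values, indices] = at::sort(x, /*dim=*/-1, /*descending=*/false);
  // Stable variant: equal keys keep their original relative order.
  auto [svalues, sindices] = at::sort(x, /*stable=*/true, /*dim=*/-1,
                                      /*descending=*/false);
}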
+ + // aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_dimname_values::redispatch(dispatchKeySet, self, dim, descending, values, indices); + } + + // aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> sort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending, at::Tensor & values, at::Tensor & indices) { + return at::_ops::sort_dimname_values_stable::redispatch(dispatchKeySet, self, stable, dim, descending, values, indices); + } + + // aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname::redispatch(dispatchKeySet, self, dim, descending); + } + + // aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> sort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending=false) { + return at::_ops::sort_dimname_stable::redispatch(dispatchKeySet, self, stable, dim, descending); + } + + // aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & msort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::msort_out::redispatch(dispatchKeySet, self, out); + } + + // aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & msort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::msort_out::redispatch(dispatchKeySet, self, out); + } + + // aten::msort(Tensor self) -> Tensor + inline at::Tensor msort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::msort::redispatch(dispatchKeySet, self); + } + + // aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor + inline at::Tensor argsort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1, bool descending=false) { + return at::_ops::argsort::redispatch(dispatchKeySet, self, dim, descending); + } + + // aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor + inline at::Tensor argsort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false) { + return at::_ops::argsort_stable::redispatch(dispatchKeySet, self, stable, dim, descending); + } + + // aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor + inline at::Tensor argsort(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, bool descending=false) { + return at::_ops::argsort_dimname::redispatch(dispatchKeySet, self, dim, descending); + }
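// Usage sketch tying the sorting entry points together: argsort returns only the
// permutation, msort sorts along dim 0, and the topk overloads that follow below
// select the k largest (or smallest) entries without a full sort. Illustrative:
#include <ATen/ATen.h>

void topk_demo() {
  at::Tensor scores = at::randn({100});
  at::Tensor order = at::argsort(scores, /*dim=*/-1, /*descending=*/true);
  auto [top_vals, top_idx] = at::topk(scores, /*k=*/5);  // 5 largest scores
}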
+ + // aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> topk_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) { + return at::_ops::topk_values::redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices); + } + + // aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + inline ::std::tuple<at::Tensor &,at::Tensor &> topk_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, at::Tensor & values, at::Tensor & indices) { + return at::_ops::topk_values::redispatch(dispatchKeySet, self, k, dim, largest, sorted, values, indices); + } + + // aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + inline ::std::tuple<at::Tensor,at::Tensor> topk(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) { + return at::_ops::topk::redispatch(dispatchKeySet, self, k, dim, largest, sorted); + } + + // aten::all(Tensor self) -> Tensor + inline at::Tensor all(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::all::redispatch(dispatchKeySet, self); + } + + // aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::all_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & all_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::all_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::any(Tensor self) -> Tensor + inline at::Tensor any(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::any::redispatch(dispatchKeySet, self); + } + + // aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::any_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & any_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::any_all_out::redispatch(dispatchKeySet, self, out); + } + + // aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & renorm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + return at::_ops::renorm_out::redispatch(dispatchKeySet, self, p, dim, maxnorm, out); + } + + // aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & renorm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out) { + return at::_ops::renorm_out::redispatch(dispatchKeySet, self, p, dim, maxnorm, out); + } + + // aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor + inline at::Tensor renorm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + return at::_ops::renorm::redispatch(dispatchKeySet, self, p, dim, maxnorm); + } + + // aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!)
+ inline at::Tensor & renorm_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) { + return at::_ops::renorm_::redispatch(dispatchKeySet, self, p, dim, maxnorm); + } + + // aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) + inline at::Tensor unfold(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) { + return at::_ops::unfold::redispatch(dispatchKeySet, self, dimension, size, step); + } + + // aten::unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor + inline at::Tensor unfold_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) { + return at::_ops::unfold_backward::redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step); + } + + // aten::equal(Tensor self, Tensor other) -> bool + inline bool equal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::equal::redispatch(dispatchKeySet, self, other); + } + + // aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + inline at::Tensor pow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor + inline at::Tensor pow(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & pow_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & pow_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + inline at::Tensor pow(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + inline at::Tensor & pow_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow__Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + inline at::Tensor & pow_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow__Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::float_power_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::float_power_Tensor_Tensor_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + inline at::Tensor float_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::float_power_Tensor_Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::float_power_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::float_power_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor + inline at::Tensor float_power(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::float_power_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & float_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::float_power_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & float_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { + return at::_ops::float_power_Tensor_Scalar_out::redispatch(dispatchKeySet, self, exponent, out); + } + + // aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + inline at::Tensor float_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::float_power_Tensor_Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + inline at::Tensor & float_power_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::float_power__Scalar::redispatch(dispatchKeySet, self, exponent); + } + + // aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + inline at::Tensor & float_power_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::float_power__Tensor::redispatch(dispatchKeySet, self, exponent); + } + + // aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & normal_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_::redispatch(dispatchKeySet, self, mean, std, generator); + } + + // aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor + inline at::Tensor normal_functional(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_functional::redispatch(dispatchKeySet, self, mean, std, generator); + } + + // aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_float_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_Tensor_float_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_float::redispatch(dispatchKeySet, mean, std, generator); + }
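// Usage sketch for the normal.* overloads above and below: mean and std may each
// be a tensor or a double, and broadcasting applies when both are tensors.
// Illustrative:
#include <ATen/ATen.h>

void normal_demo() {
  at::Tensor mean = at::zeros({3, 3});
  at::Tensor a = at::normal(mean, /*std=*/1.0);   // Tensor mean, scalar std
  at::Tensor stddev = at::ones({3, 3});
  at::Tensor b = at::normal(0.0, stddev);         // scalar mean, Tensor std
  at::Tensor c = at::normal(mean, stddev);        // elementwise mean and std
}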
+ + // aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_float_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_float_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_float_Tensor::redispatch(dispatchKeySet, mean, std, generator); + } + + // aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_Tensor_Tensor_out::redispatch(dispatchKeySet, mean, std, generator, out); + } + + // aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_Tensor_Tensor::redispatch(dispatchKeySet, mean, std, generator); + } + + // aten::normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt, at::TensorOptions options={}) { + return at::_ops::normal_float_float::redispatch(dispatchKeySet, mean, std, size, generator, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } + + // aten::normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + inline at::Tensor normal(c10::DispatchKeySet dispatchKeySet, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) { + return at::_ops::normal_float_float::redispatch(dispatchKeySet, mean, std, size, generator, dtype, layout, device, pin_memory); + }
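// Note on the two normal.float_float wrappers above: this variant has a
// factory-style signature, so it is emitted once with a packed at::TensorOptions
// argument and once with the unpacked dtype/layout/device/pin_memory optionals;
// both redispatch to the same operator. Illustrative sketch of the packed form:
#include <ATen/ATen.h>

void normal_factory_demo() {
  // Fresh 2x3 float tensor drawn from N(0, 1).
  at::Tensor a = at::normal(0.0, 1.0, {2, 3}, /*generator=*/c10::nullopt,
                            at::TensorOptions().dtype(at::kFloat));
}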
+ + // aten::normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::normal_float_float_out::redispatch(dispatchKeySet, mean, std, size, generator, out); + } + + // aten::normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::normal_float_float_out::redispatch(dispatchKeySet, mean, std, size, generator, out); + } + + // aten::alias(Tensor(a) self) -> Tensor(a) + inline at::Tensor alias(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::alias::redispatch(dispatchKeySet, self); + } + + // aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> () + inline void _amp_foreach_non_finite_check_and_unscale_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) { + return at::_ops::_amp_foreach_non_finite_check_and_unscale_::redispatch(dispatchKeySet, self, found_inf, inv_scale); + } + + // aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!) + inline at::Tensor & _amp_update_scale_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + return at::_ops::_amp_update_scale_::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); + } + + // aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_add(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_add_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_sub(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_sub_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_sub_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_sub__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_mul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_mul_Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_mul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_mul__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_div(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_div_Scalar::redispatch(dispatchKeySet, self, scalar); + }
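// Usage sketch for the _foreach_*.Scalar entry points above: they apply one
// scalar op across a whole list of tensors, fusing the per-tensor loop an
// optimizer would otherwise run. Underscore-suffixed variants mutate in place
// and return void. Illustrative:
#include <ATen/ATen.h>
#include <vector>

void foreach_scalar_demo() {
  std::vector<at::Tensor> params = {at::randn({4}), at::randn({8})};
  std::vector<at::Tensor> scaled = at::_foreach_mul(params, 0.5);  // new list
  at::_foreach_add_(params, 1.0);  // in-place across every tensor in the list
}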
+ + // aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + inline void _foreach_div_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_div__Scalar::redispatch(dispatchKeySet, self, scalar); + } + + // aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_add(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + inline void _foreach_add_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add__List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_sub(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_sub_List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + inline void _foreach_sub_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_sub__List::redispatch(dispatchKeySet, self, other, alpha); + } + + // aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_mul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_mul_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_mul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_mul__List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_div(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_div_List::redispatch(dispatchKeySet, self, other); + } + + // aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () + inline void _foreach_div_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_div__List::redispatch(dispatchKeySet, self, other); + }
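// Usage sketch for the List variants above: they combine two equally sized
// tensor lists elementwise, with add/sub also taking an alpha multiplier
// (self[i] += alpha * other[i]). Illustrative gradient-step flavor:
#include <ATen/ATen.h>
#include <vector>

void foreach_list_demo() {
  std::vector<at::Tensor> params = {at::randn({4}), at::randn({8})};
  std::vector<at::Tensor> grads  = {at::randn({4}), at::randn({8})};
  // params[i] -= lr * grads[i], fused over the list (sub_ with alpha=lr).
  at::_foreach_sub_(params, grads, /*alpha=*/0.01);
}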
+ + // aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_add(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_add_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_add_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_add__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_sub(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_sub_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_sub_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_sub__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_div(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_div_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_div_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_div__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_mul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_mul_ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () + inline void _foreach_mul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_mul__ScalarList::redispatch(dispatchKeySet, self, scalars); + } + + // aten::_foreach_exp(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_exp(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_exp::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_zero_(Tensor(a!)[] self) -> () + inline void _foreach_zero_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_zero_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_exp_(Tensor(a!)[] self) -> () + inline void _foreach_exp_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_exp_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sqrt(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_sqrt(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sqrt::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + inline void _foreach_sqrt_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sqrt_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_abs(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_abs(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_abs::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_abs_(Tensor(a!)[] self) -> () + inline void _foreach_abs_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_abs_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_acos(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_acos(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_acos::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_acos_(Tensor(a!)[] self) -> () + inline void _foreach_acos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_acos_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_asin(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_asin(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_asin::redispatch(dispatchKeySet, self); + }
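// Usage sketch for the unary _foreach entry points (exp/sqrt/abs/... above, and
// the trig/log family that follows): each maps a pointwise op over a tensor
// list; the trailing-underscore form mutates the list in place. Illustrative:
#include <ATen/ATen.h>
#include <vector>

void foreach_unary_demo() {
  std::vector<at::Tensor> xs = {at::rand({4}), at::rand({8})};
  std::vector<at::Tensor> roots = at::_foreach_sqrt(xs);  // out-of-place
  at::_foreach_exp_(xs);  // in-place exp over every tensor
}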
+ + // aten::_foreach_asin_(Tensor(a!)[] self) -> () + inline void _foreach_asin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_asin_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_atan(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_atan(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_atan::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_atan_(Tensor(a!)[] self) -> () + inline void _foreach_atan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_atan_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_ceil(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_ceil(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_ceil::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_ceil_(Tensor(a!)[] self) -> () + inline void _foreach_ceil_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_ceil_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cos(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_cos(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cos::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cos_(Tensor(a!)[] self) -> () + inline void _foreach_cos_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cos_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cosh(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_cosh(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cosh::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_cosh_(Tensor(a!)[] self) -> () + inline void _foreach_cosh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_cosh_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erf(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_erf(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erf::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erf_(Tensor(a!)[] self) -> () + inline void _foreach_erf_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erf_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erfc(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_erfc(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erfc::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_erfc_(Tensor(a!)[] self) -> () + inline void _foreach_erfc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_erfc_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_expm1(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_expm1(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_expm1::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_expm1_(Tensor(a!)[] self) -> () + inline void _foreach_expm1_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_expm1_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_floor(Tensor[] self) -> Tensor[] + inline ::std::vector<at::Tensor> _foreach_floor(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_floor::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_floor_(Tensor(a!)[] self) -> () + inline void _foreach_floor_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_floor_::redispatch(dispatchKeySet, self); + }
at::_ops::_foreach_floor_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log_(Tensor(a!)[] self) -> () + inline void _foreach_log_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log10(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log10(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log10::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log10_(Tensor(a!)[] self) -> () + inline void _foreach_log10_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log10_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log1p(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log1p(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log1p::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log1p_(Tensor(a!)[] self) -> () + inline void _foreach_log1p_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log1p_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log2(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_log2(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log2::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_log2_(Tensor(a!)[] self) -> () + inline void _foreach_log2_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_log2_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_neg(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_neg(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_neg::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_neg_(Tensor(a!)[] self) -> () + inline void _foreach_neg_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_neg_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tan(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_tan(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tan::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tan_(Tensor(a!)[] self) -> () + inline void _foreach_tan_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tan_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tanh(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_tanh(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tanh::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_tanh_(Tensor(a!)[] self) -> () + inline void _foreach_tanh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_tanh_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sin(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sin(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sin::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sin_(Tensor(a!)[] self) -> () + inline void _foreach_sin_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sin_::redispatch(dispatchKeySet, self); + } + + // 
aten::_foreach_sinh(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sinh(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sinh::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sinh_(Tensor(a!)[] self) -> () + inline void _foreach_sinh_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sinh_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_round(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_round(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_round::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_round_(Tensor(a!)[] self) -> () + inline void _foreach_round_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_round_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_lgamma(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_lgamma(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_lgamma::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_lgamma_(Tensor(a!)[] self) -> () + inline void _foreach_lgamma_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_lgamma_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_frac(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_frac(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_frac::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_frac_(Tensor(a!)[] self) -> () + inline void _foreach_frac_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_frac_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_reciprocal(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_reciprocal(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_reciprocal::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_reciprocal_(Tensor(a!)[] self) -> () + inline void _foreach_reciprocal_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_reciprocal_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sigmoid(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_sigmoid(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sigmoid::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sigmoid_(Tensor(a!)[] self) -> () + inline void _foreach_sigmoid_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_sigmoid_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_trunc(Tensor[] self) -> Tensor[] + inline ::std::vector _foreach_trunc(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_trunc::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_trunc_(Tensor(a!)[] self) -> () + inline void _foreach_trunc_(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_trunc_::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + inline void _foreach_addcdiv_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcdiv__Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value); + } + + // 
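+ // Usage sketch (editorial illustration, not generated code): the _foreach_*
+ // entries above are the fused multi-tensor kernels that foreach-style
+ // optimizers rely on to avoid one dispatch per parameter. Assuming the public
+ // at:: wrappers that mirror these redispatch shims (declared in the regular
+ // ATen Functions.h, reachable via #include <ATen/ATen.h>), a caller-side
+ // equivalent of `for (auto& t : ts) t.mul_(2);` is:
+ //
+ //   std::vector<at::Tensor> ts = {at::rand({2, 2}), at::rand({3})};
+ //   at::_foreach_mul_(ts, 2.0);          // one fused in-place op over the whole list
+ //   auto roots = at::_foreach_sqrt(ts);  // out-of-place: returns std::vector<at::Tensor>
+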
+ // aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()
+ inline void _foreach_addcmul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
+   return at::_ops::_foreach_addcmul__Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value);
+ }
+
+ // aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
+ inline void _foreach_addcdiv_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+   return at::_ops::_foreach_addcdiv__ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()
+ inline void _foreach_addcmul_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+   return at::_ops::_foreach_addcmul__ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_addcdiv(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
+   return at::_ops::_foreach_addcdiv_Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value);
+ }
+
+ // aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_addcmul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) {
+   return at::_ops::_foreach_addcmul_Scalar::redispatch(dispatchKeySet, self, tensor1, tensor2, value);
+ }
+
+ // aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_addcdiv(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+   return at::_ops::_foreach_addcdiv_ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_addcmul(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
+   return at::_ops::_foreach_addcmul_ScalarList::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars);
+ }
+
+ // aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_maximum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
+   return at::_ops::_foreach_maximum_List::redispatch(dispatchKeySet, self, other);
+ }
+
+ // aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ inline void _foreach_maximum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
+   return at::_ops::_foreach_maximum__List::redispatch(dispatchKeySet, self, other);
+ }
+
+ // aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_minimum(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
+   return at::_ops::_foreach_minimum_List::redispatch(dispatchKeySet, self, other);
+ }
+
+ // aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+ inline void _foreach_minimum_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other) {
+   return at::_ops::_foreach_minimum__List::redispatch(dispatchKeySet, self, other);
+ }
+
+ // aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]
+ inline ::std::vector<at::Tensor> _foreach_norm(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord=2) {
+   return at::_ops::_foreach_norm_Scalar::redispatch(dispatchKeySet, self, ord);
+ }
+
+ // aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
+ inline at::Tensor bucketize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
+   return at::_ops::bucketize_Tensor::redispatch(dispatchKeySet, self, boundaries, out_int32, right);
+ }
+
+ // aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & bucketize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
+   return at::_ops::bucketize_Tensor_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out);
+ }
+
+ // aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & bucketize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) {
+   return at::_ops::bucketize_Tensor_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out);
+ }
+
+ // aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor
+ inline at::Tensor bucketize(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) {
+   return at::_ops::bucketize_Scalar::redispatch(dispatchKeySet, self, boundaries, out_int32, right);
+ }
+
+ // aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+ inline at::Tensor searchsorted(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+   return at::_ops::searchsorted_Tensor::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
+ }
+
+ // aten::_torch_cuda_cu_linker_symbol_op(Tensor self) -> Tensor
+ inline at::Tensor _torch_cuda_cu_linker_symbol_op(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::_torch_cuda_cu_linker_symbol_op::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & searchsorted_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+   return at::_ops::searchsorted_Tensor_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
+ }
+
+ // aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & searchsorted_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) {
+   return at::_ops::searchsorted_Tensor_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out);
+ }
+
+ // aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor
+ inline at::Tensor searchsorted(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) {
+   return at::_ops::searchsorted_Scalar::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter);
+ }
+
+ // aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor
+ inline at::Tensor _convert_indices_from_coo_to_csr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32=false) {
+   return at::_ops::_convert_indices_from_coo_to_csr::redispatch(dispatchKeySet, self, size, out_int32);
+ }
+
+ // aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _convert_indices_from_coo_to_csr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t size, bool out_int32=false) {
+   return at::_ops::_convert_indices_from_coo_to_csr_out::redispatch(dispatchKeySet, self, size, out_int32, out);
+ }
+
+ // aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _convert_indices_from_coo_to_csr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out) {
+   return at::_ops::_convert_indices_from_coo_to_csr_out::redispatch(dispatchKeySet, self, size, out_int32, out);
+ }
+
+ // aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor
+ inline at::Tensor _convert_indices_from_csr_to_coo(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) {
+   return at::_ops::_convert_indices_from_csr_to_coo::redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose);
+ }
+
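+ // Usage sketch (editorial illustration, not generated code): bucketize and
+ // searchsorted both binary-search; bucketize searches one shared 1-D
+ // `boundaries` tensor elementwise, while searchsorted searches row-wise in
+ // `sorted_sequence`. With right=False the returned index i satisfies
+ // boundaries[i-1] < v <= boundaries[i]. Assuming the public at:: wrappers:
+ //
+ //   auto boundaries = at::tensor({1, 3, 5, 7});
+ //   auto v = at::tensor({0, 3, 6});
+ //   auto idx = at::bucketize(v, boundaries);   // -> {0, 1, 3}
+ //
+ // _convert_indices_from_coo_to_csr compresses sorted COO row indices into the
+ // CSR `crow_indices` pointer array: rows {0, 0, 1, 3} with size=4 become
+ // {0, 2, 3, 3, 4}, since entry i counts how many values fall before row i.
+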
+ // aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _convert_indices_from_csr_to_coo_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32=false, bool transpose=false) {
+   return at::_ops::_convert_indices_from_csr_to_coo_out::redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose, out);
+ }
+
+ // aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _convert_indices_from_csr_to_coo_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose, at::Tensor & out) {
+   return at::_ops::_convert_indices_from_csr_to_coo_out::redispatch(dispatchKeySet, crow_indices, col_indices, out_int32, transpose, out);
+ }
+
+ // aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mse_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::mse_loss_out::redispatch(dispatchKeySet, self, target, reduction, out);
+ }
+
+ // aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mse_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
+   return at::_ops::mse_loss_out::redispatch(dispatchKeySet, self, target, reduction, out);
+ }
+
+ // aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+ inline at::Tensor mse_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::mse_loss::redispatch(dispatchKeySet, self, target, reduction);
+ }
+
+ // aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & mse_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+   return at::_ops::mse_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
+ }
+
+ // aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & mse_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
+   return at::_ops::mse_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
+ }
+
+ // aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
+ inline at::Tensor mse_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+   return at::_ops::mse_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction);
+ }
+
+ // aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+ inline at::Tensor l1_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::l1_loss::redispatch(dispatchKeySet, self, target, reduction);
+ }
+
+ // aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & multi_margin_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::multi_margin_loss_out::redispatch(dispatchKeySet, self, target, p, margin, weight, reduction, out);
+ }
+
+ // aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & multi_margin_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out) {
+   return at::_ops::multi_margin_loss_out::redispatch(dispatchKeySet, self, target, p, margin, weight, reduction, out);
+ }
+
+ // aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
+ inline at::Tensor multi_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::multi_margin_loss::redispatch(dispatchKeySet, self, target, p, margin, weight, reduction);
+ }
+
+ // aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & multi_margin_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::multi_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction, grad_input);
+ }
+
+ // aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & multi_margin_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input) {
+   return at::_ops::multi_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction, grad_input);
+ }
+
+ // aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor
+ inline at::Tensor multi_margin_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::multi_margin_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, p, margin, weight, reduction);
+ }
+
+ // aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & multilabel_margin_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::multilabel_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out);
+ }
+
+ // aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & multilabel_margin_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
+   return at::_ops::multilabel_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out);
+ }
+
+ // aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+ inline at::Tensor multilabel_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::multilabel_margin_loss::redispatch(dispatchKeySet, self, target, reduction);
+ }
+
+ // aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & is_target, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+   return at::_ops::multilabel_margin_loss_forward_output::redispatch(dispatchKeySet, self, target, reduction, output, is_target);
+ }
+
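+ // Usage sketch (editorial illustration, not generated code): every loss in
+ // this block takes the same `int64_t reduction` flag, one of
+ // at::Reduction::None (0, keep elementwise losses), at::Reduction::Mean
+ // (1, the default) or at::Reduction::Sum (2). Assuming the public at::
+ // wrappers:
+ //
+ //   auto pred   = at::rand({4, 3});
+ //   auto target = at::rand({4, 3});
+ //   auto scalar_loss = at::mse_loss(pred, target);                      // mean((pred - target)^2)
+ //   auto per_elem    = at::mse_loss(pred, target, at::Reduction::None); // shape {4, 3}
+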
+ // aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> multilabel_margin_loss_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & output, at::Tensor & is_target) {
+   return at::_ops::multilabel_margin_loss_forward_output::redispatch(dispatchKeySet, self, target, reduction, output, is_target);
+ }
+
+ // aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)
+ inline ::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+   return at::_ops::multilabel_margin_loss_forward::redispatch(dispatchKeySet, self, target, reduction);
+ }
+
+ // aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & multilabel_margin_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
+   return at::_ops::multilabel_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target, grad_input);
+ }
+
+ // aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & multilabel_margin_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) {
+   return at::_ops::multilabel_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target, grad_input);
+ }
+
+ // aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
+ inline at::Tensor multilabel_margin_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
+   return at::_ops::multilabel_margin_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction, is_target);
+ }
+
+ // aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & nll_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+   return at::_ops::nll_loss_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
+ }
+
+ // aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & nll_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
+   return at::_ops::nll_loss_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
+ }
+
+ // aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+ inline at::Tensor nll_loss_nd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+   return at::_ops::nll_loss_nd::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
+ }
+
+ // aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+ inline at::Tensor nll_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+   return at::_ops::nll_loss::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
+ }
+
+ // aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+   return at::_ops::nll_loss_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
+ }
+
+ // aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
+   return at::_ops::nll_loss_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
+ }
+
+ // aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
+ inline ::std::tuple<at::Tensor,at::Tensor> nll_loss_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+   return at::_ops::nll_loss_forward::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
+ }
+
+ // aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & nll_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+   return at::_ops::nll_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+ }
+
+ // aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & nll_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
+   return at::_ops::nll_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+ }
+
+ // aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
+ inline at::Tensor nll_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+   return at::_ops::nll_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight);
+ }
+
+ // aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & nll_loss2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+   return at::_ops::nll_loss2d_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
+ }
+
+ // aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & nll_loss2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & out) {
+   return at::_ops::nll_loss2d_out::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, out);
+ }
+
+ // aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
+ inline at::Tensor nll_loss2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100) {
+   return at::_ops::nll_loss2d::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
+ }
+
+ // aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & total_weight, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+   return at::_ops::nll_loss2d_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
+ }
+
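+ // Usage sketch (editorial illustration, not generated code): nll_loss expects
+ // log-probabilities plus integer class targets, so it is normally fed from
+ // log_softmax; ignore_index marks target entries that contribute neither loss
+ // nor gradient. Assuming the public at:: wrappers:
+ //
+ //   auto logits = at::randn({8, 10});                 // batch of 8, 10 classes
+ //   auto target = at::randint(0, 10, {8}, at::kLong);
+ //   auto loss   = at::nll_loss(at::log_softmax(logits, /*dim=*/1), target);
+ //
+ // nll_loss2d is the same computation for {N, C, H, W} inputs (e.g. per-pixel
+ // segmentation targets), and nll_loss_nd routes to one of the two by rank.
+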
+ // aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> nll_loss2d_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight) {
+   return at::_ops::nll_loss2d_forward_output::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index, output, total_weight);
+ }
+
+ // aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)
+ inline ::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index) {
+   return at::_ops::nll_loss2d_forward::redispatch(dispatchKeySet, self, target, weight, reduction, ignore_index);
+ }
+
+ // aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & nll_loss2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+   return at::_ops::nll_loss2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+ }
+
+ // aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & nll_loss2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input) {
+   return at::_ops::nll_loss2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input);
+ }
+
+ // aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
+ inline at::Tensor nll_loss2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight) {
+   return at::_ops::nll_loss2d_backward::redispatch(dispatchKeySet, grad_output, self, target, weight, reduction, ignore_index, total_weight);
+ }
+
+ // aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & smooth_l1_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0) {
+   return at::_ops::smooth_l1_loss_out::redispatch(dispatchKeySet, self, target, reduction, beta, out);
+ }
+
+ // aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & smooth_l1_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & out) {
+   return at::_ops::smooth_l1_loss_out::redispatch(dispatchKeySet, self, target, reduction, beta, out);
+ }
+
+ // aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor
+ inline at::Tensor smooth_l1_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double beta=1.0) {
+   return at::_ops::smooth_l1_loss::redispatch(dispatchKeySet, self, target, reduction, beta);
+ }
+
+ // aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & smooth_l1_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
+   return at::_ops::smooth_l1_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, beta, grad_input);
+ }
+
+ // aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & smooth_l1_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta, at::Tensor & grad_input) {
+   return at::_ops::smooth_l1_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, beta, grad_input);
+ }
+
+ // aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor
+ inline at::Tensor smooth_l1_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
+   return at::_ops::smooth_l1_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction, beta);
+ }
+
+ // aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & huber_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
+   return at::_ops::huber_loss_out::redispatch(dispatchKeySet, self, target, reduction, delta, out);
+ }
+
+ // aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & huber_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out) {
+   return at::_ops::huber_loss_out::redispatch(dispatchKeySet, self, target, reduction, delta, out);
+ }
+
+ // aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor
+ inline at::Tensor huber_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0) {
+   return at::_ops::huber_loss::redispatch(dispatchKeySet, self, target, reduction, delta);
+ }
+
+ // aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & huber_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
+   return at::_ops::huber_loss_backward_out::redispatch(dispatchKeySet, grad_output, self, target, reduction, delta, grad_input);
+ }
+
+ // aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & huber_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input) {
+   return at::_ops::huber_loss_backward_out::redispatch(dispatchKeySet, grad_output, self, target, reduction, delta, grad_input);
+ }
+
+ // aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor
+ inline at::Tensor huber_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
+   return at::_ops::huber_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction, delta);
+ }
+
+ // aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & soft_margin_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::soft_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out);
+ }
+
+ // aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & soft_margin_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & out) {
+   return at::_ops::soft_margin_loss_out::redispatch(dispatchKeySet, self, target, reduction, out);
+ }
+
+ // aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
+ inline at::Tensor soft_margin_loss(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean) {
+   return at::_ops::soft_margin_loss::redispatch(dispatchKeySet, self, target, reduction);
+ }
+
+ // aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & soft_margin_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+   return at::_ops::soft_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
+ }
+
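+ // Usage sketch (editorial illustration, not generated code): smooth_l1_loss
+ // and huber_loss are the same robust-loss family. For |x| < delta huber is
+ // 0.5*x^2, beyond that it is linear, delta*(|x| - 0.5*delta); smooth_l1
+ // divides the quadratic branch by beta, so huber_loss with delta=b equals
+ // b * smooth_l1_loss with beta=b. Assuming the public at:: wrappers:
+ //
+ //   auto pred = at::randn({16});
+ //   auto tgt  = at::randn({16});
+ //   auto h = at::huber_loss(pred, tgt, at::Reduction::Mean, /*delta=*/2.0);
+ //   auto s = at::smooth_l1_loss(pred, tgt, at::Reduction::Mean, /*beta=*/2.0);
+ //   // h == 2.0 * s, up to floating-point rounding
+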
+ // aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & soft_margin_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) {
+   return at::_ops::soft_margin_loss_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, target, reduction, grad_input);
+ }
+
+ // aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
+ inline at::Tensor soft_margin_loss_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
+   return at::_ops::soft_margin_loss_backward::redispatch(dispatchKeySet, grad_output, self, target, reduction);
+ }
+
+ // aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & elu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) {
+   return at::_ops::elu_out::redispatch(dispatchKeySet, self, alpha, scale, input_scale, out);
+ }
+
+ // aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & elu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, at::Tensor & out) {
+   return at::_ops::elu_out::redispatch(dispatchKeySet, self, alpha, scale, input_scale, out);
+ }
+
+ // aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
+ inline at::Tensor elu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) {
+   return at::_ops::elu::redispatch(dispatchKeySet, self, alpha, scale, input_scale);
+ }
+
+ // aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & elu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
+   return at::_ops::elu_backward_grad_input::redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
+ }
+
+ // aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & elu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result, at::Tensor & grad_input) {
+   return at::_ops::elu_backward_grad_input::redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result, grad_input);
+ }
+
+ // aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor
+ inline at::Tensor elu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
+   return at::_ops::elu_backward::redispatch(dispatchKeySet, grad_output, alpha, scale, input_scale, is_result, self_or_result);
+ }
+
+ // aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)
+ inline at::Tensor & elu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & alpha=1, const at::Scalar & scale=1, const at::Scalar & input_scale=1) {
+   return at::_ops::elu_::redispatch(dispatchKeySet, self, alpha, scale, input_scale);
+ }
+
+ // aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & glu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim=-1) {
+   return at::_ops::glu_out::redispatch(dispatchKeySet, self, dim, out);
+ }
+
+ // aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & glu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
+   return at::_ops::glu_out::redispatch(dispatchKeySet, self, dim, out);
+ }
+
+ // aten::glu(Tensor self, int dim=-1) -> Tensor
+ inline at::Tensor glu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=-1) {
+   return at::_ops::glu::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & glu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
+   return at::_ops::glu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, dim, grad_input);
+ }
+
+ // aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & glu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input) {
+   return at::_ops::glu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, dim, grad_input);
+ }
+
+ // aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
+ inline at::Tensor glu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
+   return at::_ops::glu_backward::redispatch(dispatchKeySet, grad_output, self, dim);
+ }
+
+ // aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
+ inline at::Tensor glu_jvp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
+   return at::_ops::glu_jvp::redispatch(dispatchKeySet, glu, x, dx, dim);
+ }
+
+ // aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor
+ inline at::Tensor glu_backward_jvp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
+   return at::_ops::glu_backward_jvp::redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim);
+ }
+
+ // aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hardsigmoid_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+   return at::_ops::hardsigmoid_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hardsigmoid_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+   return at::_ops::hardsigmoid_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::hardsigmoid(Tensor self) -> Tensor
+ inline at::Tensor hardsigmoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::hardsigmoid::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)
+ inline at::Tensor & hardsigmoid_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
+   return at::_ops::hardsigmoid_::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & hardsigmoid_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) {
+   return at::_ops::hardsigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input);
+ }
+
+ // aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & hardsigmoid_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) {
+   return at::_ops::hardsigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input);
+ }
+
+ // aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
+ inline at::Tensor hardsigmoid_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) {
+   return at::_ops::hardsigmoid_backward::redispatch(dispatchKeySet, grad_output, self);
+ }
+
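+ // Usage sketch (editorial illustration, not generated code): glu halves its
+ // input along `dim` and gates the first half with the sigmoid of the second,
+ // a * sigmoid(b), so that dimension must be even; elu computes
+ // scale * (x > 0 ? x : alpha * (exp(input_scale * x) - 1)). Assuming the
+ // public at:: wrappers:
+ //
+ //   auto x = at::randn({4, 6});
+ //   auto gated = at::glu(x, /*dim=*/1);  // shape {4, 3}
+ //   auto act   = at::elu(x);             // alpha=1, scale=1, input_scale=1
+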
+ inline at::Tensor & hardtanh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_out::redispatch(dispatchKeySet, self, min_val, max_val, out); + } + + // aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardtanh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & out) { + return at::_ops::hardtanh_out::redispatch(dispatchKeySet, self, min_val, max_val, out); + } + + // aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor + inline at::Tensor hardtanh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh::redispatch(dispatchKeySet, self, min_val, max_val); + } + + // aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & hardtanh_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, min_val, max_val, grad_input); + } + + // aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & hardtanh_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input) { + return at::_ops::hardtanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, min_val, max_val, grad_input); + } + + // aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor + inline at::Tensor hardtanh_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) { + return at::_ops::hardtanh_backward::redispatch(dispatchKeySet, grad_output, self, min_val, max_val); + } + + // aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) + inline at::Tensor & hardtanh_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & min_val=-1, const at::Scalar & max_val=1) { + return at::_ops::hardtanh_::redispatch(dispatchKeySet, self, min_val, max_val); + } + + // aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardswish_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::hardswish_out::redispatch(dispatchKeySet, self, out); + } + + // aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardswish_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_out::redispatch(dispatchKeySet, self, out); + } + + // aten::hardswish(Tensor self) -> Tensor + inline at::Tensor hardswish(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::hardswish::redispatch(dispatchKeySet, self); + } + + // aten::hardswish_(Tensor(a!) self) -> Tensor(a!) 
+ inline at::Tensor & hardswish_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) { + return at::_ops::hardswish_::redispatch(dispatchKeySet, self); + } + + // aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor hardswish_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & leaky_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_out::redispatch(dispatchKeySet, self, negative_slope, out); + } + + // aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & leaky_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) { + return at::_ops::leaky_relu_out::redispatch(dispatchKeySet, self, negative_slope, out); + } + + // aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor + inline at::Tensor leaky_relu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu::redispatch(dispatchKeySet, self, negative_slope); + } + + // aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & leaky_relu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result, grad_input); + } + + // aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & leaky_relu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input) { + return at::_ops::leaky_relu_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result, grad_input); + } + + // aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor + inline at::Tensor leaky_relu_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) { + return at::_ops::leaky_relu_backward::redispatch(dispatchKeySet, grad_output, self, negative_slope, self_is_result); + } + + // aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) + inline at::Tensor & leaky_relu_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_::redispatch(dispatchKeySet, self, negative_slope); + } + + // aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
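+ // The log_sigmoid entries below follow the usual forward/backward split: the public op returns a single Tensor, while log_sigmoid_forward also returns a `buffer` tensor that log_sigmoid_backward takes as an extra input (per the schemas below), letting the backward pass reuse intermediate values. A hedged sketch; `ks`, `x`, and `grad` are placeholder names:
+ //
+ //   at::Tensor out, buffer;
+ //   std::tie(out, buffer) = at::redispatch::log_sigmoid_forward(ks, x);
+ //   at::Tensor gin = at::redispatch::log_sigmoid_backward(ks, grad, x, buffer);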
+ inline at::Tensor & log_sigmoid_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::log_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & log_sigmoid_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::log_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::log_sigmoid(Tensor self) -> Tensor + inline at::Tensor log_sigmoid(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log_sigmoid::redispatch(dispatchKeySet, self); + } + + // aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & buffer, const at::Tensor & self) { + return at::_ops::log_sigmoid_forward_output::redispatch(dispatchKeySet, self, output, buffer); + } + + // aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> log_sigmoid_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer) { + return at::_ops::log_sigmoid_forward_output::redispatch(dispatchKeySet, self, output, buffer); + } + + // aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) + inline ::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::log_sigmoid_forward::redispatch(dispatchKeySet, self); + } + + // aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & log_sigmoid_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, buffer, grad_input); + } + + // aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & log_sigmoid_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer, at::Tensor & grad_input) { + return at::_ops::log_sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, buffer, grad_input); + } + + // aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor + inline at::Tensor log_sigmoid_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) { + return at::_ops::log_sigmoid_backward::redispatch(dispatchKeySet, grad_output, self, buffer); + } + + // aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
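+ // For the rrelu_with_noise overloads below, `lower`/`upper` bound the uniformly sampled negative slope, `training` selects sampling versus the fixed (lower+upper)/2 slope, and the optional at::Generator threads an explicit RNG through the redispatch. A hedged sketch with placeholder names (`ks`, `x`) and the default generator:
+ //
+ //   at::Tensor noise = at::empty_like(x);
+ //   at::Tensor y = at::redispatch::rrelu_with_noise(
+ //       ks, x, noise, /*lower=*/0.125, /*upper=*/1.0/3.0, /*training=*/true);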
+ inline at::Tensor & rrelu_with_noise_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::rrelu_with_noise_out::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator, out); + } + + // aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::rrelu_with_noise_out::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator, out); + } + + // aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor + inline at::Tensor rrelu_with_noise(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::rrelu_with_noise::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator); + } + + // aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor + inline at::Tensor rrelu_with_noise_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) { + return at::_ops::rrelu_with_noise_backward::redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result); + } + + // aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::rrelu_with_noise_::redispatch(dispatchKeySet, self, noise, lower, upper, training, generator); + } + + // aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & softplus_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) { + return at::_ops::softplus_out::redispatch(dispatchKeySet, self, beta, threshold, out); + } + + // aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & softplus_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out) { + return at::_ops::softplus_out::redispatch(dispatchKeySet, self, beta, threshold, out); + } + + // aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor + inline at::Tensor softplus(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20) { + return at::_ops::softplus::redispatch(dispatchKeySet, self, beta, threshold); + } + + // aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & softplus_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + return at::_ops::softplus_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, beta, threshold, grad_input); + } + + // aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & softplus_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & grad_input) { + return at::_ops::softplus_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, beta, threshold, grad_input); + } + + // aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor + inline at::Tensor softplus_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) { + return at::_ops::softplus_backward::redispatch(dispatchKeySet, grad_output, self, beta, threshold); + } + + // aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & softshrink_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::softshrink_out::redispatch(dispatchKeySet, self, lambd, out); + } + + // aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & softshrink_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & out) { + return at::_ops::softshrink_out::redispatch(dispatchKeySet, self, lambd, out); + } + + // aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor + inline at::Tensor softshrink(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & lambd=0.5) { + return at::_ops::softshrink::redispatch(dispatchKeySet, self, lambd); + } + + // aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & softshrink_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::softshrink_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, lambd, grad_input); + } + + // aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & softshrink_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { + return at::_ops::softshrink_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, lambd, grad_input); + } + + // aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor + inline at::Tensor softshrink_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::softshrink_backward::redispatch(dispatchKeySet, grad_output, self, lambd); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), out); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), out); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size)); + } + + // aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + inline at::Tensor mkldnn_adaptive_avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::mkldnn_adaptive_avg_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
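+ // NOTE [ _symint overloads ]: ops whose schema takes SymInt[] (e.g. adaptive_avg_pool2d above) are emitted in two flavors: a concrete at::IntArrayRef overload that converts through c10::fromIntArrayRef, and a `_symint` overload that forwards c10::SymIntArrayRef unchanged so symbolic shapes survive tracing. Callers with plain integer sizes can stay on the IntArrayRef form; a hedged sketch with placeholder names (`ks`, `x`):
+ //
+ //   at::Tensor pooled = at::redispatch::adaptive_avg_pool2d(ks, x, {7, 7});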
+ inline at::Tensor & mkldnn_adaptive_avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::mkldnn_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_adaptive_avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::mkldnn_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor mkldnn_adaptive_avg_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor _adaptive_avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size)); + } + + // aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor + inline at::Tensor _adaptive_avg_pool2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor _adaptive_avg_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool2d_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor + inline at::Tensor adaptive_avg_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::_adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor + inline at::Tensor _adaptive_avg_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool3d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & adaptive_avg_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::adaptive_avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_avg_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::adaptive_avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, grad_input); + } + + // aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor + inline at::Tensor _adaptive_avg_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool3d_backward::redispatch(dispatchKeySet, grad_output, self); + } + + // aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool2d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool2d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & adaptive_max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::adaptive_max_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, indices, grad_input); + } + + // aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + inline at::Tensor adaptive_max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, indices); + } + + // aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_out::redispatch(dispatchKeySet, self, output_size, out, indices); + } + + // aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d::redispatch(dispatchKeySet, self, output_size); + } + + // aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_max_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, indices, grad_input); + } + + // aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & adaptive_max_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::adaptive_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, indices, grad_input); + } + + // aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + inline at::Tensor adaptive_max_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_backward::redispatch(dispatchKeySet, grad_output, self, indices); + } + + // aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)
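+ // The avg_pool wrappers below keep the schema defaults: an empty `stride` ([] in the schema, {} here) means "use kernel_size", and `divisor_override`, when set, replaces the pooling-window element count as the averaging divisor. A hedged sketch with placeholder names (`ks`, `x`):
+ //
+ //   // 3x3 window; stride defaults to the kernel size:
+ //   at::Tensor y = at::redispatch::avg_pool2d(ks, x, {3, 3});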
+ inline at::Tensor & avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) { + return at::_ops::avg_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) { + return at::_ops::avg_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor + inline at::Tensor avg_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) { + return at::_ops::avg_pool2d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & avg_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { + return at::_ops::avg_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & avg_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) { + return at::_ops::avg_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int?
divisor_override) -> Tensor + inline at::Tensor avg_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { + return at::_ops::avg_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & avg_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) { + return at::_ops::avg_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & avg_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & out) { + return at::_ops::avg_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); + } + + // aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor + inline at::Tensor avg_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt) { + return at::_ops::avg_pool3d::redispatch(dispatchKeySet, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & avg_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { + return at::_ops::avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & avg_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input) { + return at::_ops::avg_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); + } + + // aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor + inline at::Tensor avg_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { + return at::_ops::avg_pool3d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); + } + + // aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool2d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool2d::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples); + } + + // aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & fractional_max_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input); + } + + // aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!)
grad_input) -> Tensor(a!) + inline at::Tensor & fractional_max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::fractional_max_pool2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input); + } + + // aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor + inline at::Tensor fractional_max_pool2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool2d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices); + } + + // aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fractional_max_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples, at::Tensor & output, at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_output::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples, output, indices); + } + + // aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) { + return at::_ops::fractional_max_pool3d::redispatch(dispatchKeySet, self, kernel_size, output_size, random_samples); + } + + // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & fractional_max_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input); + } + + // aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & fractional_max_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::fractional_max_pool3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices, grad_input); + } + + // aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor + inline at::Tensor fractional_max_pool3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) { + return at::_ops::fractional_max_pool3d_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, output_size, indices); + } + + // aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); + } + + // aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool2d_with_indices_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); + } + + // aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool2d_with_indices::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ inline at::Tensor & max_pool2d_with_indices_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & max_pool2d_with_indices_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool2d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor + inline at::Tensor max_pool2d_with_indices_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool2d_with_indices_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); + } + + // aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); + } + + // aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!)
indices) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); + } + + // aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + inline ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) { + return at::_ops::max_pool3d_with_indices::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode); + } + + // aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & max_pool3d_with_indices_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & max_pool3d_with_indices_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::max_pool3d_with_indices_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); + } + + // aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor + inline at::Tensor max_pool3d_with_indices_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) { + return at::_ops::max_pool3d_with_indices_backward::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); + } + + // aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
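+ // max_unpool2d below is the partial inverse of max_pool2d_with_indices: it scatters `self` into an `output_size`-shaped result at the positions recorded in `indices` and zero-fills everything else. A hedged sketch with placeholder names (`ks`, an NCHW input `x`):
+ //
+ //   at::Tensor pooled, idx;
+ //   std::tie(pooled, idx) = at::redispatch::max_pool2d_with_indices(ks, x, {2, 2});
+ //   at::Tensor restored = at::redispatch::max_unpool2d(ks, pooled, idx, {x.size(2), x.size(3)});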
+ inline at::Tensor & max_unpool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d_out::redispatch(dispatchKeySet, self, indices, output_size, out); + } + + // aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::max_unpool2d_out::redispatch(dispatchKeySet, self, indices, output_size, out); + } + + // aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor + inline at::Tensor max_unpool2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size) { + return at::_ops::max_unpool2d::redispatch(dispatchKeySet, self, indices, output_size); + } + + // aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d_out::redispatch(dispatchKeySet, self, indices, output_size, stride, padding, out); + } + + // aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & max_unpool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::max_unpool3d_out::redispatch(dispatchKeySet, self, indices, output_size, stride, padding, out); + } + + // aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + inline at::Tensor max_unpool3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, at::IntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::max_unpool3d::redispatch(dispatchKeySet, self, indices, output_size, stride, padding); + } + + // aten::reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad1d(Tensor self, int[2] padding) -> Tensor + inline at::Tensor reflection_pad1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d::redispatch(dispatchKeySet, self, padding); + } + + // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+ inline at::Tensor & reflection_pad1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor + inline at::Tensor reflection_pad1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad1d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad2d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad2d(Tensor self, int[4] padding) -> Tensor + inline at::Tensor reflection_pad2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d::redispatch(dispatchKeySet, self, padding); + } + + // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor + inline at::Tensor reflection_pad2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad2d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::reflection_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) 
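+ // Padding layout for the pad family: `padding` holds (left, right) pairs for the trailing dimensions, innermost first, so reflection_pad3d below takes int[6] = (left, right, top, bottom, front, back). Reflection padding mirrors interior values without repeating the edge, so each pad amount must stay smaller than the corresponding input dimension. A hedged sketch with placeholder names (`ks`, `x`):
+ //
+ //   at::Tensor padded = at::redispatch::reflection_pad3d(ks, x, {1, 1, 2, 2, 0, 0});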
+ inline at::Tensor & reflection_pad3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::reflection_pad3d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::reflection_pad3d(Tensor self, int[6] padding) -> Tensor + inline at::Tensor reflection_pad3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d::redispatch(dispatchKeySet, self, padding); + } + + // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & reflection_pad3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) { + return at::_ops::reflection_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input); + } + + // aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor + inline at::Tensor reflection_pad3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::reflection_pad3d_backward::redispatch(dispatchKeySet, grad_output, self, padding); + } + + // aten::replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & replication_pad1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::replication_pad1d_out::redispatch(dispatchKeySet, self, padding, out); + } + + // aten::replication_pad1d(Tensor self, int[2] padding) -> Tensor + inline at::Tensor replication_pad1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) { + return at::_ops::replication_pad1d::redispatch(dispatchKeySet, self, padding); + } + + // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
+    // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & replication_pad1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
+    }
+
+    // aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & replication_pad1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
+        return at::_ops::replication_pad1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
+    }
+
+    // aten::replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor
+    inline at::Tensor replication_pad1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad1d_backward::redispatch(dispatchKeySet, grad_output, self, padding);
+    }
+
+    // aten::replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & replication_pad2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad2d_out::redispatch(dispatchKeySet, self, padding, out);
+    }
+
+    // aten::replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & replication_pad2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
+        return at::_ops::replication_pad2d_out::redispatch(dispatchKeySet, self, padding, out);
+    }
+
+    // aten::replication_pad2d(Tensor self, int[4] padding) -> Tensor
+    inline at::Tensor replication_pad2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad2d::redispatch(dispatchKeySet, self, padding);
+    }
+
+    // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & replication_pad2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
+    }
+
+    // aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & replication_pad2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
+        return at::_ops::replication_pad2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
+    }
+
+    // aten::replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor
+    inline at::Tensor replication_pad2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad2d_backward::redispatch(dispatchKeySet, grad_output, self, padding);
+    }
+
+    // aten::replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & replication_pad3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad3d_out::redispatch(dispatchKeySet, self, padding, out);
+    }
+
+    // aten::replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & replication_pad3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out) {
+        return at::_ops::replication_pad3d_out::redispatch(dispatchKeySet, self, padding, out);
+    }
+
+    // aten::replication_pad3d(Tensor self, int[6] padding) -> Tensor
+    inline at::Tensor replication_pad3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad3d::redispatch(dispatchKeySet, self, padding);
+    }
+
+    // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & replication_pad3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
+    }
+
+    // aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & replication_pad3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
+        return at::_ops::replication_pad3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, padding, grad_input);
+    }
+
+    // aten::replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor
+    inline at::Tensor replication_pad3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+        return at::_ops::replication_pad3d_backward::redispatch(dispatchKeySet, grad_output, self, padding);
+    }
+
+    // aten::_pad_circular(Tensor self, int[] pad) -> Tensor
+    inline at::Tensor _pad_circular(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad) {
+        return at::_ops::_pad_circular::redispatch(dispatchKeySet, self, pad);
+    }
+
+    // aten::_pad_enum(Tensor self, int[] pad, int mode, float? value=None) -> Tensor
+    inline at::Tensor _pad_enum(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, int64_t mode, c10::optional<double> value=c10::nullopt) {
+        return at::_ops::_pad_enum::redispatch(dispatchKeySet, self, pad, mode, value);
+    }
+
+    // aten::pad(Tensor self, int[] pad, str mode="constant", float? value=None) -> Tensor
+    inline at::Tensor pad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, c10::string_view mode="constant", c10::optional<double> value=c10::nullopt) {
+        return at::_ops::pad::redispatch(dispatchKeySet, self, pad, mode, value);
+    }
+
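The three entries just above form a small funnel: at::pad is the string-mode entry point, _pad_enum is its integer-mode core, and _pad_circular handles the wrap-around mode. A hedged sketch of the public entry point, assuming a libtorch build recent enough to expose aten::pad (as this header does):

#include <ATen/ATen.h>

void pad_sketch() {
  at::Tensor x = at::ones({1, 1, 4, 4});
  // pad is given as (left, right, top, bottom) for the last two dims.
  at::Tensor a = at::pad(x, {1, 1, 2, 2});             // default "constant" mode, value 0
  at::Tensor b = at::pad(x, {1, 1, 2, 2}, "reflect");  // routes to reflection_pad2d
  at::Tensor c = at::pad(x, {1, 1, 2, 2}, "circular"); // routes to _pad_circular
}
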
+    // aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_linear1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_linear1d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_linear1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_linear1d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_linear1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_linear1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_linear1d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors);
+    }
+
+    // aten::upsample_linear1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_linear1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_linear1d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bilinear2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bilinear2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bilinear2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bilinear2d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bilinear2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bilinear2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bilinear2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors);
+    }
+
+    // aten::upsample_bilinear2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bilinear2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bilinear2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bilinear2d_aa_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bilinear2d_aa_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bilinear2d_aa_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bilinear2d_aa_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_trilinear3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_trilinear3d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_trilinear3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_trilinear3d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_trilinear3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_trilinear3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_trilinear3d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors);
+    }
+
+    // aten::upsample_trilinear3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_trilinear3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_trilinear3d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bicubic2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bicubic2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::upsample_bicubic2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bicubic2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors);
+    }
+
+    // aten::upsample_bicubic2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_bicubic2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bicubic2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bicubic2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_vec::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bicubic2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bicubic2d_aa_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors);
+    }
+
+    // aten::_upsample_bicubic2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_bicubic2d_aa_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors);
+    }
+
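Every .vec op above comes in two spellings: an at::OptionalIntArrayRef overload that converts concrete sizes via c10::fromIntArrayRef, and a _symint overload that forwards SymInts untouched. Because the non-vec overloads coexist with these, C++ calls sometimes need explicit types to pick the .vec form; a hedged sketch, assuming the public at:: surface from this same build:

#include <ATen/ATen.h>
#include <vector>

void upsample_vec_sketch() {
  at::Tensor img = at::rand({1, 3, 8, 8});
  // Exactly one of output_size / scale_factors should be set for the .vec ops.
  std::vector<double> scales = {2.0, 2.0};
  at::Tensor by_scale = at::upsample_bilinear2d(
      img, c10::nullopt, /*align_corners=*/false,
      c10::optional<at::ArrayRef<double>>(scales));
}
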
+    // aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors);
+    }
+
+    // aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors);
+    }
+
+    // aten::upsample_nearest1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors);
+    }
+
+    // aten::upsample_nearest1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors);
+    }
+
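upsample_nearest* and _upsample_nearest_exact* differ only in how a destination index is mapped back to a source index: the exact variants add the half-pixel offset used by PIL/OpenCV-style resizing. A worked sketch of that mapping (the real kernels live in the backend implementations; this is only the documented index rule):

#include <cstdint>

// upsample_nearest:        src = floor(dst * (in_size / out_size))
inline int64_t nearest_src(int64_t dst, double inv_scale) {
  return static_cast<int64_t>(dst * inv_scale);
}

// _upsample_nearest_exact: src = floor((dst + 0.5) * (in_size / out_size))
inline int64_t nearest_exact_src(int64_t dst, double inv_scale) {
  return static_cast<int64_t>((dst + 0.5) * inv_scale);
}
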
+    // aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors);
+    }
+
+    // aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors);
+    }
+
+    // aten::upsample_nearest2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors);
+    }
+
+    // aten::upsample_nearest2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors);
+    }
+
+    // aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors);
+    }
+
+    // aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_vec::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_vec::redispatch(dispatchKeySet, input, output_size, scale_factors);
+    }
+
+    // aten::upsample_nearest3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors);
+    }
+
+    // aten::upsample_nearest3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor upsample_nearest3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_backward_vec::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors);
+    }
+
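From here on the section switches from the .vec overloads to the per-op .out/functional groups. Each op gets four out-flavored wrappers: _out (result tensor first, trailing defaults usable), _outf (schema order, result tensor last, everything explicit), and their _symint twins. A hedged sketch of the two calling conventions via the public API:

#include <ATen/ATen.h>

void out_variant_sketch() {
  at::Tensor x = at::rand({1, 1, 4});
  at::Tensor out = at::empty({1, 1, 8}, x.options());
  // _out: out parameter leads, optional scales can be left defaulted.
  at::upsample_linear1d_out(out, x, {8}, /*align_corners=*/false);
  // _outf: arguments in schema order, out parameter last, no defaults.
  at::upsample_linear1d_outf(x, {8}, /*align_corners=*/false, c10::nullopt, out);
}
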
+    // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales, out);
+    }
+
+    // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
+        return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales, out);
+    }
+
+    // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales, out);
+    }
+
+    // aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out) {
+        return at::_ops::upsample_linear1d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales, out);
+    }
+
+    // aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
+    inline at::Tensor upsample_linear1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales);
+    }
+
+    // aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor
+    inline at::Tensor upsample_linear1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d::redispatch(dispatchKeySet, self, output_size, align_corners, scales);
+    }
+
+    // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales, grad_input);
+    }
+
+    // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
+        return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales, grad_input);
+    }
+
+    // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales, grad_input);
+    }
+
+    // aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_linear1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
+        return at::_ops::upsample_linear1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales, grad_input);
+    }
+
+    // aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+    inline at::Tensor upsample_linear1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales);
+    }
+
+    // aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
+    inline at::Tensor upsample_linear1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_linear1d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales);
+    }
+
+    // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_bilinear2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_bilinear2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w);
+    }
+
+    // aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_bilinear2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
+    }
+
+    // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_bilinear2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_bilinear2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_bilinear2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w);
+    }
+
+    // aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_bilinear2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bilinear2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+    }
+
+    // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::_upsample_bilinear2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w);
+    }
+
+    // aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::_upsample_bilinear2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w);
+    }
+
+    // aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_bilinear2d_aa_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bilinear2d_aa_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+    }
+
+    // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_bicubic2d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_bicubic2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bicubic2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w);
+    }
+
+    // aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_bicubic2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_bicubic2d::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w);
+    }
+
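The _symint spellings above take c10::SymIntArrayRef so that sizes can stay symbolic under tracing; with eager tensors they behave exactly like the int versions. A hedged sketch, assuming the public _symint wrappers from this same generated surface are available in the build:

#include <ATen/ATen.h>
#include <vector>

void symint_sketch() {
  at::Tensor img = at::rand({1, 3, 8, 8});
  // Concrete ints wrapped as SymInts; under symbolic tracing these could be free symbols.
  std::vector<c10::SymInt> size = {c10::SymInt(16), c10::SymInt(16)};
  at::Tensor y = at::upsample_bicubic2d_symint(img, size, /*align_corners=*/false);
}
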
+ inline at::Tensor & upsample_bicubic2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & upsample_bicubic2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::upsample_bicubic2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor upsample_bicubic2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w); + } + + // aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor upsample_bicubic2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::upsample_bicubic2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & out) { + return at::_ops::_upsample_bicubic2d_aa_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w, out); + } + + // aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_h, scales_w); + } + + // aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + inline at::Tensor _upsample_bicubic2d_aa_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa::redispatch(dispatchKeySet, self, output_size, align_corners, scales_h, scales_w); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional scales_h, c10::optional scales_w, at::Tensor & grad_input) { + return at::_ops::_upsample_bicubic2d_aa_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); + } + + // aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? 
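+    // [editor note] The wrappers in this header follow a fixed, generated convention: `_out`
+    // takes the destination tensor first and defaults its trailing optionals, `_outf` mirrors
+    // the ATen schema exactly (destination last, no defaults), and the `_symint` forms take
+    // symbolic sizes. A minimal usage sketch, assuming these declarations live in
+    // at::redispatch (as in ATen's generated RedispatchFunctions.h) and that the caller
+    // already holds a c10::DispatchKeySet `ks` (hypothetical variable):
+    //
+    //   at::Tensor out = at::empty({1, 3, 64, 64});
+    //   at::redispatch::upsample_bicubic2d_out(ks, out, self, {64, 64}, /*align_corners=*/false);
+    //   at::redispatch::upsample_bicubic2d_outf(ks, self, {64, 64}, false, c10::nullopt, c10::nullopt, out);
+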
+    // aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_bicubic2d_aa_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bicubic2d_aa_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_h, scales_w);
+    }
+
+    // aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_bicubic2d_aa_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_bicubic2d_aa_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_h, scales_w);
+    }
+
+    // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_trilinear3d_out::redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_trilinear3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), align_corners, scales_d, scales_h, scales_w);
+    }
+
+    // aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_trilinear3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d::redispatch(dispatchKeySet, self, output_size, align_corners, scales_d, scales_h, scales_w);
+    }
+
+    // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_trilinear3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_trilinear3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), align_corners, scales_d, scales_h, scales_w);
+    }
+
+    // aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_trilinear3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_trilinear3d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales, out);
+    }
+
+    // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+        return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales, out);
+    }
+
+    // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, output_size, scales, out);
+    }
+
+    // aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+        return at::_ops::upsample_nearest1d_out::redispatch(dispatchKeySet, self, output_size, scales, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, output_size, scales, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact1d_out::redispatch(dispatchKeySet, self, output_size, scales, out);
+    }
+
+    // aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+    inline at::Tensor upsample_nearest1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales);
+    }
+
+    // aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+    inline at::Tensor upsample_nearest1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d::redispatch(dispatchKeySet, self, output_size, scales);
+    }
+
+    // aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales);
+    }
+
+    // aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d::redispatch(dispatchKeySet, self, output_size, scales);
+    }
+
+    // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales, grad_input);
+    }
+
+    // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+        return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales, grad_input);
+    }
+
+    // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
+    }
+
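+    // [editor note] Each concrete-size overload above converts its sizes with
+    // c10::fromIntArrayRef before redispatching, so the plain and `_symint` variants funnel
+    // into the same at::_ops entry point. A sketch (again with a hypothetical
+    // c10::DispatchKeySet `ks` and c10::SymIntArrayRef `sym_sizes`):
+    //
+    //   at::redispatch::upsample_nearest1d(ks, self, {128});             // at::IntArrayRef
+    //   at::redispatch::upsample_nearest1d_symint(ks, self, sym_sizes);  // c10::SymIntArrayRef
+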
+    // aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+        return at::_ops::upsample_nearest1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+        return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input) {
+        return at::_ops::_upsample_nearest_exact1d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales, grad_input);
+    }
+
+    // aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+    inline at::Tensor upsample_nearest1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales);
+    }
+
+    // aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+    inline at::Tensor upsample_nearest1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::upsample_nearest1d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact1d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact1d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales);
+    }
+
+    // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_nearest2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact2d_out::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d::redispatch(dispatchKeySet, self, output_size, scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_nearest2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::_upsample_nearest_exact2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact2d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact2d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::upsample_nearest3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact3d_out::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w, out);
+    }
+
+    // aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_d, scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), scales_d, scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d::redispatch(dispatchKeySet, self, output_size, scales_d, scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::upsample_nearest3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+        return at::_ops::_upsample_nearest_exact3d_backward_grad_input::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+    }
+
+    // aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_d, scales_h, scales_w);
+    }
+
+    // aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor upsample_nearest3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::upsample_nearest3d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d_backward::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(output_size), c10::fromIntArrayRef(input_size), scales_d, scales_h, scales_w);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+    inline at::Tensor _upsample_nearest_exact3d_backward_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+        return at::_ops::_upsample_nearest_exact3d_backward::redispatch(dispatchKeySet, grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+    }
+
+    // aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & sigmoid_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
+        return at::_ops::sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input);
+    }
+
+    // aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & sigmoid_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) {
+        return at::_ops::sigmoid_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input);
+    }
+
+    // aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
+    inline at::Tensor sigmoid_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) {
+        return at::_ops::sigmoid_backward::redispatch(dispatchKeySet, grad_output, output);
+    }
+
+    // aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & logit_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
+        return at::_ops::logit_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, eps, grad_input);
+    }
+
+    // aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & logit_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps, at::Tensor & grad_input) {
+        return at::_ops::logit_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, eps, grad_input);
+    }
+
+    // aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
+    inline at::Tensor logit_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) {
+        return at::_ops::logit_backward::redispatch(dispatchKeySet, grad_output, self, eps);
+    }
+
+    // aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+    inline at::Tensor & tanh_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output) {
+        return at::_ops::tanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input);
+    }
+
+ inline at::Tensor & tanh_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, at::Tensor & grad_input) { + return at::_ops::tanh_backward_grad_input::redispatch(dispatchKeySet, grad_output, output, grad_input); + } + + // aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor + inline at::Tensor tanh_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output) { + return at::_ops::tanh_backward::redispatch(dispatchKeySet, grad_output, output); + } + + // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor + inline at::Tensor slow_conv_transpose2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } + + // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv_transpose3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & slow_conv_transpose3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out) { + return at::_ops::slow_conv_transpose3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); + } + + // aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor + inline at::Tensor slow_conv_transpose3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_transpose3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); + } + + // aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & thnn_conv2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::thnn_conv2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out); + } + + // aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & thnn_conv2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::thnn_conv2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out); + } + + // aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor + inline at::Tensor thnn_conv2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::thnn_conv2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding); + } + + // aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!) + inline at::Tensor & _slow_conv2d_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output); + } + + // aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!) 
+ inline at::Tensor & _slow_conv2d_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) { + return at::_ops::_slow_conv2d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output); + } + + // aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor + inline at::Tensor _slow_conv2d_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_forward::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding); + } + + // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _slow_conv2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::_slow_conv2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); + } + + // aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _slow_conv2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias) { + return at::_ops::_slow_conv2d_backward_grad_input::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); + } + + // aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + inline ::std::tuple _slow_conv2d_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask) { + return at::_ops::_slow_conv2d_backward_output_mask::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask); + } + + // aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) 
+ inline const at::Tensor & _conv_depthwise2d_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) { + return at::_ops::_conv_depthwise2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out); + } + + // aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & _conv_depthwise2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, const at::Tensor & out) { + return at::_ops::_conv_depthwise2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out); + } + + // aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor + inline at::Tensor _conv_depthwise2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) { + return at::_ops::_conv_depthwise2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation); + } + + // aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor + inline at::Tensor conv_depthwise3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) { + return at::_ops::conv_depthwise3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation); + } + + // aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out); + } + + // aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & slow_conv3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out) { + return at::_ops::slow_conv3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, out); + } + + // aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0) -> Tensor + inline at::Tensor slow_conv3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0) { + return at::_ops::slow_conv3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding); + } + + // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!) + inline at::Tensor & slow_conv3d_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::slow_conv3d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output); + } + + // aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!) + inline at::Tensor & slow_conv3d_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output) { + return at::_ops::slow_conv3d_forward_output::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, output); + } + + // aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> Tensor + inline at::Tensor slow_conv3d_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding) { + return at::_ops::slow_conv3d_forward::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding); + } + + // aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor + inline at::Tensor slow_conv_dilated2d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated2d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation); + } + + // aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor + inline at::Tensor slow_conv_dilated3d(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) { + return at::_ops::slow_conv_dilated3d::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation); + } + + // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & col2im_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), kernel_size, dilation, padding, stride, out); + } + + // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & col2im_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::col2im_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), kernel_size, dilation, padding, stride, out); + } + + // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & col2im_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im_out::redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride, out); + } + + // aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & col2im_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::col2im_out::redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride, out); + } + + // aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + inline at::Tensor col2im(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), kernel_size, dilation, padding, stride); + } + + // aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + inline at::Tensor col2im_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::col2im::redispatch(dispatchKeySet, self, output_size, kernel_size, dilation, padding, stride); + } + + // aten::column_stack(Tensor[] tensors) -> Tensor + inline at::Tensor column_stack(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) { + return at::_ops::column_stack::redispatch(dispatchKeySet, tensors); + } + + // aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & column_stack_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::column_stack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & column_stack_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::column_stack_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & im2col_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col_out::redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride, out); + } + + // aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & im2col_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) { + return at::_ops::im2col_out::redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride, out); + } + + // aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + inline at::Tensor im2col(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) { + return at::_ops::im2col::redispatch(dispatchKeySet, self, kernel_size, dilation, padding, stride); + } + + // aten::isfinite(Tensor self) -> Tensor + inline at::Tensor isfinite(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isfinite::redispatch(dispatchKeySet, self); + } + + // aten::isinf(Tensor self) -> Tensor + inline at::Tensor isinf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isinf::redispatch(dispatchKeySet, self); + } + + // aten::record_stream(Tensor(a!) self, Stream s) -> () + inline void record_stream(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Stream s) { + return at::_ops::record_stream::redispatch(dispatchKeySet, self, s); + } + + // aten::isposinf(Tensor self) -> Tensor + inline at::Tensor isposinf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isposinf::redispatch(dispatchKeySet, self); + } + + // aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isposinf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::isposinf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isposinf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::isposinf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::isneginf(Tensor self) -> Tensor + inline at::Tensor isneginf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::isneginf::redispatch(dispatchKeySet, self); + } + + // aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & isneginf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::isneginf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & isneginf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::isneginf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor + inline at::Tensor _add_batch_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t batch_dim, int64_t level) { + return at::_ops::_add_batch_dim::redispatch(dispatchKeySet, self, batch_dim, level); + } + + // aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor + inline at::Tensor _remove_batch_dim(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) { + return at::_ops::_remove_batch_dim::redispatch(dispatchKeySet, self, level, batch_size, out_dim); + } + + // aten::special_entr(Tensor self) -> Tensor + inline at::Tensor special_entr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_entr::redispatch(dispatchKeySet, self); + } + + // aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_entr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_entr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_entr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_entr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtri(Tensor self) -> Tensor + inline at::Tensor special_ndtri(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_ndtri::redispatch(dispatchKeySet, self); + } + + // aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtri_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_ndtri_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtri_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_ndtri_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log_ndtr(Tensor self) -> Tensor + inline at::Tensor special_log_ndtr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_log_ndtr::redispatch(dispatchKeySet, self); + } + + // aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_log_ndtr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_log_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_log_ndtr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_log_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_expm1(Tensor self) -> Tensor + inline at::Tensor special_expm1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_expm1::redispatch(dispatchKeySet, self); + } + + // aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_expm1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_expm1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_exp2(Tensor self) -> Tensor + inline at::Tensor special_exp2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_exp2::redispatch(dispatchKeySet, self); + } + + // aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_exp2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_exp2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_exp2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_psi(Tensor self) -> Tensor + inline at::Tensor special_psi(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_psi::redispatch(dispatchKeySet, self); + } + + // aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_psi_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_psi_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_psi_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_psi_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_digamma(Tensor self) -> Tensor + inline at::Tensor special_digamma(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_digamma::redispatch(dispatchKeySet, self); + } + + // aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_digamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_digamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_digamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_digamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_gammaln(Tensor self) -> Tensor + inline at::Tensor special_gammaln(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_gammaln::redispatch(dispatchKeySet, self); + } + + // aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammaln_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_gammaln_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammaln_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_gammaln_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erf(Tensor self) -> Tensor + inline at::Tensor special_erf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erf::redispatch(dispatchKeySet, self); + } + + // aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfc(Tensor self) -> Tensor + inline at::Tensor special_erfc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erfc::redispatch(dispatchKeySet, self); + } + + // aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfcx(Tensor self) -> Tensor + inline at::Tensor special_erfcx(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erfcx::redispatch(dispatchKeySet, self); + } + + // aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfcx_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erfcx_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_erfcx_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erfcx_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfinv(Tensor self) -> Tensor + inline at::Tensor special_erfinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_erfinv::redispatch(dispatchKeySet, self); + } + + // aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_erfinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_erfinv_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtr(Tensor self) -> Tensor + inline at::Tensor special_ndtr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_ndtr::redispatch(dispatchKeySet, self); + } + + // aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_ndtr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_ndtr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_xlog1py(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_xlog1py(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlog1py::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor + inline at::Tensor special_xlog1py(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlog1py_self_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor special_xlog1py(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlog1py_other_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlog1py_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlog1py_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_xlog1py_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlog1py_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlog1py_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlog1py_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlog1py_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::special_xlog1py_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlogy::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor + inline at::Tensor special_xlogy(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlogy_self_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor special_xlogy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlogy_other_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_xlogy_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlogy_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_xlogy_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_xlogy_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_xlogy_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_xlogy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::special_xlogy_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_zeta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_zeta::redispatch(dispatchKeySet, self, other); + } + + // aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor + inline at::Tensor special_zeta(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_zeta_self_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor + inline at::Tensor special_zeta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_zeta_other_scalar::redispatch(dispatchKeySet, self, other); + } + + // aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_zeta_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_zeta_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) { + return at::_ops::special_zeta_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_zeta_self_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_zeta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::special_zeta_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_zeta_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::special_zeta_other_scalar_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_i0(Tensor self) -> Tensor + inline at::Tensor special_i0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i0::redispatch(dispatchKeySet, self); + } + + // aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i0_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i0e(Tensor self) -> Tensor + inline at::Tensor special_i0e(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i0e::redispatch(dispatchKeySet, self); + } + + // aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i0e_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i0e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i0e_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i0e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1(Tensor self) -> Tensor + inline at::Tensor special_i1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i1::redispatch(dispatchKeySet, self); + } + + // aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1e(Tensor self) -> Tensor + inline at::Tensor special_i1e(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_i1e::redispatch(dispatchKeySet, self); + } + + // aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_i1e_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_i1e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_i1e_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_i1e_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_logit(Tensor self, float? eps=None) -> Tensor + inline at::Tensor special_logit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) { + return at::_ops::special_logit::redispatch(dispatchKeySet, self, eps); + } + + // aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_logit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<double> eps=c10::nullopt) { + return at::_ops::special_logit_out::redispatch(dispatchKeySet, self, eps, out); + } + + // aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_logit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> eps, at::Tensor & out) { + return at::_ops::special_logit_out::redispatch(dispatchKeySet, self, eps, out); + } + + // aten::special_polygamma(int n, Tensor self) -> Tensor + inline at::Tensor special_polygamma(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self) { + return at::_ops::special_polygamma::redispatch(dispatchKeySet, n, self); + } + + // aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_polygamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, const at::Tensor & self) { + return at::_ops::special_polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_polygamma_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_polygamma_out::redispatch(dispatchKeySet, n, self, out); + } + + // aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + inline at::Tensor special_logsumexp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::special_logsumexp::redispatch(dispatchKeySet, self, dim, keepdim); + } + + // aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_logsumexp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false) { + return at::_ops::special_logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_logsumexp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out) { + return at::_ops::special_logsumexp_out::redispatch(dispatchKeySet, self, dim, keepdim, out); + } + + // aten::special_expit(Tensor self) -> Tensor + inline at::Tensor special_expit(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_expit::redispatch(dispatchKeySet, self); + } + + // aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & special_expit_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_expit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_expit_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_expit_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_sinc(Tensor self) -> Tensor + inline at::Tensor special_sinc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_sinc::redispatch(dispatchKeySet, self); + } + + // aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_sinc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_sinc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_sinc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_sinc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_round(Tensor self, *, int decimals=0) -> Tensor + inline at::Tensor special_round(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals=0) { + return at::_ops::special_round::redispatch(dispatchKeySet, self, decimals); + } + + // aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_round_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t decimals=0) { + return at::_ops::special_round_out::redispatch(dispatchKeySet, self, decimals, out); + } + + // aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_round_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t decimals, at::Tensor & out) { + return at::_ops::special_round_out::redispatch(dispatchKeySet, self, decimals, out); + } + + // aten::special_log1p(Tensor self) -> Tensor + inline at::Tensor special_log1p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::special_log1p::redispatch(dispatchKeySet, self); + } + + // aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_log1p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_log1p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + inline at::Tensor special_log_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::special_log_softmax::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & special_gammainc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammainc_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammainc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_gammainc_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_gammainc(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_gammainc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammainc::redispatch(dispatchKeySet, self, other); + } + + // aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammaincc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammaincc_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_gammaincc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::special_gammaincc_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::special_gammaincc(Tensor self, Tensor other) -> Tensor + inline at::Tensor special_gammaincc(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::special_gammaincc::redispatch(dispatchKeySet, self, other); + } + + // aten::special_multigammaln(Tensor self, int p) -> Tensor + inline at::Tensor special_multigammaln(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p) { + return at::_ops::special_multigammaln::redispatch(dispatchKeySet, self, p); + } + + // aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_multigammaln_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t p) { + return at::_ops::special_multigammaln_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_multigammaln_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t p, at::Tensor & out) { + return at::_ops::special_multigammaln_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + inline at::Tensor special_softmax(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt) { + return at::_ops::special_softmax::redispatch(dispatchKeySet, self, dim, dtype); + } + + // aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + inline at::Tensor fft_fft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_fft::redispatch(dispatchKeySet, self, n, dim, norm); + } + + // aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) 
+ // aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_fft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_fft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
+ inline at::Tensor fft_ifft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ifft::redispatch(dispatchKeySet, self, n, dim, norm);
+ }
+
+ // aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ifft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ifft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ifft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_ifft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
+ inline at::Tensor fft_rfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_rfft::redispatch(dispatchKeySet, self, n, dim, norm);
+ }
+
+ // aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_rfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_rfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
+ inline at::Tensor fft_irfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_irfft::redispatch(dispatchKeySet, self, n, dim, norm);
+ }
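The 1-D transforms share one parameter set: an optional length `n` (which pads or trims the signal), the transform `dim`, and a normalization string ("backward", "forward", or "ortho"). A hedged round-trip sketch through the public API (illustrative only, assuming libtorch):

    // rfft of a real signal, then inverse; "ortho" makes the pair unitary.
    at::Tensor sig  = at::randn({16});
    at::Tensor spec = at::fft_rfft(sig, c10::nullopt, /*dim=*/-1, "ortho");
    at::Tensor back = at::fft_irfft(spec, /*n=*/16, /*dim=*/-1, "ortho");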
+ // aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_irfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_irfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
+ inline at::Tensor fft_hfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_hfft::redispatch(dispatchKeySet, self, n, dim, norm);
+ }
+
+ // aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_hfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_hfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_hfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_hfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor
+ inline at::Tensor fft_ihfft(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ihfft::redispatch(dispatchKeySet, self, n, dim, norm);
+ }
+
+ // aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ihfft_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ihfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ihfft_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_ihfft_out::redispatch(dispatchKeySet, self, n, dim, norm, out);
+ }
+
+ // aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_fft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_fft2::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+ // aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_fft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_fft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_ifft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ifft2::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ifft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ifft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ifft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_ifft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_rfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_rfft2::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_rfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_rfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
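The 2-D variants default `dim` to the last two axes, which matches NCHW image batches. A short sketch (illustrative only, assuming libtorch):

    at::Tensor imgs = at::randn({4, 1, 32, 32});
    at::Tensor freq = at::fft_fft2(imgs);   // transforms dims {-2, -1} of each image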
+ // aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_irfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_irfft2::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_irfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_irfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_hfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_hfft2::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_hfft2_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_hfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_hfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+   return at::_ops::fft_hfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_ihfft2(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ihfft2::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_ihfft2_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ihfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+ // aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_ihfft2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+   return at::_ops::fft_ihfft2_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+ inline at::Tensor fft_fftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_fftn::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_fftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_fftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+ inline at::Tensor fft_ifftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ifftn::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ifftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ifftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_ifftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_ifftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+ inline at::Tensor fft_rfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_rfftn::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
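The n-D forms take optional `s` and `dim` lists; omitting both transforms every axis. A sketch (illustrative only; the vector-to-`OptionalIntArrayRef` conversion is the usual ATen idiom, assuming libtorch):

    at::Tensor vol = at::randn({8, 8, 8});
    std::vector<int64_t> dims = {0, 1};      // transform only the first two axes
    at::Tensor freq = at::fft_fftn(vol, c10::nullopt, dims);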
+ // aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_rfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_rfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+ inline at::Tensor fft_irfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_irfftn::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfftn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_irfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+   return at::_ops::fft_irfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+ inline at::Tensor fft_hfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_hfftn::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_hfftn_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_hfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_hfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+   return at::_ops::fft_hfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+ // aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor
+ inline at::Tensor fft_ihfftn(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ihfftn::redispatch(dispatchKeySet, self, s, dim, norm);
+ }
+
+ // aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_ihfftn_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt) {
+   return at::_ops::fft_ihfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline const at::Tensor & fft_ihfftn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, const at::Tensor & out) {
+   return at::_ops::fft_ihfftn_out::redispatch(dispatchKeySet, self, s, dim, norm, out);
+ }
+
+ // aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor fft_fftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d=1.0, at::TensorOptions options={}) {
+   return at::_ops::fft_fftfreq::redispatch(dispatchKeySet, n, d, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+
+ // aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor fft_fftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+   return at::_ops::fft_fftfreq::redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory);
+ }
+
+ // aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fftfreq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, double d=1.0) {
+   return at::_ops::fft_fftfreq_out::redispatch(dispatchKeySet, n, d, out);
+ }
+
+ // aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_fftfreq_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {
+   return at::_ops::fft_fftfreq_out::redispatch(dispatchKeySet, n, d, out);
+ }
+
+ // aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor fft_rfftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d=1.0, at::TensorOptions options={}) {
+   return at::_ops::fft_rfftfreq::redispatch(dispatchKeySet, n, d, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
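`fft_fftfreq` builds the frequency-bin vector for an n-point transform; `d` is the sample spacing, so the bins come out in cycles per unit of `d`. A one-line sketch (illustrative only, assuming libtorch):

    at::Tensor f = at::fft_fftfreq(8, /*d=*/0.5);   // 8 bins for signals sampled every 0.5 units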
+ // aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor fft_rfftfreq(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+   return at::_ops::fft_rfftfreq::redispatch(dispatchKeySet, n, d, dtype, layout, device, pin_memory);
+ }
+
+ // aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfftfreq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t n, double d=1.0) {
+   return at::_ops::fft_rfftfreq_out::redispatch(dispatchKeySet, n, d, out);
+ }
+
+ // aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_rfftfreq_outf(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out) {
+   return at::_ops::fft_rfftfreq_out::redispatch(dispatchKeySet, n, d, out);
+ }
+
+ // aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor
+ inline at::Tensor fft_fftshift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt) {
+   return at::_ops::fft_fftshift::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor
+ inline at::Tensor fft_ifftshift(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt) {
+   return at::_ops::fft_ifftshift::redispatch(dispatchKeySet, self, dim);
+ }
+
+ // aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false, bool check_errors=false) {
+   return at::_ops::linalg_cholesky_ex::redispatch(dispatchKeySet, self, upper, check_errors);
+ }
+
+ // aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false) {
+   return at::_ops::linalg_cholesky_ex_L::redispatch(dispatchKeySet, self, upper, check_errors, L, info);
+ }
+
+ // aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) {
+   return at::_ops::linalg_cholesky_ex_L::redispatch(dispatchKeySet, self, upper, check_errors, L, info);
+ }
+
+ // aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor
+ inline at::Tensor linalg_cholesky(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper=false) {
+   return at::_ops::linalg_cholesky::redispatch(dispatchKeySet, self, upper);
+ }
+
+ // aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_cholesky_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool upper=false) {
+   return at::_ops::linalg_cholesky_out::redispatch(dispatchKeySet, self, upper, out);
+ }
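`linalg_cholesky` expects a symmetric (Hermitian) positive-definite input and returns the lower factor by default, while the `_ex` variant above reports failure through `info` instead of throwing. A sketch (illustrative only, assuming libtorch):

    at::Tensor M = at::randn({4, 4});
    at::Tensor A = at::matmul(M, M.transpose(0, 1)) + at::eye(4) * 4;  // make SPD
    at::Tensor L = at::linalg_cholesky(A);   // A == L * L^T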
+ // aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_cholesky_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool upper, at::Tensor & out) {
+   return at::_ops::linalg_cholesky_out::redispatch(dispatchKeySet, self, upper, out);
+ }
+
+ // aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor
+ inline at::Tensor linalg_cross(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) {
+   return at::_ops::linalg_cross::redispatch(dispatchKeySet, self, other, dim);
+ }
+
+ // aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_cross_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t dim=-1) {
+   return at::_ops::linalg_cross_out::redispatch(dispatchKeySet, self, other, dim, out);
+ }
+
+ // aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_cross_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t dim, at::Tensor & out) {
+   return at::_ops::linalg_cross_out::redispatch(dispatchKeySet, self, other, dim, out);
+ }
+
+ // aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot=true) {
+   return at::_ops::linalg_lu_factor::redispatch(dispatchKeySet, A, pivot);
+ }
+
+ // aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A, bool pivot=true) {
+   return at::_ops::linalg_lu_factor_out::redispatch(dispatchKeySet, A, pivot, LU, pivots);
+ }
+
+ // aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_lu_factor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & LU, at::Tensor & pivots) {
+   return at::_ops::linalg_lu_factor_out::redispatch(dispatchKeySet, A, pivot, LU, pivots);
+ }
+
+ // aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot=true, bool check_errors=false) {
+   return at::_ops::linalg_lu_factor_ex::redispatch(dispatchKeySet, A, pivot, check_errors);
+ }
+
+ // aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, bool pivot=true, bool check_errors=false) {
+   return at::_ops::linalg_lu_factor_ex_out::redispatch(dispatchKeySet, A, pivot, check_errors, LU, pivots, info);
+ }
+ // aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_factor_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, bool check_errors, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
+   return at::_ops::linalg_lu_factor_ex_out::redispatch(dispatchKeySet, A, pivot, check_errors, LU, pivots, info);
+ }
+
+ // aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot=true) {
+   return at::_ops::linalg_lu::redispatch(dispatchKeySet, A, pivot);
+ }
+
+ // aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true) {
+   return at::_ops::linalg_lu_out::redispatch(dispatchKeySet, A, pivot, P, L, U);
+ }
+
+ // aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
+   return at::_ops::linalg_lu_out::redispatch(dispatchKeySet, A, pivot, P, L, U);
+ }
+
+ // aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor
+ inline at::Tensor linalg_lu_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) {
+   return at::_ops::linalg_lu_solve::redispatch(dispatchKeySet, LU, pivots, B, left, adjoint);
+ }
+
+ // aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_lu_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left=true, bool adjoint=false) {
+   return at::_ops::linalg_lu_solve_out::redispatch(dispatchKeySet, LU, pivots, B, left, adjoint, out);
+ }
+
+ // aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_lu_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint, at::Tensor & out) {
+   return at::_ops::linalg_lu_solve_out::redispatch(dispatchKeySet, LU, pivots, B, left, adjoint, out);
+ }
+
+ // aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
+   return at::_ops::_linalg_det::redispatch(dispatchKeySet, A);
+ }
+
+ // aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
+   return at::_ops::_linalg_det_result::redispatch(dispatchKeySet, A, result, LU, pivots);
+ }
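The point of the factor/solve split is to factor once and reuse the decomposition for several right-hand sides. A sketch (illustrative only, assuming libtorch and `<tuple>`):

    at::Tensor A = at::randn({3, 3});
    at::Tensor B = at::randn({3, 2});
    auto f = at::linalg_lu_factor(A);    // (LU, pivots)
    at::Tensor X = at::linalg_lu_solve(std::get<0>(f), std::get<1>(f), B);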
+ // aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_det_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots) {
+   return at::_ops::_linalg_det_result::redispatch(dispatchKeySet, A, result, LU, pivots);
+ }
+
+ // aten::linalg_det(Tensor A) -> Tensor
+ inline at::Tensor linalg_det(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
+   return at::_ops::linalg_det::redispatch(dispatchKeySet, A);
+ }
+
+ // aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_det_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A) {
+   return at::_ops::linalg_det_out::redispatch(dispatchKeySet, A, out);
+ }
+
+ // aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_det_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
+   return at::_ops::linalg_det_out::redispatch(dispatchKeySet, A, out);
+ }
+
+ // aten::det(Tensor self) -> Tensor
+ inline at::Tensor det(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::det::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian=false, bool check_errors=false) {
+   return at::_ops::linalg_ldl_factor_ex::redispatch(dispatchKeySet, self, hermitian, check_errors);
+ }
+
+ // aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info, const at::Tensor & self, bool hermitian=false, bool check_errors=false) {
+   return at::_ops::linalg_ldl_factor_ex_out::redispatch(dispatchKeySet, self, hermitian, check_errors, LD, pivots, info);
+ }
+
+ // aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_ldl_factor_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, bool check_errors, at::Tensor & LD, at::Tensor & pivots, at::Tensor & info) {
+   return at::_ops::linalg_ldl_factor_ex_out::redispatch(dispatchKeySet, self, hermitian, check_errors, LD, pivots, info);
+ }
+
+ // aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian=false) {
+   return at::_ops::linalg_ldl_factor::redispatch(dispatchKeySet, self, hermitian);
+ }
+
+ // aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false) {
+   return at::_ops::linalg_ldl_factor_out::redispatch(dispatchKeySet, self, hermitian, LD, pivots);
+ }
+ // aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots) {
+   return at::_ops::linalg_ldl_factor_out::redispatch(dispatchKeySet, self, hermitian, LD, pivots);
+ }
+
+ // aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor
+ inline at::Tensor linalg_ldl_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
+   return at::_ops::linalg_ldl_solve::redispatch(dispatchKeySet, LD, pivots, B, hermitian);
+ }
+
+ // aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_ldl_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian=false) {
+   return at::_ops::linalg_ldl_solve_out::redispatch(dispatchKeySet, LD, pivots, B, hermitian, out);
+ }
+
+ // aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_ldl_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out) {
+   return at::_ops::linalg_ldl_solve_out::redispatch(dispatchKeySet, LD, pivots, B, hermitian, out);
+ }
+
+ // aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
+   return at::_ops::linalg_lstsq::redispatch(dispatchKeySet, self, b, rcond, driver);
+ }
+
+ // aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
+   return at::_ops::linalg_lstsq_out::redispatch(dispatchKeySet, self, b, rcond, driver, solution, residuals, rank, singular_values);
+ }
+ // aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
+   return at::_ops::linalg_lstsq_out::redispatch(dispatchKeySet, self, b, rcond, driver, solution, residuals, rank, singular_values);
+ }
+
+ // aten::linalg_matmul(Tensor self, Tensor other) -> Tensor
+ inline at::Tensor linalg_matmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+   return at::_ops::linalg_matmul::redispatch(dispatchKeySet, self, other);
+ }
+
+ // aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+   return at::_ops::linalg_matmul_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_matmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+   return at::_ops::linalg_matmul_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor
+ inline at::Tensor linalg_vecdot(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) {
+   return at::_ops::linalg_vecdot::redispatch(dispatchKeySet, x, y, dim);
+ }
+
+ // aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_vecdot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) {
+   return at::_ops::linalg_vecdot_out::redispatch(dispatchKeySet, x, y, dim, out);
+ }
+
+ // aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_vecdot_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) {
+   return at::_ops::linalg_vecdot_out::redispatch(dispatchKeySet, x, y, dim, out);
+ }
+
+ // aten::linalg_matrix_exp(Tensor self) -> Tensor
+ inline at::Tensor linalg_matrix_exp(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::linalg_matrix_exp::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
+   return at::_ops::_linalg_slogdet::redispatch(dispatchKeySet, A);
+ }
+
+ // aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A) {
+   return at::_ops::_linalg_slogdet_sign::redispatch(dispatchKeySet, A, sign, logabsdet, LU, pivots);
+ }
+ // aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_slogdet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots) {
+   return at::_ops::_linalg_slogdet_sign::redispatch(dispatchKeySet, A, sign, logabsdet, LU, pivots);
+ }
+
+ // aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_slogdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
+   return at::_ops::linalg_slogdet::redispatch(dispatchKeySet, A);
+ }
+
+ // aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & A) {
+   return at::_ops::linalg_slogdet_out::redispatch(dispatchKeySet, A, sign, logabsdet);
+ }
+
+ // aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_slogdet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet) {
+   return at::_ops::linalg_slogdet_out::redispatch(dispatchKeySet, A, sign, logabsdet);
+ }
+
+ // aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
+ inline ::std::tuple<at::Tensor,at::Tensor> slogdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::slogdet::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> slogdet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & sign, at::Tensor & logabsdet, const at::Tensor & self) {
+   return at::_ops::slogdet_out::redispatch(dispatchKeySet, self, sign, logabsdet);
+ }
+
+ // aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> slogdet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & sign, at::Tensor & logabsdet) {
+   return at::_ops::slogdet_out::redispatch(dispatchKeySet, self, sign, logabsdet);
+ }
+
+ // aten::logdet(Tensor self) -> Tensor
+ inline at::Tensor logdet(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::logdet::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_eig(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::linalg_eig::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & self) {
+   return at::_ops::linalg_eig_out::redispatch(dispatchKeySet, self, eigenvalues, eigenvectors);
+ }
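`slogdet` returns the sign and log|det| separately, which stays finite where `det` itself would over- or underflow, and `linalg_eig` of a general square matrix yields complex eigenpairs. A sketch (illustrative only, assuming libtorch):

    at::Tensor A = at::randn({5, 5});
    auto sl = at::linalg_slogdet(A);   // (sign, logabsdet)
    auto ev = at::linalg_eig(A);       // complex (eigenvalues, eigenvectors)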
+ // aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eig_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
+   return at::_ops::linalg_eig_out::redispatch(dispatchKeySet, self, eigenvalues, eigenvectors);
+ }
+
+ // aten::linalg_eigvals(Tensor self) -> Tensor
+ inline at::Tensor linalg_eigvals(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::linalg_eigvals::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_eigvals_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+   return at::_ops::linalg_eigvals_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_eigvals_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+   return at::_ops::linalg_eigvals_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::_linalg_eigh(Tensor A, str UPLO="L", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)
+ inline ::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
+   return at::_ops::_linalg_eigh::redispatch(dispatchKeySet, A, UPLO, compute_v);
+ }
+
+ // aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true) {
+   return at::_ops::_linalg_eigh_eigenvalues::redispatch(dispatchKeySet, A, UPLO, compute_v, eigenvalues, eigenvectors);
+ }
+
+ // aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO="L", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors) {
+   return at::_ops::_linalg_eigh_eigenvalues::redispatch(dispatchKeySet, A, UPLO, compute_v, eigenvalues, eigenvectors);
+ }
+
+ // aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_eigh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO="L") {
+   return at::_ops::linalg_eigh::redispatch(dispatchKeySet, self, UPLO);
+ }
+
+ // aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & eigvals, at::Tensor & eigvecs, const at::Tensor & self, c10::string_view UPLO="L") {
+   return at::_ops::linalg_eigh_eigvals::redispatch(dispatchKeySet, self, UPLO, eigvals, eigvecs);
+ }
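For symmetric or Hermitian input, `eigh` reads only the triangle named by `UPLO` and returns real eigenvalues in ascending order. A sketch (illustrative only, assuming libtorch):

    at::Tensor S0 = at::randn({4, 4});
    at::Tensor S  = (S0 + S0.transpose(0, 1)) / 2;   // symmetrize
    auto vw = at::linalg_eigh(S, "L");               // (eigenvalues, eigenvectors)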
+ // aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_eigh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & eigvals, at::Tensor & eigvecs) {
+   return at::_ops::linalg_eigh_eigvals::redispatch(dispatchKeySet, self, UPLO, eigvals, eigvecs);
+ }
+
+ // aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> Tensor
+ inline at::Tensor linalg_eigvalsh(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO="L") {
+   return at::_ops::linalg_eigvalsh::redispatch(dispatchKeySet, self, UPLO);
+ }
+
+ // aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_eigvalsh_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view UPLO="L") {
+   return at::_ops::linalg_eigvalsh_out::redispatch(dispatchKeySet, self, UPLO, out);
+ }
+
+ // aten::linalg_eigvalsh.out(Tensor self, str UPLO="L", *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_eigvalsh_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view UPLO, at::Tensor & out) {
+   return at::_ops::linalg_eigvalsh_out::redispatch(dispatchKeySet, self, UPLO, out);
+ }
+
+ // aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor
+ inline at::Tensor linalg_householder_product(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau) {
+   return at::_ops::linalg_householder_product::redispatch(dispatchKeySet, input, tau);
+ }
+
+ // aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_householder_product_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & tau) {
+   return at::_ops::linalg_householder_product_out::redispatch(dispatchKeySet, input, tau, out);
+ }
+
+ // aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_householder_product_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tau, at::Tensor & out) {
+   return at::_ops::linalg_householder_product_out::redispatch(dispatchKeySet, input, tau, out);
+ }
+
+ // aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)
+ inline ::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors=false) {
+   return at::_ops::linalg_inv_ex::redispatch(dispatchKeySet, A, check_errors);
+ }
+
+ // aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & inverse, at::Tensor & info, const at::Tensor & A, bool check_errors=false) {
+   return at::_ops::linalg_inv_ex_inverse::redispatch(dispatchKeySet, A, check_errors, inverse, info);
+ }
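The `_ex` variant reports failure through `info` instead of throwing, which is useful for batched inputs where only some matrices are singular. A sketch (illustrative only, assuming libtorch and `<tuple>`):

    at::Tensor A = at::randn({3, 3});
    auto r = at::linalg_inv_ex(A, /*check_errors=*/false);
    at::Tensor inv  = std::get<0>(r);
    at::Tensor info = std::get<1>(r);   // nonzero info entry => singular input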
+ // aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
+ inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_inv_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool check_errors, at::Tensor & inverse, at::Tensor & info) {
+   return at::_ops::linalg_inv_ex_inverse::redispatch(dispatchKeySet, A, check_errors, inverse, info);
+ }
+
+ // aten::linalg_inv(Tensor A) -> Tensor
+ inline at::Tensor linalg_inv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A) {
+   return at::_ops::linalg_inv::redispatch(dispatchKeySet, A);
+ }
+
+ // aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_inv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A) {
+   return at::_ops::linalg_inv_out::redispatch(dispatchKeySet, A, out);
+ }
+
+ // aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_inv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out) {
+   return at::_ops::linalg_inv_out::redispatch(dispatchKeySet, A, out);
+ }
+
+ // aten::inverse(Tensor self) -> Tensor
+ inline at::Tensor inverse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+   return at::_ops::inverse::redispatch(dispatchKeySet, self);
+ }
+
+ // aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & inverse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+   return at::_ops::inverse_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & inverse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+   return at::_ops::inverse_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::inner(Tensor self, Tensor other) -> Tensor
+ inline at::Tensor inner(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
+   return at::_ops::inner::redispatch(dispatchKeySet, self, other);
+ }
+
+ // aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & inner_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+   return at::_ops::inner_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & inner_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+   return at::_ops::inner_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::outer(Tensor self, Tensor vec2) -> Tensor
+ inline at::Tensor outer(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) {
+   return at::_ops::outer::redispatch(dispatchKeySet, self, vec2);
+ }
+
+ // aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & outer_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
+   return at::_ops::outer_out::redispatch(dispatchKeySet, self, vec2, out);
+ }
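`inner` contracts the last dimensions of both operands, while `outer` forms the rank-1 product u v^T. A sketch (illustrative only, assuming libtorch):

    at::Tensor u = at::randn({3});
    at::Tensor v = at::randn({4});
    at::Tensor O = at::outer(u, v);   // shape {3, 4}
    at::Tensor d = at::inner(u, u);   // scalar dot product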
+ // aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & outer_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
+   return at::_ops::outer_out::redispatch(dispatchKeySet, self, vec2, out);
+ }
+
+ // aten::ger(Tensor self, Tensor vec2) -> Tensor
+ inline at::Tensor ger(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2) {
+   return at::_ops::ger::redispatch(dispatchKeySet, self, vec2);
+ }
+
+ // aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & ger_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
+   return at::_ops::ger_out::redispatch(dispatchKeySet, self, vec2, out);
+ }
+
+ // aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & ger_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
+   return at::_ops::ger_out::redispatch(dispatchKeySet, self, vec2, out);
+ }
+
+ // aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor linalg_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::linalg_norm::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
+ }
+
+ // aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ inline at::Tensor linalg_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::linalg_norm_ord_str::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
+ }
+
+ // aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & ord=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::linalg_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+ }
+
+ // aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+   return at::_ops::linalg_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+ }
+
+ // aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linalg_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+   return at::_ops::linalg_norm_ord_str_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+ }
+    // aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::linalg_norm_ord_str_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+    }
+
+    // aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor linalg_vector_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::linalg_vector_norm::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
+    }
+
+    // aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_vector_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & ord=2, at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::linalg_vector_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+    }
+
+    // aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_vector_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::linalg_vector_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+    }
+
+    // aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor linalg_matrix_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::linalg_matrix_norm::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
+    }
+
+    // aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::linalg_matrix_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+    }
+
+    // aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::linalg_matrix_norm_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+    }
+
+    // aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+    inline at::Tensor linalg_matrix_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::linalg_matrix_norm_str_ord::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype);
+    }
+
+    // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view ord="fro", at::IntArrayRef dim={-2,-1}, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+        return at::_ops::linalg_matrix_norm_str_ord_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+    }
+
+    // aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+        return at::_ops::linalg_matrix_norm_str_ord_out::redispatch(dispatchKeySet, self, ord, dim, keepdim, dtype, out);
+    }
+
+    // aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt) {
+        return at::_ops::_linalg_svd::redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver);
+    }
+
+    // aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt) {
+        return at::_ops::_linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver, U, S, Vh);
+    }
+
+    // aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
+        return at::_ops::_linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, compute_uv, driver, U, S, Vh);
+    }
+
+    // aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices=true, c10::optional<c10::string_view> driver=c10::nullopt) {
+        return at::_ops::linalg_svd::redispatch(dispatchKeySet, A, full_matrices, driver);
+    }
+
+    // aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=true, c10::optional<c10::string_view> driver=c10::nullopt) {
+        return at::_ops::linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, driver, U, S, Vh);
+    }
+
+    // aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_svd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
+        return at::_ops::linalg_svd_U::redispatch(dispatchKeySet, A, full_matrices, driver, U, S, Vh);
+    }
+
+    // aten::linalg_svdvals(Tensor A, *, str? driver=None) -> Tensor
+    inline at::Tensor linalg_svdvals(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional<c10::string_view> driver=c10::nullopt) {
+        return at::_ops::linalg_svdvals::redispatch(dispatchKeySet, A, driver);
+    }
+
+    // aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_svdvals_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A, c10::optional<c10::string_view> driver=c10::nullopt) {
+        return at::_ops::linalg_svdvals_out::redispatch(dispatchKeySet, A, driver, out);
+    }
+
+    // aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_svdvals_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::optional<c10::string_view> driver, at::Tensor & out) {
+        return at::_ops::linalg_svdvals_out::redispatch(dispatchKeySet, A, driver, out);
+    }
+
+    // aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor
+    inline at::Tensor linalg_cond(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt) {
+        return at::_ops::linalg_cond::redispatch(dispatchKeySet, self, p);
+    }
+
+    // aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_cond_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt) {
+        return at::_ops::linalg_cond_out::redispatch(dispatchKeySet, self, p, out);
+    }
+
+    // aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_cond_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out) {
+        return at::_ops::linalg_cond_out::redispatch(dispatchKeySet, self, p, out);
+    }
+
+    // aten::linalg_cond.p_str(Tensor self, str p) -> Tensor
+    inline at::Tensor linalg_cond(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p) {
+        return at::_ops::linalg_cond_p_str::redispatch(dispatchKeySet, self, p);
+    }
+
+    // aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_cond_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::string_view p) {
+        return at::_ops::linalg_cond_p_str_out::redispatch(dispatchKeySet, self, p, out);
+    }
+
+    // aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_cond_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::string_view p, at::Tensor & out) {
+        return at::_ops::linalg_cond_p_str_out::redispatch(dispatchKeySet, self, p, out);
+    }
+
+    // aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+        return at::_ops::linalg_pinv_atol_rtol_tensor::redispatch(dispatchKeySet, self, atol, rtol, hermitian);
+    }
+
+    // aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+        return at::_ops::linalg_pinv_atol_rtol_tensor_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_pinv_atol_rtol_tensor_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
+        return at::_ops::linalg_pinv_atol_rtol_float::redispatch(dispatchKeySet, self, atol, rtol, hermitian);
+    }
+
+    // aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
+        return at::_ops::linalg_pinv_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_pinv_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian=false) {
+        return at::_ops::linalg_pinv::redispatch(dispatchKeySet, self, rcond, hermitian);
+    }
+
+    // aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_pinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) {
+        return at::_ops::linalg_pinv_rcond_tensor::redispatch(dispatchKeySet, self, rcond, hermitian);
+    }
+
+    // aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double rcond, bool hermitian=false) {
+        return at::_ops::linalg_pinv_out::redispatch(dispatchKeySet, self, rcond, hermitian, out);
+    }
+
+    // aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_pinv_out::redispatch(dispatchKeySet, self, rcond, hermitian, out);
+    }
+
+    // aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false) {
+        return at::_ops::linalg_pinv_out_rcond_tensor::redispatch(dispatchKeySet, self, rcond, hermitian, out);
+    }
+
+    // aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_pinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_pinv_out_rcond_tensor::redispatch(dispatchKeySet, self, rcond, hermitian, out);
+    }
+
+    // aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
+        return at::_ops::_linalg_solve_ex::redispatch(dispatchKeySet, A, B, left, check_errors);
+    }
+
+    // aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
+        return at::_ops::_linalg_solve_ex_result::redispatch(dispatchKeySet, A, B, left, check_errors, result, LU, pivots, info);
+    }
+
+    // aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _linalg_solve_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & LU, at::Tensor & pivots, at::Tensor & info) {
+        return at::_ops::_linalg_solve_ex_result::redispatch(dispatchKeySet, A, B, left, check_errors, result, LU, pivots, info);
+    }
+
+    // aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)
+    inline ::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
+        return at::_ops::linalg_solve_ex::redispatch(dispatchKeySet, A, B, left, check_errors);
+    }
+
+    // aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & result, at::Tensor & info, const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false) {
+        return at::_ops::linalg_solve_ex_out::redispatch(dispatchKeySet, A, B, left, check_errors, result, info);
+    }
+
+    // aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_solve_ex_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, at::Tensor & result, at::Tensor & info) {
+        return at::_ops::linalg_solve_ex_out::redispatch(dispatchKeySet, A, B, left, check_errors, result, info);
+    }
+
+    // aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor
+    inline at::Tensor linalg_solve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left=true) {
+        return at::_ops::linalg_solve::redispatch(dispatchKeySet, A, B, left);
+    }
+
+    // aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_solve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & A, const at::Tensor & B, bool left=true) {
+        return at::_ops::linalg_solve_out::redispatch(dispatchKeySet, A, B, left, out);
+    }
+
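Worth noting before the next run of wrappers: each function threads its `c10::DispatchKeySet` straight through to `at::_ops::<op>::redispatch`, which is what lets a kernel resume dispatch below the key it just handled rather than restarting from the top of the dispatcher. A hedged sketch of the usual masking idiom, assuming the kernel name is hypothetical and that `c10::after_autograd_keyset` is the mask generated autograd kernels apply:

    // Hypothetical autograd-level wrapper for linalg_solve, illustrative only.
    at::Tensor my_autograd_linalg_solve(c10::DispatchKeySet ks, const at::Tensor & A, const at::Tensor & B) {
      // ... autograd bookkeeping would go here ...
      // Strip the autograd keys so the redispatch lands on the backend kernel.
      return at::redispatch::linalg_solve(ks & c10::after_autograd_keyset, A, B, /*left=*/true);
    }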
+    // aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_solve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, const at::Tensor & B, bool left, at::Tensor & out) {
+        return at::_ops::linalg_solve_out::redispatch(dispatchKeySet, A, B, left, out);
+    }
+
+    // aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor
+    inline at::Tensor linalg_tensorinv(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind=2) {
+        return at::_ops::linalg_tensorinv::redispatch(dispatchKeySet, self, ind);
+    }
+
+    // aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_tensorinv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t ind=2) {
+        return at::_ops::linalg_tensorinv_out::redispatch(dispatchKeySet, self, ind, out);
+    }
+
+    // aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_tensorinv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t ind, at::Tensor & out) {
+        return at::_ops::linalg_tensorinv_out::redispatch(dispatchKeySet, self, ind, out);
+    }
+
+    // aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor
+    inline at::Tensor linalg_tensorsolve(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=c10::nullopt) {
+        return at::_ops::linalg_tensorsolve::redispatch(dispatchKeySet, self, other, dims);
+    }
+
+    // aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_tensorsolve_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims=c10::nullopt) {
+        return at::_ops::linalg_tensorsolve_out::redispatch(dispatchKeySet, self, other, dims, out);
+    }
+
+    // aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_tensorsolve_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims, at::Tensor & out) {
+        return at::_ops::linalg_tensorsolve_out::redispatch(dispatchKeySet, self, other, dims, out);
+    }
+
+    // aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)
+    inline ::std::tuple<at::Tensor,at::Tensor> linalg_qr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode="reduced") {
+        return at::_ops::linalg_qr::redispatch(dispatchKeySet, A, mode);
+    }
+
+    // aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & Q, at::Tensor & R, const at::Tensor & A, c10::string_view mode="reduced") {
+        return at::_ops::linalg_qr_out::redispatch(dispatchKeySet, A, mode, Q, R);
+    }
+
+    // aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
+    inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_qr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, c10::string_view mode, at::Tensor & Q, at::Tensor & R) {
+        return at::_ops::linalg_qr_out::redispatch(dispatchKeySet, A, mode, Q, R);
+    }
+
+    // aten::linalg_matrix_power(Tensor self, int n) -> Tensor
+    inline at::Tensor linalg_matrix_power(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n) {
+        return at::_ops::linalg_matrix_power::redispatch(dispatchKeySet, self, n);
+    }
+
+    // aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_power_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t n) {
+        return at::_ops::linalg_matrix_power_out::redispatch(dispatchKeySet, self, n, out);
+    }
+
+    // aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_power_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t n, at::Tensor & out) {
+        return at::_ops::linalg_matrix_power_out::redispatch(dispatchKeySet, self, n, out);
+    }
+
+    // aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank_atol_rtol_tensor::redispatch(dispatchKeySet, input, atol, rtol, hermitian);
+    }
+
+    // aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::redispatch(dispatchKeySet, input, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_matrix_rank_atol_rtol_tensor_out::redispatch(dispatchKeySet, input, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank_atol_rtol_float::redispatch(dispatchKeySet, self, atol, rtol, hermitian);
+    }
+
+    // aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_matrix_rank_atol_rtol_float_out::redispatch(dispatchKeySet, self, atol, rtol, hermitian, out);
+    }
+
+    // aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank::redispatch(dispatchKeySet, self, tol, hermitian);
+    }
+
+    // aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double tol, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank_out::redispatch(dispatchKeySet, self, tol, hermitian, out);
+    }
+
+    // aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double tol, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_matrix_rank_out::redispatch(dispatchKeySet, self, tol, hermitian, out);
+    }
+
+    // aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor
+    inline at::Tensor linalg_matrix_rank(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank_tol_tensor::redispatch(dispatchKeySet, input, tol, hermitian);
+    }
+
+    // aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & tol, bool hermitian=false) {
+        return at::_ops::linalg_matrix_rank_out_tol_tensor::redispatch(dispatchKeySet, input, tol, hermitian, out);
+    }
+
+    // aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_rank_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & tol, bool hermitian, at::Tensor & out) {
+        return at::_ops::linalg_matrix_rank_out_tol_tensor::redispatch(dispatchKeySet, input, tol, hermitian, out);
+    }
+
+    // aten::linalg_multi_dot(Tensor[] tensors) -> Tensor
+    inline at::Tensor linalg_multi_dot(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::linalg_multi_dot::redispatch(dispatchKeySet, tensors);
+    }
+
+    // aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_multi_dot_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) {
+        return at::_ops::linalg_multi_dot_out::redispatch(dispatchKeySet, tensors, out);
+    }
+
+    // aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_multi_dot_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) {
+        return at::_ops::linalg_multi_dot_out::redispatch(dispatchKeySet, tensors, out);
+    }
+
+    // aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
+    inline at::Tensor nested_to_padded_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
+        return at::_ops::nested_to_padded_tensor::redispatch(dispatchKeySet, self, padding, output_size);
+    }
+
+    // aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor
+    inline at::Tensor _test_serialization_subcmul(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+        return at::_ops::_test_serialization_subcmul::redispatch(dispatchKeySet, self, other, alpha);
+    }
+
+    // aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor
+    inline at::Tensor _test_optional_intlist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) {
+        return at::_ops::_test_optional_intlist::redispatch(dispatchKeySet, values, addends);
+    }
+
+    // aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor
+    inline at::Tensor _test_optional_filled_intlist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends) {
+        return at::_ops::_test_optional_filled_intlist::redispatch(dispatchKeySet, values, addends);
+    }
+
+    // aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor
+    inline at::Tensor _test_optional_floatlist(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
+        return at::_ops::_test_optional_floatlist::redispatch(dispatchKeySet, values, addends);
+    }
+
+    // aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
+    inline at::Tensor _test_string_default(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\") {
+        return at::_ops::_test_string_default::redispatch(dispatchKeySet, dummy, a, b);
+    }
+
+    // aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor
+    inline at::Tensor _test_ambiguous_defaults(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a=1, int64_t b=1) {
+        return at::_ops::_test_ambiguous_defaults_a::redispatch(dispatchKeySet, dummy, a, b);
+    }
+
+    // aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b="2") -> Tensor
+    inline at::Tensor _test_ambiguous_defaults(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, c10::string_view b) {
+        return at::_ops::_test_ambiguous_defaults_b::redispatch(dispatchKeySet, dummy, a, b);
+    }
+
+    // aten::_test_warn_in_autograd(Tensor self) -> Tensor
+    inline at::Tensor _test_warn_in_autograd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_test_warn_in_autograd::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor
+    inline at::Tensor _test_autograd_multiple_dispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor
+    inline at::Tensor _test_autograd_multiple_dispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool b) {
+        return at::_ops::_test_autograd_multiple_dispatch_ntonly::redispatch(dispatchKeySet, self, b);
+    }
+
+    // aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)
+    inline at::Tensor _test_autograd_multiple_dispatch_view(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_test_autograd_multiple_dispatch_view::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor
+    inline at::Tensor _test_autograd_multiple_dispatch_view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_test_autograd_multiple_dispatch_view_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor
+    inline at::Tensor segment_reduce(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & indices={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional<at::Scalar> & initial=c10::nullopt) {
+        return at::_ops::segment_reduce::redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial);
+    }
+
+    // aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None) -> Tensor
+    inline at::Tensor _segment_reduce_backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, const c10::optional<at::Scalar> & initial=c10::nullopt) {
+        return at::_ops::_segment_reduce_backward::redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial);
+    }
+
+    // aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor
+    inline at::Tensor pad_sequence(c10::DispatchKeySet dispatchKeySet, at::TensorList sequences, bool batch_first=false, double padding_value=0.0) {
+        return at::_ops::pad_sequence::redispatch(dispatchKeySet, sequences, batch_first, padding_value);
+    }
+
+    // aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor
+    inline at::Tensor flatten_dense_tensors(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors) {
+        return at::_ops::flatten_dense_tensors::redispatch(dispatchKeySet, tensors);
+    }
+
+    // aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]
+    inline ::std::vector<at::Tensor> unflatten_dense_tensors(c10::DispatchKeySet dispatchKeySet, const at::Tensor & flat, at::TensorList tensors) {
+        return at::_ops::unflatten_dense_tensors::redispatch(dispatchKeySet, flat, tensors);
+    }
+
+    // aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+    inline at::Tensor _nested_tensor_from_tensor_list(c10::DispatchKeySet dispatchKeySet, at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt) {
+        return at::_ops::_nested_tensor_from_tensor_list::redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory);
+    }
+
+    // aten::_fw_primal_copy(Tensor self, int level) -> Tensor
+    inline at::Tensor _fw_primal_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {
+        return at::_ops::_fw_primal_copy::redispatch(dispatchKeySet, self, level);
+    }
+
+    // aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor
+    inline at::Tensor _make_dual_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
+        return at::_ops::_make_dual_copy::redispatch(dispatchKeySet, primal, tangent, level);
+    }
+
+    // aten::view_as_real_copy(Tensor self) -> Tensor
+    inline at::Tensor view_as_real_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::view_as_real_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::view_as_complex_copy(Tensor self) -> Tensor
+    inline at::Tensor view_as_complex_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::view_as_complex_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_conj_copy(Tensor self) -> Tensor
+    inline at::Tensor _conj_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_conj_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_neg_view_copy(Tensor self) -> Tensor
+    inline at::Tensor _neg_view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_neg_view_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+    inline at::Tensor as_strided_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+    }
+
+    // aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+    inline at::Tensor as_strided_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_copy::redispatch(dispatchKeySet, self, size, stride, storage_offset);
+    }
+
+    // aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor
+    inline at::Tensor _sparse_broadcast_to_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::_sparse_broadcast_to_copy::redispatch(dispatchKeySet, self, size);
+    }
+
+    // aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor
+    inline at::Tensor diagonal_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+        return at::_ops::diagonal_copy::redispatch(dispatchKeySet, self, offset, dim1, dim2);
+    }
+
+    // aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
+    inline at::Tensor expand_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) {
+        return at::_ops::expand_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), implicit);
+    }
+
+    // aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
+    inline at::Tensor expand_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) {
+        return at::_ops::expand_copy::redispatch(dispatchKeySet, self, size, implicit);
+    }
+
+    // aten::permute_copy(Tensor self, int[] dims) -> Tensor
+    inline at::Tensor permute_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims) {
+        return at::_ops::permute_copy::redispatch(dispatchKeySet, self, dims);
+    }
+
+    // aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
+    inline at::Tensor _reshape_alias_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+        return at::_ops::_reshape_alias_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride));
+    }
+
+    // aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor
+    inline at::Tensor _reshape_alias_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+        return at::_ops::_reshape_alias_copy::redispatch(dispatchKeySet, self, size, stride);
+    }
+
+    // aten::select_copy.int(Tensor self, int dim, int index) -> Tensor
+    inline at::Tensor select_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t index) {
+        return at::_ops::select_copy_int::redispatch(dispatchKeySet, self, dim, index);
+    }
+
+    // aten::detach_copy(Tensor self) -> Tensor
+    inline at::Tensor detach_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::detach_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+    inline at::Tensor slice_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+        return at::_ops::slice_copy_Tensor::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+    }
+
+    // aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+    inline at::Tensor slice_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+        return at::_ops::slice_copy_Tensor::redispatch(dispatchKeySet, self, dim, start, end, step);
+    }
+
+    // aten::split_copy.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
+    inline ::std::vector<at::Tensor> split_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+        return at::_ops::split_copy_Tensor::redispatch(dispatchKeySet, self, split_size, dim);
+    }
+
+    // aten::split_with_sizes_copy(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
+    inline ::std::vector<at::Tensor> split_with_sizes_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+        return at::_ops::split_with_sizes_copy::redispatch(dispatchKeySet, self, split_sizes, dim);
+    }
+
+    // aten::squeeze_copy(Tensor self) -> Tensor
+    inline at::Tensor squeeze_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::squeeze_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
+    inline at::Tensor squeeze_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+        return at::_ops::squeeze_copy_dim::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::t_copy(Tensor self) -> Tensor
+    inline at::Tensor t_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::t_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
+    inline at::Tensor transpose_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+        return at::_ops::transpose_copy_int::redispatch(dispatchKeySet, self, dim0, dim1);
+    }
+
+    // aten::unsqueeze_copy(Tensor self, int dim) -> Tensor
+    inline at::Tensor unsqueeze_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim) {
+        return at::_ops::unsqueeze_copy::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::_indices_copy(Tensor self) -> Tensor
+    inline at::Tensor _indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_indices_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_values_copy(Tensor self) -> Tensor
+    inline at::Tensor _values_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::_values_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::indices_copy(Tensor self) -> Tensor
+    inline at::Tensor indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::indices_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::values_copy(Tensor self) -> Tensor
+    inline at::Tensor values_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::values_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::crow_indices_copy(Tensor self) -> Tensor
+    inline at::Tensor crow_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::crow_indices_copy::redispatch(dispatchKeySet, self);
+    }
+
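The `_symint` pairs above and below follow one pattern: the concrete-int overload wraps each value via `c10::fromIntArrayRef` / `c10::SymInt` before redispatching, while the `_symint` overload forwards symbolic values untouched. A small sketch of calling both, with illustrative values (the SymInts here are just wrapped concrete ints; both calls reach `at::_ops::slice_copy_Tensor::redispatch`):

    at::Tensor t = at::arange(10);
    c10::DispatchKeySet ks = t.key_set();
    // Concrete-int overload: start/end are converted to c10::SymInt internally.
    at::Tensor a = at::redispatch::slice_copy(ks, t, /*dim=*/0, /*start=*/2, /*end=*/8, /*step=*/2);
    // SymInt overload: forwards the symbolic values as-is.
    at::Tensor b = at::redispatch::slice_copy_symint(ks, t, 0, c10::SymInt(2), c10::SymInt(8), c10::SymInt(2));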
+    // aten::col_indices_copy(Tensor self) -> Tensor
+    inline at::Tensor col_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::col_indices_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::ccol_indices_copy(Tensor self) -> Tensor
+    inline at::Tensor ccol_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::ccol_indices_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::row_indices_copy(Tensor self) -> Tensor
+    inline at::Tensor row_indices_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::row_indices_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]
+    inline ::std::vector<at::Tensor> unbind_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim=0) {
+        return at::_ops::unbind_copy_int::redispatch(dispatchKeySet, self, dim);
+    }
+
+    // aten::view_copy(Tensor self, SymInt[] size) -> Tensor
+    inline at::Tensor view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::view_copy::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size));
+    }
+
+    // aten::view_copy(Tensor self, SymInt[] size) -> Tensor
+    inline at::Tensor view_copy_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size) {
+        return at::_ops::view_copy::redispatch(dispatchKeySet, self, size);
+    }
+
+    // aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
+    inline at::Tensor view_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype) {
+        return at::_ops::view_copy_dtype::redispatch(dispatchKeySet, self, dtype);
+    }
+
+    // aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor
+    inline at::Tensor unfold_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
+        return at::_ops::unfold_copy::redispatch(dispatchKeySet, self, dimension, size, step);
+    }
+
+    // aten::alias_copy(Tensor self) -> Tensor
+    inline at::Tensor alias_copy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::alias_copy::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fw_primal_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t level) {
+        return at::_ops::_fw_primal_copy_out::redispatch(dispatchKeySet, self, level, out);
+    }
+
+    // aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fw_primal_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level, at::Tensor & out) {
+        return at::_ops::_fw_primal_copy_out::redispatch(dispatchKeySet, self, level, out);
+    }
+
+    // aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _make_dual_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
+        return at::_ops::_make_dual_copy_out::redispatch(dispatchKeySet, primal, tangent, level, out);
+    }
+
+    // aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _make_dual_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out) {
+        return at::_ops::_make_dual_copy_out::redispatch(dispatchKeySet, primal, tangent, level, out);
+    }
+
+    // aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_as_real_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::view_as_real_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_as_real_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::view_as_real_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_as_complex_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::view_as_complex_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_as_complex_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::view_as_complex_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _conj_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::_conj_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _conj_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::_conj_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _neg_view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::_neg_view_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _neg_view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::_neg_view_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & as_strided_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+    }
+
+    // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & as_strided_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
+        return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+    }
+
+    // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & as_strided_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+        return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, size, stride, storage_offset, out);
+    }
+
+    // aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & as_strided_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
+        return at::_ops::as_strided_copy_out::redispatch(dispatchKeySet, self, size, stride, storage_offset, out);
+    }
+
+    // aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _sparse_broadcast_to_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::_sparse_broadcast_to_copy_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _sparse_broadcast_to_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+        return at::_ops::_sparse_broadcast_to_copy_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diagonal_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+        return at::_ops::diagonal_copy_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
+    }
+
+    // aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diagonal_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+        return at::_ops::diagonal_copy_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
+    }
+
+    // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & expand_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, bool implicit=false) {
+        return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), implicit, out);
+    }
+
+    // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & expand_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, bool implicit, at::Tensor & out) {
+        return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), implicit, out);
+    }
+
+    // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & expand_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit=false) {
+        return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, size, implicit, out);
+    }
+
+    // aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & expand_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit, at::Tensor & out) {
+        return at::_ops::expand_copy_out::redispatch(dispatchKeySet, self, size, implicit, out);
+    }
+
+    // aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & permute_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) {
+        return at::_ops::permute_copy_out::redispatch(dispatchKeySet, self, dims, out);
+    }
+
+    // aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & permute_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
+        return at::_ops::permute_copy_out::redispatch(dispatchKeySet, self, dims, out);
+    }
+
+    // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _reshape_alias_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+        return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _reshape_alias_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
+        return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _reshape_alias_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+        return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, size, stride, out);
+    }
+
+    // aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _reshape_alias_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
+        return at::_ops::_reshape_alias_copy_out::redispatch(dispatchKeySet, self, size, stride, out);
+    }
+
+    // aten::select_copy.int_out(Tensor self, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & select_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, int64_t index) {
+        return at::_ops::select_copy_int_out::redispatch(dispatchKeySet, self, dim, index, out);
+    }
+
+    // aten::select_copy.int_out(Tensor self, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & select_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, int64_t index, at::Tensor & out) {
+        return at::_ops::select_copy_int_out::redispatch(dispatchKeySet, self, dim, index, out);
+    }
+
+    // aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & detach_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::detach_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & detach_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::detach_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slice_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+        return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+    }
+
+    // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slice_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
+        return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+    }
+
+    // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slice_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+        return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start, end, step, out);
+    }
+
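+    // [Editorial note -- not from the header.] The plain `slice_copy_out`
+    // overloads above accept c10::optional<int64_t> bounds and wrap each
+    // present value into a c10::SymInt before redispatching, via
+    //
+    //   start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt
+    //
+    // whereas the `_symint` overloads forward c10::optional<c10::SymInt>
+    // untouched, so symbolic shapes survive through tracing.
+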
+    // aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slice_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
+        return at::_ops::slice_copy_Tensor_out::redispatch(dispatchKeySet, self, dim, start, end, step, out);
+    }
+
+    // aten::split_copy.Tensor_out(Tensor self, int split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+    inline void split_copy_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+        return at::_ops::split_copy_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out);
+    }
+
+    // aten::split_copy.Tensor_out(Tensor self, int split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+    inline void split_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
+        return at::_ops::split_copy_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out);
+    }
+
+    // aten::split_with_sizes_copy.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+    inline void split_with_sizes_copy_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+        return at::_ops::split_with_sizes_copy_out::redispatch(dispatchKeySet, self, split_sizes, dim, out);
+    }
+
+    // aten::split_with_sizes_copy.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+    inline void split_with_sizes_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
+        return at::_ops::split_with_sizes_copy_out::redispatch(dispatchKeySet, self, split_sizes, dim, out);
+    }
+
+    // aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & squeeze_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::squeeze_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & squeeze_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::squeeze_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & squeeze_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) {
+        return at::_ops::squeeze_copy_dim_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & squeeze_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
+        return at::_ops::squeeze_copy_dim_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & t_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::t_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & t_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::t_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
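+    // [Editorial note -- not from the header.] Multi-output variants such as
+    // split_copy_out and split_with_sizes_copy_out return void and write into
+    // a caller-provided at::TensorList: at this layer the caller is expected
+    // to supply one pre-allocated output tensor per chunk.
+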
+    // aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & transpose_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+        return at::_ops::transpose_copy_int_out::redispatch(dispatchKeySet, self, dim0, dim1, out);
+    }
+
+    // aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & transpose_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
+        return at::_ops::transpose_copy_int_out::redispatch(dispatchKeySet, self, dim0, dim1, out);
+    }
+
+    // aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & unsqueeze_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim) {
+        return at::_ops::unsqueeze_copy_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & unsqueeze_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::Tensor & out) {
+        return at::_ops::unsqueeze_copy_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _values_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::_values_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _values_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::_values_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & values_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::values_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & values_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::values_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & crow_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::crow_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & crow_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::crow_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & col_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::col_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & col_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::col_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
+    inline void unbind_copy_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, int64_t dim=0) {
+        return at::_ops::unbind_copy_int_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()
+    inline void unbind_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, at::TensorList out) {
+        return at::_ops::unbind_copy_int_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+        return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_copy_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+        return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_copy_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+        return at::_ops::view_copy_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::ScalarType dtype) {
+        return at::_ops::view_copy_dtype_out::redispatch(dispatchKeySet, self, dtype, out);
+    }
+
+    // aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
+        return at::_ops::view_copy_dtype_out::redispatch(dispatchKeySet, self, dtype, out);
+    }
+
+    // aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & unfold_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
+        return at::_ops::unfold_copy_out::redispatch(dispatchKeySet, self, dimension, size, step, out);
+    }
+
+    // aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & unfold_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dimension, int64_t size, int64_t step, at::Tensor & out) {
+        return at::_ops::unfold_copy_out::redispatch(dispatchKeySet, self, dimension, size, step, out);
+    }
+
+    // aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & alias_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::alias_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & alias_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::alias_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
+    inline at::Tensor to_padded_tensor(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
+        return at::_ops::to_padded_tensor::redispatch(dispatchKeySet, self, padding, output_size);
+    }
+
+    // aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor
+    inline at::Tensor _nested_tensor_softmax_with_shape(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & query) {
+        return at::_ops::_nested_tensor_softmax_with_shape::redispatch(dispatchKeySet, self, query);
+    }
+
+    // aten::_nested_tensor_layer_norm(Tensor self, Tensor? weight, Tensor? bias, float eps) -> Tensor
+    inline at::Tensor _nested_tensor_layer_norm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
+        return at::_ops::_nested_tensor_layer_norm::redispatch(dispatchKeySet, self, weight, bias, eps);
+    }
+
+    // aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
+    inline at::Tensor _transformer_encoder_layer_fwd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt) {
+        return at::_ops::_transformer_encoder_layer_fwd::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
+    }
+
+    // aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional<int64_t> mask_type=c10::nullopt) {
+        return at::_ops::_native_multi_head_attention::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
+    }
+
+    // aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool need_attn_weights=false, bool is_causal=false) {
+        return at::_ops::_scaled_dot_product_attention::redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
+    }
+
+    // aten::_scaled_dot_product_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_forward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool need_attn_weights=false, bool is_causal=false) {
+        return at::_ops::_scaled_dot_product_attention_forward::redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
+    }
+
+    // aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool need_attn_weights=false, bool is_causal=false) {
+        return at::_ops::_scaled_dot_product_attention_math::redispatch(dispatchKeySet, query, key, value, attn_mask, dropout_p, need_attn_weights, is_causal);
+    }
+
+    // aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
+    inline at::Tensor _triton_scaled_dot_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) {
+        return at::_ops::_triton_scaled_dot_attention::redispatch(dispatchKeySet, q, k, v, dropout_p);
+    }
+
+    // aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
+    inline at::Tensor _triton_multi_head_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}) {
+        return at::_ops::_triton_multi_head_attention::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
+    }
+
+    // aten::special_airy_ai(Tensor x) -> Tensor
+    inline at::Tensor special_airy_ai(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
+        return at::_ops::special_airy_ai::redispatch(dispatchKeySet, x);
+    }
+
+    // aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_airy_ai_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) {
+        return at::_ops::special_airy_ai_out::redispatch(dispatchKeySet, x, out);
+    }
+
+    // aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_airy_ai_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
+        return at::_ops::special_airy_ai_out::redispatch(dispatchKeySet, x, out);
+    }
+
+    // aten::_flash_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal) -> Tensor
+    inline at::Tensor _flash_scaled_dot_product_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal) {
+        return at::_ops::_flash_scaled_dot_product_attention::redispatch(dispatchKeySet, query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal);
+    }
+
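+    // [Editorial sketch -- not from the header.] Setting aside masking and
+    // dropout, the `_math` fallback above computes the textbook
+    // softmax(Q K^T / sqrt(E)) V, expressible in public aten ops (shapes
+    // [B, H, L, E]; variable names illustrative):
+    //
+    //   double scale = 1.0 / std::sqrt(static_cast<double>(query.size(-1)));
+    //   at::Tensor attn = at::softmax(at::matmul(query, key.transpose(-2, -1)) * scale, -1);
+    //   at::Tensor out = at::matmul(attn, value);  // first element of the returned pair
+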
+    // aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transformer_decoder_only_layer_fwd(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}) {
+        return at::_ops::_transformer_decoder_only_layer_fwd::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value);
+    }
+
+    // aten::_native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_decoder_only_multi_head_attention(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}, bool need_weights=true, bool average_attn_weights=true) {
+        return at::_ops::_native_decoder_only_multi_head_attention::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights);
+    }
+
+    // aten::special_bessel_j0(Tensor self) -> Tensor
+    inline at::Tensor special_bessel_j0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_bessel_j0::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_j0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_bessel_j0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_j0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_bessel_j0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_bessel_j1(Tensor self) -> Tensor
+    inline at::Tensor special_bessel_j1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_bessel_j1::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_j1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_bessel_j1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_j1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_bessel_j1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_bessel_y0(Tensor self) -> Tensor
+    inline at::Tensor special_bessel_y0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_bessel_y0::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_y0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_bessel_y0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_y0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_bessel_y0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_bessel_y1(Tensor self) -> Tensor
+    inline at::Tensor special_bessel_y1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_bessel_y1::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_y1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_bessel_y1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_bessel_y1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_bessel_y1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_t::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_t_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_t_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
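+    // [Editorial sketch -- not from the header.] The Chebyshev entry points
+    // evaluate T_n(x), which for |x| <= 1 equals cos(n * acos(x)) and obeys
+    // the recurrence T_{n+1}(x) = 2 x T_n(x) - T_{n-1}(x). A scalar reference
+    // (hypothetical helper, double precision):
+    //
+    //   double chebyshev_t(int64_t n, double x) {
+    //       double a = 1.0, b = x;  // T_0(x), T_1(x)
+    //       if (n == 0) return a;
+    //       for (int64_t k = 1; k < n; ++k) {
+    //           double c = 2.0 * x * b - a;
+    //           a = b;
+    //           b = c;
+    //       }
+    //       return b;
+    //   }
+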
+    // aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_u::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_u_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_u_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_v::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_v_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_v_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_w::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_w_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_w_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_hermite_polynomial_h(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_h::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_hermite_polynomial_h(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_h_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_hermite_polynomial_h(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_hermite_polynomial_h_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_h_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_h_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_h_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_hermite_polynomial_h_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_h_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_h_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_h_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_hermite_polynomial_h_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_h_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_hermite_polynomial_h_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_h_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_hermite_polynomial_h_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_hermite_polynomial_he(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_he::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_hermite_polynomial_he(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_he_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_hermite_polynomial_he(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_hermite_polynomial_he_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_he_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_he_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_he_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_hermite_polynomial_he_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_he_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_hermite_polynomial_he_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_he_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_hermite_polynomial_he_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_he_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_hermite_polynomial_he_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_hermite_polynomial_he_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_hermite_polynomial_he_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_laguerre_polynomial_l(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_laguerre_polynomial_l::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_laguerre_polynomial_l(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_laguerre_polynomial_l_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_laguerre_polynomial_l(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_laguerre_polynomial_l_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_laguerre_polynomial_l_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_laguerre_polynomial_l_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_laguerre_polynomial_l_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_laguerre_polynomial_l_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_laguerre_polynomial_l_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_laguerre_polynomial_l_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_laguerre_polynomial_l_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_laguerre_polynomial_l_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_laguerre_polynomial_l_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_laguerre_polynomial_l_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_laguerre_polynomial_l_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_laguerre_polynomial_l_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_legendre_polynomial_p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_legendre_polynomial_p::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_legendre_polynomial_p(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_legendre_polynomial_p_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_legendre_polynomial_p(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_legendre_polynomial_p_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_legendre_polynomial_p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_legendre_polynomial_p_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_legendre_polynomial_p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_legendre_polynomial_p_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_legendre_polynomial_p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_legendre_polynomial_p_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_legendre_polynomial_p_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) {
+        return at::_ops::special_legendre_polynomial_p_x_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_legendre_polynomial_p_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_legendre_polynomial_p_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_legendre_polynomial_p_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) {
+        return at::_ops::special_legendre_polynomial_p_n_scalar_out::redispatch(dispatchKeySet, x, n, out);
+    }
+
+    // aten::special_modified_bessel_i0(Tensor self) -> Tensor
+    inline at::Tensor special_modified_bessel_i0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_i0::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_i0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_i0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_i0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_modified_bessel_i0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_modified_bessel_i1(Tensor self) -> Tensor
+    inline at::Tensor special_modified_bessel_i1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_i1::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_i1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_i1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_i1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_modified_bessel_i1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_modified_bessel_k0(Tensor self) -> Tensor
+    inline at::Tensor special_modified_bessel_k0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_k0::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_k0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_k0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_k0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_modified_bessel_k0_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_modified_bessel_k1(Tensor self) -> Tensor
+    inline at::Tensor special_modified_bessel_k1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_k1::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_k1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::special_modified_bessel_k1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_modified_bessel_k1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::special_modified_bessel_k1_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor
+    inline at::Tensor special_scaled_modified_bessel_k0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
+        return at::_ops::special_scaled_modified_bessel_k0::redispatch(dispatchKeySet, x);
+    }
+
+    // aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_scaled_modified_bessel_k0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) {
+        return at::_ops::special_scaled_modified_bessel_k0_out::redispatch(dispatchKeySet, x, out);
+    }
+
+    // aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_scaled_modified_bessel_k0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
+        return at::_ops::special_scaled_modified_bessel_k0_out::redispatch(dispatchKeySet, x, out);
+    }
+
+    // aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor
+    inline at::Tensor special_scaled_modified_bessel_k1(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) {
+        return at::_ops::special_scaled_modified_bessel_k1::redispatch(dispatchKeySet, x);
+    }
+
+    // aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_scaled_modified_bessel_k1_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) {
+        return at::_ops::special_scaled_modified_bessel_k1_out::redispatch(dispatchKeySet, x, out);
+    }
+
+    // aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & special_scaled_modified_bessel_k1_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) {
+        return at::_ops::special_scaled_modified_bessel_k1_out::redispatch(dispatchKeySet, x, out);
+    }
+
+    // aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor
+    inline at::Tensor special_shifted_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) {
+        return at::_ops::special_shifted_chebyshev_polynomial_t::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor
+    inline at::Tensor special_shifted_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) {
+        return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
+    // aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor
+    inline at::Tensor special_shifted_chebyshev_polynomial_t(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) {
+        return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::redispatch(dispatchKeySet, x, n);
+    }
+
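+    // [Editorial note -- not from the header.] The "scaled" variants above fold
+    // the dominant exponential into the kernel: special_scaled_modified_bessel_k0
+    // evaluates exp(x) * K0(x), and _k1 evaluates exp(x) * K1(x), which stay
+    // finite for large x where the unscaled K0/K1 underflow.
+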
+ inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_t_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_t_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_u(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_u_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_u_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_v(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_v_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor + inline at::Tensor special_shifted_chebyshev_polynomial_w(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::redispatch(dispatchKeySet, x, n); + } + + // aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_w_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & x, const at::Tensor & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & x, const at::Tensor & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Scalar & n) { + return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_shifted_chebyshev_polynomial_w_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Scalar & n, at::Tensor & out) { + return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar_out::redispatch(dispatchKeySet, x, n, out); + } + + // aten::special_spherical_bessel_j0(Tensor x) -> Tensor + inline at::Tensor special_spherical_bessel_j0(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x) { + return at::_ops::special_spherical_bessel_j0::redispatch(dispatchKeySet, x); + } + + // aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & special_spherical_bessel_j0_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x) { + return at::_ops::special_spherical_bessel_j0_out::redispatch(dispatchKeySet, x, out); + } + + // aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & special_spherical_bessel_j0_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out) { + return at::_ops::special_spherical_bessel_j0_out::redispatch(dispatchKeySet, x, out); + } + + // aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor + inline at::Tensor _foobar(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar::redispatch(dispatchKeySet, self, arg1, arg2, arg3); + } + + // aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> () + inline void _fused_adam_(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_adam_::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } + + // aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _new_zeros_with_same_feature_meta_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) { + return at::_ops::_new_zeros_with_same_feature_meta_out::redispatch(dispatchKeySet, self, other, self_num_batch_dims, out); + } + + // aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _new_zeros_with_same_feature_meta_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims, at::Tensor & out) { + return at::_ops::_new_zeros_with_same_feature_meta_out::redispatch(dispatchKeySet, self, other, self_num_batch_dims, out); + } + + // aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { + return at::_ops::_cudnn_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1); + }
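+
+ // Editorial usage sketch (illustrative, not generated code): every operator in
+ // this header is emitted as an `_out` / `_outf` pair that resolves to the same
+ // at::_ops entry; the two differ only in where the output tensor sits in the
+ // argument list. Assuming these wrappers live in at::redispatch, as in the
+ // upstream RedispatchFunctions.h, and `ks`/`self` are the key set and input
+ // handed to the current kernel:
+ //
+ //   at::Tensor out = at::empty_like(self);
+ //   at::redispatch::special_modified_bessel_i0_out(ks, out, self);  // out-first overload
+ //   at::redispatch::special_modified_bessel_i0_outf(ks, self, out); // schema-order overload
+
+ // aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!)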
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _cudnn_ctc_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_cudnn_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity, out0, out1); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_rnn_flatten_weight_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_rnn_flatten_weight_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_rnn_flatten_weight_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_rnn_flatten_weight_symint_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, at::Tensor & out) { + return at::_ops::_cudnn_rnn_flatten_weight_out::redispatch(dispatchKeySet, weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional, out); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor?
weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRef(batch_sizes), dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRef(batch_sizes), dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!)
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _cudnn_rnn_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) { + return at::_ops::_cudnn_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRef(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, c10::fromIntArrayRef(batch_sizes), dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!)
out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> () + inline void _cudnn_rnn_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) { + return at::_ops::_cudnn_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3); + } + + // aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_init_dropout_state_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, double dropout, bool train, int64_t dropout_seed) { + return at::_ops::_cudnn_init_dropout_state_out::redispatch(dispatchKeySet, dropout, train, dropout_seed, out); + } + + // aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _cudnn_init_dropout_state_outf(c10::DispatchKeySet dispatchKeySet, double dropout, bool train, int64_t dropout_seed, at::Tensor & out) { + return at::_ops::_cudnn_init_dropout_state_out::redispatch(dispatchKeySet, dropout, train, dropout_seed, out); + } + + // aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!)
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::_fused_dropout_out::redispatch(dispatchKeySet, self, p, generator, out0, out1); + } + + // aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_dropout_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_fused_dropout_out::redispatch(dispatchKeySet, self, p, generator, out0, out1); + } + + // aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _masked_scale_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, double scale) { + return at::_ops::_masked_scale_out::redispatch(dispatchKeySet, self, mask, scale, out); + } + + // aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _masked_scale_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, double scale, at::Tensor & out) { + return at::_ops::_masked_scale_out::redispatch(dispatchKeySet, self, mask, scale, out); + } + + // aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double p, c10::optional<bool> train) { + return at::_ops::native_dropout_out::redispatch(dispatchKeySet, input, p, train, out0, out1); + } + + // aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> native_dropout_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double p, c10::optional<bool> train, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::native_dropout_out::redispatch(dispatchKeySet, input, p, train, out0, out1); + } + + // aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_dropout_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & mask, double scale) { + return at::_ops::native_dropout_backward_out::redispatch(dispatchKeySet, grad_output, mask, scale, out); + } + + // aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_dropout_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & mask, double scale, at::Tensor & out) { + return at::_ops::native_dropout_backward_out::redispatch(dispatchKeySet, grad_output, mask, scale, out); + } + + // aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _conj_physical_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_conj_physical_out::redispatch(dispatchKeySet, self, out); + }
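+
+ // Editorial note (illustrative sketch, not generated code): the aten schema
+ // types in the comments map mechanically onto C++ -- `float` becomes double,
+ // `int` becomes int64_t, `Tensor?` becomes const c10::optional<at::Tensor> &,
+ // `Generator?` becomes c10::optional<at::Generator>, and `bool?` becomes
+ // c10::optional<bool>. For native_dropout.out above (ks, input, out0, out1
+ // assumed to be in scope):
+ //
+ //   ::std::tuple<at::Tensor &,at::Tensor &> res =
+ //       at::redispatch::native_dropout_outf(ks, input, /*p=*/0.5, /*train=*/true, out0, out1);
+
+ // aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)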
+ inline at::Tensor & _conj_physical_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_conj_physical_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::_add_relu_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _add_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::_add_relu_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & add_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & add_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::add_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & affine_grid_generator_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & theta, at::IntArrayRef size, bool align_corners) { + return at::_ops::affine_grid_generator_out::redispatch(dispatchKeySet, theta, size, align_corners, out); + } + + // aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & affine_grid_generator_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, at::IntArrayRef size, bool align_corners, at::Tensor & out) { + return at::_ops::affine_grid_generator_out::redispatch(dispatchKeySet, theta, size, align_corners, out); + } + + // aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bartlett_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) { + return at::_ops::bartlett_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bartlett_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { + return at::_ops::bartlett_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bartlett_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::bartlett_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & bartlett_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::bartlett_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) { + return at::_ops::quantized_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point, out); + } + + // aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantized_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point, at::Tensor & out) { + return at::_ops::quantized_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, mean, var, eps, output_scale, output_zero_point, out); + } + + // aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::bernoulli_Tensor_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::bernoulli_Tensor_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor + inline at::Tensor bernoulli(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::bernoulli_Tensor::redispatch(dispatchKeySet, self, p, generator); + } + + // aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double p=0.5, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::bernoulli_float_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bernoulli_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::bernoulli_float_out::redispatch(dispatchKeySet, self, p, generator, out); + } + + // aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!)
out) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_with_logits_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & pos_weight={}, int64_t reduction=at::Reduction::Mean) { + return at::_ops::binary_cross_entropy_with_logits_out::redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction, out); + } + + // aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binary_cross_entropy_with_logits_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out) { + return at::_ops::binary_cross_entropy_with_logits_out::redispatch(dispatchKeySet, self, target, weight, pos_weight, reduction, out); + } + + // aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bincount_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & weights={}, int64_t minlength=0) { + return at::_ops::bincount_out::redispatch(dispatchKeySet, self, weights, minlength, out); + } + + // aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bincount_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength, at::Tensor & out) { + return at::_ops::bincount_out::redispatch(dispatchKeySet, self, weights, minlength, out); + } + + // aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) { + return at::_ops::blackman_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) { + return at::_ops::blackman_window_out::redispatch(dispatchKeySet, window_length, out); + } + + // aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) { + return at::_ops::blackman_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & blackman_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) { + return at::_ops::blackman_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out); + } + + // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & block_diag_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList tensors) { + return at::_ops::block_diag_out::redispatch(dispatchKeySet, tensors, out); + }
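+
+ // Editorial sketch (names assumed): optional tensor arguments default to an
+ // empty c10::optional<at::Tensor> (`{}`), so the `_out` overloads can be
+ // called without them, while the `_outf` overloads carry no defaults and take
+ // every argument in schema order:
+ //
+ //   at::redispatch::bincount_out(ks, out, self);          // weights={}, minlength=0
+ //   at::redispatch::bincount_outf(ks, self, {}, 0, out);  // same op, explicit schema order
+
+ // aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)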
+ inline at::Tensor & block_diag_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out) { + return at::_ops::block_diag_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::constant_pad_nd.out(Tensor self, int[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & constant_pad_nd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value=0) { + return at::_ops::constant_pad_nd_out::redispatch(dispatchKeySet, self, pad, value, out); + } + + // aten::constant_pad_nd.out(Tensor self, int[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & constant_pad_nd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value, at::Tensor & out) { + return at::_ops::constant_pad_nd_out::redispatch(dispatchKeySet, self, pad, value, out); + } + + // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { + return at::_ops::convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRef(*bias_sizes)) : c10::nullopt, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]?
bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes.has_value() ? c10::make_optional(c10::fromIntArrayRef(*bias_sizes)) : c10::nullopt, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::convolution_backward_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + }
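+
+ // Editorial sketch (names assumed): operators whose schema mentions SymInt are
+ // emitted as two wrapper families -- the plain one takes concrete
+ // at::IntArrayRef / int64_t values and converts with c10::fromIntArrayRef
+ // where the underlying op expects symbolic ints, while the `_symint` one
+ // passes c10::SymIntArrayRef through untouched for symbolic tracing:
+ //
+ //   ::std::array<bool,3> mask = {true, true, false};
+ //   at::redispatch::convolution_backward_symint_out(
+ //       ks, out0, out1, out2, grad_output, input, weight,
+ //       c10::nullopt /*bias_sizes*/, stride, padding, dilation,
+ //       /*transposed=*/false, output_padding, /*groups=*/1, mask);
+
+ // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)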
+ inline at::Tensor & convolution_overrideable_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups) { + return at::_ops::convolution_overrideable_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & convolution_overrideable_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, at::Tensor & out) { + return at::_ops::convolution_overrideable_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, out); + } + + // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask) { + return at::_ops::convolution_backward_overrideable_out::redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> convolution_backward_overrideable_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::convolution_backward_overrideable_out::redispatch(dispatchKeySet, grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask, out0, out1, out2); + } + + // aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) { + return at::_ops::_convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out); + } + + // aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, at::Tensor & out) { + return at::_ops::_convolution_out::redispatch(dispatchKeySet, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32, out); + } + + // aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conv_tbc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad=0) { + return at::_ops::conv_tbc_out::redispatch(dispatchKeySet, self, weight, bias, pad, out); + } + + // aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & conv_tbc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad, at::Tensor & out) { + return at::_ops::conv_tbc_out::redispatch(dispatchKeySet, self, weight, bias, pad, out); + } + + // aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { + return at::_ops::copy_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _copy_from_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false) { + return at::_ops::_copy_from_out::redispatch(dispatchKeySet, self, dst, non_blocking, out); + }
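+
+ // Editorial sketch (illustrative, not generated code): unlike the at:: entry
+ // points, these wrappers take the DispatchKeySet explicitly instead of
+ // recomputing it from the arguments, which lets a kernel re-enter the
+ // dispatcher for the same op. A minimal logging wrapper over copy.out might
+ // look like this (all names assumed); a real kernel would normally mask its
+ // own dispatch key out of `ks` before redispatching:
+ //
+ //   at::Tensor & logged_copy_out(c10::DispatchKeySet ks, const at::Tensor & self,
+ //                                const at::Tensor & src, bool non_blocking, at::Tensor & out) {
+ //     TORCH_WARN("copy.out forwarded");
+ //     return at::redispatch::copy_outf(ks, self, src, non_blocking, out);
+ //   }
+
+ // aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)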
+    inline at::Tensor & _copy_from_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out) {
+        return at::_ops::_copy_from_out::redispatch(dispatchKeySet, self, dst, non_blocking, out);
+    }
+
+    // aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _copy_from_and_resize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & dst) {
+        return at::_ops::_copy_from_and_resize_out::redispatch(dispatchKeySet, self, dst, out);
+    }
+
+    // aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _copy_from_and_resize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & dst, at::Tensor & out) {
+        return at::_ops::_copy_from_and_resize_out::redispatch(dispatchKeySet, self, dst, out);
+    }
+
+    // aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & count_nonzero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
+        return at::_ops::count_nonzero_dim_IntList_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & count_nonzero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
+        return at::_ops::count_nonzero_dim_IntList_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & count_nonzero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt) {
+        return at::_ops::count_nonzero_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & count_nonzero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<int64_t> dim, at::Tensor & out) {
+        return at::_ops::count_nonzero_out::redispatch(dispatchKeySet, self, dim, out);
+    }
+
+    // aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_affine_grid_generator_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
+        return at::_ops::cudnn_affine_grid_generator_out::redispatch(dispatchKeySet, theta, N, C, H, W, out);
+    }
+
+    // aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_affine_grid_generator_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
+        return at::_ops::cudnn_affine_grid_generator_out::redispatch(dispatchKeySet, theta, N, C, H, W, out);
+    }
+
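+    // [Editor's note - illustrative comment, not generated code] Every wrapper in
+    // this header threads an explicit c10::DispatchKeySet through to
+    // at::_ops::<op>::redispatch, which re-enters the dispatcher with that key
+    // set instead of recomputing it from the arguments. This lets a kernel
+    // forward to whatever sits "below" its own dispatch key. A sketch, where the
+    // choice of CPU as the current key is an assumption:
+    //
+    //   at::Tensor & my_count_nonzero_out(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
+    //     c10::DispatchKeySet after(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::CPU);
+    //     return at::redispatch::count_nonzero_out(after, out, self, dim);
+    //   }
+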
+    // aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_affine_grid_generator_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
+        return at::_ops::cudnn_affine_grid_generator_backward_out::redispatch(dispatchKeySet, grad, N, C, H, W, out);
+    }
+
+    // aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_affine_grid_generator_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
+        return at::_ops::cudnn_affine_grid_generator_backward_out::redispatch(dispatchKeySet, grad, N, C, H, W, out);
+    }
+
+    // aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
+        return at::_ops::cudnn_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
+    }
+
+    // aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
+        return at::_ops::cudnn_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
+    }
+
+    // aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
+        return at::_ops::cudnn_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
+    }
+
+    // aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+        return at::_ops::cudnn_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace, out0, out1, out2);
+    }
+
+    // aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
+        return at::_ops::cudnn_convolution_out::redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
+    }
+
+    // aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
+        return at::_ops::cudnn_convolution_out::redispatch(dispatchKeySet, self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
+    }
+
+    // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32) {
+        return at::_ops::cudnn_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
+    }
+
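+    // [Editor's note - illustrative comment, not generated code] Multi-output
+    // ops such as cudnn_batch_norm.out return a ::std::tuple of the same Tensor
+    // references that were passed in as out0..outN, so the result can be
+    // structured-bound directly. Sketch; variable names and values are
+    // assumptions:
+    //
+    //   auto [y, save_mean, save_var, reserve] =
+    //       at::redispatch::cudnn_batch_norm_out(ks, out0, out1, out2, out3,
+    //           input, weight, bias, running_mean, running_var,
+    //           /*training=*/true, /*exponential_average_factor=*/0.1,
+    //           /*epsilon=*/1e-5);
+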
+    // aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out) {
+        return at::_ops::cudnn_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32, out);
+    }
+
+    // aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _mps_convolution_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+        return at::_ops::_mps_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, out);
+    }
+
+    // aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _mps_convolution_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+        return at::_ops::_mps_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, padding, output_padding, stride, dilation, groups, out);
+    }
+
+    // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask) {
+        return at::_ops::mps_convolution_transpose_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
+    }
+
+    // aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> mps_convolution_transpose_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::mps_convolution_transpose_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask, out0, out1);
+    }
+
+    // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
+        return at::_ops::cudnn_convolution_relu_out::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups, out);
+    }
+
+    // aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+        return at::_ops::cudnn_convolution_relu_out::redispatch(dispatchKeySet, self, weight, bias, stride, padding, dilation, groups, out);
+    }
+
+    // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_add_relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups) {
+        return at::_ops::cudnn_convolution_add_relu_out::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
+    }
+
+    // aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_convolution_add_relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+        return at::_ops::cudnn_convolution_add_relu_out::redispatch(dispatchKeySet, self, weight, z, alpha, bias, stride, padding, dilation, groups, out);
+    }
+
+    // aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_grid_sampler_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & grid) {
+        return at::_ops::cudnn_grid_sampler_out::redispatch(dispatchKeySet, self, grid, out);
+    }
+
+    // aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cudnn_grid_sampler_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out) {
+        return at::_ops::cudnn_grid_sampler_out::redispatch(dispatchKeySet, self, grid, out);
+    }
+
+    // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
+        return at::_ops::cudnn_grid_sampler_backward_out::redispatch(dispatchKeySet, self, grid, grad_output, out0, out1);
+    }
+
+    // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::cudnn_grid_sampler_backward_out::redispatch(dispatchKeySet, self, grid, grad_output, out0, out1);
+    }
+
+    // aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false) {
+        return at::_ops::_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+    }
+
+    // aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _ctc_loss_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_ctc_loss_out::redispatch(dispatchKeySet, log_probs, targets, input_lengths, target_lengths, blank, zero_infinity, out0, out1);
+    }
+
+    // aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _ctc_loss_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false) {
+        return at::_ops::_ctc_loss_backward_out::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
+    }
+
+    // aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _ctc_loss_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity, at::Tensor & out) {
+        return at::_ops::_ctc_loss_backward_out::redispatch(dispatchKeySet, grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity, out);
+    }
+
+    // aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diag_embed_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) {
+        return at::_ops::diag_embed_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
+    }
+
+    // aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diag_embed_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+        return at::_ops::diag_embed_out::redispatch(dispatchKeySet, self, offset, dim1, dim2, out);
+    }
+
+    // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diagonal_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+        return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), offset, dim1, dim2, out);
+    }
+
+    // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diagonal_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+        return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), offset, dim1, dim2, out);
+    }
+
+    // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diagonal_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
+        return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2, out);
+    }
+
+    // aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & diagonal_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+        return at::_ops::diagonal_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, offset, dim1, dim2, out);
+    }
+
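+    // [Editor's note - illustrative comment, not generated code] Ops whose
+    // schema takes SymInt[] get two C++ spellings: the plain overload accepts
+    // at::IntArrayRef and widens it via c10::fromIntArrayRef(...), while the
+    // *_symint variant passes a c10::SymIntArrayRef straight through for
+    // symbolic shapes. Callers with concrete sizes can stay on the IntArrayRef
+    // path. Sketch; the sizes and key set are assumptions:
+    //
+    //   at::redispatch::diagonal_backward_out(ks, out, grad_output,
+    //       /*input_sizes=*/{4, 5}, /*offset=*/0, /*dim1=*/0, /*dim2=*/1);
+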
+    // aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::div_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+        return at::_ops::div_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & div_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
+        return at::_ops::div_Scalar_mode_out::redispatch(dispatchKeySet, self, other, rounding_mode, out);
+    }
+
+    // aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & div_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode, at::Tensor & out) {
+        return at::_ops::div_Scalar_mode_out::redispatch(dispatchKeySet, self, other, rounding_mode, out);
+    }
+
+    // aten::embedding.out(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false) {
+        return at::_ops::embedding_out::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
+    }
+
+    // aten::embedding.out(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, at::Tensor & out) {
+        return at::_ops::embedding_out::redispatch(dispatchKeySet, weight, indices, padding_idx, scale_grad_by_freq, sparse, out);
+    }
+
+    // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_dense_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
+        return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
+    }
+
+    // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_dense_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
+        return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
+    }
+
+    // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_dense_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
+        return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
+    }
+
+    // aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_dense_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, int64_t padding_idx, bool scale_grad_by_freq, at::Tensor & out) {
+        return at::_ops::embedding_dense_backward_out::redispatch(dispatchKeySet, grad_output, indices, num_weights, padding_idx, scale_grad_by_freq, out);
+    }
+
+    // aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_renorm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
+        return at::_ops::embedding_renorm_out::redispatch(dispatchKeySet, self, indices, max_norm, norm_type, out);
+    }
+
+    // aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & embedding_renorm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type, at::Tensor & out) {
+        return at::_ops::embedding_renorm_out::redispatch(dispatchKeySet, self, indices, max_norm, norm_type, out);
+    }
+
+    // aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor
+    inline at::Tensor embedding_renorm(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
+        return at::_ops::embedding_renorm::redispatch(dispatchKeySet, self, indices, max_norm, norm_type);
+    }
+
+    // aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) {
+        return at::_ops::_embedding_bag_forward_only_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
+    }
+
+    // aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_forward_only_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
+        return at::_ops::_embedding_bag_forward_only_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
+    }
+
+    // aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const c10::optional<at::Tensor> & per_sample_weights={}, bool include_last_offset=false, int64_t padding_idx=-1) {
+        return at::_ops::_embedding_bag_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
+    }
+
+    // aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _embedding_bag_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
+        return at::_ops::_embedding_bag_out::redispatch(dispatchKeySet, weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx, out0, out1, out2, out3);
+    }
+
+    // aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _embedding_bag_dense_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+        return at::_ops::_embedding_bag_dense_backward_out::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
+    }
+
+    // aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _embedding_bag_dense_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out) {
+        return at::_ops::_embedding_bag_dense_backward_out::redispatch(dispatchKeySet, grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx, out);
+    }
+
+    // aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _embedding_bag_per_sample_weights_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1) {
+        return at::_ops::_embedding_bag_per_sample_weights_backward_out::redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
+    }
+
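+    // [Editor's note - illustrative comment, not generated code] Schema
+    // arguments typed `Tensor?` surface here as const c10::optional<at::Tensor> &,
+    // with `{}` as the "None" default (see per_sample_weights above). Relying on
+    // the defaults is equivalent to passing None from Python. Sketch; the
+    // tensors and key set are assumptions:
+    //
+    //   at::redispatch::_embedding_bag_out(ks, out0, out1, out2, out3,
+    //       weight, indices, offsets);  // per_sample_weights defaults to {}
+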
+    // aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _embedding_bag_per_sample_weights_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx, at::Tensor & out) {
+        return at::_ops::_embedding_bag_per_sample_weights_backward_out::redispatch(dispatchKeySet, grad, weight, indices, offsets, offset2bag, mode, padding_idx, out);
+    }
+
+    // aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::empty_names_out::redispatch(dispatchKeySet, size, names, memory_format, out);
+    }
+
+    // aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+        return at::_ops::empty_names_out::redispatch(dispatchKeySet, size, names, memory_format, out);
+    }
+
+    // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+        return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+        return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+        return at::_ops::new_empty_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_strided_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
+        return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_strided_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
+        return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_strided_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+        return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, size, stride, out);
+    }
+
+    // aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_empty_strided_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
+        return at::_ops::new_empty_strided_out::redispatch(dispatchKeySet, self, size, stride, out);
+    }
+
+    // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_full_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value) {
+        return at::_ops::new_full_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), fill_value, out);
+    }
+
+    // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_full_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+        return at::_ops::new_full_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), fill_value, out);
+    }
+
+    // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_full_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+        return at::_ops::new_full_out::redispatch(dispatchKeySet, self, size, fill_value, out);
+    }
+
+    // aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_full_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+        return at::_ops::new_full_out::redispatch(dispatchKeySet, self, size, fill_value, out);
+    }
+
+    // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_zeros_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_zeros_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+        return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_zeros_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+        return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_zeros_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+        return at::_ops::new_zeros_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_ones_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+        return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_ones_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+        return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out);
+    }
+
+    // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_ones_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+        return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & new_ones_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+        return at::_ops::new_ones_out::redispatch(dispatchKeySet, self, size, out);
+    }
+
+    // aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _empty_affine_quantized_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+        return at::_ops::_empty_affine_quantized_out::redispatch(dispatchKeySet, size, scale, zero_point, memory_format, out);
+    }
+
+    // aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _empty_affine_quantized_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+        return at::_ops::_empty_affine_quantized_out::redispatch(dispatchKeySet, size, scale, zero_point, memory_format, out);
+    }
+
+    // aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _empty_per_channel_affine_quantized_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+        return at::_ops::_empty_per_channel_affine_quantized_out::redispatch(dispatchKeySet, size, scales, zero_points, axis, memory_format, out);
+    }
+
+    // aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _empty_per_channel_affine_quantized_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+        return at::_ops::_empty_per_channel_affine_quantized_out::redispatch(dispatchKeySet, size, scales, zero_points, axis, memory_format, out);
+    }
+
+    // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & resize_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::resize_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), memory_format, out);
+    }
+
+    // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & resize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
+        return at::_ops::resize_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), memory_format, out);
+    }
+
+    // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & resize_symint_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::resize_out::redispatch(dispatchKeySet, self, size, memory_format, out);
+    }
+
+    // aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & resize_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) {
+        return at::_ops::resize_out::redispatch(dispatchKeySet, self, size, memory_format, out);
+    }
+
+    // aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor resize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::resize::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), memory_format);
+    }
+
+    // aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor
+    inline at::Tensor resize_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::resize::redispatch(dispatchKeySet, self, size, memory_format);
+    }
+
+    // aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & _resize_output_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+        return at::_ops::_resize_output_out::redispatch(dispatchKeySet, self, size, device, out);
+    }
+
+    // aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)
+    inline const at::Tensor & _resize_output_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device, const at::Tensor & out) {
+        return at::_ops::_resize_output_out::redispatch(dispatchKeySet, self, size, device, out);
+    }
+
+    // aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor
+    inline at::Tensor _resize_output(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Device device) {
+        return at::_ops::_resize_output::redispatch(dispatchKeySet, self, size, device);
+    }
+
+    // aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_quantized_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::empty_quantized_out::redispatch(dispatchKeySet, size, qtensor, memory_format, out);
+    }
+
+    // aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_quantized_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+        return at::_ops::empty_quantized_out::redispatch(dispatchKeySet, size, qtensor, memory_format, out);
+    }
+
+    // aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::empty_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+    }
+
+    // aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+        return at::_ops::empty_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+    }
+
+    // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_strided_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, at::IntArrayRef stride) {
+        return at::_ops::empty_strided_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_strided_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
+        return at::_ops::empty_strided_out::redispatch(dispatchKeySet, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_strided_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
+        return at::_ops::empty_strided_out::redispatch(dispatchKeySet, size, stride, out);
+    }
+
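+    // [Editor's note - illustrative comment, not generated code] Unlike the
+    // other wrappers, the resize-family ops above take and return
+    // `const at::Tensor &` for the out argument, matching the native convention
+    // for resize-like ops: at::Tensor is a handle, so the underlying storage can
+    // still be resized through a const reference. Usage sketch; the tensors and
+    // key set are assumptions:
+    //
+    //   const at::Tensor & r = at::redispatch::resize_out(ks, out, self, {2, 3});
+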
+    // aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & empty_strided_symint_outf(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
+        return at::_ops::empty_strided_out::redispatch(dispatchKeySet, size, stride, out);
+    }
+
+    // aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & value) {
+        return at::_ops::fill_Scalar_out::redispatch(dispatchKeySet, self, value, out);
+    }
+
+    // aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & value, at::Tensor & out) {
+        return at::_ops::fill_Scalar_out::redispatch(dispatchKeySet, self, value, out);
+    }
+
+    // aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & value) {
+        return at::_ops::fill_Tensor_out::redispatch(dispatchKeySet, self, value, out);
+    }
+
+    // aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & value, at::Tensor & out) {
+        return at::_ops::fill_Tensor_out::redispatch(dispatchKeySet, self, value, out);
+    }
+
+    // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names) {
+        return at::_ops::full_names_out::redispatch(dispatchKeySet, size, fill_value, names, out);
+    }
+
+    // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
+        return at::_ops::full_names_out::redispatch(dispatchKeySet, size, fill_value, names, out);
+    }
+
+    // aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::full_like_out::redispatch(dispatchKeySet, self, fill_value, memory_format, out);
+    }
+
+    // aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & full_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+        return at::_ops::full_like_out::redispatch(dispatchKeySet, self, fill_value, memory_format, out);
+    }
+
+    // aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & from_file_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0) {
+        return at::_ops::from_file_out::redispatch(dispatchKeySet, filename, shared, size, out);
+    }
+
+    // aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & from_file_outf(c10::DispatchKeySet dispatchKeySet, c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) {
+        return at::_ops::from_file_out::redispatch(dispatchKeySet, filename, shared, size, out);
+    }
+
+    // aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & grid_sampler_2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return at::_ops::grid_sampler_2d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
+    }
+
+    // aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & grid_sampler_2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
+        return at::_ops::grid_sampler_2d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
+    }
+
+    // aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
+        return at::_ops::grid_sampler_2d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
+    }
+
+    // aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::grid_sampler_2d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
+    }
+
+    // aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _grid_sampler_2d_cpu_fallback_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+        return at::_ops::_grid_sampler_2d_cpu_fallback_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
+    }
+
+    // aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _grid_sampler_2d_cpu_fallback_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
+ return at::_ops::_grid_sampler_2d_cpu_fallback_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
+ }
+
+ // aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & grid_sampler_3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
+ return at::_ops::grid_sampler_3d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
+ }
+
+ // aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & grid_sampler_3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out) {
+ return at::_ops::grid_sampler_3d_out::redispatch(dispatchKeySet, input, grid, interpolation_mode, padding_mode, align_corners, out);
+ }
+
+ // aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
+ return at::_ops::grid_sampler_3d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
+ }
+
+ // aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> grid_sampler_3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::grid_sampler_3d_backward_out::redispatch(dispatchKeySet, grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask, out0, out1);
+ }
+
+ // aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hann_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) {
+ return at::_ops::hann_window_out::redispatch(dispatchKeySet, window_length, out);
+ }
+
+ // aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hann_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
+ return at::_ops::hann_window_out::redispatch(dispatchKeySet, window_length, out);
+ }
+
+ // aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hann_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) {
+ return at::_ops::hann_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out);
+ }
+
+ // aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hann_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
+ return at::_ops::hann_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out);
+ }
+
+ // aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) {
+ return at::_ops::hamming_window_out::redispatch(dispatchKeySet, window_length, out);
+ }
+
+ // aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
+ return at::_ops::hamming_window_out::redispatch(dispatchKeySet, window_length, out);
+ }
+
+ // aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) {
+ return at::_ops::hamming_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out);
+ }
+
+ // aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
+ return at::_ops::hamming_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out);
+ }
+
+ // aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic, double alpha) {
+ return at::_ops::hamming_window_periodic_alpha_out::redispatch(dispatchKeySet, window_length, periodic, alpha, out);
+ }
+
+ // aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, at::Tensor & out) {
+ return at::_ops::hamming_window_periodic_alpha_out::redispatch(dispatchKeySet, window_length, periodic, alpha, out);
+ }
+
+ // aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hamming_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic, double alpha, double beta) {
+ return at::_ops::hamming_window_periodic_alpha_beta_out::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, out);
+ }
+
+ // aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)
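+ // [Editorial note, not part of the generated header] The repeated C++ name
+ // (e.g. hamming_window_out) is overloaded purely on arity; each overload
+ // redispatches to a distinct schema overload (hamming_window.out,
+ // .periodic_out, .periodic_alpha_out, .periodic_alpha_beta_out). A hedged
+ // sketch of the periodic_alpha form, with `ks` a caller-supplied key set:
+ //   at::Tensor w = at::empty({0});
+ //   at::redispatch::hamming_window_out(ks, w, /*window_length=*/128,
+ //                                      /*periodic=*/true, /*alpha=*/0.54);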
+ inline at::Tensor & hamming_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double alpha, double beta, at::Tensor & out) {
+ return at::_ops::hamming_window_periodic_alpha_beta_out::redispatch(dispatchKeySet, window_length, periodic, alpha, beta, out);
+ }
+
+ // aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & kaiser_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length) {
+ return at::_ops::kaiser_window_out::redispatch(dispatchKeySet, window_length, out);
+ }
+
+ // aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & kaiser_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out) {
+ return at::_ops::kaiser_window_out::redispatch(dispatchKeySet, window_length, out);
+ }
+
+ // aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & kaiser_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic) {
+ return at::_ops::kaiser_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out);
+ }
+
+ // aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & kaiser_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out) {
+ return at::_ops::kaiser_window_periodic_out::redispatch(dispatchKeySet, window_length, periodic, out);
+ }
+
+ // aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & kaiser_window_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t window_length, bool periodic, double beta) {
+ return at::_ops::kaiser_window_beta_out::redispatch(dispatchKeySet, window_length, periodic, beta, out);
+ }
+
+ // aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & kaiser_window_outf(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, double beta, at::Tensor & out) {
+ return at::_ops::kaiser_window_beta_out::redispatch(dispatchKeySet, window_length, periodic, beta, out);
+ }
+
+ // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps) {
+ return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
+ }
+
+ // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, int64_t N, int64_t C, int64_t HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
+ }
+
+ // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
+ return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
+ }
+
+ // aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_group_norm_out::redispatch(dispatchKeySet, input, weight, bias, N, C, HxW, group, eps, out0, out1, out2);
+ }
+
+ // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask) {
+ return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
+ }
+
+ // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
+ }
+
+ // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
+ return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
+ }
+
+ // aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_group_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask, out0, out1, out2);
+ }
+
+ // aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & index_put_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
+ return at::_ops::index_put_out::redispatch(dispatchKeySet, self, indices, values, accumulate, out);
+ }
+
+ // aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & index_put_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out) {
+ return at::_ops::index_put_out::redispatch(dispatchKeySet, self, indices, values, accumulate, out);
+ }
+
+ // aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _index_put_impl_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+ return at::_ops::_index_put_impl_out::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe, out);
+ }
+
+ // aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)
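+ // [Editorial note, not part of the generated header] The *_symint_* variants
+ // above take c10::SymInt / c10::SymIntArrayRef so symbolic shapes survive
+ // tracing; the int64_t overloads convert implicitly, and both land on the same
+ // at::_ops entry. A minimal sketch, assuming concrete sizes and a caller-held
+ // DispatchKeySet `ks`:
+ //   c10::SymInt N{8}, C{32}, HxW{1024};
+ //   at::redispatch::native_group_norm_symint_out(
+ //       ks, out0, out1, out2, input, weight, bias, N, C, HxW,
+ //       /*group=*/4, /*eps=*/1e-5);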
+ inline at::Tensor & _index_put_impl_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe, at::Tensor & out) {
+ return at::_ops::_index_put_impl_out::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe, out);
+ }
+
+ // aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor
+ inline at::Tensor _index_put_impl(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false) {
+ return at::_ops::_index_put_impl::redispatch(dispatchKeySet, self, indices, values, accumulate, unsafe);
+ }
+
+ // aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & isnan_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::isnan_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & isnan_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::isnan_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
+ return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, c10::fromIntArrayRef(normalized_shape), weight, bias, eps, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, c10::fromIntArrayRef(normalized_shape), weight, bias, eps, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
+ return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_layer_norm_out::redispatch(dispatchKeySet, input, normalized_shape, weight, bias, eps, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+ return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, c10::fromIntArrayRef(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, c10::fromIntArrayRef(normalized_shape), mean, rstd, weight, bias, output_mask, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
+ return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
+ }
+
+ // aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_layer_norm_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_layer_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask, out0, out1, out2);
+ }
+
+ // aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
+ return at::_ops::linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
+ }
+
+ // aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linear_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
+ }
+
+ // aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_linear_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}) {
+ return at::_ops::mkldnn_linear_out::redispatch(dispatchKeySet, self, weight, bias, out);
+ }
+
+ // aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_linear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::Tensor & out) {
+ return at::_ops::mkldnn_linear_out::redispatch(dispatchKeySet, self, weight, bias, out);
+ }
+
+ // aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_linear_backward_input_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
+ return at::_ops::mkldnn_linear_backward_input_out::redispatch(dispatchKeySet, input_size, grad_output, weight, out);
+ }
+
+ // aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
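+ // [Editorial note, not part of the generated header] For schemas declared with
+ // SymInt[] (native_layer_norm and its backward above), the IntArrayRef
+ // overloads bridge to the symbolic signature via c10::fromIntArrayRef(...)
+ // before redispatching, so both entry points share one underlying op record.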
+ inline at::Tensor & mkldnn_linear_backward_input_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight, at::Tensor & out) {
+ return at::_ops::mkldnn_linear_backward_input_out::redispatch(dispatchKeySet, input_size, grad_output, weight, out);
+ }
+
+ // aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
+ return at::_ops::mkldnn_linear_backward_weights_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_defined, out0, out1);
+ }
+
+ // aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> mkldnn_linear_backward_weights_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::mkldnn_linear_backward_weights_out::redispatch(dispatchKeySet, grad_output, input, weight, bias_defined, out0, out1);
+ }
+
+ // aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
+ return at::_ops::mkldnn_linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
+ }
+
+ // aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::mkldnn_linear_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, output_mask, out0, out1, out2);
+ }
+
+ // aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
+ return at::_ops::matmul_backward_out::redispatch(dispatchKeySet, grad, self, other, mask, out0, out1);
+ }
+
+ // aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> matmul_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::matmul_backward_out::redispatch(dispatchKeySet, grad, self, other, mask, out0, out1);
+ }
+
+ // aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self) {
+ return at::_ops::_aminmax_out::redispatch(dispatchKeySet, self, out0, out1);
+ }
+
+ // aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::_aminmax_out::redispatch(dispatchKeySet, self, out0, out1);
+ }
+
+ // aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, int64_t dim, bool keepdim=false) {
+ return at::_ops::_aminmax_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out0, out1);
+ }
+
+ // aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> _aminmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::_aminmax_dim_out::redispatch(dispatchKeySet, self, dim, keepdim, out0, out1);
+ }
+
+ // aten::_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mps_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::_mps_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mps_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::_mps_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mps_max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mps_max_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::mps_max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mps_max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mps_max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::mps_max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::mkldnn_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::mkldnn_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::mkldnn_max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::mkldnn_max_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::mkldnn_max_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::mkldnn_max_pool3d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::mkldnn_max_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_max_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::mkldnn_max_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
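+ // [Editorial note, not part of the generated header] For the max-pool family
+ // above, an empty stride (stride=[]) follows the usual ATen max-pool
+ // convention of defaulting to kernel_size, while the scalar defaults
+ // padding=0 and dilation=1 broadcast across all spatial dims. Hedged sketch:
+ //   at::redispatch::mkldnn_max_pool2d_out(ks, out, self,
+ //                                         /*kernel_size=*/{2, 2});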
+ inline at::Tensor & quantized_max_pool1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::quantized_max_pool1d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & quantized_max_pool1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::quantized_max_pool1d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & quantized_max_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false) {
+ return at::_ops::quantized_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & quantized_max_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out) {
+ return at::_ops::quantized_max_pool2d_out::redispatch(dispatchKeySet, self, kernel_size, stride, padding, dilation, ceil_mode, out);
+ }
+
+ // aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & median_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::median_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & median_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::median_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & nanmedian_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::nanmedian_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & nanmedian_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::nanmedian_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mps_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+ return at::_ops::_mps_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
+ }
+
+ // aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mps_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+ return at::_ops::_mps_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
+ }
+
+ // aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask) {
+ return at::_ops::mps_convolution_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
+ }
+
+ // aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mps_convolution_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::mps_convolution_backward_out::redispatch(dispatchKeySet, self, grad_output, weight, padding, stride, dilation, groups, output_mask, out0, out1, out2);
+ }
+
+ // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+ return at::_ops::mkldnn_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
+ }
+
+ // aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+ return at::_ops::mkldnn_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, out);
+ }
+
+ // aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
+ return at::_ops::miopen_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
+ }
+
+ // aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::miopen_batch_norm_out::redispatch(dispatchKeySet, input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2);
+ }
+
+ // aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
+ return at::_ops::miopen_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
+ }
+
+ // aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::miopen_batch_norm_backward_out::redispatch(dispatchKeySet, input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, out0, out1, out2);
+ }
+
+ // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & miopen_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+ return at::_ops::miopen_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
+ }
+
+ // aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & miopen_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
+ return at::_ops::miopen_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
+ }
+
+ // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & miopen_convolution_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+ return at::_ops::miopen_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
+ }
+
+ // aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & miopen_convolution_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
+ return at::_ops::miopen_convolution_transpose_out::redispatch(dispatchKeySet, self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic, out);
+ }
+
+ // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & miopen_depthwise_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
+ return at::_ops::miopen_depthwise_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
+ }
+
+ // aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & miopen_depthwise_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) {
+ return at::_ops::miopen_depthwise_convolution_out::redispatch(dispatchKeySet, self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out);
+ }
+
+ // aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
+ return at::_ops::miopen_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
+ }
+
+ // aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> miopen_rnn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
+ return at::_ops::miopen_rnn_out::redispatch(dispatchKeySet, input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, out0, out1, out2, out3, out4);
+ }
+
+ // aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+ inline void miopen_rnn_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
+ return at::_ops::miopen_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+ }
+
+ // aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()
+ inline void miopen_rnn_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
+ return at::_ops::miopen_rnn_backward_out::redispatch(dispatchKeySet, input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
+ }
+
+ // aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_sparse_matmul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+ return at::_ops::_sparse_sparse_matmul_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_sparse_matmul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+ return at::_ops::_sparse_sparse_matmul_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::_sparse_mask_helper.out(Tensor t, Tensor mask_indices, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_mask_helper_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & t, const at::Tensor & mask_indices) {
+ return at::_ops::_sparse_mask_helper_out::redispatch(dispatchKeySet, t, mask_indices, out);
+ }
+
+ // aten::_sparse_mask_helper.out(Tensor t, Tensor mask_indices, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_mask_helper_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask_indices, at::Tensor & out) {
+ return at::_ops::_sparse_mask_helper_out::redispatch(dispatchKeySet, t, mask_indices, out);
+ }
+
+ // aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mul_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+ return at::_ops::mul_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+ }
+
+ // aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mul_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+ return at::_ops::mul_Scalar_out::redispatch(dispatchKeySet, self, other, out);
+ }
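+
+ // NOTE (illustrative sketch, not part of the generated code): every operator
+ // in this header is exposed as a pair. The `_out` overload takes the output
+ // tensor(s) first, the `_outf` overload takes them last; both forward to the
+ // same at::_ops::<op>::redispatch entry. Assuming these wrappers live in the
+ // usual at::redispatch namespace of ATen's generated RedispatchFunctions.h,
+ // a hypothetical call site could read:
+ //
+ //   c10::DispatchKeySet ks(c10::DispatchKey::CPU);  // example key set
+ //   at::Tensor out = at::empty({2, 3}, x.options());
+ //   at::redispatch::mul_out(ks, out, x, 2.0);       // out-first form
+ //   at::redispatch::mul_outf(ks, x, 2.0, out);      // out-last form, same op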
+
+ // aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, double eps) {
+ return at::_ops::batch_norm_stats_out::redispatch(dispatchKeySet, input, eps, out0, out1);
+ }
+
+ // aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_stats_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, double eps, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::batch_norm_stats_out::redispatch(dispatchKeySet, input, eps, out0, out1);
+ }
+
+ // aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
+ return at::_ops::batch_norm_gather_stats_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
+ }
+
+ // aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::batch_norm_gather_stats_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, count, out0, out1);
+ }
+
+ // aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
+ return at::_ops::batch_norm_gather_stats_with_counts_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
+ }
+
+ // aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::batch_norm_gather_stats_with_counts_out::redispatch(dispatchKeySet, input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
+ }
+
+ // aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
+ return at::_ops::native_batch_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
+ }
+
+ // aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_batch_norm_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+ return at::_ops::native_batch_norm_backward_out::redispatch(dispatchKeySet, grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask, out0, out1, out2);
+ }
+
+ // aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
+ return at::_ops::batch_norm_backward_reduce_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
+ }
+
+ // aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> batch_norm_backward_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
+ return at::_ops::batch_norm_backward_reduce_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g, out0, out1, out2, out3);
+ }
+
+ // aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & batch_norm_backward_elemt_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count) {
+ return at::_ops::batch_norm_backward_elemt_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
+ }
+
+ // aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & batch_norm_backward_elemt_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & mean_dy, const at::Tensor & mean_dy_xmu, const at::Tensor & count, at::Tensor & out) {
+ return at::_ops::batch_norm_backward_elemt_out::redispatch(dispatchKeySet, grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu, count, out);
+ }
+
+ // aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
+ return at::_ops::batch_norm_update_stats_out::redispatch(dispatchKeySet, input, running_mean, running_var, momentum, out0, out1);
+ }
+
+ // aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::batch_norm_update_stats_out::redispatch(dispatchKeySet, input, running_mean, running_var, momentum, out0, out1);
+ }
+
+ // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _nnpack_spatial_convolution_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
+ return at::_ops::_nnpack_spatial_convolution_out::redispatch(dispatchKeySet, input, weight, bias, padding, stride, out);
+ }
+
+ // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _nnpack_spatial_convolution_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
+ return at::_ops::_nnpack_spatial_convolution_out::redispatch(dispatchKeySet, input, weight, bias, padding, stride, out);
+ }
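+
+ // Sketch (assumes the at::redispatch namespace used above): wrappers with
+ // several outputs, such as batch_norm_stats_out, return a tuple of the same
+ // references that were passed in, so results can be used either through the
+ // preallocated tensors or through the returned tuple:
+ //
+ //   at::Tensor mean = at::empty({0}, input.options());
+ //   at::Tensor invstd = at::empty({0}, input.options());
+ //   auto result = at::redispatch::batch_norm_stats_out(ks, mean, invstd, input, 1e-5);
+ //   // std::get<0>(result) aliases `mean`, std::get<1>(result) aliases `invstd`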
+
+ // aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & ones_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
+ return at::_ops::ones_names_out::redispatch(dispatchKeySet, size, names, out);
+ }
+
+ // aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & ones_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::ones_names_out::redispatch(dispatchKeySet, size, names, out);
+ }
+
+ // aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & ones_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+ return at::_ops::ones_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+ }
+
+ // aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & ones_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::ones_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+ }
+
+ // aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _euclidean_dist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2) {
+ return at::_ops::_euclidean_dist_out::redispatch(dispatchKeySet, x1, x2, out);
+ }
+
+ // aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _euclidean_dist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, at::Tensor & out) {
+ return at::_ops::_euclidean_dist_out::redispatch(dispatchKeySet, x1, x2, out);
+ }
+
+ // aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cdist_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
+ return at::_ops::_cdist_forward_out::redispatch(dispatchKeySet, x1, x2, p, compute_mode, out);
+ }
+
+ // aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cdist_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode, at::Tensor & out) {
+ return at::_ops::_cdist_forward_out::redispatch(dispatchKeySet, x1, x2, p, compute_mode, out);
+ }
+
+ // aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cdist_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
+ return at::_ops::_cdist_backward_out::redispatch(dispatchKeySet, grad, x1, x2, p, cdist, out);
+ }
+
+ // aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _cdist_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out) {
+ return at::_ops::_cdist_backward_out::redispatch(dispatchKeySet, grad, x1, x2, p, cdist, out);
+ }
+
+ // aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _pdist_forward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double p=2) {
+ return at::_ops::_pdist_forward_out::redispatch(dispatchKeySet, self, p, out);
+ }
+
+ // aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _pdist_forward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, at::Tensor & out) {
+ return at::_ops::_pdist_forward_out::redispatch(dispatchKeySet, self, p, out);
+ }
+
+ // aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _pdist_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
+ return at::_ops::_pdist_backward_out::redispatch(dispatchKeySet, grad, self, p, pdist, out);
+ }
+
+ // aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _pdist_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out) {
+ return at::_ops::_pdist_backward_out::redispatch(dispatchKeySet, grad, self, p, pdist, out);
+ }
+
+ // aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & pixel_shuffle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t upscale_factor) {
+ return at::_ops::pixel_shuffle_out::redispatch(dispatchKeySet, self, upscale_factor, out);
+ }
+
+ // aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & pixel_shuffle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor, at::Tensor & out) {
+ return at::_ops::pixel_shuffle_out::redispatch(dispatchKeySet, self, upscale_factor, out);
+ }
+
+ // aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & pixel_unshuffle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t downscale_factor) {
+ return at::_ops::pixel_unshuffle_out::redispatch(dispatchKeySet, self, downscale_factor, out);
+ }
+
+ // aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & pixel_unshuffle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t downscale_factor, at::Tensor & out) {
+ return at::_ops::pixel_unshuffle_out::redispatch(dispatchKeySet, self, downscale_factor, out);
+ }
+
+ // aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & channel_shuffle_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t groups) {
+ return at::_ops::channel_shuffle_out::redispatch(dispatchKeySet, self, groups, out);
+ }
+
+ // aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & channel_shuffle_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t groups, at::Tensor & out) {
+ return at::_ops::channel_shuffle_out::redispatch(dispatchKeySet, self, groups, out);
+ }
+
+ // aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _pin_memory_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::Device> device=c10::nullopt) {
+ return at::_ops::_pin_memory_out::redispatch(dispatchKeySet, self, device, out);
+ }
+
+ // aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _pin_memory_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Device> device, at::Tensor & out) {
+ return at::_ops::_pin_memory_out::redispatch(dispatchKeySet, self, device, out);
+ }
+
+ // aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & scalar_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & s) {
+ return at::_ops::scalar_tensor_out::redispatch(dispatchKeySet, s, out);
+ }
+
+ // aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & scalar_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & s, at::Tensor & out) {
+ return at::_ops::scalar_tensor_out::redispatch(dispatchKeySet, s, out);
+ }
+
+ // aten::rand.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
+ return at::_ops::rand_names_out::redispatch(dispatchKeySet, size, names, out);
+ }
+
+ // aten::rand.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::rand_names_out::redispatch(dispatchKeySet, size, names, out);
+ }
+
+ // aten::rand.generator_with_names_out(int[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
+ return at::_ops::rand_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out);
+ }
+
+ // aten::rand.generator_with_names_out(int[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::rand_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out);
+ }
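+
+ // Sketch: schema arguments with a `?` (None-able), such as `names`, `device`,
+ // and `memory_format` above, surface in C++ as c10::optional<...> parameters,
+ // with c10::nullopt standing in for Python's None. A hypothetical call:
+ //
+ //   at::Tensor out = at::empty({2, 2});
+ //   at::redispatch::rand_out(ks, out, {2, 2}, c10::nullopt);  // names=None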
+
+ // aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+ return at::_ops::rand_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+ }
+
+ // aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rand_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::rand_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+ }
+
+ // aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+ return at::_ops::randint_like_out::redispatch(dispatchKeySet, self, high, memory_format, out);
+ }
+
+ // aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_out::redispatch(dispatchKeySet, self, high, memory_format, out);
+ }
+
+ // aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+ return at::_ops::randint_like_low_dtype_out::redispatch(dispatchKeySet, self, low, high, memory_format, out);
+ }
+
+ // aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randint_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t low, int64_t high, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randint_like_low_dtype_out::redispatch(dispatchKeySet, self, low, high, memory_format, out);
+ }
+
+ // aten::randn.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) {
+ return at::_ops::randn_names_out::redispatch(dispatchKeySet, size, names, out);
+ }
+
+ // aten::randn.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_names_out::redispatch(dispatchKeySet, size, names, out);
+ }
+
+ // aten::randn.generator_with_names_out(int[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names) {
+ return at::_ops::randn_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out);
+ }
+
+ // aten::randn.generator_with_names_out(int[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::randn_generator_with_names_out::redispatch(dispatchKeySet, size, generator, names, out);
+ }
+
+ // aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+ return at::_ops::randn_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+ }
+
+ // aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & randn_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+ return at::_ops::randn_like_out::redispatch(dispatchKeySet, self, memory_format, out);
+ }
+
+ // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & repeat_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef repeats) {
+ return at::_ops::repeat_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(repeats), out);
+ }
+
+ // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & repeat_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef repeats, at::Tensor & out) {
+ return at::_ops::repeat_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(repeats), out);
+ }
+
+ // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & repeat_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef repeats) {
+ return at::_ops::repeat_out::redispatch(dispatchKeySet, self, repeats, out);
+ }
+
+ // aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & repeat_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef repeats, at::Tensor & out) {
+ return at::_ops::repeat_out::redispatch(dispatchKeySet, self, repeats, out);
+ }
+
+ // aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & repeat_interleave_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & repeats, c10::optional<int64_t> output_size=c10::nullopt) {
+ return at::_ops::repeat_interleave_Tensor_out::redispatch(dispatchKeySet, repeats, output_size, out);
+ }
+
+ // aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & repeat_interleave_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & repeats, c10::optional<int64_t> output_size, at::Tensor & out) {
+ return at::_ops::repeat_interleave_Tensor_out::redispatch(dispatchKeySet, repeats, output_size, out);
+ }
+
+ // aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mkldnn_reshape_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef shape) {
+ return at::_ops::_mkldnn_reshape_out::redispatch(dispatchKeySet, self, shape, out);
+ }
+
+ // aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mkldnn_reshape_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shape, at::Tensor & out) {
+ return at::_ops::_mkldnn_reshape_out::redispatch(dispatchKeySet, self, shape, out);
+ }
+
+ // aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & relu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::relu_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & relu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::relu_out::redispatch(dispatchKeySet, self, out);
+ }
+
+ // aten::prelu.out(Tensor self, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & prelu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight) {
+ return at::_ops::prelu_out::redispatch(dispatchKeySet, self, weight, out);
+ }
+
+ // aten::prelu.out(Tensor self, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & prelu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::Tensor & out) {
+ return at::_ops::prelu_out::redispatch(dispatchKeySet, self, weight, out);
+ }
+
+ // aten::prelu_backward.out(Tensor grad_output, Tensor self, Tensor weight, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> prelu_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
+ return at::_ops::prelu_backward_out::redispatch(dispatchKeySet, grad_output, self, weight, out0, out1);
+ }
+
+ // aten::prelu_backward.out(Tensor grad_output, Tensor self, Tensor weight, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> prelu_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::prelu_backward_out::redispatch(dispatchKeySet, grad_output, self, weight, out0, out1);
+ }
+
+ // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
+ return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), dim, index, out);
+ }
+
+ // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) {
+ return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), dim, index, out);
+ }
+
+ // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t index) {
+ return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, index, out);
+ }
+
+ // aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) {
+ return at::_ops::select_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, index, out);
+ }
+
+ // aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & celu_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1.0) {
+ return at::_ops::celu_out::redispatch(dispatchKeySet, self, alpha, out);
+ }
+
+ // aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & celu_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out) {
+ return at::_ops::celu_out::redispatch(dispatchKeySet, self, alpha, out);
+ }
+
+ // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), dim, start, end, step, out);
+ }
+
+ // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step, at::Tensor & out) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, c10::fromIntArrayRef(input_sizes), dim, start, end, step, out);
+ }
+
+ // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step, out);
+ }
+
+ // aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out) {
+ return at::_ops::slice_backward_out::redispatch(dispatchKeySet, grad_output, input_sizes, dim, start, end, step, out);
+ }
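+
+ // Sketch: operators whose schema uses SymInt[] come in two wrapper families,
+ // as with select_backward above. The plain variant accepts at::IntArrayRef
+ // and converts with c10::fromIntArrayRef before redispatching; the `_symint`
+ // variant forwards c10::SymIntArrayRef unchanged, so symbolic shapes survive
+ // tracing. Both reach the same at::_ops entry. Hypothetically:
+ //
+ //   at::redispatch::select_backward_out(ks, out, grad, {4, 4}, 0, 1);
+ //   // at::redispatch::select_backward_symint_out(ks, out, grad, sym_sizes, 0, 1);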
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+ }
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step, out);
+ }
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_scatter_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start, end, step, out);
+ }
+
+ // aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & slice_scatter_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out) {
+ return at::_ops::slice_scatter_out::redispatch(dispatchKeySet, self, src, dim, start, end, step, out);
+ }
+
+ // aten::select_scatter.out(Tensor self, Tensor src, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index) {
+ return at::_ops::select_scatter_out::redispatch(dispatchKeySet, self, src, dim, index, out);
+ }
+
+ // aten::select_scatter.out(Tensor self, Tensor src, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & select_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index, at::Tensor & out) {
+ return at::_ops::select_scatter_out::redispatch(dispatchKeySet, self, src, dim, index, out);
+ }
+
+ // aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & diagonal_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+ return at::_ops::diagonal_scatter_out::redispatch(dispatchKeySet, self, src, offset, dim1, dim2, out);
+ }
+
+ // aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & diagonal_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out) {
+ return at::_ops::diagonal_scatter_out::redispatch(dispatchKeySet, self, src, offset, dim1, dim2, out);
+ }
+
+ // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & as_strided_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) {
+ return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+ }
+
+ // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & as_strided_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset, at::Tensor & out) {
+ return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt, out);
+ }
+
+ // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & as_strided_scatter_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset=c10::nullopt) {
+ return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, size, stride, storage_offset, out);
+ }
+
+ // aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & as_strided_scatter_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset, at::Tensor & out) {
+ return at::_ops::as_strided_scatter_out::redispatch(dispatchKeySet, self, src, size, stride, storage_offset, out);
+ }
+
+ // aten::unsafe_split.Tensor_out(Tensor self, int split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+ inline void unsafe_split_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, int64_t split_size, int64_t dim=0) {
+ return at::_ops::unsafe_split_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out);
+ }
+
+ // aten::unsafe_split.Tensor_out(Tensor self, int split_size, int dim=0, *, Tensor(a!)[] out) -> ()
+ inline void unsafe_split_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t split_size, int64_t dim, at::TensorList out) {
+ return at::_ops::unsafe_split_Tensor_out::redispatch(dispatchKeySet, self, split_size, dim, out);
+ }
+
+ // aten::unsafe_split_with_sizes.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+ inline void unsafe_split_with_sizes_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+ return at::_ops::unsafe_split_with_sizes_out::redispatch(dispatchKeySet, self, split_sizes, dim, out);
+ }
+
+ // aten::unsafe_split_with_sizes.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+ inline void unsafe_split_with_sizes_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
+ return at::_ops::unsafe_split_with_sizes_out::redispatch(dispatchKeySet, self, split_sizes, dim, out);
+ }
+
+ // aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+ return at::_ops::sum_out::redispatch(dispatchKeySet, self, dtype, out);
+ }
+
+ // aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+ return at::_ops::sum_out::redispatch(dispatchKeySet, self, dtype, out);
+ }
+
+ // aten::std_mean.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> std_mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) {
+ return at::_ops::std_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1);
+ }
+
+ // aten::std_mean.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+ inline ::std::tuple<at::Tensor &,at::Tensor &> std_mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) {
+ return at::_ops::std_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1);
+ }
+
+ // aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+ return at::_ops::prod_out::redispatch(dispatchKeySet, self, dtype, out);
+ }
+
+ // aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+ return at::_ops::prod_out::redispatch(dispatchKeySet, self, dtype, out);
+ }
+
+ // aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mkldnn_transpose_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+ return at::_ops::_mkldnn_transpose_out::redispatch(dispatchKeySet, self, dim0, dim1, out);
+ }
+
+ // aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _mkldnn_transpose_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
+ return at::_ops::_mkldnn_transpose_out::redispatch(dispatchKeySet, self, dim0, dim1, out);
+ }
+
+ // aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & flip_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) {
+ return at::_ops::flip_out::redispatch(dispatchKeySet, self, dims, out);
+ }
+
+ // aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & flip_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) {
+ return at::_ops::flip_out::redispatch(dispatchKeySet, self, dims, out);
+ }
+
+ // aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & roll_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) {
+ return at::_ops::roll_out::redispatch(dispatchKeySet, self, shifts, dims, out);
+ }
+
+ // aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & roll_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) {
+ return at::_ops::roll_out::redispatch(dispatchKeySet, self, shifts, dims, out);
+ }
+
+ // aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rot90_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t k=1, at::IntArrayRef dims={0,1}) {
+ return at::_ops::rot90_out::redispatch(dispatchKeySet, self, k, dims, out);
+ }
+
+ // aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & rot90_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t k, at::IntArrayRef dims, at::Tensor & out) {
+ return at::_ops::rot90_out::redispatch(dispatchKeySet, self, k, dims, out);
+ }
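+
+ // Sketch: operators that fill a list of preallocated outputs, such as
+ // unsafe_split_out above, return void and take an at::TensorList of the
+ // destination tensors rather than a single `out` reference. Hypothetically:
+ //
+ //   std::vector<at::Tensor> outs = {at::empty({2, 4}), at::empty({2, 4})};
+ //   at::redispatch::unsafe_split_out(ks, outs, self, /*split_size=*/2, /*dim=*/0);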
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _transform_bias_rescale_qkv_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) { + return at::_ops::_transform_bias_rescale_qkv_out::redispatch(dispatchKeySet, qkv, qkv_bias, num_heads, out0, out1, out2); + } + + // aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple _transform_bias_rescale_qkv_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_transform_bias_rescale_qkv_out::redispatch(dispatchKeySet, qkv, qkv_bias, num_heads, out0, out1, out2); + } + + // aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_from_mask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & t, const at::Tensor & mask, bool mask_check=true) { + return at::_ops::_nested_tensor_from_mask_out::redispatch(dispatchKeySet, t, mask, mask_check, out); + } + + // aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_from_mask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & t, const at::Tensor & mask, bool mask_check, at::Tensor & out) { + return at::_ops::_nested_tensor_from_mask_out::redispatch(dispatchKeySet, t, mask, mask_check, out); + } + + // aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213=false) { + return at::_ops::_nested_from_padded_out::redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213, out); + } + + // aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213, at::Tensor & out) { + return at::_ops::_nested_from_padded_out::redispatch(dispatchKeySet, padded, cpu_nested_shape_example, fuse_transform_0213, out); + } + + // aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_size_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_nested_tensor_size_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_size_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_nested_tensor_size_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _nested_tensor_strides_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_nested_tensor_strides_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_tensor_strides_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_nested_tensor_strides_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_and_nested_example_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & padded, const at::Tensor & nt_example) { + return at::_ops::_nested_from_padded_and_nested_example_out::redispatch(dispatchKeySet, padded, nt_example, out); + } + + // aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_from_padded_and_nested_example_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & padded, const at::Tensor & nt_example, at::Tensor & out) { + return at::_ops::_nested_from_padded_and_nested_example_out::redispatch(dispatchKeySet, padded, nt_example, out); + } + + // aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_view_from_buffer_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets) { + return at::_ops::_nested_view_from_buffer_copy_out::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets, out); + } + + // aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _nested_view_from_buffer_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, at::IntArrayRef offsets, at::Tensor & out) { + return at::_ops::_nested_view_from_buffer_copy_out::redispatch(dispatchKeySet, self, nested_size, nested_strides, offsets, out); + } + + // aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _trilinear_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim=1) { + return at::_ops::_trilinear_out::redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out); + } + + // aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _trilinear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim, at::Tensor & out) { + return at::_ops::_trilinear_out::redispatch(dispatchKeySet, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, out); + } + + // aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _unique_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, bool sorted=true, bool return_inverse=false) { + return at::_ops::_unique_out::redispatch(dispatchKeySet, self, sorted, return_inverse, out0, out1); + } + + // aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple _unique_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_unique_out::redispatch(dispatchKeySet, self, sorted, return_inverse, out0, out1); + } + + // aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_dim_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim_out::redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_dim_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::unique_dim_out::redispatch(dispatchKeySet, self, dim, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple unique_consecutive_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional dim=c10::nullopt) { + return at::_ops::unique_consecutive_out::redispatch(dispatchKeySet, self, return_inverse, return_counts, dim, out0, out1, out2); + } + + // aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_consecutive_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::unique_consecutive_out::redispatch(dispatchKeySet, self, return_inverse, return_counts, dim, out0, out1, out2); + } + + // aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false) { + return at::_ops::unique_dim_consecutive_out::redispatch(dispatchKeySet, self, dim, return_inverse, return_counts, out0, out1, out2); + } + + // aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> unique_dim_consecutive_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::unique_dim_consecutive_out::redispatch(dispatchKeySet, self, dim, return_inverse, return_counts, out0, out1, out2); + } + + // aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false) { + return at::_ops::_unique2_out::redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _unique2_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_unique2_out::redispatch(dispatchKeySet, self, sorted, return_inverse, return_counts, out0, out1, out2); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _unsafe_view_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _unsafe_view_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(size), out); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _unsafe_view_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _unsafe_view_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::_unsafe_view_out::redispatch(dispatchKeySet, self, size, out); + } + + // aten::var_mean.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> var_mean_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim=false) { + return at::_ops::var_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1); + } + + // aten::var_mean.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> var_mean_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::var_mean_correction_out::redispatch(dispatchKeySet, self, dim, correction, keepdim, out0, out1); + } + + // aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & v, const at::Tensor & g, int64_t dim=0) { + return at::_ops::_weight_norm_interface_out::redispatch(dispatchKeySet, v, g, dim, out0, out1); + } + + // aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & v, const at::Tensor & g, int64_t dim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_weight_norm_interface_out::redispatch(dispatchKeySet, v, g, dim, out0, out1); + } + + // aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) { + return at::_ops::_weight_norm_interface_backward_out::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim, out0, out1); + } + + // aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!)
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _weight_norm_interface_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_weight_norm_interface_backward_out::redispatch(dispatchKeySet, grad_w, saved_v, saved_g, saved_norms, dim, out0, out1); + } + + // aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size, c10::optional<at::DimnameList> names) { + return at::_ops::zeros_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, c10::optional<at::DimnameList> names, at::Tensor & out) { + return at::_ops::zeros_names_out::redispatch(dispatchKeySet, size, names, out); + } + + // aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _efficientzerotensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::_efficientzerotensor_out::redispatch(dispatchKeySet, size, out); + } + + // aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _efficientzerotensor_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_efficientzerotensor_out::redispatch(dispatchKeySet, size, out); + } + + // aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_like_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::zeros_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zeros_like_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { + return at::_ops::zeros_like_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _standard_gamma_grad_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & output) { + return at::_ops::_standard_gamma_grad_out::redispatch(dispatchKeySet, self, output, out); + } + + // aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _standard_gamma_grad_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out) { + return at::_ops::_standard_gamma_grad_out::redispatch(dispatchKeySet, self, output, out); + } + + // aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _standard_gamma_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::_standard_gamma_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _standard_gamma_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::_standard_gamma_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _dirichlet_grad_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) { + return at::_ops::_dirichlet_grad_out::redispatch(dispatchKeySet, x, alpha, total, out); + } + + // aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _dirichlet_grad_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total, at::Tensor & out) { + return at::_ops::_dirichlet_grad_out::redispatch(dispatchKeySet, x, alpha, total, out); + } + + // aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sample_dirichlet_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::_sample_dirichlet_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sample_dirichlet_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::_sample_dirichlet_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & poisson_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::poisson_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & poisson_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::poisson_out::redispatch(dispatchKeySet, self, generator, out); + } + + // aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binomial_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt) { + return at::_ops::binomial_out::redispatch(dispatchKeySet, count, prob, generator, out); + } + + // aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & binomial_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out) { + return at::_ops::binomial_out::redispatch(dispatchKeySet, count, prob, generator, out); + } + + // aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2) { + return at::_ops::native_norm_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & native_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) { + return at::_ops::native_norm_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) { + return at::_ops::native_norm_ScalarOpt_dim_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & native_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::native_norm_ScalarOpt_dim_dtype_out::redispatch(dispatchKeySet, self, p, dim, keepdim, dtype, out); + } + + // aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_dim_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { + return at::_ops::_sparse_sum_dim_out::redispatch(dispatchKeySet, self, dim, out); + } + + // aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_sum_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_backward_out::redispatch(dispatchKeySet, grad, self, dim, out); + } + + // aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_sum_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { + return at::_ops::_sparse_sum_backward_out::redispatch(dispatchKeySet, grad, self, dim, out); + } + + // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_csr_sum_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::_sparse_csr_sum_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_csr_sum_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::_sparse_csr_sum_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_csr_prod_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::_sparse_csr_prod_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_csr_prod_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::_sparse_csr_prod_dim_dtype_out::redispatch(dispatchKeySet, self, dim, keepdim, dtype, out); + } + + // aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { + return at::_ops::_sparse_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + return at::_ops::_sparse_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_sparse_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_log_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) { + return at::_ops::_sparse_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _sparse_log_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) { + return at::_ops::_sparse_log_softmax_out::redispatch(dispatchKeySet, self, dim, half_to_float, out); + } + + // aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_log_softmax_backward_data_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) { + return at::_ops::_sparse_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_log_softmax_backward_data_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_sparse_log_softmax_backward_data_out::redispatch(dispatchKeySet, grad_output, output, dim, self, out); + } + + // aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _spdiags_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout=c10::nullopt) { + return at::_ops::_spdiags_out::redispatch(dispatchKeySet, diagonals, offsets, shape, layout, out); + } + + // aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _spdiags_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout, at::Tensor & out) { + return at::_ops::_spdiags_out::redispatch(dispatchKeySet, diagonals, offsets, shape, layout, out); + } + + // aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) { + return at::_ops::norm_ScalarOpt_dtype_out::redispatch(dispatchKeySet, self, p, dtype, out); + } + + // aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::norm_ScalarOpt_dtype_out::redispatch(dispatchKeySet, self, p, dtype, out); + } + + // aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2) { + return at::_ops::norm_Scalar_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & p, at::Tensor & out) { + return at::_ops::norm_Scalar_out::redispatch(dispatchKeySet, self, p, out); + } + + // aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & clone_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::clone_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & clone_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) { + return at::_ops::clone_out::redispatch(dispatchKeySet, self, memory_format, out); + } + + // aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::resize_as_out::redispatch(dispatchKeySet, self, the_template, memory_format, out); + } + + // aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format, const at::Tensor & out) { + return at::_ops::resize_as_out::redispatch(dispatchKeySet, self, the_template, memory_format, out); + } + + // aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor + inline at::Tensor resize_as(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) { + return at::_ops::resize_as::redispatch(dispatchKeySet, self, the_template, memory_format); + } + + // aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_sparse_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, const at::Tensor & the_template) { + return at::_ops::resize_as_sparse_out::redispatch(dispatchKeySet, self, the_template, out); + } + + // aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & resize_as_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template, const at::Tensor & out) { + return at::_ops::resize_as_sparse_out::redispatch(dispatchKeySet, self, the_template, out); + } + + // aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor + inline at::Tensor resize_as_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & the_template) { + return at::_ops::resize_as_sparse::redispatch(dispatchKeySet, self, the_template); + } + + // aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & zero_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & zero_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::zero(Tensor self) -> Tensor + inline at::Tensor zero(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) { + return at::_ops::zero::redispatch(dispatchKeySet, self); + } + + // aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::sub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::sub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Tensor_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::rsub_Tensor_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsub_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::rsub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rsub_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::rsub_Scalar_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_addmm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::_sparse_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_addmm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::_sparse_addmm_out::redispatch(dispatchKeySet, self, mat1, mat2, beta, alpha, out); + } + + // aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & sparse_coo_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::IntArrayRef size) { + return at::_ops::sparse_coo_tensor_size_out::redispatch(dispatchKeySet, size, out); + } + + // aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_coo_tensor_outf(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::sparse_coo_tensor_size_out::redispatch(dispatchKeySet, size, out); + } + + // aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size) { + return at::_ops::_sparse_coo_tensor_with_dims_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, out); + } + + // aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_outf(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::Tensor & out) { + return at::_ops::_sparse_coo_tensor_with_dims_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, out); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, out); + } + + // aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _sparse_coo_tensor_with_dims_and_tensors_outf(c10::DispatchKeySet dispatchKeySet, int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::Tensor & out) { + return at::_ops::_sparse_coo_tensor_with_dims_and_tensors_out::redispatch(dispatchKeySet, sparse_dim, dense_dim, size, indices, values, out); + } + + // aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & sparse_resize_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) 
+ inline const at::Tensor & sparse_resize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) { + return at::_ops::sparse_resize_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor + inline at::Tensor sparse_resize(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim); + } + + // aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & sparse_resize_and_clear_out(c10::DispatchKeySet dispatchKeySet, const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_and_clear_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!) + inline const at::Tensor & sparse_resize_and_clear_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const at::Tensor & out) { + return at::_ops::sparse_resize_and_clear_out::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim, out); + } + + // aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor + inline at::Tensor sparse_resize_and_clear(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { + return at::_ops::sparse_resize_and_clear::redispatch(dispatchKeySet, self, size, sparse_dim, dense_dim); + } + + // aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_mask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask) { + return at::_ops::sparse_mask_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & sparse_mask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, at::Tensor & out) { + return at::_ops::sparse_mask_out::redispatch(dispatchKeySet, self, mask, out); + } + + // aten::_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_dense_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::_to_dense_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _to_dense_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::_to_dense_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _coalesce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_coalesce_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _coalesce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_coalesce_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _coalesced_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool coalesced) { + return at::_ops::_coalesced_out::redispatch(dispatchKeySet, self, coalesced, out); + } + + // aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _coalesced_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out) { + return at::_ops::_coalesced_out::redispatch(dispatchKeySet, self, coalesced, out); + } + + // aten::_coalesced(Tensor self, bool coalesced) -> Tensor + inline at::Tensor _coalesced(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced) { + return at::_ops::_coalesced::redispatch(dispatchKeySet, self, coalesced); + } + + // aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_sparse_to_sparse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_sparse_to_sparse_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & copy_sparse_to_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking, at::Tensor & out) { + return at::_ops::copy_sparse_to_sparse_out::redispatch(dispatchKeySet, self, src, non_blocking, out); + } + + // aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor + inline at::Tensor copy_sparse_to_sparse(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & src, bool non_blocking=false) { + return at::_ops::copy_sparse_to_sparse::redispatch(dispatchKeySet, self, src, non_blocking); + } + + // aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t sparse_dim) { + return at::_ops::to_sparse_sparse_dim_out::redispatch(dispatchKeySet, self, sparse_dim, out); + } + + // aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) { + return at::_ops::to_sparse_sparse_dim_out::redispatch(dispatchKeySet, self, sparse_dim, out); + } + + // aten::to_sparse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::to_sparse_out::redispatch(dispatchKeySet, self, out); + } + + // aten::to_sparse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & to_sparse_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::to_sparse_out::redispatch(dispatchKeySet, self, out); + } + + // aten::to_sparse_csr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_csr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::to_sparse_csr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::to_sparse_csr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_csr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::to_sparse_csr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::to_sparse_csc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_csc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::to_sparse_csc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::to_sparse_csc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_csc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::to_sparse_csc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_bsr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize) { + return at::_ops::to_sparse_bsr_out::redispatch(dispatchKeySet, self, blocksize, out); + } + + // aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_bsr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, at::Tensor & out) { + return at::_ops::to_sparse_bsr_out::redispatch(dispatchKeySet, self, blocksize, out); + } + + // aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_bsc_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef blocksize) { + return at::_ops::to_sparse_bsc_out::redispatch(dispatchKeySet, self, blocksize, out); + } + + // aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_sparse_bsc_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, at::Tensor & out) { + return at::_ops::to_sparse_bsc_out::redispatch(dispatchKeySet, self, blocksize, out); + } + + // aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_mkldnn_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt) { + return at::_ops::to_mkldnn_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & to_mkldnn_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, at::Tensor & out) { + return at::_ops::to_mkldnn_out::redispatch(dispatchKeySet, self, dtype, out); + } + + // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & mkldnn_reorder_conv2d_weight_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, out); + } + + // aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv2d_weight_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv2d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, out); + } + + // aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv3d_weight_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding=0, at::IntArrayRef stride=1, at::IntArrayRef dilation=1, int64_t groups=1) { + return at::_ops::mkldnn_reorder_conv3d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, out); + } + + // aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_reorder_conv3d_weight_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) { + return at::_ops::mkldnn_reorder_conv3d_weight_out::redispatch(dispatchKeySet, self, padding, stride, dilation, groups, out); + } + + // aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_dynamic_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::ScalarType dtype, bool reduce_range) { + return at::_ops::quantize_per_tensor_dynamic_out::redispatch(dispatchKeySet, self, dtype, reduce_range, out); + } + + // aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_dynamic_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out) { + return at::_ops::quantize_per_tensor_dynamic_out::redispatch(dispatchKeySet, self, dtype, reduce_range, out); + } + + // aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & quantize_per_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_tensor_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, dtype, out); + } + + // aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () + inline void quantize_per_tensor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) { + return at::_ops::quantize_per_tensor_tensors_out::redispatch(dispatchKeySet, tensors, scales, zero_points, dtype, out); + } + + // aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> () + inline void quantize_per_tensor_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) { + return at::_ops::quantize_per_tensor_tensors_out::redispatch(dispatchKeySet, tensors, scales, zero_points, dtype, out); + } + + // aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_channel_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) { + return at::_ops::quantize_per_channel_out::redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype, out); + } + + // aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & quantize_per_channel_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype, at::Tensor & out) { + return at::_ops::quantize_per_channel_out::redispatch(dispatchKeySet, self, scales, zero_points, axis, dtype, out); + } + + // aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & dequantize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::dequantize_self_out::redispatch(dispatchKeySet, self, out); + } + + // aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & dequantize_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::dequantize_self_out::redispatch(dispatchKeySet, self, out); + } + + // aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () + inline void dequantize_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList tensors) { + return at::_ops::dequantize_tensors_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> () + inline void dequantize_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::TensorList out) { + return at::_ops::dequantize_tensors_out::redispatch(dispatchKeySet, tensors, out); + } + + // aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & q_per_channel_scales_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::q_per_channel_scales_out::redispatch(dispatchKeySet, self, out); + } + + // aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & q_per_channel_scales_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::q_per_channel_scales_out::redispatch(dispatchKeySet, self, out); + } + + // aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & q_per_channel_zero_points_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::q_per_channel_zero_points_out::redispatch(dispatchKeySet, self, out); + } + + // aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & q_per_channel_zero_points_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::q_per_channel_zero_points_out::redispatch(dispatchKeySet, self, out); + } + + // aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & int_repr_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::int_repr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & int_repr_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::int_repr_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _make_per_tensor_quantized_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point) { + return at::_ops::_make_per_tensor_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, out); + } + + // aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _make_per_tensor_quantized_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out) { + return at::_ops::_make_per_tensor_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, out); + } + + // aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _make_per_channel_quantized_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) { + return at::_ops::_make_per_channel_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, out); + } + + // aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _make_per_channel_quantized_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out) { + return at::_ops::_make_per_channel_quantized_tensor_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, out); + } + + // aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, out0, out1); + } + + // aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::fake_quantize_per_tensor_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, out0, out1); + } + + // aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) { + return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1); + } + + // aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!)
+
+    // aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out::redispatch(dispatchKeySet, self, scale, zero_point, fake_quant_enabled, quant_min, quant_max, out0, out1);
+    }
+
+    // aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fake_quantize_learnable_per_tensor_affine_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
+        return at::_ops::_fake_quantize_learnable_per_tensor_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor, out);
+    }
+
+    // aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fake_quantize_learnable_per_tensor_affine_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
+        return at::_ops::_fake_quantize_learnable_per_tensor_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, quant_min, quant_max, grad_factor, out);
+    }
+
+    // aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+        return at::_ops::fake_quantize_per_channel_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
+    }
+
+    // aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::fake_quantize_per_channel_affine_cachemask_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
+    }
+
+    // aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fake_quantize_learnable_per_channel_affine_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0) {
+        return at::_ops::_fake_quantize_learnable_per_channel_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
+    }
+
+    // aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _fake_quantize_learnable_per_channel_affine_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out) {
+        return at::_ops::_fake_quantize_learnable_per_channel_affine_out::redispatch(dispatchKeySet, self, scale, zero_point, axis, quant_min, quant_max, grad_factor, out);
+    }
+
+    // aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) {
+        return at::_ops::_fused_moving_avg_obs_fq_helper_out::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
+    }
+
+    // aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_fused_moving_avg_obs_fq_helper_out::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant, out0, out1);
+    }
+
+    // aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)
+    inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false) {
+        return at::_ops::_fused_moving_avg_obs_fq_helper_functional::redispatch(dispatchKeySet, self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
+    }
+
+    // aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _to_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+        return at::_ops::_to_copy_out::redispatch(dispatchKeySet, self, non_blocking, memory_format, out);
+    }
+
+    // aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _to_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+        return at::_ops::_to_copy_out::redispatch(dispatchKeySet, self, non_blocking, memory_format, out);
+    }
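Schema defaults such as `MemoryFormat? memory_format=None` surface as defaulted `c10::optional` parameters in the `_out` spelling, while the `_outf` spelling requires every argument explicitly. A hedged sketch, again assuming the `at::redispatch` namespace (the helper name `copy_contiguous` is hypothetical):

```cpp
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>

// Illustrative helper: `_to_copy_out` can lean on the schema defaults, while
// `_to_copy_outf` must spell out non_blocking and memory_format as well.
at::Tensor & copy_contiguous(c10::DispatchKeySet ks, const at::Tensor & self, at::Tensor & out) {
    at::redispatch::_to_copy_out(ks, out, self); // non_blocking=false, memory_format=nullopt
    return at::redispatch::_to_copy_outf(ks, self, /*non_blocking=*/false,
                                         c10::MemoryFormat::Contiguous, out);
}
```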
+
+    // aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+        return at::_ops::_lstm_mps_out::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4);
+    }
+
+    // aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _lstm_mps_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
+        return at::_ops::_lstm_mps_out::redispatch(dispatchKeySet, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4);
+    }
+
+    // aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
+    inline void lstm_mps_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::TensorList out1, at::TensorList out2, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
+        return at::_ops::lstm_mps_backward_out::redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
+    }
+
+    // aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()
+    inline void lstm_mps_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
+        return at::_ops::lstm_mps_backward_out::redispatch(dispatchKeySet, grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
+    }
+
+    // aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) {
+        return at::_ops::_thnn_fused_lstm_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
+    }
+
+    // aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+        return at::_ops::_thnn_fused_lstm_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, cx, input_bias, hidden_bias, out0, out1, out2);
+    }
+
+    // aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
+        return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
+    }
+
+    // aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_outf(c10::DispatchKeySet dispatchKeySet, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+        return at::_ops::_thnn_fused_lstm_cell_backward_impl_out::redispatch(dispatchKeySet, grad_hy, grad_cy, cx, cy, workspace, has_bias, out0, out1, out2);
+    }
+
+    // aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias={}, const c10::optional<at::Tensor> & hidden_bias={}) {
+        return at::_ops::_thnn_fused_gru_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
+    }
+
+    // aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_thnn_fused_gru_cell_out::redispatch(dispatchKeySet, input_gates, hidden_gates, hx, input_bias, hidden_bias, out0, out1);
+    }
+
+    // aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
+        return at::_ops::_thnn_fused_gru_cell_backward_out::redispatch(dispatchKeySet, grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
+    }
+
+    // aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_gru_cell_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4) {
+        return at::_ops::_thnn_fused_gru_cell_backward_out::redispatch(dispatchKeySet, grad_hy, workspace, has_bias, out0, out1, out2, out3, out4);
+    }
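Multi-output ops like the fused LSTM/GRU cells return a `::std::tuple` of `at::Tensor &` that merely aliases the `out` arguments, so the return value can be ignored when the caller already holds the destination tensors. A sketch under the same `at::redispatch` namespace assumption (`gru_cell_into` is hypothetical):

```cpp
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>

// Illustrative helper: the returned tuple aliases out0/out1, so it can be
// dropped; input_bias/hidden_bias fall back to their `None` defaults.
void gru_cell_into(c10::DispatchKeySet ks,
                   const at::Tensor & input_gates, const at::Tensor & hidden_gates,
                   const at::Tensor & hx, at::Tensor & out0, at::Tensor & out1) {
    at::redispatch::_thnn_fused_gru_cell_out(ks, out0, out1, input_gates, hidden_gates, hx);
}
```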
+
+    // aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
+        return at::_ops::_pack_padded_sequence_out::redispatch(dispatchKeySet, input, lengths, batch_first, out0, out1);
+    }
+
+    // aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _pack_padded_sequence_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & lengths, bool batch_first, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_pack_padded_sequence_out::redispatch(dispatchKeySet, input, lengths, batch_first, out0, out1);
+    }
+
+    // aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Storage source) {
+        return at::_ops::set_source_Storage_out::redispatch(dispatchKeySet, self, source, out);
+    }
+
+    // aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, at::Tensor & out) {
+        return at::_ops::set_source_Storage_out::redispatch(dispatchKeySet, self, source, out);
+    }
+
+    // aten::set.source_Storage(Tensor self, Storage source) -> Tensor
+    inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source) {
+        return at::_ops::set_source_Storage::redispatch(dispatchKeySet, self, source);
+    }
+
+    // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+        return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out) {
+        return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), out);
+    }
+
+    // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+        return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, size, stride, out);
+    }
+
+    // aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out) {
+        return at::_ops::set_source_Storage_storage_offset_out::redispatch(dispatchKeySet, self, source, storage_offset, size, stride, out);
+    }
+
+    // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
+    inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) {
+        return at::_ops::set_source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride));
+    }
+
+    // aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor
+    inline at::Tensor set_symint(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) {
+        return at::_ops::set_source_Storage_storage_offset::redispatch(dispatchKeySet, self, source, storage_offset, size, stride);
+    }
+
+    // aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & source) {
+        return at::_ops::set_source_Tensor_out::redispatch(dispatchKeySet, self, source, out);
+    }
+
+    // aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source, at::Tensor & out) {
+        return at::_ops::set_source_Tensor_out::redispatch(dispatchKeySet, self, source, out);
+    }
+
+    // aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor
+    inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source) {
+        return at::_ops::set_source_Tensor::redispatch(dispatchKeySet, self, source);
+    }
+
+    // aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::set_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & set_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::set_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::set(Tensor self) -> Tensor
+    inline at::Tensor set(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
+        return at::_ops::set::redispatch(dispatchKeySet, self);
+    }
+
+    // aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::lift_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lift_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::lift_out::redispatch(dispatchKeySet, self, out);
+    }
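The `set.source_Storage_storage_offset` family shows how a schema with `SymInt` arguments is exposed twice: a plain overload taking `int64_t`/`at::IntArrayRef` (converting through `c10::fromIntArrayRef`, as above) and an explicit `_symint` spelling taking `c10::SymInt`/`c10::SymIntArrayRef`. A sketch of both call paths, assuming the `at::redispatch` namespace (`set_view` is hypothetical):

```cpp
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>

// Illustrative helper: both calls reach aten::set.source_Storage_storage_offset.
at::Tensor set_view(c10::DispatchKeySet ks, const at::Tensor & self, at::Storage source) {
    // Plain overload: concrete sizes, converted to SymInt by the wrapper.
    at::Tensor a = at::redispatch::set(ks, self, source, /*storage_offset=*/0, {2, 3});
    // Explicit symint overload: symbolic sizes passed straight through.
    c10::SymInt sym_sizes[] = {c10::SymInt(2), c10::SymInt(3)};
    at::Tensor b = at::redispatch::set_symint(ks, self, source, c10::SymInt(0), sym_sizes);
    return b;
}
```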
+
+    // aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lift_fresh_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::lift_fresh_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & lift_fresh_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::lift_fresh_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & masked_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
+        return at::_ops::masked_fill_Scalar_out::redispatch(dispatchKeySet, self, mask, value, out);
+    }
+
+    // aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & masked_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value, at::Tensor & out) {
+        return at::_ops::masked_fill_Scalar_out::redispatch(dispatchKeySet, self, mask, value, out);
+    }
+
+    // aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & masked_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
+        return at::_ops::masked_fill_Tensor_out::redispatch(dispatchKeySet, self, mask, value, out);
+    }
+
+    // aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & masked_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value, at::Tensor & out) {
+        return at::_ops::masked_fill_Tensor_out::redispatch(dispatchKeySet, self, mask, value, out);
+    }
+
+    // aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & masked_scatter_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
+        return at::_ops::masked_scatter_out::redispatch(dispatchKeySet, self, mask, source, out);
+    }
+
+    // aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & masked_scatter_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source, at::Tensor & out) {
+        return at::_ops::masked_scatter_out::redispatch(dispatchKeySet, self, mask, source, out);
+    }
+
+    // aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _masked_softmax_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt) {
+        return at::_ops::_masked_softmax_out::redispatch(dispatchKeySet, self, mask, dim, mask_type, out);
+    }
+
+    // aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _masked_softmax_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, at::Tensor & out) {
+        return at::_ops::_masked_softmax_out::redispatch(dispatchKeySet, self, mask, dim, mask_type, out);
+    }
+
+    // aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _masked_softmax_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt) {
+        return at::_ops::_masked_softmax_backward_out::redispatch(dispatchKeySet, grad_output, output, mask, dim, out);
+    }
+
+    // aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _masked_softmax_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim, at::Tensor & out) {
+        return at::_ops::_masked_softmax_backward_out::redispatch(dispatchKeySet, grad_output, output, mask, dim, out);
+    }
+
+    // aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & put_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false) {
+        return at::_ops::put_out::redispatch(dispatchKeySet, self, index, source, accumulate, out);
+    }
+
+    // aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & put_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out) {
+        return at::_ops::put_out::redispatch(dispatchKeySet, self, index, source, accumulate, out);
+    }
+
+    // aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
+        return at::_ops::index_fill_int_Scalar_out::redispatch(dispatchKeySet, self, dim, index, value, out);
+    }
+
+    // aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out) {
+        return at::_ops::index_fill_int_Scalar_out::redispatch(dispatchKeySet, self, dim, index, value, out);
+    }
+
+    // aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_fill_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
+        return at::_ops::index_fill_int_Tensor_out::redispatch(dispatchKeySet, self, dim, index, value, out);
+    }
+
+    // aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & index_fill_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out) {
+        return at::_ops::index_fill_int_Tensor_out::redispatch(dispatchKeySet, self, dim, index, value, out);
+    }
+
+    // aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_and_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::bitwise_and_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_and_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::bitwise_and_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_or_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::bitwise_or_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_or_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::bitwise_or_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_xor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::bitwise_xor_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_xor_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::bitwise_xor_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __lshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::__lshift___Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __lshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+        return at::_ops::__lshift___Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __lshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::__lshift___Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __lshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::__lshift___Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_left_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::bitwise_left_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_left_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::bitwise_left_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __rshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+        return at::_ops::__rshift___Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __rshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+        return at::_ops::__rshift___Scalar_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __rshift___out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+        return at::_ops::__rshift___Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & __rshift___outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::__rshift___Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_right_shift_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::bitwise_right_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & bitwise_right_shift_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::bitwise_right_shift_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & random_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::random_from_out::redispatch(dispatchKeySet, self, from, to, generator, out);
+    }
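Alongside the usual Tensor-first overloads, several binary ops gain `Scalar_Tensor_out` spellings where `self` is an `at::Scalar`, covering expressions like `1 << tensor`. A sketch under the same namespace assumption (`ones_shifted_by` is hypothetical):

```cpp
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>

// Illustrative helper: scalar-on-the-left left shift, out[i] = 1 << other[i].
at::Tensor & ones_shifted_by(c10::DispatchKeySet ks, const at::Tensor & other, at::Tensor & out) {
    return at::redispatch::bitwise_left_shift_out(ks, out, at::Scalar(1), other);
}
```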
+
+    // aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & random_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::random_from_out::redispatch(dispatchKeySet, self, from, to, generator, out);
+    }
+
+    // aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor
+    inline at::Tensor random(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::random_from::redispatch(dispatchKeySet, self, from, to, generator);
+    }
+
+    // aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & random_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::random_to_out::redispatch(dispatchKeySet, self, to, generator, out);
+    }
+
+    // aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & random_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::random_to_out::redispatch(dispatchKeySet, self, to, generator, out);
+    }
+
+    // aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor
+    inline at::Tensor random(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::random_to::redispatch(dispatchKeySet, self, to, generator);
+    }
+
+    // aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & random_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::random_out::redispatch(dispatchKeySet, self, generator, out);
+    }
+
+    // aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & random_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::random_out::redispatch(dispatchKeySet, self, generator, out);
+    }
+
+    // aten::random(Tensor self, *, Generator? generator=None) -> Tensor
+    inline at::Tensor random(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::random::redispatch(dispatchKeySet, self, generator);
+    }
+
+    // aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & uniform_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::uniform_out::redispatch(dispatchKeySet, self, from, to, generator, out);
+    }
+
+    // aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & uniform_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::uniform_out::redispatch(dispatchKeySet, self, from, to, generator, out);
+    }
+
+    // aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor
+    inline at::Tensor uniform(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double from=0, double to=1, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::uniform::redispatch(dispatchKeySet, self, from, to, generator);
+    }
+
+    // aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cauchy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::cauchy_out::redispatch(dispatchKeySet, self, median, sigma, generator, out);
+    }
+
+    // aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & cauchy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::cauchy_out::redispatch(dispatchKeySet, self, median, sigma, generator, out);
+    }
+
+    // aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor
+    inline at::Tensor cauchy(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double median=0, double sigma=1, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::cauchy::redispatch(dispatchKeySet, self, median, sigma, generator);
+    }
+
+    // aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log_normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::log_normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out);
+    }
+
+    // aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & log_normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::log_normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out);
+    }
+
+    // aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor
+    inline at::Tensor log_normal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::log_normal::redispatch(dispatchKeySet, self, mean, std, generator);
+    }
+
+    // aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & exponential_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::exponential_out::redispatch(dispatchKeySet, self, lambd, generator, out);
+    }
+
+    // aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & exponential_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::exponential_out::redispatch(dispatchKeySet, self, lambd, generator, out);
+    }
+
+    // aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor
+    inline at::Tensor exponential(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double lambd=1, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::exponential::redispatch(dispatchKeySet, self, lambd, generator);
+    }
+
+    // aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & geometric_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::geometric_out::redispatch(dispatchKeySet, self, p, generator, out);
+    }
+
+    // aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & geometric_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::geometric_out::redispatch(dispatchKeySet, self, p, generator, out);
+    }
+
+    // aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor
+    inline at::Tensor geometric(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::geometric::redispatch(dispatchKeySet, self, p, generator);
+    }
+
+    // aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tril_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) {
+        return at::_ops::tril_indices_out::redispatch(dispatchKeySet, row, col, offset, out);
+    }
+
+    // aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & tril_indices_outf(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
+        return at::_ops::tril_indices_out::redispatch(dispatchKeySet, row, col, offset, out);
+    }
+
+    // aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & triu_indices_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, int64_t row, int64_t col, int64_t offset=0) {
+        return at::_ops::triu_indices_out::redispatch(dispatchKeySet, row, col, offset, out);
+    }
+
+    // aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & triu_indices_outf(c10::DispatchKeySet dispatchKeySet, int64_t row, int64_t col, int64_t offset, at::Tensor & out) {
+        return at::_ops::triu_indices_out::redispatch(dispatchKeySet, row, col, offset, out);
+    }
+
+    // aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & trace_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::trace_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & trace_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::trace_out::redispatch(dispatchKeySet, self, out);
+    }
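The random-sampling group pairs each `_out`/`_outf` with a functional spelling, all threading a `c10::optional<at::Generator>` that defaults to `c10::nullopt` (the default generator); factory-style ops such as `tril_indices.out` take no `self` at all. A combined sketch under the same namespace assumption (`fill_examples` is hypothetical):

```cpp
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>

// Illustrative helper: draw Exp(lambd=2) samples into `out` with the default
// generator, then write 4x4 lower-triangle indices into `idx`.
void fill_examples(c10::DispatchKeySet ks, const at::Tensor & self,
                   at::Tensor & out, at::Tensor & idx) {
    at::redispatch::exponential_out(ks, out, self, /*lambd=*/2.0);
    at::redispatch::tril_indices_out(ks, idx, /*row=*/4, /*col=*/4);
}
```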
+
+    // aten::_symeig_helper.out(Tensor self, bool eigenvectors, bool upper, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _symeig_helper_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, bool eigenvectors, bool upper) {
+        return at::_ops::_symeig_helper_out::redispatch(dispatchKeySet, self, eigenvectors, upper, out0, out1);
+    }
+
+    // aten::_symeig_helper.out(Tensor self, bool eigenvectors, bool upper, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &> _symeig_helper_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool eigenvectors, bool upper, at::Tensor & out0, at::Tensor & out1) {
+        return at::_ops::_symeig_helper_out::redispatch(dispatchKeySet, self, eigenvectors, upper, out0, out1);
+    }
+
+    // aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _cholesky_solve_helper_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & A, bool upper) {
+        return at::_ops::_cholesky_solve_helper_out::redispatch(dispatchKeySet, self, A, upper, out);
+    }
+
+    // aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _cholesky_solve_helper_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
+        return at::_ops::_cholesky_solve_helper_out::redispatch(dispatchKeySet, self, A, upper, out);
+    }
+
+    // aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & dist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p=2) {
+        return at::_ops::dist_out::redispatch(dispatchKeySet, self, other, p, out);
+    }
+
+    // aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & dist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, const at::Scalar & p, at::Tensor & out) {
+        return at::_ops::dist_out::redispatch(dispatchKeySet, self, other, p, out);
+    }
+
+    // aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
+    inline void _histogramdd_bin_edges_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::_histogramdd_bin_edges_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out);
+    }
+
+    // aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()
+    inline void _histogramdd_bin_edges_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
+        return at::_ops::_histogramdd_bin_edges_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out);
+    }
+
+    // aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _histogramdd_from_bin_cts_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::_histogramdd_from_bin_cts_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out);
+    }
+
+    // aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _histogramdd_from_bin_cts_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
+        return at::_ops::_histogramdd_from_bin_cts_out::redispatch(dispatchKeySet, self, bins, range, weight, density, out);
+    }
+
+    // aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _histogramdd_from_bin_tensors_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight={}, bool density=false) {
+        return at::_ops::_histogramdd_from_bin_tensors_out::redispatch(dispatchKeySet, self, bins, weight, density, out);
+    }
+
+    // aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _histogramdd_from_bin_tensors_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & out) {
+        return at::_ops::_histogramdd_from_bin_tensors_out::redispatch(dispatchKeySet, self, bins, weight, density, out);
+    }
+
+    // aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & remainder_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & other) {
+        return at::_ops::remainder_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & remainder_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & other, at::Tensor & out) {
+        return at::_ops::remainder_Scalar_Tensor_out::redispatch(dispatchKeySet, self, other, out);
+    }
+
+    // aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & argsort_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false) {
+        return at::_ops::argsort_stable_out::redispatch(dispatchKeySet, self, stable, dim, descending, out);
+    }
+
+    // aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & argsort_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out) {
+        return at::_ops::argsort_stable_out::redispatch(dispatchKeySet, self, stable, dim, descending, out);
+    }
+
+    // aten::unfold_backward.out(Tensor grad_in, int[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & unfold_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
+        return at::_ops::unfold_backward_out::redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step, out);
+    }
+
+    // aten::unfold_backward.out(Tensor grad_in, int[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & unfold_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out) {
+        return at::_ops::unfold_backward_out::redispatch(dispatchKeySet, grad_in, input_sizes, dim, size, step, out);
+    }
+
+    // aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & normal_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) {
+        return at::_ops::normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out);
+    }
+
+    // aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & normal_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out) {
+        return at::_ops::normal_out::redispatch(dispatchKeySet, self, mean, std, generator, out);
+    }
+
+    // aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+    inline void _amp_foreach_non_finite_check_and_unscale_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+        return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::redispatch(dispatchKeySet, self, found_inf, inv_scale, out);
+    }
+
+    // aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+    inline void _amp_foreach_non_finite_check_and_unscale_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
+        return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::redispatch(dispatchKeySet, self, found_inf, inv_scale, out);
+    }
+
+    // aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
+    inline ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
+        return at::_ops::_amp_foreach_non_finite_check_and_unscale::redispatch(dispatchKeySet, self, found_inf, inv_scale);
+    }
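Ops whose outputs are tensor lists return `void` and write through an `at::TensorList out` parameter, while the functional spelling returns freshly allocated results (here a `::std::vector<at::Tensor>` plus the updated `found_inf`). A sketch under the same `at::redispatch` namespace assumption (`unscale_grads` is hypothetical):

```cpp
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>

// Illustrative helper: the out variant mutates `found_inf` and writes the
// unscaled gradients through `out`; nothing is returned.
void unscale_grads(c10::DispatchKeySet ks, at::TensorList grads,
                   at::Tensor & found_inf, const at::Tensor & inv_scale,
                   at::TensorList out) {
    at::redispatch::_amp_foreach_non_finite_check_and_unscale_out(ks, out, grads, found_inf, inv_scale);
}
```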
+ // aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _amp_update_scale_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + return at::_ops::_amp_update_scale_out::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out); + } + + // aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _amp_update_scale_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) { + return at::_ops::_amp_update_scale_out::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out); + } + + // aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out) + inline ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { + return at::_ops::_amp_update_scale::redispatch(dispatchKeySet, self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); + } + + // aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_add_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_add_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_add_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_sub_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_sub_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_mul_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out)
{ + return at::_ops::_foreach_mul_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_div_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_div_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () + inline void _foreach_div_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & scalar, at::TensorList out) { + return at::_ops::_foreach_div_Scalar_out::redispatch(dispatchKeySet, self, scalar, out); + } + + // aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_add_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_add_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + return at::_ops::_foreach_add_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_sub_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_sub_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> () + inline void _foreach_sub_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) { + return at::_ops::_foreach_sub_List_out::redispatch(dispatchKeySet, self, other, alpha, out); + } + + // aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_mul_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_mul_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_div_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_div_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_div_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_div_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] 
scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_add_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_add_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_add_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { + return at::_ops::_foreach_add_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_sub_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_sub_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { + return at::_ops::_foreach_sub_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_div_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_div_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_div_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { + return at::_ops::_foreach_div_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_mul_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_mul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { + return at::_ops::_foreach_mul_ScalarList_out::redispatch(dispatchKeySet, self, scalars, out); + } + + // aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_exp_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_exp_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_exp_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_zero_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_zero_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList
self, at::TensorList out) { + return at::_ops::_foreach_zero_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out + inline ::std::vector<at::Tensor> _foreach_zero(c10::DispatchKeySet dispatchKeySet, at::TensorList self) { + return at::_ops::_foreach_zero::redispatch(dispatchKeySet, self); + } + + // aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sqrt_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sqrt_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sqrt_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_abs_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_abs_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_abs_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_acos_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_acos_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_acos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_asin_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_asin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_asin_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_asin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_atan_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_atan_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_atan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_ceil_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_ceil_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_ceil_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_ceil_out::redispatch(dispatchKeySet, self, out); + } + + //
aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cos_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cos_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_cos_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cosh_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_cosh_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_cosh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erf_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erf_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_erf_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erfc_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_erfc_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_erfc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_expm1_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_expm1_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_expm1_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_floor_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_floor_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_floor_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_floor_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void 
_foreach_log_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log10_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log10_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log10_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log10_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log1p_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log1p_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log1p_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log2_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_log2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_log2_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_log2_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_neg_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_neg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_neg_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_neg_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tan_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_tan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tan_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_tan_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tanh_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_tanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_tanh_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_tanh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sin_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return 
at::_ops::_foreach_sin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sin_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sin_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sinh_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sinh_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_sinh_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_round_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_round_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_round_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_round_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_lgamma_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_lgamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_lgamma_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_lgamma_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_frac_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_frac_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_frac_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_frac_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_reciprocal_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_reciprocal_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_reciprocal_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_reciprocal_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sigmoid_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_sigmoid_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return 
at::_ops::_foreach_sigmoid_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_trunc_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_trunc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> () + inline void _foreach_trunc_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_trunc_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcdiv_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + return at::_ops::_foreach_addcdiv_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value=1) { + return at::_ops::_foreach_addcmul_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) { + return at::_ops::_foreach_addcmul_Scalar_out::redispatch(dispatchKeySet, self, tensor1, tensor2, value, out); + } + + // aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_addcdiv_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcdiv_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { + return at::_ops::_foreach_addcdiv_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList tensor1,
at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) { + return at::_ops::_foreach_addcmul_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> () + inline void _foreach_addcmul_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) { + return at::_ops::_foreach_addcmul_ScalarList_out::redispatch(dispatchKeySet, self, tensor1, tensor2, scalars, out); + } + + // aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_maximum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_maximum_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_maximum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList other) { + return at::_ops::_foreach_minimum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> () + inline void _foreach_minimum_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList other, at::TensorList out) { + return at::_ops::_foreach_minimum_List_out::redispatch(dispatchKeySet, self, other, out); + } + + // aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> () + inline void _foreach_norm_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, const at::Scalar & ord=2) { + return at::_ops::_foreach_norm_Scalar_out::redispatch(dispatchKeySet, self, ord, out); + } + + // aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> () + inline void _foreach_norm_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, at::TensorList out) { + return at::_ops::_foreach_norm_Scalar_out::redispatch(dispatchKeySet, self, ord, out); + } + + // aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bucketize_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false) { + return at::_ops::bucketize_Scalar_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out); + } + + // aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & bucketize_outf(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out) { + return at::_ops::bucketize_Scalar_out::redispatch(dispatchKeySet, self, boundaries, out_int32, right, out); + }
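// --- Editor's note (sketch, not part of the generated header) ---------------
// bucketize.Scalar_out bins a single scalar against a sorted 1-D boundary
// tensor and writes the bucket index into a 0-dim integer tensor (int64
// unless out_int32=true). A sketch, assuming the at::redispatch namespace;
// the function name is hypothetical:
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>

inline at::Tensor bucketize_sketch() {
  at::Tensor boundaries = at::arange(4, at::kFloat);  // [0, 1, 2, 3]
  at::Tensor out = at::empty({}, at::kLong);
  c10::DispatchKeySet ks(c10::DispatchKey::CPU);
  at::redispatch::bucketize_out(ks, out, at::Scalar(1.5), boundaries);
  return out;  // 2 with right=false, since boundaries[1]=1 < 1.5 <= boundaries[2]=2
}
// -----------------------------------------------------------------------------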
+ // aten::_torch_cuda_cu_linker_symbol_op.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _torch_cuda_cu_linker_symbol_op_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) { + return at::_ops::_torch_cuda_cu_linker_symbol_op_out::redispatch(dispatchKeySet, self, out); + } + + // aten::_torch_cuda_cu_linker_symbol_op.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _torch_cuda_cu_linker_symbol_op_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_torch_cuda_cu_linker_symbol_op_out::redispatch(dispatchKeySet, self, out); + } + + // aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & searchsorted_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={}) { + return at::_ops::searchsorted_Scalar_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out); + } + + // aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & searchsorted_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out) { + return at::_ops::searchsorted_Scalar_out::redispatch(dispatchKeySet, sorted_sequence, self, out_int32, right, side, sorter, out); + } + + // aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_jvp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_jvp_out::redispatch(dispatchKeySet, glu, x, dx, dim, out); + } + + // aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_jvp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) { + return at::_ops::glu_jvp_out::redispatch(dispatchKeySet, glu, x, dx, dim, out); + } + + // aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_backward_jvp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) { + return at::_ops::glu_backward_jvp_out::redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim, out); + } + + // aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & glu_backward_jvp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim, at::Tensor & out) { + return at::_ops::glu_backward_jvp_out::redispatch(dispatchKeySet, grad_x, grad_glu, x, dgrad_glu, dx, dim, out); + } + + // aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & hardswish_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::hardswish_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & hardswish_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::hardswish_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) { + return at::_ops::rrelu_with_noise_backward_out::redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result, out); + } + + // aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & rrelu_with_noise_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result, at::Tensor & out) { + return at::_ops::rrelu_with_noise_backward_out::redispatch(dispatchKeySet, grad_output, self, noise, lower, upper, training, self_is_result, out); + } + + // aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & mkldnn_adaptive_avg_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::mkldnn_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, c10::fromIntArrayRef(output_size), out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool2d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool2d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::_adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool3d_out::redispatch(dispatchKeySet, self, output_size, out); + } + + // aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::_adaptive_avg_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + } + + // aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _adaptive_avg_pool3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) { + return at::_ops::_adaptive_avg_pool3d_backward_out::redispatch(dispatchKeySet, grad_output, self, out); + }
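// --- Editor's note (sketch, not part of the generated header) ---------------
// Ops whose schema uses SymInt are emitted in two surface forms: a plain
// IntArrayRef overload that converts through c10::fromIntArrayRef, and a
// `_symint` overload that forwards c10::SymIntArrayRef untouched, for
// symbolic shapes under tracing. Both reach the same redispatch entry point.
// A sketch, assuming the at::redispatch namespace; the function name is
// hypothetical:
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>
#include <vector>

inline void adaptive_pool_sketch(const at::Tensor & self /* NCHW */) {
  at::Tensor out = at::empty({0}, self.options());
  c10::DispatchKeySet ks(c10::DispatchKey::CPU);
  // concrete sizes: the IntArrayRef overload wraps them into SymInts
  at::redispatch::_adaptive_avg_pool2d_out(ks, out, self, {7, 7});
  // already-symbolic sizes: the _symint overload forwards them as-is
  std::vector<c10::SymInt> sym_size{c10::SymInt(7), c10::SymInt(7)};
  at::redispatch::_adaptive_avg_pool2d_symint_out(ks, out, self, sym_size);
}
// -----------------------------------------------------------------------------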
+ // aten::upsample_linear1d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_linear1d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::upsample_linear1d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_linear1d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::upsample_linear1d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_linear1d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + } + + // aten::upsample_linear1d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_linear1d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + } + + // aten::upsample_linear1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_linear1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out); + } + + // aten::upsample_linear1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_linear1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out); + } + + // aten::upsample_linear1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & upsample_linear1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_linear1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out); + } + + // aten::upsample_linear1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_linear1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_linear1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out); + } + + // aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_bilinear2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_bilinear2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_bilinear2d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + } + + // aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_bilinear2d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + }
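// --- Editor's note (sketch, not part of the generated header) ---------------
// The .vec upsample overloads accept either an explicit output_size or a set
// of per-dimension scale_factors; exactly one of the two should be provided,
// with the other passed as c10::nullopt. A sketch, assuming the
// at::redispatch namespace; the function name is hypothetical:
#include <ATen/ATen.h>
#include <ATen/RedispatchFunctions.h>
#include <array>

inline void upsample_sketch(const at::Tensor & input /* NCHW */) {
  at::Tensor out = at::empty({0}, input.options());
  c10::DispatchKeySet ks(c10::DispatchKey::CPU);
  // double the spatial extent via scale factors, no fixed output size
  std::array<double, 2> sf{2.0, 2.0};
  at::redispatch::upsample_bilinear2d_out(
      ks, out, input, /*output_size=*/c10::nullopt,
      /*align_corners=*/false, at::ArrayRef<double>(sf));
}
// -----------------------------------------------------------------------------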
+ // aten::upsample_bilinear2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_bilinear2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out); + } + + // aten::upsample_bilinear2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_bilinear2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out); + } + + // aten::upsample_bilinear2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_bilinear2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out); + } + + // aten::upsample_bilinear2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_bilinear2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_bilinear2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::_upsample_bilinear2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _upsample_bilinear2d_aa_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::_upsample_bilinear2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::_upsample_bilinear2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::_upsample_bilinear2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::_upsample_bilinear2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::_upsample_bilinear2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::_upsample_bilinear2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out); + } + + // aten::_upsample_bilinear2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _upsample_bilinear2d_aa_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::_upsample_bilinear2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out); + } + + // aten::upsample_trilinear3d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_trilinear3d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::upsample_trilinear3d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_trilinear3d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out); + } + + // aten::upsample_trilinear3d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) { + return at::_ops::upsample_trilinear3d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + } + + // aten::upsample_trilinear3d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & upsample_trilinear3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) { + return at::_ops::upsample_trilinear3d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out); + } + + // aten::upsample_trilinear3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_trilinear3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_trilinear3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_trilinear3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_trilinear3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_trilinear3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_trilinear3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_trilinear3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_trilinear3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_bicubic2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_bicubic2d_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_bicubic2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_bicubic2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_bicubic2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_bicubic2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_bicubic2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out);
+    }
+
+    // aten::_upsample_bicubic2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out);
+    }
+
+    // aten::_upsample_bicubic2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_bicubic2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, align_corners, scale_factors, out);
+    }
+
+    // aten::_upsample_bicubic2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out);
+    }
+
+    // aten::_upsample_bicubic2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_bicubic2d_aa_vec_out::redispatch(dispatchKeySet, input, output_size, align_corners, scale_factors, out);
+    }
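+
+    // Editorial sketch, not generated output: the pattern above is worth spelling out once.
+    // Every `.vec_out` schema takes SymInt sizes, so the IntArrayRef overloads bridge into the
+    // SymInt world by wrapping sizes with c10::fromIntArrayRef(...) (and c10::make_optional for
+    // the nullable output_size), while the `_symint_` overloads forward their
+    // OptionalSymIntArrayRef / SymIntArrayRef arguments through unchanged. Both spellings land
+    // on the same at::_ops::*_vec_out::redispatch entry. Illustrative call, assuming this
+    // header's at::redispatch namespace and a valid c10::DispatchKeySet `ks`:
+    //
+    //   at::Tensor input = at::randn({1, 1, 4, 4});
+    //   at::Tensor out = at::empty({1, 1, 8, 8});
+    //   // concrete int sizes, converted internally to SymInt before redispatching:
+    //   at::redispatch::upsample_bicubic2d_out(ks, out, input, at::OptionalIntArrayRef({8, 8}),
+    //                                          /*align_corners=*/false, c10::nullopt);
+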
+
+    // aten::_upsample_bicubic2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out);
+    }
+
+    // aten::_upsample_bicubic2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_bicubic2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), align_corners, scale_factors, out);
+    }
+
+    // aten::_upsample_bicubic2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_bicubic2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out);
+    }
+
+    // aten::_upsample_bicubic2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_bicubic2d_aa_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_bicubic2d_aa_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, align_corners, scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest1d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest1d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact1d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact1d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact1d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest2d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact2d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact2d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact2d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest3d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest3d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact3d_vec_out::redispatch(dispatchKeySet, input, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact3d_vec_out::redispatch(dispatchKeySet, input, output_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::upsample_nearest3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::upsample_nearest3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & upsample_nearest3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::upsample_nearest3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalIntArrayRef output_size, at::IntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRef(*output_size)) : c10::nullopt, c10::fromIntArrayRef(input_size), scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_symint_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors) {
+        return at::_ops::_upsample_nearest_exact3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::_upsample_nearest_exact3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, at::OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<at::ArrayRef<double>> scale_factors, at::Tensor & out) {
+        return at::_ops::_upsample_nearest_exact3d_backward_vec_out::redispatch(dispatchKeySet, grad_output, output_size, input_size, scale_factors, out);
+    }
+
+    // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask) {
+        return at::_ops::_slow_conv2d_backward_output_mask_out::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
+    }
+
+    // aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+    inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _slow_conv2d_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
+        return at::_ops::_slow_conv2d_backward_output_mask_out::redispatch(dispatchKeySet, grad_output, self, weight, kernel_size, stride, padding, output_mask, out0, out1, out2);
+    }
+
+    // aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & conv_depthwise3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation) {
+        return at::_ops::conv_depthwise3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & conv_depthwise3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
+        return at::_ops::conv_depthwise3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv_dilated2d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
+        return at::_ops::slow_conv_dilated2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv_dilated2d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
+        return at::_ops::slow_conv_dilated2d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv_dilated3d_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1) {
+        return at::_ops::slow_conv_dilated3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & slow_conv_dilated3d_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out) {
+        return at::_ops::slow_conv_dilated3d_out::redispatch(dispatchKeySet, self, weight, kernel_size, bias, stride, padding, dilation, out);
+    }
+
+    // aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isinf_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::isinf_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & isinf_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::isinf_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_exp_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::linalg_matrix_exp_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & linalg_matrix_exp_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::linalg_matrix_exp_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_optional_intlist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
+        return at::_ops::_test_optional_intlist_out::redispatch(dispatchKeySet, values, addends, out);
+    }
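+
+    // Editorial sketch, not generated output: every out-variant schema in this file is exposed
+    // twice with identical behaviour and different argument order. `*_out` takes the destination
+    // tensor first; `*_outf` takes it last, matching the schema's trailing `Tensor(a!) out`.
+    // Both forward to the same at::_ops::...::redispatch call. Illustrative usage, assuming a
+    // valid c10::DispatchKeySet `ks`:
+    //
+    //   at::Tensor x = at::randn({4});
+    //   at::Tensor out = at::empty_like(x, x.options().dtype(at::kBool));
+    //   at::redispatch::isinf_out(ks, out, x);    // destination-first spelling
+    //   at::redispatch::isinf_outf(ks, x, out);   // schema-order spelling; same redispatch
+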
+
+    // aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_optional_intlist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
+        return at::_ops::_test_optional_intlist_out::redispatch(dispatchKeySet, values, addends, out);
+    }
+
+    // aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_optional_filled_intlist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & values, at::OptionalIntArrayRef addends) {
+        return at::_ops::_test_optional_filled_intlist_out::redispatch(dispatchKeySet, values, addends, out);
+    }
+
+    // aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_optional_filled_intlist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out) {
+        return at::_ops::_test_optional_filled_intlist_out::redispatch(dispatchKeySet, values, addends, out);
+    }
+
+    // aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_optional_floatlist_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
+        return at::_ops::_test_optional_floatlist_out::redispatch(dispatchKeySet, values, addends, out);
+    }
+
+    // aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_optional_floatlist_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out) {
+        return at::_ops::_test_optional_floatlist_out::redispatch(dispatchKeySet, values, addends, out);
+    }
+
+    // aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_warn_in_autograd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::_test_warn_in_autograd_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_warn_in_autograd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::_test_warn_in_autograd_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_autograd_multiple_dispatch_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_autograd_multiple_dispatch_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::_test_autograd_multiple_dispatch_fullcoverage_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_autograd_multiple_dispatch_view_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::_test_autograd_multiple_dispatch_view_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _test_autograd_multiple_dispatch_view_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::_test_autograd_multiple_dispatch_view_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & segment_reduce_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & indices={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, bool unsafe=false, const c10::optional<at::Scalar> & initial=c10::nullopt) {
+        return at::_ops::segment_reduce_out::redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
+    }
+
+    // aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & segment_reduce_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
+        return at::_ops::segment_reduce_out::redispatch(dispatchKeySet, data, reduce, lengths, indices, offsets, axis, unsafe, initial, out);
+    }
+
+    // aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _segment_reduce_backward_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths={}, const c10::optional<at::Tensor> & offsets={}, int64_t axis=0, const c10::optional<at::Scalar> & initial=c10::nullopt) {
+        return at::_ops::_segment_reduce_backward_out::redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial, out);
+    }
+
+    // aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _segment_reduce_backward_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial, at::Tensor & out) {
+        return at::_ops::_segment_reduce_backward_out::redispatch(dispatchKeySet, grad, output, data, reduce, lengths, offsets, axis, initial, out);
+    }
+
+    // aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _nested_tensor_from_tensor_list_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt) {
+        return at::_ops::_nested_tensor_from_tensor_list_out::redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory, out);
+    }
+
+    // aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _nested_tensor_from_tensor_list_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out) {
+        return at::_ops::_nested_tensor_from_tensor_list_out::redispatch(dispatchKeySet, list, dtype, layout, device, pin_memory, out);
+    }
+
+    // aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & ccol_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::ccol_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & ccol_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::ccol_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & row_indices_copy_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self) {
+        return at::_ops::row_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & row_indices_copy_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out) {
+        return at::_ops::row_indices_copy_out::redispatch(dispatchKeySet, self, out);
+    }
+
+    // aten::to_padded_tensor.out(Tensor self, float padding, int[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & to_padded_tensor_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt) {
+        return at::_ops::to_padded_tensor_out::redispatch(dispatchKeySet, self, padding, output_size, out);
+    }
+
+    // aten::to_padded_tensor.out(Tensor self, float padding, int[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & to_padded_tensor_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size, at::Tensor & out) {
+        return at::_ops::to_padded_tensor_out::redispatch(dispatchKeySet, self, padding, output_size, out);
+    }
+
+    // aten::_nested_tensor_layer_norm.out(Tensor self, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out) -> Tensor(a!)
+    inline at::Tensor & _nested_tensor_layer_norm_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
+        return at::_ops::_nested_tensor_layer_norm_out::redispatch(dispatchKeySet, self, weight, bias, eps, out);
+    }
+
+    // aten::_nested_tensor_layer_norm.out(Tensor self, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & _nested_tensor_layer_norm_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const c10::optional & weight, const c10::optional & bias, double eps, at::Tensor & out) { + return at::_ops::_nested_tensor_layer_norm_out::redispatch(dispatchKeySet, self, weight, bias, eps, out); + } + + // aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _transformer_encoder_layer_fwd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask={}, c10::optional mask_type=c10::nullopt) { + return at::_ops::_transformer_encoder_layer_fwd_out::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out); + } + + // aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _transformer_encoder_layer_fwd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type, at::Tensor & out) { + return at::_ops::_transformer_encoder_layer_fwd_out::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out); + } + + // aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, bool need_weights=true, bool average_attn_weights=true, c10::optional<int64_t> mask_type=c10::nullopt) { + return at::_ops::_native_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1); + } + + // aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!)) + inline ::std::tuple<at::Tensor &,at::Tensor &> _native_multi_head_attention_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, at::Tensor & out0, at::Tensor & out1) { + return at::_ops::_native_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type, out0, out1); + } + + // aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _triton_scaled_dot_attention_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) { + return at::_ops::_triton_scaled_dot_attention_out::redispatch(dispatchKeySet, q, k, v, dropout_p, out); + } + + // aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _triton_scaled_dot_attention_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) { + return at::_ops::_triton_scaled_dot_attention_out::redispatch(dispatchKeySet, q, k, v, dropout_p, out); + } + + // aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!) 
+ inline at::Tensor & _triton_multi_head_attention_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}) { + return at::_ops::_triton_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out); + } + + // aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _triton_multi_head_attention_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) { + return at::_ops::_triton_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out); + } + + // aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}) { + return at::_ops::_transformer_decoder_only_layer_fwd_out::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2); + } + + // aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _transformer_decoder_only_layer_fwd_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) { + return at::_ops::_transformer_decoder_only_layer_fwd_out::redispatch(dispatchKeySet, src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, incr_key, incr_value, out0, out1, out2); + } + + // aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}, const c10::optional<at::Tensor> & incr_key={}, const c10::optional<at::Tensor> & incr_value={}, bool need_weights=true, bool average_attn_weights=true) { + return at::_ops::_native_decoder_only_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights, out0, out1, out2, out3); + } + + // aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!)) + inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> _native_decoder_only_multi_head_attention_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, const c10::optional<at::Tensor> & incr_key, const c10::optional<at::Tensor> & incr_value, bool need_weights, bool average_attn_weights, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) { + return at::_ops::_native_decoder_only_multi_head_attention_out::redispatch(dispatchKeySet, query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, incr_key, incr_value, need_weights, average_attn_weights, out0, out1, out2, out3); + } + + // aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _foobar_out(c10::DispatchKeySet dispatchKeySet, at::Tensor & out, const at::Tensor & self, bool arg1=true, bool arg2=true, bool arg3=true) { + return at::_ops::_foobar_out::redispatch(dispatchKeySet, self, arg1, arg2, arg3, out); + } + + // aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!) + inline at::Tensor & _foobar_outf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool arg1, bool arg2, bool arg3, at::Tensor & out) { + return at::_ops::_foobar_out::redispatch(dispatchKeySet, self, arg1, arg2, arg3, out); + } + + // aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adam_out(c10::DispatchKeySet dispatchKeySet, at::TensorList out, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_adam_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? 
found_inf=None, Tensor(a!)[] out) -> () + inline void _fused_adam_outf(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) { + return at::_ops::_fused_adam_out::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out); + } + + // aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out) + inline ::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale={}, const c10::optional<at::Tensor> & found_inf={}) { + return at::_ops::_fused_adam::redispatch(dispatchKeySet, self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); + } +} // namespace redispatch + +} diff --git a/voice_bridge/torch/include/ATen/RegistrationDeclarations.h b/voice_bridge/torch/include/ATen/RegistrationDeclarations.h new file mode 100644 index 0000000000000000000000000000000000000000..203ea7064791641b6ef74c5064e9ad66872bc2ab --- /dev/null +++ b/voice_bridge/torch/include/ATen/RegistrationDeclarations.h @@ -0,0 +1,2955 @@ +// This file contains all native_functions that can be registered to +// and the schema string that they should be registered with + 
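+// ---------------------------------------------------------------------------
+// Illustrative sketch only (not part of the generated header): how one of the
+// declarations below pairs with its schema string at registration time. The
+// kernel name `my_abs_cpu` is hypothetical; the real registrations are emitted
+// by PyTorch's code generator, this merely shows the shape of the mechanism.
+//
+//   #include <torch/library.h>
+//
+//   // Matches the declaration: Tensor abs(const Tensor & self);
+//   // and its schema string:   "aten::abs(Tensor self) -> Tensor"
+//   at::Tensor my_abs_cpu(const at::Tensor & self) {
+//     at::Tensor out = self.clone(); // placeholder body for illustration
+//     return out.abs_();             // in-place abs on the copy
+//   }
+//
+//   TORCH_LIBRARY_IMPL(aten, CPU, m) {
+//     m.impl("abs", TORCH_FN(my_abs_cpu)); // register against the schema above
+//   }
+// ---------------------------------------------------------------------------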
+Tensor _cast_Byte(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Char(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Double(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Float(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Int(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Long(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Short(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _cast_Half(const Tensor & self, bool non_blocking); // {"schema": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +void _backward(const Tensor & self, TensorList inputs, const c10::optional<Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph); // {"schema": "aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()", "dispatch": "False", "default": "True"} +void set_data(Tensor & self, const Tensor & new_data); // {"schema": "aten::set_data(Tensor(a!) self, Tensor new_data) -> ()", "dispatch": "False", "default": "True"} +Tensor data(const Tensor & self); // {"schema": "aten::data(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +bool is_leaf(const Tensor & self); // {"schema": "aten::is_leaf(Tensor self) -> bool", "dispatch": "False", "default": "True"} +int64_t output_nr(const Tensor & self); // {"schema": "aten::output_nr(Tensor self) -> int", "dispatch": "False", "default": "True"} +int64_t _version(const Tensor & self); // {"schema": "aten::_version(Tensor self) -> int", "dispatch": "False", "default": "True"} +Tensor & requires_grad_(Tensor & self, bool requires_grad); // {"schema": "aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)", "dispatch": "False", "default": "True"} +void retain_grad(Tensor & self); // {"schema": "aten::retain_grad(Tensor(a!) self) -> ()", "dispatch": "False", "default": "True"} +bool retains_grad(const Tensor & self); // {"schema": "aten::retains_grad(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor _fw_primal(const Tensor & self, int64_t level); // {"schema": "aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor _make_dual(const Tensor & primal, const Tensor & tangent, int64_t level); // {"schema": "aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor,Tensor> _unpack_dual(const Tensor & dual, int64_t level); // {"schema": "aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)", "dispatch": "False", "default": "True"} +Tensor _new_zeros_with_same_feature_meta(const Tensor & self, const Tensor & other, int64_t self_num_batch_dims); // {"schema": "aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor", "dispatch": "True", "default": "True"} +bool _has_same_storage_numel(const Tensor & self, const Tensor & other); // {"schema": "aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "True"} +Tensor & rename_(Tensor & self, c10::optional<DimnameList> names); // {"schema": "aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rename(const Tensor & self, c10::optional<DimnameList> names); // {"schema": "aten::rename(Tensor(a) self, Dimname[]? 
names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_to(const Tensor & self, DimnameList names); // {"schema": "aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_to(const Tensor & self, DimnameList order, int64_t ellipsis_idx); // {"schema": "aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor align_as(const Tensor & self, const Tensor & other); // {"schema": "aten::align_as(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector align_tensors(TensorList tensors); // {"schema": "aten::align_tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +void _assert_async(const Tensor & self); // {"schema": "aten::_assert_async(Tensor self) -> ()", "dispatch": "True", "default": "False"} +void _assert_tensor_metadata(const Tensor & a, OptionalIntArrayRef size, OptionalIntArrayRef stride, c10::optional dtype); // {"schema": "aten::_assert_tensor_metadata(Tensor a, int[]? size=None, int[]? stride=None, ScalarType? dtype=None) -> ()", "dispatch": "False", "default": "True"} +Tensor refine_names(const Tensor & self, DimnameList names); // {"schema": "aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank); // {"schema": "aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool", "dispatch": "True", "default": "False"} +bool _use_cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank); // {"schema": "aten::_use_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank) -> bool", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity); // {"schema": "aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity); // {"schema": "aten::_cudnn_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +bool _use_cudnn_rnn_flatten_weight(); // {"schema": "aten::_use_cudnn_rnn_flatten_weight() -> bool", "dispatch": "False", "default": "True"} +Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional); // {"schema": "aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor", "dispatch": "True", "default": "False"} 
+::std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const c10::optional<Tensor> & weight_buf, const Tensor & hx, const c10::optional<Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<Tensor> & dropout_state); // {"schema": "aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor,::std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional<Tensor> & cx, const Tensor & output, const c10::optional<Tensor> & grad_output, const c10::optional<Tensor> & grad_hy, const c10::optional<Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<Tensor> & dropout_state, const Tensor & reserve, ::std::array<bool,4> output_mask); // {"schema": "aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", "dispatch": "True", "default": "False"} +Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "False"} +int64_t _debug_has_internal_overlap(const Tensor & self); // {"schema": "aten::_debug_has_internal_overlap(Tensor self) -> int", "dispatch": "False", "default": "True"} +::std::tuple<Tensor,Tensor> _fused_dropout(const Tensor & self, double p, c10::optional<Generator> generator); // {"schema": "aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale); // {"schema": "aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> native_dropout(const Tensor & input, double p, c10::optional<bool> train); // {"schema": "aten::native_dropout(Tensor input, float p, bool? 
train) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor native_dropout_backward(const Tensor & grad_output, const Tensor & mask, double scale); // {"schema": "aten::native_dropout_backward(Tensor grad_output, Tensor mask, float scale) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional dtype); // {"schema": "aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated); // {"schema": "aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension); // {"schema": "aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension); // {"schema": "aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape); // {"schema": "aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _shape_as_tensor(const Tensor & self); // {"schema": "aten::_shape_as_tensor(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor dropout(const Tensor & input, double p, bool train); // {"schema": "aten::dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & dropout_(Tensor & self, double p, bool train); // {"schema": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor feature_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & feature_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor alpha_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & alpha_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor feature_alpha_dropout(const Tensor & input, double p, bool train); // {"schema": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train); // {"schema": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor abs(const Tensor & self); // {"schema": "aten::abs(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & abs_(Tensor & self); // {"schema": "aten::abs_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & abs_out(const Tensor & self, Tensor & out); // {"schema": "aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor absolute(const Tensor & self); // {"schema": "aten::absolute(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & absolute_(Tensor & self); // {"schema": "aten::absolute_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & absolute_out(const Tensor & self, Tensor & out); // {"schema": "aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor angle(const Tensor & self); // {"schema": "aten::angle(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & angle_out(const Tensor & self, Tensor & out); // {"schema": "aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor view_as_real(const Tensor & self); // {"schema": "aten::view_as_real(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor view_as_complex(const Tensor & self); // {"schema": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor sgn(const Tensor & self); // {"schema": "aten::sgn(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sgn_(Tensor & self); // {"schema": "aten::sgn_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sgn_out(const Tensor & self, Tensor & out); // {"schema": "aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor chalf(const Tensor & self, c10::optional memory_format); // {"schema": "aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor real(const Tensor & self); // {"schema": "aten::real(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor imag(const Tensor & self); // {"schema": "aten::imag(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _conj(const Tensor & self); // {"schema": "aten::_conj(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor conj(const Tensor & self); // {"schema": "aten::conj(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _conj_physical(const Tensor & self); // {"schema": "aten::_conj_physical(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor conj_physical(const Tensor & self); // {"schema": "aten::conj_physical(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & conj_physical_out(const Tensor & self, Tensor & out); // {"schema": "aten::conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & conj_physical_(Tensor & self); // {"schema": "aten::conj_physical_(Tensor(a!) 
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resolve_conj(const Tensor & self); // {"schema": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor resolve_neg(const Tensor & self); // {"schema": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _neg_view(const Tensor & self); // {"schema": "aten::_neg_view(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor acos(const Tensor & self); // {"schema": "aten::acos(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & acos_(Tensor & self); // {"schema": "aten::acos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & acos_out(const Tensor & self, Tensor & out); // {"schema": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arccos(const Tensor & self); // {"schema": "aten::arccos(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arccos_(Tensor & self); // {"schema": "aten::arccos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arccos_out(const Tensor & self, Tensor & out); // {"schema": "aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad); // {"schema": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor add(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & add_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & add_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_relu(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _add_relu_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & _add_relu_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _add_relu(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _add_relu_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor add(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & add_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addmv_out(const Tensor & self, const Tensor & mat, const Tensor & vec, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addr_out(const Tensor & self, const Tensor & vec1, const Tensor & vec2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor affine_grid_generator(const Tensor & theta, IntArrayRef size, bool align_corners); // {"schema": "aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor", "dispatch": "True", "default": "True"} +Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners); // {"schema": "aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor", "dispatch": "False", "default": "True"} +Tensor all(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & all_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out); // {"schema": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor all(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & all_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & out); // {"schema": "aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +bool allclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan); // {"schema": "aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool", "dispatch": "True", "default": "True"} +Tensor any(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & any_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out); // {"schema": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor any(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & any_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & out); // {"schema": "aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor arange(const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arange(const Scalar & start, const Scalar & end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arange(const Scalar & start, const Scalar & end, const Scalar & step, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & arange_out(const Scalar & end, Tensor & out); // {"schema": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & arange_out(const Scalar & start, const Scalar & end, const Scalar & step, Tensor & out); // {"schema": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _dim_arange(const Tensor & like, int64_t dim); // {"schema": "aten::_dim_arange(Tensor like, int dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argmax(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::argmax(Tensor self, int? 
dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & argmax_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & out); // {"schema": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor argmin(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & argmin_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & out); // {"schema": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor acosh(const Tensor & self); // {"schema": "aten::acosh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & acosh_(Tensor & self); // {"schema": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & acosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arccosh(const Tensor & self); // {"schema": "aten::arccosh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arccosh_(Tensor & self); // {"schema": "aten::arccosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arccosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor asinh(const Tensor & self); // {"schema": "aten::asinh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & asinh_(Tensor & self); // {"schema": "aten::asinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & asinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arcsinh(const Tensor & self); // {"schema": "aten::arcsinh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arcsinh_(Tensor & self); // {"schema": "aten::arcsinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arcsinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atanh(const Tensor & self); // {"schema": "aten::atanh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atanh_(Tensor & self); // {"schema": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & atanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arctanh(const Tensor & self); // {"schema": "aten::arctanh(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctanh_(Tensor & self); // {"schema": "aten::arctanh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::arctanh.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor as_strided(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)", "dispatch": "True", "default": "False"} +const Tensor & as_strided_(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor asin(const Tensor & self); // {"schema": "aten::asin(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & asin_(Tensor & self); // {"schema": "aten::asin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & asin_out(const Tensor & self, Tensor & out); // {"schema": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arcsin(const Tensor & self); // {"schema": "aten::arcsin(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arcsin_(Tensor & self); // {"schema": "aten::arcsin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arcsin_out(const Tensor & self, Tensor & out); // {"schema": "aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atan(const Tensor & self); // {"schema": "aten::atan(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atan_(Tensor & self); // {"schema": "aten::atan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & atan_out(const Tensor & self, Tensor & out); // {"schema": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor arctan(const Tensor & self); // {"schema": "aten::arctan(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctan_(Tensor & self); // {"schema": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctan_out(const Tensor & self, Tensor & out); // {"schema": "aten::arctan.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor atleast_1d(const Tensor & self); // {"schema": "aten::atleast_1d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_1d(TensorList tensors); // {"schema": "aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor atleast_2d(const Tensor & self); // {"schema": "aten::atleast_2d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_2d(TensorList tensors); // {"schema": "aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor atleast_3d(const Tensor & self); // {"schema": "aten::atleast_3d(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector atleast_3d(TensorList tensors); // {"schema": "aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & baddbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bartlett_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bartlett_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_batch_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point); // {"schema": "aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor,Tensor,int64_t> _batch_norm_impl_index(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor,Tensor,Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, const c10::optional<Tensor> & save_mean, const c10::optional<Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const Tensor & reservedSpace); // {"schema": "aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor bernoulli(const Tensor & self, c10::optional<Generator> generator); // {"schema": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bernoulli_out(const Tensor & self, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bernoulli_(Tensor & self, const Tensor & p, c10::optional<Generator> generator); // {"schema": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bernoulli_(Tensor & self, double p, c10::optional<Generator> generator); // {"schema": "aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bernoulli(const Tensor & self, double p, c10::optional<Generator> generator); // {"schema": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const c10::optional<Tensor> & bias); // {"schema": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & binary_cross_entropy_out(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, Tensor & out); // {"schema": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & binary_cross_entropy_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, Tensor & grad_input); // {"schema": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction); // {"schema": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bincount(const Tensor & self, const c10::optional & weights, int64_t minlength); // {"schema": "aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor bitwise_not(const Tensor & self); // {"schema": "aten::bitwise_not(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_not_(Tensor & self); // {"schema": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_not_out(const Tensor & self, Tensor & out); // {"schema": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & copysign_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor copysign(const Tensor & self, const Tensor & other); // {"schema": "aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copysign_(Tensor & self, const Tensor & other); // {"schema": "aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor copysign(const Tensor & self, const Scalar & other); // {"schema": "aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & copysign_(Tensor & self, const Scalar & other); // {"schema": "aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & copysign_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logical_not(const Tensor & self); // {"schema": "aten::logical_not(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_not_(Tensor & self); // {"schema": "aten::logical_not_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_not_out(const Tensor & self, Tensor & out); // {"schema": "aten::logical_not.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_xor(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_xor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_xor_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_xor_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_and(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_and(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_and_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_and_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logical_or(const Tensor & self, const Tensor & other); // {"schema": "aten::logical_or(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logical_or_(Tensor & self, const Tensor & other); // {"schema": "aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & logical_or_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor blackman_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor blackman_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bmm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::bmm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bmm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector broadcast_tensors(TensorList tensors); // {"schema": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor broadcast_to(const Tensor & self, IntArrayRef size); // {"schema": "aten::broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _sparse_broadcast_to(const Tensor & self, IntArrayRef size); // {"schema": "aten::_sparse_broadcast_to(Tensor(a) self, int[] size) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor cat(const ITensorListRef & tensors, int64_t dim); // {"schema": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cat_out(const ITensorListRef & tensors, int64_t dim, Tensor & out); // {"schema": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cat(TensorList tensors, Dimname dim); // {"schema": "aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cat_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concat(TensorList tensors, int64_t dim); // {"schema": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concat_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concat(TensorList tensors, Dimname dim); // {"schema": "aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concat_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concatenate(TensorList tensors, int64_t dim); // {"schema": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concatenate_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor concatenate(TensorList tensors, Dimname dim); // {"schema": "aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & concatenate_out(TensorList tensors, Dimname dim, Tensor & out); // {"schema": "aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor block_diag(TensorList tensors); // {"schema": "aten::block_diag(Tensor[] tensors) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ceil(const Tensor & self); // {"schema": "aten::ceil(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ceil_(Tensor & self); // {"schema": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ceil_out(const Tensor & self, Tensor & out); // {"schema": "aten::ceil.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor chain_matmul(TensorList matrices); // {"schema": "aten::chain_matmul(Tensor[] matrices) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & chain_matmul_out(TensorList matrices, Tensor & out); // {"schema": "aten::chain_matmul.out(Tensor[] matrices, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::vector unsafe_chunk(const Tensor & self, int64_t chunks, int64_t dim); // {"schema": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector chunk(const Tensor & self, int64_t chunks, int64_t dim); // {"schema": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector tensor_split(const Tensor & self, int64_t sections, int64_t dim); // {"schema": "aten::tensor_split.sections(Tensor(a -> *) self, int sections, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector tensor_split(const Tensor & self, IntArrayRef indices, int64_t dim); // {"schema": "aten::tensor_split.indices(Tensor(a -> *) self, int[] indices, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector tensor_split(const Tensor & self, const Tensor & tensor_indices_or_sections, int64_t dim); // {"schema": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +Tensor clamp(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor clamp(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & clamp_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & clamp_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor clamp_max(const Tensor & self, const Scalar & max); // {"schema": "aten::clamp_max(Tensor self, Scalar max) -> Tensor", "dispatch": "True", "default": "True"} +Tensor clamp_max(const Tensor & self, const Tensor & max); // {"schema": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & clamp_max_(Tensor & self, const Scalar & max); // {"schema": "aten::clamp_max_(Tensor(a!) 
self, Scalar max) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_max_(Tensor & self, const Tensor & max); // {"schema": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_max_out(const Tensor & self, const Scalar & max, Tensor & out); // {"schema": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & clamp_max_out(const Tensor & self, const Tensor & max, Tensor & out); // {"schema": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor clamp_min(const Tensor & self, const Scalar & min); // {"schema": "aten::clamp_min(Tensor self, Scalar min) -> Tensor", "dispatch": "True", "default": "True"} +Tensor clamp_min(const Tensor & self, const Tensor & min); // {"schema": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & clamp_min_(Tensor & self, const Scalar & min); // {"schema": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_min_(Tensor & self, const Tensor & min); // {"schema": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & clamp_min_out(const Tensor & self, const Scalar & min, Tensor & out); // {"schema": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & clamp_min_out(const Tensor & self, const Tensor & min, Tensor & out); // {"schema": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor clip(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor clip(const Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & clip_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & clip_(Tensor & self, const c10::optional & min, const c10::optional & max); // {"schema": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & clip_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & clip_out(const Tensor & self, const c10::optional & min, const c10::optional & max, Tensor & out); // {"schema": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +bool cudnn_is_acceptable(const Tensor & self); // {"schema": "aten::cudnn_is_acceptable(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor complex(const Tensor & real, const Tensor & imag); // {"schema": "aten::complex(Tensor real, Tensor imag) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & complex_out(const Tensor & real, const Tensor & imag, Tensor & out); // {"schema": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor polar(const Tensor & abs, const Tensor & angle); // {"schema": "aten::polar(Tensor abs, Tensor angle) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polar_out(const Tensor & abs, const Tensor & angle, Tensor & out); // {"schema": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor constant_pad_nd(const Tensor & self, IntArrayRef pad, const Scalar & value); // {"schema": "aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor contiguous(const Tensor & self, MemoryFormat memory_format); // {"schema": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups); // {"schema": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward(const Tensor & grad_output, const Tensor & input, const Tensor & weight, OptionalSymIntArrayRef bias_sizes, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, ::std::array output_mask); // {"schema": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups); // {"schema": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, ::std::array output_mask); // {"schema": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", "dispatch": "True", "default": "True"} +Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32); // {"schema": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _convolution(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled); // {"schema": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _convolution_mode(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, c10::string_view padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _convolution_double_backward(const c10::optional & ggI, const c10::optional & ggW, const c10::optional & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, ::std::array output_mask); // {"schema": "aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor conv1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::conv2d(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, c10::string_view padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, str padding=\"valid\", int[1] dilation=1, int groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, c10::string_view padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, str padding=\"valid\", int[2] dilation=1, int groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, c10::string_view padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, str padding=\"valid\", int[3] dilation=1, int groups=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad); // {"schema": "aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad); // {"schema": "aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation); // {"schema": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation); // {"schema": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation); // {"schema": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor copy(const Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _copy_from(const Tensor & self, const Tensor & dst, bool non_blocking); // {"schema": "aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _copy_from_and_resize(const Tensor & self, const Tensor & dst); // {"schema": "aten::_copy_from_and_resize(Tensor self, Tensor dst) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cos(const Tensor & self); // {"schema": "aten::cos(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cos_(Tensor & self); // {"schema": "aten::cos_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cos_out(const Tensor & self, Tensor & out); // {"schema": "aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cosh(const Tensor & self); // {"schema": "aten::cosh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cosh_(Tensor & self); // {"schema": "aten::cosh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cosh_out(const Tensor & self, Tensor & out); // {"schema": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor count_nonzero(const Tensor & self, IntArrayRef dim); // {"schema": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor count_nonzero(const Tensor & self, c10::optional dim); // {"schema": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor cov(const Tensor & self, int64_t correction, const c10::optional & fweights, const c10::optional & aweights); // {"schema": "aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? 
aweights=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor corrcoef(const Tensor & self); // {"schema": "aten::corrcoef(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W); // {"schema": "aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid", "dispatch": "True", "default": "False"} +Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W); // {"schema": "aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta", "dispatch": "True", "default": "False"} +::std::tuple cudnn_batch_norm(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon); // {"schema": "aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const Tensor & reserveSpace); // {"schema": "aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32); // {"schema": "aten::cudnn_convolution(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32); // {"schema": "aten::cudnn_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _mps_convolution_transpose(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups); // {"schema": "aten::_mps_convolution_transpose(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mps_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, ::std::array output_mask); // {"schema": "aten::mps_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] 
dilation, int groups, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_relu(const Tensor & self, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cudnn_convolution_add_relu(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cudnn_grid_sampler(const Tensor & self, const Tensor & grid); // {"schema": "aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output", "dispatch": "True", "default": "False"} +::std::tuple cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output); // {"schema": "aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)", "dispatch": "True", "default": "False"} +::std::tuple cummax(const Tensor & self, int64_t dim); // {"schema": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple cummax_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple cummax(const Tensor & self, Dimname dim); // {"schema": "aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple cummax_out(const Tensor & self, Dimname dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +void _cummax_helper(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim); // {"schema": "aten::_cummax_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", "dispatch": "True", "default": "False"} +::std::tuple cummin(const Tensor & self, int64_t dim); // {"schema": "aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple cummin_out(const Tensor & self, int64_t dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple cummin(const Tensor & self, Dimname dim); // {"schema": "aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple cummin_out(const Tensor & self, Dimname dim, Tensor & values, Tensor & indices); // {"schema": "aten::cummin.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "False", "default": "True"} +void _cummin_helper(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim); // {"schema": "aten::_cummin_helper(Tensor self, Tensor(a!) values, Tensor(b!) indices, int dim) -> ()", "dispatch": "True", "default": "False"} +Tensor cummaxmin_backward(const Tensor & grad, const Tensor & input, const Tensor & indices, int64_t dim); // {"schema": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumprod(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cumprod_(Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cumprod_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cumprod(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cumprod_(Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cumprod_out(const Tensor & self, Dimname dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cumprod_backward(const Tensor & grad, const Tensor & input, int64_t dim, const Tensor & output); // {"schema": "aten::cumprod_backward(Tensor grad, Tensor input, int dim, Tensor output) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumsum(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & cumsum_(Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cumsum_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cumsum(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cumsum_(Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cumsum_out(const Tensor & self, Dimname dim, c10::optional dtype, Tensor & out); // {"schema": "aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cumulative_trapezoid(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cumulative_trapezoid(const Tensor & y, const Scalar & dx, int64_t dim); // {"schema": "aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); // {"schema": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); // {"schema": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity); // {"schema": "aten::_ctc_loss_backward.Tensor(Tensor grad, Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor diag_embed(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagflat(const Tensor & self, int64_t offset); // {"schema": "aten::diagflat(Tensor self, int offset=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor diagonal(const Tensor & self, int64_t offset, int64_t 
dim1, int64_t dim2); // {"schema": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor linalg_diagonal(const Tensor & A, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor diagonal(const Tensor & self, Dimname outdim, Dimname dim1, Dimname dim2, int64_t offset); // {"schema": "aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor diagonal_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fill_diagonal_(Tensor & self, const Scalar & fill_value, bool wrap); // {"schema": "aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor diff(const Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append); // {"schema": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & diff_out(const Tensor & self, int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append, Tensor & out); // {"schema": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, const c10::optional & spacing, c10::optional dim, int64_t edge_order); // {"schema": "aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, const Scalar & spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, ArrayRef spacing, c10::optional dim, int64_t edge_order); // {"schema": "aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, ArrayRef spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, TensorList spacing, c10::optional dim, int64_t edge_order); // {"schema": "aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? 
dim=None, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector gradient(const Tensor & self, TensorList spacing, IntArrayRef dim, int64_t edge_order); // {"schema": "aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor div(const Tensor & self, const Tensor & other); // {"schema": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Tensor & other); // {"schema": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor div(const Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Tensor & other, c10::optional rounding_mode, Tensor & out); // {"schema": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor div(const Tensor & self, const Scalar & other); // {"schema": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Scalar & other); // {"schema": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor div(const Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & div_(Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor divide(const Tensor & self, const Tensor & other); // {"schema": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Tensor & other); // {"schema": "aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor divide(const Tensor & self, const Scalar & other); // {"schema": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Scalar & other); // {"schema": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor divide(const Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? 
rounding_mode) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Tensor & other, c10::optional rounding_mode); // {"schema": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & divide_out(const Tensor & self, const Tensor & other, c10::optional rounding_mode, Tensor & out); // {"schema": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor divide(const Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & divide_(Tensor & self, const Scalar & other, c10::optional rounding_mode); // {"schema": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor true_divide(const Tensor & self, const Tensor & other); // {"schema": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & true_divide_(Tensor & self, const Tensor & other); // {"schema": "aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & true_divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor true_divide(const Tensor & self, const Scalar & other); // {"schema": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & true_divide_(Tensor & self, const Scalar & other); // {"schema": "aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor dot(const Tensor & self, const Tensor & tensor); // {"schema": "aten::dot(Tensor self, Tensor tensor) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & dot_out(const Tensor & self, const Tensor & tensor, Tensor & out); // {"schema": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor vdot(const Tensor & self, const Tensor & other); // {"schema": "aten::vdot(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & vdot_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor einsum(c10::string_view equation, TensorList tensors, OptionalIntArrayRef path); // {"schema": "aten::einsum(str equation, Tensor[] tensors, *, int[]? 
path=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse); // {"schema": "aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor embedding_backward(const Tensor & grad, const Tensor & indices, c10::SymInt num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse); // {"schema": "aten::embedding_backward(Tensor grad, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor", "dispatch": "False", "default": "True"} +Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, c10::SymInt num_weights, int64_t padding_idx, bool scale_grad_by_freq); // {"schema": "aten::embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type); // {"schema": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); // {"schema": "aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _embedding_bag_forward_only(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx); // {"schema": "aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _rowwise_prune(const Tensor & weight, const Tensor & mask, ScalarType compressed_indices_dtype); // {"schema": "aten::_rowwise_prune(Tensor weight, Tensor mask, ScalarType compressed_indices_dtype) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor row_stack(TensorList tensors); // {"schema": "aten::row_stack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & row_stack_out(TensorList tensors, Tensor & out); // {"schema": "aten::row_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset); // {"schema": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, c10::optional padding_idx); // {"schema": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx); // {"schema": "aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx); // {"schema": "aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights, int padding_idx=-1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode, int64_t padding_idx); // {"schema": "aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor empty(IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor empty(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor new_empty(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_empty_strided(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_full(const Tensor & self, c10::SymIntArrayRef size, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_zeros(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor new_ones(const Tensor & self, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _empty_affine_quantized(IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, double scale, int64_t zero_point, c10::optional memory_format); // {"schema": "aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor", "dispatch": "True", "default": "False"} +const Tensor & resize_(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format); // {"schema": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +const Tensor & _resize_output_(const Tensor & self, IntArrayRef size, Device device); // {"schema": "aten::_resize_output_(Tensor(a!) self, int[] size, Device device) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor empty_quantized(IntArrayRef size, const Tensor & qtensor, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty_quantized(int[] size, Tensor qtensor, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & empty_out(c10::SymIntArrayRef size, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor empty_like(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor empty_strided(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor erf(const Tensor & self); // {"schema": "aten::erf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erf_(Tensor & self); // {"schema": "aten::erf_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erf_out(const Tensor & self, Tensor & out); // {"schema": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor erfc(const Tensor & self); // {"schema": "aten::erfc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erfc_(Tensor & self); // {"schema": "aten::erfc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erfc_out(const Tensor & self, Tensor & out); // {"schema": "aten::erfc.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor exp(const Tensor & self); // {"schema": "aten::exp(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & exp_(Tensor & self); // {"schema": "aten::exp_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & exp_out(const Tensor & self, Tensor & out); // {"schema": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor exp2(const Tensor & self); // {"schema": "aten::exp2(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & exp2_(Tensor & self); // {"schema": "aten::exp2_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & exp2_out(const Tensor & self, Tensor & out); // {"schema": "aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor expm1(const Tensor & self); // {"schema": "aten::expm1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & expm1_(Tensor & self); // {"schema": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & expm1_out(const Tensor & self, Tensor & out); // {"schema": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor expand(const Tensor & self, c10::SymIntArrayRef size, bool implicit); // {"schema": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor expand_as(const Tensor & self, const Tensor & other); // {"schema": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor eye(int64_t n, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor eye(int64_t n, int64_t m, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & eye_out(int64_t n, Tensor & out); // {"schema": "aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & eye_out(int64_t n, int64_t m, Tensor & out); // {"schema": "aten::eye.m_out(int n, int m, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim); // {"schema": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim, Dimname out_dim); // {"schema": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, Dimname start_dim, Dimname end_dim, Dimname out_dim); // {"schema": "aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor flatten(const Tensor & self, DimnameList dims, Dimname out_dim); // {"schema": "aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor unflatten(const Tensor & self, int64_t dim, IntArrayRef sizes); // {"schema": "aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor unflatten(const Tensor & self, Dimname dim, IntArrayRef sizes, DimnameList names); // {"schema": "aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor fill(const Tensor & self, const Scalar & value); // {"schema": "aten::fill.Scalar(Tensor self, Scalar value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor fill(const Tensor & self, const Tensor & value); // {"schema": "aten::fill.Tensor(Tensor self, Tensor value) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fill_(Tensor & self, const Scalar & value); // {"schema": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & fill_(Tensor & self, const Tensor & value); // {"schema": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor(const Tensor & self); // {"schema": "aten::floor(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & floor_(Tensor & self); // {"schema": "aten::floor_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & floor_out(const Tensor & self, Tensor & out); // {"schema": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor_divide(const Tensor & self, const Tensor & other); // {"schema": "aten::floor_divide(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & floor_divide_(Tensor & self, const Tensor & other); // {"schema": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & floor_divide_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor floor_divide(const Tensor & self, const Scalar & other); // {"schema": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & floor_divide_(Tensor & self, const Scalar & other); // {"schema": "aten::floor_divide_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor frac(const Tensor & self); // {"schema": "aten::frac(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & frac_(Tensor & self); // {"schema": "aten::frac_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & frac_out(const Tensor & self, Tensor & out); // {"schema": "aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor full(IntArrayRef size, const Scalar & fill_value, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor full(c10::SymIntArrayRef size, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & full_out(c10::SymIntArrayRef size, const Scalar & fill_value, Tensor & out); // {"schema": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor full_like(const Tensor & self, const Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional memory_format); // {"schema": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor from_file(c10::string_view filename, c10::optional shared, c10::optional size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & gcd_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gcd(const Tensor & self, const Tensor & other); // {"schema": "aten::gcd(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gcd_(Tensor & self, const Tensor & other); // {"schema": "aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lcm_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lcm(const Tensor & self, const Tensor & other); // {"schema": "aten::lcm(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lcm_(Tensor & self, const Tensor & other); // {"schema": "aten::lcm_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "False", "default": "True"} +Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); // {"schema": "aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _grid_sampler_2d_cpu_fallback(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _grid_sampler_2d_cpu_fallback_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); // {"schema": "aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask); // {"schema": "aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor hann_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hann_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, double alpha, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor group_norm(const Tensor & input, int64_t num_groups, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enabled); // {"schema": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? 
bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple native_group_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); // {"schema": "aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask); // {"schema": "aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor _fft_r2c(const Tensor & self, IntArrayRef dim, int64_t normalization, bool onesided); // {"schema": "aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_r2c_out(const Tensor & self, IntArrayRef dim, int64_t normalization, bool onesided, Tensor & out); // {"schema": "aten::_fft_r2c.out(Tensor self, int[] dim, int normalization, bool onesided, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _fft_c2r(const Tensor & self, IntArrayRef dim, int64_t normalization, int64_t last_dim_size); // {"schema": "aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_c2r_out(const Tensor & self, IntArrayRef dim, int64_t normalization, int64_t last_dim_size, Tensor & out); // {"schema": "aten::_fft_c2r.out(Tensor self, int[] dim, int normalization, int last_dim_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _fft_c2c(const Tensor & self, IntArrayRef dim, int64_t normalization, bool forward); // {"schema": "aten::_fft_c2c(Tensor self, int[] dim, int normalization, bool forward) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _fft_c2c_out(const Tensor & self, IntArrayRef dim, int64_t normalization, bool forward, Tensor & out); // {"schema": "aten::_fft_c2c.out(Tensor self, int[] dim, int normalization, bool forward, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +void _validate_compressed_sparse_indices(bool is_crow, const Tensor & compressed_idx, const Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz); // {"schema": "aten::_validate_compressed_sparse_indices(bool is_crow, Tensor compressed_idx, Tensor plain_idx, int cdim, int dim, int nnz) -> ()", "dispatch": "True", "default": "False"} +int64_t _cufft_get_plan_cache_size(int64_t device_index); // {"schema": "aten::_cufft_get_plan_cache_size(int device_index) -> int", "dispatch": "False", "default": "True"} +int64_t _cufft_get_plan_cache_max_size(int64_t device_index); // {"schema": "aten::_cufft_get_plan_cache_max_size(int device_index) -> int", "dispatch": "False", "default": "True"} +void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size); // {"schema": "aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> ()", "dispatch": "False", "default": "True"} +void _cufft_clear_plan_cache(int64_t device_index); // {"schema": "aten::_cufft_clear_plan_cache(int device_index) -> ()", "dispatch": "False", "default": "True"} +Tensor index(const Tensor & self, const c10::List> & indices); // {"schema": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_out(const Tensor & self, const c10::List> & indices, Tensor & out); // {"schema": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_copy_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, Tensor & out); // {"schema": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_copy(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & index_copy_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_copy(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); // {"schema": "aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_put_(Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor index_put(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate); // {"schema": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _index_put_impl_(Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe); // {"schema": "aten::_index_put_impl_(Tensor(a!) 
self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor instance_norm(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); // {"schema": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor", "dispatch": "False", "default": "True"} +Tensor isclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan); // {"schema": "aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & isin_out(const Tensor & elements, const Tensor & test_elements, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Tensor_Tensor_out(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isin(const Tensor & elements, const Tensor & test_elements, bool assume_unique, bool invert); // {"schema": "aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isin_out(const Tensor & elements, const Scalar & test_element, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Tensor_Scalar_out(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isin(const Tensor & elements, const Scalar & test_element, bool assume_unique, bool invert); // {"schema": "aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isin_out(const Scalar & element, const Tensor & test_elements, bool assume_unique, bool invert, Tensor & out); // {"schema": "aten::isin.Scalar_Tensor_out(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor isin(const Scalar & element, const Tensor & test_elements, bool assume_unique, bool invert); // {"schema": "aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor isnan(const Tensor & self); // {"schema": "aten::isnan(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +bool is_distributed(const Tensor & self); // {"schema": "aten::is_distributed(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_floating_point(const Tensor & self); // {"schema": "aten::is_floating_point(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_complex(const Tensor & self); // {"schema": "aten::is_complex(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_conj(const Tensor & self); // {"schema": "aten::is_conj(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool _is_zerotensor(const Tensor & self); // {"schema": "aten::_is_zerotensor(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_neg(const Tensor & self); // {"schema": "aten::is_neg(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor isreal(const Tensor & self); // {"schema": "aten::isreal(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +bool is_nonzero(const Tensor & self); // {"schema": "aten::is_nonzero(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_same_size(const Tensor & self, const Tensor & other); // {"schema": "aten::is_same_size(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "True"} +bool is_signed(const Tensor & self); // {"schema": "aten::is_signed(Tensor self) -> bool", "dispatch": "False", "default": "True"} +bool is_inference(const Tensor & self); // {"schema": "aten::is_inference(Tensor self) -> bool", "dispatch": "False", "default": "True"} +Tensor kl_div(const Tensor & self, const Tensor & target, int64_t reduction, bool log_target); // {"schema": "aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor kron(const Tensor & self, const Tensor & other); // {"schema": "aten::kron(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & kron_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::kron.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim); // {"schema": "aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple kthvalue_out(const Tensor & self, int64_t k, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", "dispatch": "True", "default": "False"} +::std::tuple kthvalue(const Tensor & self, int64_t k, Dimname dim, bool keepdim); // {"schema": "aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple kthvalue_out(const Tensor & self, int64_t k, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor layer_norm(const Tensor & input, IntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, bool cudnn_enable); // {"schema": "aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple native_layer_norm(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps); // {"schema": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_backward(const Tensor & grad_out, const Tensor & input, c10::SymIntArrayRef normalized_shape, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask); // {"schema": "aten::native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor nan_to_num(const Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf); // {"schema": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nan_to_num_(Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf); // {"schema": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nan_to_num_out(const Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf, Tensor & out); // {"schema": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linear(const Tensor & input, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple linear_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask); // {"schema": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & linear_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, Tensor & out); // {"schema": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor mkldnn_linear(const Tensor & self, const Tensor & weight, const c10::optional & bias); // {"schema": "aten::mkldnn_linear(Tensor self, Tensor weight, Tensor? bias=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_linear_backward_input(IntArrayRef input_size, const Tensor & grad_output, const Tensor & weight); // {"schema": "aten::mkldnn_linear_backward_input(int[] input_size, Tensor grad_output, Tensor weight) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_linear_backward_weights(const Tensor & grad_output, const Tensor & input, const Tensor & weight, bool bias_defined); // {"schema": "aten::mkldnn_linear_backward_weights(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple mkldnn_linear_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask); // {"schema": "aten::mkldnn_linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor fbgemm_linear_int8_weight_fp32_activation(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, const Scalar & weight_scale, const Scalar & weight_zero_point, const Tensor & bias); // {"schema": "aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, const Scalar & weight_scale, const Scalar & weight_zero_point, const Tensor & bias); // {"schema": "aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fbgemm_linear_quantize_weight(const Tensor & input); // {"schema": "aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor & input); // {"schema": "aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_fp16_weight_fp32_activation(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); // {"schema": "aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_linear_fp16_weight(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); // {"schema": "aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_quantized_matrix(const Tensor & input); // {"schema": "aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fbgemm_pack_quantized_matrix(const Tensor & input, int64_t K, int64_t N); // {"schema": "aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor", "dispatch": "False", "default": "True"} +Tensor ldexp(const Tensor & self, const Tensor & other); // {"schema": "aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor", 
"dispatch": "False", "default": "True"} +Tensor & ldexp_(Tensor & self, const Tensor & other); // {"schema": "aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & ldexp_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ldexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linspace(const Scalar & start, const Scalar & end, int64_t steps, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linspace_out(const Scalar & start, const Scalar & end, int64_t steps, Tensor & out); // {"schema": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log(const Tensor & self); // {"schema": "aten::log(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log_(Tensor & self); // {"schema": "aten::log_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log_out(const Tensor & self, Tensor & out); // {"schema": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log10(const Tensor & self); // {"schema": "aten::log10(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log10_(Tensor & self); // {"schema": "aten::log10_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log10_out(const Tensor & self, Tensor & out); // {"schema": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log1p(const Tensor & self); // {"schema": "aten::log1p(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log1p_(Tensor & self); // {"schema": "aten::log1p_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log1p_out(const Tensor & self, Tensor & out); // {"schema": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log2(const Tensor & self); // {"schema": "aten::log2(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & log2_(Tensor & self); // {"schema": "aten::log2_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log2_out(const Tensor & self, Tensor & out); // {"schema": "aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & logaddexp_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logaddexp(const Tensor & self, const Tensor & other); // {"schema": "aten::logaddexp(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logaddexp2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logaddexp2(const Tensor & self, const Tensor & other); // {"schema": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Tensor & self, const Tensor & other); // {"schema": "aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Scalar & self, const Tensor & other); // {"schema": "aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor xlogy(const Tensor & self, const Scalar & other); // {"schema": "aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & xlogy_(Tensor & self, const Tensor & other); // {"schema": "aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_(Tensor & self, const Scalar & other); // {"schema": "aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::xlogy.OutTensor(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & xlogy_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::xlogy.OutScalar_Self(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & xlogy_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::xlogy.OutScalar_Other(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logspace(const Scalar & start, const Scalar & end, int64_t steps, double base, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logspace_out(const Scalar & start, const Scalar & end, int64_t steps, double base, Tensor & out); // {"schema": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & log_softmax_out(const Tensor & self, int64_t dim, c10::optional dtype, Tensor & out); // {"schema": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor log_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _log_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _log_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype); // {"schema": "aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _log_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype, Tensor & out); // {"schema": "aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _logcumsumexp(const Tensor & self, int64_t dim); // {"schema": "aten::_logcumsumexp(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _logcumsumexp_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logcumsumexp(const Tensor & self, int64_t dim); // {"schema": "aten::logcumsumexp(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logcumsumexp_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logcumsumexp(const Tensor & self, Dimname dim); // {"schema": "aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & logcumsumexp_out(const Tensor & self, Dimname dim, Tensor & out); // {"schema": "aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & logsumexp_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor logsumexp(const Tensor & self, DimnameList dim, bool keepdim); // {"schema": "aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & logsumexp_out(const Tensor & self, DimnameList dim, bool keepdim, Tensor & out); // {"schema": "aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); // {"schema": "aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple matmul_backward(const Tensor & grad, const Tensor & self, const Tensor & other, ::std::array mask); // {"schema": "aten::matmul_backward(Tensor grad, Tensor self, Tensor other, bool[2] mask) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor matrix_power(const Tensor & self, int64_t n); // {"schema": "aten::matrix_power(Tensor self, int n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & matrix_power_out(const Tensor & self, int64_t n, Tensor & out); // {"schema": "aten::matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor matrix_exp(const Tensor & self); // {"schema": "aten::matrix_exp(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor matrix_exp_backward(const Tensor & self, const Tensor & grad); // {"schema": "aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _aminmax(const Tensor & self); // {"schema": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _aminmax(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple aminmax(const Tensor & self, c10::optional dim, bool keepdim); // {"schema": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)", "dispatch": "True", "default": "True"} +::std::tuple aminmax_out(const Tensor & self, c10::optional dim, bool keepdim, Tensor & min, Tensor & max); // {"schema": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)", "dispatch": "True", "default": "False"} +Tensor _compute_linear_combination(const Tensor & input, const Tensor & coefficients); // {"schema": "aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & _compute_linear_combination_out(const Tensor & input, const Tensor & coefficients, Tensor & out); // {"schema": "aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple max(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple max_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & max, Tensor & max_values); // {"schema": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) 
values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple max(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple max_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & max, Tensor & max_values); // {"schema": "aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor value_selecting_reduction_backward(const Tensor & grad, int64_t dim, const Tensor & indices, IntArrayRef sizes, bool keepdim); // {"schema": "aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, int[] sizes, bool keepdim) -> Tensor", "dispatch": "False", "default": "True"} +Tensor amax(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & amax_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple max_pool1d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _mps_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::_mps_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mps_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mps_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor 
mkldnn_max_pool2d_backward(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool2d_backward(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor mkldnn_max_pool3d_backward(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor quantized_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor mean(const Tensor & self, c10::optional dtype); // {"schema": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor mean(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mean_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mean(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); // {"schema": "aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & mean_out(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanmean(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanmean_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::nanmean.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor median(const Tensor & self); // {"schema": "aten::median(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> median(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> median(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> median_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor nanmedian(const Tensor & self); // {"schema": "aten::nanmedian(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> nanmedian(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor &,Tensor &> nanmedian_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> nanmedian(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> nanmedian_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::nanmedian.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor,Tensor> min(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & min, Tensor & min_indices); // {"schema": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!)
values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> min(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> min_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & min, Tensor & min_indices); // {"schema": "aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor amin(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & amin_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _mps_convolution(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups); // {"schema": "aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor> mps_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, ::std::array<bool,3> output_mask); // {"schema": "aten::mps_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor mkldnn_convolution(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups); // {"schema": "aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple<Tensor,Tensor,Tensor> miopen_batch_norm(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double exponential_average_factor, double epsilon); // {"schema": "aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor> miopen_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, const c10::optional<Tensor> & save_mean, const c10::optional<Tensor> & save_var, double epsilon); // {"schema": "aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor?
save_var, float epsilon) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor miopen_convolution(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_transpose(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_depthwise_convolution(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); // {"schema": "aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_relu(const Tensor & self, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::miopen_convolution_relu(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor miopen_convolution_add_relu(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional<Scalar> & alpha, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups); // {"schema": "aten::miopen_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> miopen_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const c10::optional<Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional<Tensor> & dropout_state); // {"schema": "aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor?
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor,::std::vector<Tensor>> miopen_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional<Tensor> & cx, const Tensor & output, const c10::optional<Tensor> & grad_output, const c10::optional<Tensor> & grad_hy, const c10::optional<Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional<Tensor> & dropout_state, const Tensor & reserve, ::std::array<bool,4> output_mask); // {"schema": "aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])", "dispatch": "True", "default": "False"} +Tensor mm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::mm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mm_out(const Tensor & self, const Tensor & mat2, Tensor & out); // {"schema": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense); // {"schema": "aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_sparse_matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::_sparse_sparse_matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_mask_helper(const Tensor & t, const Tensor & mask_indices); // {"schema": "aten::_sparse_mask_helper(Tensor t, Tensor mask_indices) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim, bool keepdim); // {"schema": "aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple<Tensor,Tensor> mode(const Tensor & self, Dimname dim, bool keepdim); // {"schema": "aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor &,Tensor &> mode_out(const Tensor & self, Dimname dim, bool keepdim, Tensor & values, Tensor & indices); // {"schema": "aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +Tensor mul(const Tensor & self, const Tensor & other); // {"schema": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mul_(Tensor & self, const Tensor & other); // {"schema": "aten::mul_.Tensor(Tensor(a!)
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mul(const Tensor & self, const Scalar & other); // {"schema": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mul_(Tensor & self, const Scalar & other); // {"schema": "aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor multiply(const Tensor & self, const Tensor & other); // {"schema": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multiply_(Tensor & self, const Tensor & other); // {"schema": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & multiply_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor multiply(const Tensor & self, const Scalar & other); // {"schema": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multiply_(Tensor & self, const Scalar & other); // {"schema": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor mv(const Tensor & self, const Tensor & vec); // {"schema": "aten::mv(Tensor self, Tensor vec) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mv_out(const Tensor & self, const Tensor & vec, Tensor & out); // {"schema": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mvlgamma_out(const Tensor & self, int64_t p, Tensor & out); // {"schema": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mvlgamma(const Tensor & self, int64_t p); // {"schema": "aten::mvlgamma(Tensor self, int p) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mvlgamma_(Tensor & self, int64_t p); // {"schema": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor narrow_copy(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length); // {"schema": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & narrow_copy_out(const Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length, Tensor & out); // {"schema": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length); // {"schema": "aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor narrow(const Tensor & self, int64_t dim, const Tensor & start, int64_t length); // {"schema": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a)", "dispatch": "False", "default": "True"} +::std::tuple<Tensor,Tensor,Tensor> native_batch_norm(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double momentum, double eps); // {"schema": "aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor &,Tensor &,Tensor &> native_batch_norm_out(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd); // {"schema": "aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> batch_norm_stats(const Tensor & input, double eps); // {"schema": "aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor batch_norm_elemt(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const Tensor & mean, const Tensor & invstd, double eps); // {"schema": "aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & batch_norm_elemt_out(const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const Tensor & mean, const Tensor & invstd, double eps, Tensor & out); // {"schema": "aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> batch_norm_gather_stats(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, double momentum, double eps, int64_t count); // {"schema": "aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> batch_norm_gather_stats_with_counts(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, double momentum, double eps, const Tensor & counts); // {"schema": "aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor?
running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor> native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, const c10::optional<Tensor> & save_mean, const c10::optional<Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask); // {"schema": "aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor,Tensor,Tensor> batch_norm_backward_reduce(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional<Tensor> & weight, bool input_g, bool weight_g, bool bias_g); // {"schema": "aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor batch_norm_backward_elemt(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional<Tensor> & weight, const Tensor & mean_dy, const Tensor & mean_dy_xmu, const Tensor & count); // {"schema": "aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> batch_norm_update_stats(const Tensor & input, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, double momentum); // {"schema": "aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +bool is_vulkan_available(); // {"schema": "aten::is_vulkan_available() -> bool", "dispatch": "False", "default": "True"} +bool _nnpack_available(); // {"schema": "aten::_nnpack_available() -> bool", "dispatch": "False", "default": "True"} +Tensor _nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const c10::optional<Tensor> & bias, IntArrayRef padding, IntArrayRef stride); // {"schema": "aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ones(IntArrayRef size, c10::optional<DimnameList> names, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ones(c10::SymIntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ones_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::ones.out(SymInt[] size, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor ones_like(const Tensor & self, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor pairwise_distance(const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim); // {"schema": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor cdist(const Tensor & x1, const Tensor & x2, double p, c10::optional<int64_t> compute_mode); // {"schema": "aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _euclidean_dist(const Tensor & x1, const Tensor & x2); // {"schema": "aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _cdist_forward(const Tensor & x1, const Tensor & x2, double p, c10::optional<int64_t> compute_mode); // {"schema": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _cdist_backward(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist); // {"schema": "aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pdist(const Tensor & self, double p); // {"schema": "aten::pdist(Tensor self, float p=2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _pdist_forward(const Tensor & self, double p); // {"schema": "aten::_pdist_forward(Tensor self, float p=2) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist); // {"schema": "aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim, double eps); // {"schema": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor", "dispatch": "False", "default": "True"} +Tensor permute(const Tensor & self, IntArrayRef dims); // {"schema": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor movedim(const Tensor & self, IntArrayRef source, IntArrayRef destination); // {"schema": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor movedim(const Tensor & self, int64_t source, int64_t destination); // {"schema": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor moveaxis(const Tensor & self, IntArrayRef source, IntArrayRef destination); // {"schema": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor moveaxis(const Tensor & self, int64_t source, int64_t destination); // {"schema": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor numpy_T(const Tensor & self); // {"schema": "aten::numpy_T(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default":
"True"} +Tensor matrix_H(const Tensor & self); // {"schema": "aten::matrix_H(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor mT(const Tensor & self); // {"schema": "aten::mT(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor mH(const Tensor & self); // {"schema": "aten::mH(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor adjoint(const Tensor & self); // {"schema": "aten::adjoint(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor pixel_shuffle(const Tensor & self, int64_t upscale_factor); // {"schema": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor", "dispatch": "True", "default": "True"} +Tensor pixel_unshuffle(const Tensor & self, int64_t downscale_factor); // {"schema": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor", "dispatch": "True", "default": "True"} +Tensor channel_shuffle(const Tensor & self, int64_t groups); // {"schema": "aten::channel_shuffle(Tensor self, int groups) -> Tensor", "dispatch": "True", "default": "False"} +Tensor native_channel_shuffle(const Tensor & self, int64_t groups); // {"schema": "aten::native_channel_shuffle(Tensor self, int groups) -> Tensor", "dispatch": "True", "default": "True"} +bool is_pinned(const Tensor & self, c10::optional device); // {"schema": "aten::is_pinned(Tensor self, Device? device=None) -> bool", "dispatch": "True", "default": "True"} +Tensor pin_memory(const Tensor & self, c10::optional device); // {"schema": "aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _pin_memory(const Tensor & self, c10::optional device); // {"schema": "aten::_pin_memory(Tensor self, Device? device=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pinverse(const Tensor & self, double rcond); // {"schema": "aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor", "dispatch": "False", "default": "True"} +Tensor poisson_nll_loss(const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction); // {"schema": "aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rad2deg(const Tensor & self); // {"schema": "aten::rad2deg(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rad2deg_(Tensor & self); // {"schema": "aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rad2deg_out(const Tensor & self, Tensor & out); // {"schema": "aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor deg2rad(const Tensor & self); // {"schema": "aten::deg2rad(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & deg2rad_(Tensor & self); // {"schema": "aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & deg2rad_out(const Tensor & self, Tensor & out); // {"schema": "aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor scalar_tensor(const Scalar & s, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(IntArrayRef size, c10::optional<DimnameList> names, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(IntArrayRef size, c10::optional<Generator> generator, c10::optional<DimnameList> names, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rand(IntArrayRef size, c10::optional<Generator> generator, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rand_out(IntArrayRef size, Tensor & out); // {"schema": "aten::rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(IntArrayRef size, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor rand_like(const Tensor & self, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(int64_t high, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randint(int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(int64_t high, IntArrayRef size, c10::optional<Generator> generator, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(int64_t low, int64_t high, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randint.low(int low, int high, int[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint(int64_t low, int64_t high, IntArrayRef size, c10::optional<Generator> generator, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randint_out(int64_t high, IntArrayRef size, Tensor & out); // {"schema": "aten::randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(int64_t high, IntArrayRef size, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(int64_t low, int64_t high, IntArrayRef size, Tensor & out); // {"schema": "aten::randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_out(int64_t low, int64_t high, IntArrayRef size, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor randint_like(const Tensor & self, int64_t high, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::randint_like(Tensor self, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randint_like(const Tensor & self, int64_t low, int64_t high, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(IntArrayRef size, c10::optional<Generator> generator, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(IntArrayRef size, c10::optional<DimnameList> names, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randn(IntArrayRef size, c10::optional<Generator> generator, c10::optional<DimnameList> names, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randn_out(IntArrayRef size, Tensor & out); // {"schema": "aten::randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & randn_out(IntArrayRef size, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor randn_like(const Tensor & self, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randperm(int64_t n, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randperm(int n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor randperm(int64_t n, c10::optional<Generator> generator, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & randperm_out(int64_t n, Tensor & out); // {"schema": "aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randperm_out(int64_t n, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor range(const Scalar & start, const Scalar & end, const Scalar & step, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor range(const Scalar & start, const Scalar & end, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & range_out(const Scalar & start, const Scalar & end, Tensor & out); // {"schema": "aten::range.out_(Scalar start, Scalar end, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & range_out(const Scalar & start, const Scalar & end, const Scalar & step, Tensor & out); // {"schema": "aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ravel(const Tensor & self); // {"schema": "aten::ravel(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor reciprocal(const Tensor & self); // {"schema": "aten::reciprocal(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & reciprocal_(Tensor & self); // {"schema": "aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & reciprocal_out(const Tensor & self, Tensor & out); // {"schema": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor neg(const Tensor & self); // {"schema": "aten::neg(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & neg_(Tensor & self); // {"schema": "aten::neg_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & neg_out(const Tensor & self, Tensor & out); // {"schema": "aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor negative(const Tensor & self); // {"schema": "aten::negative(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & negative_(Tensor & self); // {"schema": "aten::negative_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & negative_out(const Tensor & self, Tensor & out); // {"schema": "aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor repeat(const Tensor & self, c10::SymIntArrayRef repeats); // {"schema": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor", "dispatch": "True", "default": "True"} +Tensor repeat_interleave(const Tensor & repeats, c10::optional<int64_t> output_size); // {"schema": "aten::repeat_interleave.Tensor(Tensor repeats, *, int? output_size=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor repeat_interleave(const Tensor & self, const Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size); // {"schema": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor repeat_interleave(const Tensor & self, int64_t repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size); // {"schema": "aten::repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor reshape(const Tensor & self, c10::SymIntArrayRef shape); // {"schema": "aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _reshape_alias(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)", "dispatch": "True", "default": "False"} +Tensor _mkldnn_reshape(const Tensor & self, IntArrayRef shape); // {"schema": "aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor", "dispatch": "True", "default": "False"} +Tensor reshape_as(const Tensor & self, const Tensor & other); // {"schema": "aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor round(const Tensor & self); // {"schema": "aten::round(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & round_(Tensor & self); // {"schema": "aten::round_(Tensor(a!)
self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & round_out(const Tensor & self, Tensor & out); // {"schema": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor round(const Tensor & self, int64_t decimals); // {"schema": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & round_(Tensor & self, int64_t decimals); // {"schema": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & round_out(const Tensor & self, int64_t decimals, Tensor & out); // {"schema": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor rrelu(const Tensor & self, const Scalar & lower, const Scalar & upper, bool training, c10::optional<Generator> generator); // {"schema": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & rrelu_(Tensor & self, const Scalar & lower, const Scalar & upper, bool training, c10::optional<Generator> generator); // {"schema": "aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor relu(const Tensor & self); // {"schema": "aten::relu(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & relu_(Tensor & self); // {"schema": "aten::relu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor relu6(const Tensor & self); // {"schema": "aten::relu6(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & relu6_(Tensor & self); // {"schema": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor prelu(const Tensor & self, const Tensor & weight); // {"schema": "aten::prelu(Tensor self, Tensor weight) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple<Tensor,Tensor> prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight); // {"schema": "aten::prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & gelu_out(const Tensor & self, c10::string_view approximate, Tensor & out); // {"schema": "aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & gelu_(Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor gelu(const Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu(Tensor self, *, str approximate='none') -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gelu_backward_out(const Tensor & grad_output, const Tensor & self, c10::string_view approximate, Tensor & grad_input); // {"schema": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!)
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gelu_backward(const Tensor & grad_output, const Tensor & self, c10::string_view approximate); // {"schema": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor", "dispatch": "True", "default": "True"} +Tensor infinitely_differentiable_gelu_backward(const Tensor & grad, const Tensor & self); // {"schema": "aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & hardshrink_out(const Tensor & self, const Scalar & lambd, Tensor & out); // {"schema": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardshrink(const Tensor & self, const Scalar & lambd); // {"schema": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardshrink_backward_out(const Tensor & grad_out, const Tensor & self, const Scalar & lambd, Tensor & grad_input); // {"schema": "aten::hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardshrink_backward(const Tensor & grad_out, const Tensor & self, const Scalar & lambd); // {"schema": "aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor", "dispatch": "True", "default": "True"} +Tensor rsqrt(const Tensor & self); // {"schema": "aten::rsqrt(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rsqrt_(Tensor & self); // {"schema": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rsqrt_out(const Tensor & self, Tensor & out); // {"schema": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor select(const Tensor & self, Dimname dim, int64_t index); // {"schema": "aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor select(const Tensor & self, int64_t dim, int64_t index); // {"schema": "aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor select_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t index); // {"schema": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, int index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _nested_select_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, int64_t index); // {"schema": "aten::_nested_select_backward(Tensor grad_output, Tensor self, int dim, int index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor selu(const Tensor & self); // {"schema": "aten::selu(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & selu_(Tensor & self); // {"schema": "aten::selu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor celu(const Tensor & self, const Scalar & alpha); // {"schema": "aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & celu_(Tensor & self, const Scalar & alpha); // {"schema": "aten::celu_(Tensor(a!) 
self, Scalar alpha=1.0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor silu(const Tensor & self); // {"schema": "aten::silu(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & silu_(Tensor & self); // {"schema": "aten::silu_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & silu_out(const Tensor & self, Tensor & out); // {"schema": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & silu_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor silu_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor mish(const Tensor & self); // {"schema": "aten::mish(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mish_(Tensor & self); // {"schema": "aten::mish_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mish_out(const Tensor & self, Tensor & out); // {"schema": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mish_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::mish_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor sigmoid(const Tensor & self); // {"schema": "aten::sigmoid(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sigmoid_(Tensor & self); // {"schema": "aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor logit(const Tensor & self, c10::optional<double> eps); // {"schema": "aten::logit(Tensor self, float? eps=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & logit_(Tensor & self, c10::optional<double> eps); // {"schema": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & logit_out(const Tensor & self, c10::optional<double> eps, Tensor & out); // {"schema": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sin(const Tensor & self); // {"schema": "aten::sin(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sin_(Tensor & self); // {"schema": "aten::sin_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sin_out(const Tensor & self, Tensor & out); // {"schema": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sinc(const Tensor & self); // {"schema": "aten::sinc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sinc_(Tensor & self); // {"schema": "aten::sinc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sinc_out(const Tensor & self, Tensor & out); // {"schema": "aten::sinc.out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sinh(const Tensor & self); // {"schema": "aten::sinh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sinh_(Tensor & self); // {"schema": "aten::sinh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sinh_out(const Tensor & self, Tensor & out); // {"schema": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor detach(const Tensor & self); // {"schema": "aten::detach(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor & detach_(Tensor & self); // {"schema": "aten::detach_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +int64_t size(const Tensor & self, int64_t dim); // {"schema": "aten::size.int(Tensor self, int dim) -> int", "dispatch": "False", "default": "True"} +int64_t size(const Tensor & self, Dimname dim); // {"schema": "aten::size.Dimname(Tensor self, Dimname dim) -> int", "dispatch": "False", "default": "True"} +Tensor slice(const Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step); // {"schema": "aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor slice_backward(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step); // {"schema": "aten::slice_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step) -> Tensor", "dispatch": "True", "default": "True"} +Tensor slice_scatter(const Tensor & self, const Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step); // {"schema": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor select_scatter(const Tensor & self, const Tensor & src, int64_t dim, int64_t index); // {"schema": "aten::select_scatter(Tensor self, Tensor src, int dim, int index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagonal_scatter(const Tensor & self, const Tensor & src, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor as_strided_scatter(const Tensor & self, const Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset); // {"schema": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor smm(const Tensor & self, const Tensor & mat2); // {"schema": "aten::smm(Tensor self, Tensor mat2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor softmax(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype); // {"schema": "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & softmax_out(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor softmax(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype); // {"schema": "aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype); // {"schema": "aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, ScalarType input_dtype, Tensor & grad_input); // {"schema": "aten::_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector<Tensor> unsafe_split(const Tensor & self, int64_t split_size, int64_t dim); // {"schema": "aten::unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector<Tensor> split(const Tensor & self, int64_t split_size, int64_t dim); // {"schema": "aten::split.Tensor(Tensor(a -> *) self, int split_size, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector<Tensor> split(const Tensor & self, IntArrayRef split_size, int64_t dim); // {"schema": "aten::split.sizes(Tensor(a -> *) self, int[] split_size, int dim=0) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector<Tensor> unsafe_split_with_sizes(const Tensor & self, IntArrayRef split_sizes, int64_t dim); // {"schema": "aten::unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector<Tensor> split_with_sizes(const Tensor & self, IntArrayRef split_sizes, int64_t dim); // {"schema": "aten::split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"} +::std::vector<Tensor> hsplit(const Tensor & self, int64_t sections); // {"schema": "aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector<Tensor> hsplit(const Tensor & self, IntArrayRef indices); // {"schema": "aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector<Tensor> vsplit(const Tensor & self, int64_t sections); // {"schema": "aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector<Tensor> vsplit(const Tensor & self, IntArrayRef indices); // {"schema": "aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector<Tensor> dsplit(const Tensor & self, int64_t sections); // {"schema": "aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]", "dispatch": "False", "default": "True"} +::std::vector<Tensor> dsplit(const Tensor & self, IntArrayRef indices); // {"schema": "aten::dsplit.array(Tensor(a -> *) self, int[] indices) ->
Tensor(a)[]", "dispatch": "False", "default": "True"} +Tensor squeeze(const Tensor & self); // {"schema": "aten::squeeze(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor squeeze(const Tensor & self, int64_t dim); // {"schema": "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor squeeze(const Tensor & self, Dimname dim); // {"schema": "aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor & squeeze_(Tensor & self); // {"schema": "aten::squeeze_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, int64_t dim); // {"schema": "aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_(Tensor & self, Dimname dim); // {"schema": "aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & sspaddmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor stack(TensorList tensors, int64_t dim); // {"schema": "aten::stack(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & stack_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _stack(TensorList tensors, int64_t dim); // {"schema": "aten::_stack(Tensor[] tensors, int dim=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _stack_out(TensorList tensors, int64_t dim, Tensor & out); // {"schema": "aten::_stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor hstack(TensorList tensors); // {"schema": "aten::hstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & hstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor vstack(TensorList tensors); // {"schema": "aten::vstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & vstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor dstack(TensorList tensors); // {"schema": "aten::dstack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & dstack_out(TensorList tensors, Tensor & out); // {"schema": "aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided, c10::optional return_complex); // {"schema": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? 
+Tensor stft(const Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex); // {"schema": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor istft(const Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex); // {"schema": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor", "dispatch": "False", "default": "True"}
+int64_t stride(const Tensor & self, int64_t dim); // {"schema": "aten::stride.int(Tensor self, int dim) -> int", "dispatch": "False", "default": "True"}
+int64_t stride(const Tensor & self, Dimname dim); // {"schema": "aten::stride.Dimname(Tensor self, Dimname dim) -> int", "dispatch": "False", "default": "True"}
+Tensor sum(const Tensor & self, c10::optional<ScalarType> dtype); // {"schema": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor sum(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor sum(const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & sum_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & sum_out(const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor _nested_sum_backward(const Tensor & grad, const Tensor & self, OptionalIntArrayRef dim, bool keepdim); // {"schema": "aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor nansum(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & nansum_out(const Tensor & self, OptionalIntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sum_to_size(const Tensor & self, IntArrayRef size); // {"schema": "aten::sum_to_size(Tensor self, int[] size) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sqrt(const Tensor & self); // {"schema": "aten::sqrt(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sqrt_(Tensor & self); // {"schema": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sqrt_out(const Tensor & self, Tensor & out); // {"schema": "aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor square(const Tensor & self); // {"schema": "aten::square(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & square_(Tensor & self); // {"schema": "aten::square_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & square_out(const Tensor & self, Tensor & out); // {"schema": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor std(const Tensor & self, bool unbiased); // {"schema": "aten::std(Tensor self, bool unbiased=True) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor std(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor std(const Tensor & self, OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor> std_mean(const Tensor & self, bool unbiased); // {"schema": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> std_mean(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> std_mean(const Tensor & self, OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor> std_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> std_mean(const Tensor & self, DimnameList dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+Tensor & std_out(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & std_out(const Tensor & self, OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, Tensor & out); // {"schema": "aten::std.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor std(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & std_out(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor std(const Tensor & self, DimnameList dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & std_out(const Tensor & self, DimnameList dim, c10::optional<int64_t> correction, bool keepdim, Tensor & out); // {"schema": "aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor prod(const Tensor & self, c10::optional<ScalarType> dtype); // {"schema": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor prod(const Tensor & self, int64_t dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & prod_out(const Tensor & self, int64_t dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor prod(const Tensor & self, Dimname dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & prod_out(const Tensor & self, Dimname dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor t(const Tensor & self); // {"schema": "aten::t(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor & t_(Tensor & self); // {"schema": "aten::t_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor tan(const Tensor & self); // {"schema": "aten::tan(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & tan_(Tensor & self); // {"schema": "aten::tan_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & tan_out(const Tensor & self, Tensor & out); // {"schema": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor tanh(const Tensor & self); // {"schema": "aten::tanh(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & tanh_(Tensor & self); // {"schema": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & tanh_out(const Tensor & self, Tensor & out); // {"schema": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other); // {"schema": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & tensordot_out(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other, Tensor & out); // {"schema": "aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor threshold(const Tensor & self, const Scalar & threshold, const Scalar & value); // {"schema": "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & threshold_(Tensor & self, const Scalar & threshold, const Scalar & value); // {"schema": "aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & threshold_out(const Tensor & self, const Scalar & threshold, const Scalar & value, Tensor & out); // {"schema": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & threshold_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & threshold, Tensor & grad_input); // {"schema": "aten::threshold_backward.grad_input(Tensor grad_output, Tensor self, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor threshold_backward(const Tensor & grad_output, const Tensor & self, const Scalar & threshold); // {"schema": "aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor tile(const Tensor & self, IntArrayRef dims); // {"schema": "aten::tile(Tensor self, int[] dims) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor transpose(const Tensor & self, Dimname dim0, Dimname dim1); // {"schema": "aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)", "dispatch": "False", "default": "True"}
+Tensor _mkldnn_transpose(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & transpose_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _mkldnn_transpose_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor one_hot(const Tensor & self, int64_t num_classes); // {"schema": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor flip(const Tensor & self, IntArrayRef dims); // {"schema": "aten::flip(Tensor self, int[] dims) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor fliplr(const Tensor & self); // {"schema": "aten::fliplr(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor flipud(const Tensor & self); // {"schema": "aten::flipud(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor roll(const Tensor & self, IntArrayRef shifts, IntArrayRef dims); // {"schema": "aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor rot90(const Tensor & self, int64_t k, IntArrayRef dims); // {"schema": "aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor trapezoid(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor trapezoid(const Tensor & y, const Scalar & dx, int64_t dim); // {"schema": "aten::trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor trapz(const Tensor & y, const Tensor & x, int64_t dim); // {"schema": "aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor trapz(const Tensor & y, double dx, int64_t dim); // {"schema": "aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor,Tensor> _transform_bias_rescale_qkv(const Tensor & qkv, const Tensor & qkv_bias, int64_t num_heads); // {"schema": "aten::_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"}
+Tensor _nested_tensor_from_mask(const Tensor & t, const Tensor & mask, bool mask_check); // {"schema": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor", "dispatch": "True", "default": "False"}
+bool _nested_tensor_from_mask_left_aligned(const Tensor & t, const Tensor & mask); // {"schema": "aten::_nested_tensor_from_mask_left_aligned(Tensor t, Tensor mask) -> bool", "dispatch": "True", "default": "False"}
+Tensor _nested_from_padded(const Tensor & padded, const Tensor & cpu_nested_shape_example, bool fuse_transform_0213); // {"schema": "aten::_nested_from_padded(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _nested_tensor_size(const Tensor & self); // {"schema": "aten::_nested_tensor_size(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _nested_tensor_strides(const Tensor & self); // {"schema": "aten::_nested_tensor_strides(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+::std::vector<int64_t> _nested_tensor_offsets(const Tensor & self); // {"schema": "aten::_nested_tensor_offsets(Tensor self) -> int[]", "dispatch": "True", "default": "False"}
+Tensor _nested_from_padded_and_nested_example(const Tensor & padded, const Tensor & nt_example); // {"schema": "aten::_nested_from_padded_and_nested_example(Tensor padded, Tensor nt_example) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _nested_view_from_buffer(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, IntArrayRef offsets); // {"schema": "aten::_nested_view_from_buffer(Tensor(a) self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor _nested_view_from_buffer_copy(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, IntArrayRef offsets); // {"schema": "aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _trilinear(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim); // {"schema": "aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction); // {"schema": "aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor trunc(const Tensor & self); // {"schema": "aten::trunc(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & trunc_(Tensor & self); // {"schema": "aten::trunc_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & trunc_out(const Tensor & self, Tensor & out); // {"schema": "aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor fix(const Tensor & self); // {"schema": "aten::fix(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & fix_(Tensor & self); // {"schema": "aten::fix_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & fix_out(const Tensor & self, Tensor & out); // {"schema": "aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor type_as(const Tensor & self, const Tensor & other); // {"schema": "aten::type_as(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"}
+bool _has_compatible_shallow_copy_type(const Tensor & self, const Tensor & from); // {"schema": "aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> _unique(const Tensor & self, bool sorted, bool return_inverse); // {"schema": "aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor,Tensor> unique_dim(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); // {"schema": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor,Tensor> unique_consecutive(const Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim); // {"schema": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor,Tensor> unique_dim_consecutive(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts); // {"schema": "aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor,Tensor> _unique2(const Tensor & self, bool sorted, bool return_inverse, bool return_counts); // {"schema": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"}
+Tensor _unsafe_view(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor unsqueeze(const Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor & unsqueeze_(Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor vander(const Tensor & x, c10::optional<int64_t> N, bool increasing); // {"schema": "aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor var(const Tensor & self, bool unbiased); // {"schema": "aten::var(Tensor self, bool unbiased=True) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor var(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor var(const Tensor & self, OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & var_out(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & var_out(const Tensor & self, OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim, Tensor & out); // {"schema": "aten::var.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor var(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & var_out(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim, Tensor & out); // {"schema": "aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor var(const Tensor & self, DimnameList dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & var_out(const Tensor & self, DimnameList dim, c10::optional<int64_t> correction, bool keepdim, Tensor & out); // {"schema": "aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> var_mean(const Tensor & self, bool unbiased); // {"schema": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> var_mean(const Tensor & self, OptionalIntArrayRef dim, bool unbiased, bool keepdim); // {"schema": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> var_mean(const Tensor & self, OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::var_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor> var_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); // {"schema": "aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> var_mean(const Tensor & self, DimnameList dim, c10::optional<int64_t> correction, bool keepdim); // {"schema": "aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+Tensor view_as(const Tensor & self, const Tensor & other); // {"schema": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)", "dispatch": "False", "default": "True"}
+Tensor where(const Tensor & condition, const Tensor & self, const Tensor & other); // {"schema": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & where_out(const Tensor & condition, const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor where(const Tensor & condition, const Scalar & self, const Tensor & other); // {"schema": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor where(const Tensor & condition, const Tensor & self, const Scalar & other); // {"schema": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor where(const Tensor & condition, const Scalar & self, const Scalar & other); // {"schema": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"}
+::std::vector<Tensor> where(const Tensor & condition); // {"schema": "aten::where(Tensor condition) -> Tensor[]", "dispatch": "False", "default": "True"}
+Tensor norm_except_dim(const Tensor & v, int64_t pow, int64_t dim); // {"schema": "aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _weight_norm(const Tensor & v, const Tensor & g, int64_t dim); // {"schema": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> _weight_norm_interface(const Tensor & v, const Tensor & g, int64_t dim); // {"schema": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor> _weight_norm_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); // {"schema": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor> _weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); // {"schema": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"}
+Tensor zeros(IntArrayRef size, c10::optional<DimnameList> names, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _efficientzerotensor(IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_efficientzerotensor(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor zeros(c10::SymIntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & zeros_out(c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor zeros_like(const Tensor & self, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output); // {"schema": "aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _standard_gamma(const Tensor & self, c10::optional<Generator> generator); // {"schema": "aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total); // {"schema": "aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _sample_dirichlet(const Tensor & self, c10::optional<Generator> generator); // {"schema": "aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor poisson(const Tensor & self, c10::optional<Generator> generator); // {"schema": "aten::poisson(Tensor self, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor binomial(const Tensor & count, const Tensor & prob, c10::optional<Generator> generator); // {"schema": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor native_norm(const Tensor & self, const Scalar & p); // {"schema": "aten::native_norm(Tensor self, Scalar p=2) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor native_norm(const Tensor & self, const c10::optional<Scalar> & p, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _sparse_sum(const Tensor & self); // {"schema": "aten::_sparse_sum(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_sum(const Tensor & self, ScalarType dtype); // {"schema": "aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_sum(const Tensor & self, IntArrayRef dim); // {"schema": "aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _sparse_sum(const Tensor & self, IntArrayRef dim, ScalarType dtype); // {"schema": "aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_sum_backward(const Tensor & grad, const Tensor & self, IntArrayRef dim); // {"schema": "aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _sparse_csr_sum(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _sparse_csr_prod(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype); // {"schema": "aten::_sparse_csr_prod.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"}
dtype=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); // {"schema": "aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_log_softmax(const Tensor & self, Dimname dim, c10::optional dtype); // {"schema": "aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _sparse_log_softmax(const Tensor & self, int64_t dim, bool half_to_float); // {"schema": "aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _sparse_log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); // {"schema": "aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _spdiags(const Tensor & diagonals, const Tensor & offsets, IntArrayRef shape, c10::optional layout); // {"schema": "aten::_spdiags(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor norm(const Tensor & self, const c10::optional & p, ScalarType dtype); // {"schema": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const Scalar & p); // {"schema": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, ScalarType dtype); // {"schema": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor norm(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim); // {"schema": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) 
+Tensor & norm_out(const Tensor & self, const c10::optional<Scalar> & p, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor norm(const Tensor & self, const c10::optional<Scalar> & p, DimnameList dim, bool keepdim, ScalarType dtype); // {"schema": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor norm(const Tensor & self, const c10::optional<Scalar> & p, DimnameList dim, bool keepdim); // {"schema": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & norm_out(const Tensor & self, const c10::optional<Scalar> & p, DimnameList dim, bool keepdim, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & norm_out(const Tensor & self, const c10::optional<Scalar> & p, DimnameList dim, bool keepdim, Tensor & out); // {"schema": "aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor,Tensor> frexp(const Tensor & self); // {"schema": "aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> frexp_out(const Tensor & self, Tensor & mantissa, Tensor & exponent); // {"schema": "aten::frexp.Tensor_out(Tensor self, *, Tensor(a!) mantissa, Tensor(b!) exponent) -> (Tensor(a!) mantissa, Tensor(b!) exponent)", "dispatch": "True", "default": "False"}
+Tensor frobenius_norm(const Tensor & self); // {"schema": "aten::frobenius_norm(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor frobenius_norm(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & frobenius_norm_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor nuclear_norm(const Tensor & self, bool keepdim); // {"schema": "aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & nuclear_norm_out(const Tensor & self, bool keepdim, Tensor & out); // {"schema": "aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor nuclear_norm(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & nuclear_norm_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor clone(const Tensor & self, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor positive(const Tensor & self); // {"schema": "aten::positive(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"}
+const Tensor & resize_as_(const Tensor & self, const Tensor & the_template, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+const Tensor & resize_as_sparse_(const Tensor & self, const Tensor & the_template); // {"schema": "aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & zero_(Tensor & self); // {"schema": "aten::zero_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & sub_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sub(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sub_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor sub(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sub_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & subtract_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor subtract(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & subtract_(Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor subtract(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & subtract_(Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor rsub(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & heaviside_out(const Tensor & self, const Tensor & values, Tensor & out); // {"schema": "aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor heaviside(const Tensor & self, const Tensor & values); // {"schema": "aten::heaviside(Tensor self, Tensor values) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & heaviside_(Tensor & self, const Tensor & values); // {"schema": "aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor rsub(const Tensor & self, const Scalar & other, const Scalar & alpha); // {"schema": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _sparse_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sparse_sampled_addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::sparse_sampled_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sparse_sampled_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::sparse_sampled_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _addmm_activation_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, bool use_gelu, Tensor & out); // {"schema": "aten::_addmm_activation.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _addmm_activation(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, bool use_gelu); // {"schema": "aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor sparse_compressed_tensor(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_csr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_csr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_csc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_bsr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_bsc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_bsc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_compressed_tensor(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_csr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_csr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_csc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_bsr_tensor(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_bsc_tensor(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_bsc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_compressed_tensor_unsafe(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_compressed_tensor_unsafe(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_csr_tensor_unsafe(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_csr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_csc_tensor_unsafe(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_csc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_bsr_tensor_unsafe(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_bsr_tensor_unsafe(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_bsc_tensor_unsafe(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_coo_tensor(IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "False", "default": "True"}
+void _validate_sparse_coo_tensor_args(const Tensor & indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"}
+void _validate_sparse_compressed_tensor_args(const Tensor & compressed_indices, const Tensor & plain_indices, const Tensor & values, IntArrayRef size, Layout layout); // {"schema": "aten::_validate_sparse_compressed_tensor_args(Tensor compressed_indices, Tensor plain_indices, Tensor values, int[] size, Layout layout) -> ()", "dispatch": "False", "default": "True"}
+void _validate_sparse_csr_tensor_args(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_csr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"}
+void _validate_sparse_csc_tensor_args(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"}
+void _validate_sparse_bsr_tensor_args(const Tensor & crow_indices, const Tensor & col_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_bsr_tensor_args(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"}
+void _validate_sparse_bsc_tensor_args(const Tensor & ccol_indices, const Tensor & row_indices, const Tensor & values, IntArrayRef size); // {"schema": "aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()", "dispatch": "False", "default": "True"}
+Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory); // {"schema": "aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor", "dispatch": "True", "default": "False"}
+const Tensor & sparse_resize_(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+const Tensor & sparse_resize_and_clear_(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sparse_mask(const Tensor & self, const Tensor & mask); // {"schema": "aten::sparse_mask(Tensor self, Tensor mask) -> Tensor", "dispatch": "True", "default": "False"}
+::std::vector<Tensor> _to_cpu(TensorList tensors); // {"schema": "aten::_to_cpu(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"}
+Tensor to_dense(const Tensor & self, c10::optional<ScalarType> dtype); // {"schema": "aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _to_dense(const Tensor & self, c10::optional<ScalarType> dtype); // {"schema": "aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"}
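// Usage sketch (editor's annotation, illustrative only): sparse_mask()
// projects a dense tensor onto the sparsity pattern of a sparse mask, and
// to_dense() materializes the implicit zeros again. The mask is built here
// in the dense tensor's dtype so only its index pattern matters:
//
//   #include <torch/torch.h>
//   #include <iostream>
//   int main() {
//     auto dense  = torch::arange(6.).reshape({2, 3});
//     auto mask   = dense.gt(2.).to(dense.dtype()).to_sparse();
//     auto pruned = dense.sparse_mask(mask);   // keeps only mask's entries
//     std::cout << pruned.to_dense() << "\n";
//   }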
+Tensor to_dense_backward(const Tensor & grad, const Tensor & input); // {"schema": "aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor", "dispatch": "False", "default": "True"}
+int64_t sparse_dim(const Tensor & self); // {"schema": "aten::sparse_dim(Tensor self) -> int", "dispatch": "True", "default": "False"}
+int64_t _dimI(const Tensor & self); // {"schema": "aten::_dimI(Tensor self) -> int", "dispatch": "True", "default": "False"}
+int64_t dense_dim(const Tensor & self); // {"schema": "aten::dense_dim(Tensor self) -> int", "dispatch": "True", "default": "False"}
+int64_t _dimV(const Tensor & self); // {"schema": "aten::_dimV(Tensor self) -> int", "dispatch": "True", "default": "False"}
+int64_t _nnz(const Tensor & self); // {"schema": "aten::_nnz(Tensor self) -> int", "dispatch": "True", "default": "False"}
+Tensor coalesce(const Tensor & self); // {"schema": "aten::coalesce(Tensor(a) self) -> Tensor(a)", "dispatch": "False", "default": "True"}
+Tensor _coalesce(const Tensor & self); // {"schema": "aten::_coalesce(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+bool is_coalesced(const Tensor & self); // {"schema": "aten::is_coalesced(Tensor self) -> bool", "dispatch": "True", "default": "False"}
+Tensor _indices(const Tensor & self); // {"schema": "aten::_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor _values(const Tensor & self); // {"schema": "aten::_values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor & _coalesced_(Tensor & self, bool coalesced); // {"schema": "aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor indices(const Tensor & self); // {"schema": "aten::indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor values(const Tensor & self); // {"schema": "aten::values(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor crow_indices(const Tensor & self); // {"schema": "aten::crow_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor col_indices(const Tensor & self); // {"schema": "aten::col_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor ccol_indices(const Tensor & self); // {"schema": "aten::ccol_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor row_indices(const Tensor & self); // {"schema": "aten::row_indices(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor & hspmm_out(const Tensor & mat1, const Tensor & mat2, Tensor & out); // {"schema": "aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor hspmm(const Tensor & mat1, const Tensor & mat2); // {"schema": "aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & copy_sparse_to_sparse_(Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", "dispatch": "True", "default": "False"}
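// Usage sketch (editor's annotation, illustrative only): COO tensors may
// carry duplicate indices until coalesce() merges them; indices()/values()
// require a coalesced tensor, while _indices()/_values() skip that check:
//
//   #include <torch/torch.h>
//   #include <iostream>
//   int main() {
//     auto idx = torch::tensor({{0, 0, 1}, {0, 0, 2}}, torch::kLong);
//     auto val = torch::tensor({1.0, 2.0, 3.0});
//     auto sp  = torch::sparse_coo_tensor(idx, val, {2, 3});
//     auto c   = sp.coalesce();              // duplicate (0,0) summed to 3.0
//     std::cout << c._nnz() << "\n";         // 2 after merging
//     std::cout << c.indices() << c.values();
//   }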
+::std::vector<Tensor> unbind(const Tensor & self, int64_t dim); // {"schema": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]", "dispatch": "True", "default": "True"}
+::std::vector<Tensor> unbind(const Tensor & self, Dimname dim); // {"schema": "aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]", "dispatch": "False", "default": "True"}
+Tensor to_sparse(const Tensor & self, int64_t sparse_dim); // {"schema": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse(const Tensor & self); // {"schema": "aten::to_sparse(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse_csr(const Tensor & self); // {"schema": "aten::to_sparse_csr(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse_csc(const Tensor & self); // {"schema": "aten::to_sparse_csc(Tensor self) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse_bsr(const Tensor & self, IntArrayRef blocksize); // {"schema": "aten::to_sparse_bsr(Tensor self, int[2] blocksize) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_sparse_bsc(const Tensor & self, IntArrayRef blocksize); // {"schema": "aten::to_sparse_bsc(Tensor self, int[2] blocksize) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_mkldnn(const Tensor & self, c10::optional<ScalarType> dtype); // {"schema": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor mkldnn_reorder_conv2d_weight(const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups); // {"schema": "aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor mkldnn_reorder_conv3d_weight(const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups); // {"schema": "aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor to_mkldnn_backward(const Tensor & grad, const Tensor & input); // {"schema": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor quantize_per_tensor_dynamic(const Tensor & self, ScalarType dtype, bool reduce_range); // {"schema": "aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor quantize_per_tensor(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype); // {"schema": "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor quantize_per_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, ScalarType dtype); // {"schema": "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"}
+::std::vector<Tensor> quantize_per_tensor(TensorList tensors, const Tensor & scales, const Tensor & zero_points, ScalarType dtype); // {"schema": "aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]", "dispatch": "True", "default": "False"}
+Tensor 
quantize_per_channel(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype); // {"schema": "aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "False"} +Tensor dequantize(const Tensor & self); // {"schema": "aten::dequantize.self(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector dequantize(TensorList tensors); // {"schema": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]", "dispatch": "True", "default": "False"} +double q_scale(const Tensor & self); // {"schema": "aten::q_scale(Tensor self) -> float", "dispatch": "True", "default": "False"} +int64_t q_zero_point(const Tensor & self); // {"schema": "aten::q_zero_point(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor q_per_channel_scales(const Tensor & self); // {"schema": "aten::q_per_channel_scales(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor q_per_channel_zero_points(const Tensor & self); // {"schema": "aten::q_per_channel_zero_points(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +int64_t q_per_channel_axis(const Tensor & self); // {"schema": "aten::q_per_channel_axis(Tensor self) -> int", "dispatch": "True", "default": "False"} +Tensor int_repr(const Tensor & self); // {"schema": "aten::int_repr(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _make_per_tensor_quantized_tensor(const Tensor & self, double scale, int64_t zero_point); // {"schema": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _make_per_channel_quantized_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis); // {"schema": "aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor", "dispatch": "True", "default": "False"} +QScheme qscheme(const Tensor & self); // {"schema": "aten::qscheme(Tensor self) -> QScheme", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fake_quantize_per_tensor_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fake_quantize_per_tensor_affine_cachemask(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(const Tensor & self, const Tensor & scale, const Tensor & zero_point, const Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max); // {"schema": "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, 
int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_tensor_affine_cachemask_backward(const Tensor & grad, const Tensor & mask); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _fake_quantize_learnable_per_tensor_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_learnable_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple fake_quantize_per_channel_affine_cachemask(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +Tensor fake_quantize_per_channel_affine_cachemask_backward(const Tensor & grad, const Tensor & mask); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _fake_quantize_learnable_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _fake_quantize_learnable_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor fused_moving_avg_obs_fake_quant(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, 
Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _fused_moving_avg_obs_fq_helper(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask)", "dispatch": "True", "default": "False"} +::std::tuple _choose_qparams_per_tensor(const Tensor & self, bool reduce_range); // {"schema": "aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int)", "dispatch": "False", "default": "True"} +Tensor _saturate_weight_to_fp16(const Tensor & weight); // {"schema": "aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple choose_qparams_optimized(const Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width); // {"schema": "aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor _autocast_to_reduced_precision(const Tensor & self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype); // {"schema": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _autocast_to_full_precision(const Tensor & self, bool cuda_enabled, bool cpu_enabled); // {"schema": "aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor _to_copy(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, c10::optional memory_format); // {"schema": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor to(const Tensor & self, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor to(const Tensor & self, const Tensor & other, bool non_blocking, bool copy, c10::optional memory_format); // {"schema": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", "dispatch": "False", "default": "True"} +::std::vector meshgrid(TensorList tensors); // {"schema": "aten::meshgrid(Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +::std::vector meshgrid(TensorList tensors, c10::string_view indexing); // {"schema": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor cartesian_prod(TensorList tensors); // {"schema": "aten::cartesian_prod(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor combinations(const Tensor & self, int64_t r, bool with_replacement); // {"schema": "aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor", "dispatch": "False", "default": "True"} +Scalar item(const Tensor & self); // {"schema": "aten::item(Tensor self) -> Scalar", "dispatch": "False", "default": "True"} +ScalarType result_type(const Tensor & tensor, const Tensor & other); // {"schema": "aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Tensor & tensor, const Scalar & other); // {"schema": "aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Scalar & scalar, const Tensor & tensor); // {"schema": "aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType", "dispatch": "False", "default": "True"} +ScalarType result_type(const Scalar & scalar1, const Scalar & scalar2); // {"schema": "aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType", "dispatch": "False", "default": "True"} +bool can_cast(ScalarType from, ScalarType to); // {"schema": "aten::can_cast(ScalarType from, ScalarType to) -> bool", "dispatch": "False", "default": "True"} +ScalarType promote_types(ScalarType type1, ScalarType type2); // {"schema": "aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType", "dispatch": "False", "default": "True"} +Scalar _local_scalar_dense(const Tensor & self); // {"schema": "aten::_local_scalar_dense(Tensor self) -> Scalar", "dispatch": "True", "default": "False"} +::std::tuple _lstm_mps(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple,::std::vector> lstm_mps_backward(const Tensor & grad_y, const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & z_state, const Tensor & cell_state_fwd, const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, 
double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::lstm_mps_backward(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor[], Tensor[])", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell_backward_impl(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_lstm_cell_backward_impl(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _thnn_differentiable_lstm_cell_backward(const c10::optional & grad_hy, const c10::optional & grad_cy, const Tensor & input_gates, const Tensor & hidden_gates, const c10::optional & input_bias, const c10::optional & hidden_bias, const Tensor & cx, const Tensor & cy); // {"schema": "aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_fused_gru_cell_backward(const Tensor & grad_hy, const Tensor & workspace, bool has_bias); // {"schema": "aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _thnn_differentiable_gru_cell_backward(const Tensor & grad_hy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional & input_bias, const c10::optional & hidden_bias); // {"schema": "aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? 
hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_tanh(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_tanh(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_relu(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); // {"schema": "aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple rnn_relu(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); // {"schema": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple lstm_cell(const Tensor & 
input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const c10::optional & b_ih, const c10::optional & b_hh); // {"schema": "aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const 
Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, const Scalar & scale_ih, const Scalar & scale_hh, const Scalar & zero_point_ih, const Scalar & zero_point_hh); // {"schema": "aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _pack_padded_sequence(const Tensor & input, const Tensor & lengths, bool batch_first); // {"schema": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor _pack_padded_sequence_backward(const Tensor & grad, IntArrayRef input_size, const Tensor & batch_sizes, bool batch_first); // {"schema": "aten::_pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple _pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, const Scalar & padding_value, int64_t total_length); // {"schema": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor & set_(Tensor & self, Storage source); // {"schema": "aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self, const Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & set_(Tensor & self, const Tensor & source); // {"schema": "aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & set_(Tensor & self); // {"schema": "aten::set_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lift(const Tensor & self); // {"schema": "aten::lift(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor lift_fresh(const Tensor & self); // {"schema": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor lift_fresh_copy(const Tensor & self); // {"schema": "aten::lift_fresh_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +bool is_set_to(const Tensor & self, const Tensor & tensor); // {"schema": "aten::is_set_to(Tensor self, Tensor tensor) -> bool", "dispatch": "True", "default": "False"} +Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Scalar & value); // {"schema": "aten::masked_fill_.Scalar(Tensor(a!) 
self, Tensor mask, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor masked_fill(const Tensor & self, const Tensor & mask, const Scalar & value); // {"schema": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value); // {"schema": "aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor masked_fill(const Tensor & self, const Tensor & mask, const Tensor & value); // {"schema": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source); // {"schema": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor masked_scatter(const Tensor & self, const Tensor & mask, const Tensor & source); // {"schema": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _masked_softmax(const Tensor & self, const Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type); // {"schema": "aten::_masked_softmax(Tensor self, Tensor mask, int? dim=None, int? mask_type=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _masked_softmax_backward(const Tensor & grad_output, const Tensor & output, const Tensor & mask, c10::optional<int64_t> dim); // {"schema": "aten::_masked_softmax_backward(Tensor grad_output, Tensor output, Tensor mask, int? dim=None) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor view(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor view(const Tensor & self, ScalarType dtype); // {"schema": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)", "dispatch": "True", "default": "True"}
+Tensor & put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate); // {"schema": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor put(const Tensor & self, const Tensor & index, const Tensor & source, bool accumulate); // {"schema": "aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & index_add_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha, Tensor & out); // {"schema": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "True"}
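// Usage sketch (editor's annotation, illustrative only): masked_fill_ writes
// a scalar wherever a boolean mask is true; index_add_ accumulates source
// rows into self at the positions named by an index tensor, i.e.
// self[idx[i]] += alpha * src[i] along the given dimension:
//
//   #include <torch/torch.h>
//   #include <iostream>
//   int main() {
//     auto x = torch::zeros({3, 3});
//     x.masked_fill_(torch::eye(3).to(torch::kBool), 1.0);
//     auto src = torch::ones({2, 3});
//     auto idx = torch::tensor({0, 2}, torch::kLong);
//     x.index_add_(0, idx, src, /*alpha=*/2.0);  // rows 0 and 2 gain +2
//     std::cout << x << "\n";
//   }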
+Tensor index_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor index_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source, const Scalar & alpha); // {"schema": "aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & index_reduce_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self, Tensor & out); // {"schema": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & index_reduce_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self); // {"schema": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor index_reduce(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, c10::string_view reduce, bool include_self); // {"schema": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)", "dispatch": "False", "default": "True"}
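// Usage sketch (editor's annotation, illustrative only): index_reduce_
// scatters source values into self along a dimension and resolves index
// collisions with a named reduction ("prod", "mean", "amax", "amin");
// index_fill_ overwrites the selected positions with a single value:
//
//   #include <torch/torch.h>
//   #include <iostream>
//   int main() {
//     auto x   = torch::ones({3});
//     auto idx = torch::tensor({0, 0, 2}, torch::kLong);
//     auto src = torch::tensor({2.0, 5.0, 4.0});
//     x.index_reduce_(0, idx, src, "amax");   // x -> [5, 1, 4]
//     x.index_fill_(0, torch::tensor({1}, torch::kLong), -1.0);
//     std::cout << x << "\n";                 // [5, -1, 4]
//   }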
+Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); // {"schema": "aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, Tensor & out); // {"schema": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, Tensor & out); // {"schema": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce); // {"schema": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce); // {"schema": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, Tensor & out); // {"schema": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce); // {"schema": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce); // {"schema": "aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)", "dispatch": "True", "default": "True"}
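// Usage sketch (editor's annotation, illustrative only): for dim == 0,
// scatter.src writes out[index[i][j]][j] = src[i][j]; the .reduce variants
// ("add", "multiply") combine colliding writes instead of overwriting:
//
//   #include <torch/torch.h>
//   #include <iostream>
//   int main() {
//     auto src = torch::arange(1., 7.).reshape({2, 3});
//     auto idx = torch::tensor({{0, 1, 2}, {2, 0, 1}}, torch::kLong);
//     auto out = torch::zeros({3, 3}).scatter_(0, idx, src);
//     auto acc = torch::zeros({3, 3}).scatter_(0, idx, src, "add");
//     std::cout << out << acc;
//   }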
+Tensor & scatter_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, c10::string_view reduce, Tensor & out); // {"schema": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Scalar & value); // {"schema": "aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor scatter_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & scatter_add_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, Tensor & out); // {"schema": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor scatter_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); // {"schema": "aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor scatter_reduce(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self); // {"schema": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & scatter_reduce_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self); // {"schema": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & scatter_reduce_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, c10::string_view reduce, bool include_self, Tensor & out); // {"schema": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & eq_(Tensor & self, const Scalar & other); // {"schema": "aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & eq_(Tensor & self, const Tensor & other); // {"schema": "aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bitwise_and_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_and_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_and(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_and_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_and_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __and__(const Tensor & self, const Scalar & other); // {"schema": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __and__(const Tensor & self, const Tensor & other); // {"schema": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __iand__(Tensor & self, const Scalar & other); // {"schema": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __iand__(Tensor & self, const Tensor & other); // {"schema": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_or_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_or_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor bitwise_or(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_or(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_or_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_or_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_or_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __or__(const Tensor & self, const Scalar & other); // {"schema": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __or__(const Tensor & self, const Tensor & other); // {"schema": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __ior__(Tensor & self, const Scalar & other); // {"schema": "aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __ior__(Tensor & self, const Tensor & other); // {"schema": "aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_xor_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & bitwise_xor_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor bitwise_xor(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor bitwise_xor(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_xor_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & bitwise_xor_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor __xor__(const Tensor & self, const Scalar & other); // {"schema": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor __xor__(const Tensor & self, const Tensor & other); // {"schema": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & __ixor__(Tensor & self, const Scalar & other); // {"schema": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & __ixor__(Tensor & self, const Tensor & other); // {"schema": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor __lshift__(const Tensor & self, const Scalar & other); // {"schema": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor __lshift__(const Tensor & self, const Tensor & other); // {"schema": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & __ilshift__(Tensor & self, const Scalar & other); // {"schema": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & __ilshift__(Tensor & self, const Tensor & other); // {"schema": "aten::__ilshift__.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_left_shift(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_left_shift(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_left_shift_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_left_shift(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor __rshift__(const Tensor & self, const Scalar & other); // {"schema": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor __rshift__(const Tensor & self, const Tensor & other); // {"schema": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & __irshift__(Tensor & self, const Scalar & other); // {"schema": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & __irshift__(Tensor & self, const Tensor & other); // {"schema": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_right_shift(const Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_(Tensor & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bitwise_right_shift(const Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_(Tensor & self, const Scalar & other); // {"schema": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bitwise_right_shift_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bitwise_right_shift(const Scalar & self, const Tensor & other); // {"schema": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_(Tensor & self, int64_t diagonal); // {"schema": "aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & triu_(Tensor & self, int64_t diagonal); // {"schema": "aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & digamma_(Tensor & self); // {"schema": "aten::digamma_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lerp_(Tensor & self, const Tensor & end, const Scalar & weight); // {"schema": "aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lerp_(Tensor & self, const Tensor & end, const Tensor & weight); // {"schema": "aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & addbmm_out(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, const Scalar & beta, const Scalar & alpha); // {"schema": "aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, int64_t from, c10::optional to, c10::optional generator); // {"schema": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, int64_t to, c10::optional generator); // {"schema": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & random_(Tensor & self, c10::optional generator); // {"schema": "aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & uniform_(Tensor & self, double from, double to, c10::optional generator); // {"schema": "aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & cauchy_(Tensor & self, double median, double sigma, c10::optional generator); // {"schema": "aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? 
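
random_, uniform_, and cauchy_ above are in-place samplers: they overwrite self and take an optional Generator (defaulted when omitted). A minimal sketch, assuming the schema defaults carry over to the C++ Tensor methods:

#include <ATen/ATen.h>
#include <iostream>

void inplace_fill_demo() {
  at::Tensor t = at::empty({4});            // uninitialized float storage
  t.uniform_(0.0, 1.0);                     // aten::uniform_ -> U(0, 1) fill
  at::Tensor k = at::empty({4}, at::kLong);
  k.random_(0, 10);                         // aten::random_.from -> ints in [0, 10)
  std::cout << t << "\n" << k << "\n";
}
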
generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & log_normal_(Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & exponential_(Tensor & self, double lambd, c10::optional generator); // {"schema": "aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & geometric_(Tensor & self, double p, c10::optional generator); // {"schema": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & diag_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor diag(const Tensor & self, int64_t diagonal); // {"schema": "aten::diag(Tensor self, int diagonal=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diag_backward(const Tensor & grad, c10::SymIntArrayRef input_sizes, int64_t diagonal); // {"schema": "aten::diag_backward(Tensor grad, SymInt[] input_sizes, int diagonal) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & cross_out(const Tensor & self, const Tensor & other, c10::optional dim, Tensor & out); // {"schema": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor cross(const Tensor & self, const Tensor & other, c10::optional dim); // {"schema": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & triu_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor triu(const Tensor & self, int64_t diagonal); // {"schema": "aten::triu(Tensor self, int diagonal=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & tril_out(const Tensor & self, int64_t diagonal, Tensor & out); // {"schema": "aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor tril(const Tensor & self, int64_t diagonal); // {"schema": "aten::tril(Tensor self, int diagonal=0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor tril_indices(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor triu_indices(int64_t row, int64_t col, int64_t offset, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
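
tril/triu zero out one triangle of a matrix, and tril_indices/triu_indices enumerate the kept positions (note the schema's dtype=long default). An illustrative sketch:

#include <ATen/ATen.h>
#include <iostream>

void triangular_demo() {
  at::Tensor m = at::ones({3, 3});
  std::cout << at::tril(m) << "\n";                // zeros above the main diagonal
  std::cout << at::tril_indices(3, 3, 0) << "\n";  // 2 x 6 int64 (row, col) pairs
}
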
pin_memory=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor trace(const Tensor & self); // {"schema": "aten::trace(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor trace_backward(const Tensor & grad, IntArrayRef sizes); // {"schema": "aten::trace_backward(Tensor grad, int[] sizes) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ne_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ne(const Tensor & self, const Scalar & other); // {"schema": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ne_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ne(const Tensor & self, const Tensor & other); // {"schema": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ne_(Tensor & self, const Scalar & other); // {"schema": "aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ne_(Tensor & self, const Tensor & other); // {"schema": "aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & not_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor not_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & not_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor not_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & not_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & not_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & eq_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor eq(const Tensor & self, const Scalar & other); // {"schema": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & eq_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor eq(const Tensor & self, const Tensor & other); // {"schema": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ge(const Tensor & self, const Scalar & other); // {"schema": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ge(const Tensor & self, const Tensor & other); // {"schema": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & ge_(Tensor & self, const Scalar & other); // {"schema": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ge_(Tensor & self, const Tensor & other); // {"schema": "aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & greater_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & greater_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & le_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor le(const Tensor & self, const Scalar & other); // {"schema": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & le_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor le(const Tensor & self, const Tensor & other); // {"schema": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & le_(Tensor & self, const Scalar & other); // {"schema": "aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & le_(Tensor & self, const Tensor & other); // {"schema": "aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & less_equal_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less_equal(const Tensor & self, const Scalar & other); // {"schema": "aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_equal_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less_equal(const Tensor & self, const Tensor & other); // {"schema": "aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_equal_(Tensor & self, const Scalar & other); // {"schema": "aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & less_equal_(Tensor & self, const Tensor & other); // {"schema": "aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & gt_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gt(const Tensor & self, const Scalar & other); // {"schema": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gt_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gt(const Tensor & self, const Tensor & other); // {"schema": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & gt_(Tensor & self, const Scalar & other); // {"schema": "aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & gt_(Tensor & self, const Tensor & other); // {"schema": "aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & greater_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::greater.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater(const Tensor & self, const Scalar & other); // {"schema": "aten::greater.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::greater.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor greater(const Tensor & self, const Tensor & other); // {"schema": "aten::greater.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & greater_(Tensor & self, const Scalar & other); // {"schema": "aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & greater_(Tensor & self, const Tensor & other); // {"schema": "aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & lt_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lt(const Tensor & self, const Scalar & other); // {"schema": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lt_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lt(const Tensor & self, const Tensor & other); // {"schema": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & lt_(Tensor & self, const Scalar & other); // {"schema": "aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & lt_(Tensor & self, const Tensor & other); // {"schema": "aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & less_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less(const Tensor & self, const Scalar & other); // {"schema": "aten::less.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor less(const Tensor & self, const Tensor & other); // {"schema": "aten::less.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & less_(Tensor & self, const Scalar & other); // {"schema": "aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & less_(Tensor & self, const Tensor & other); // {"schema": "aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & take_out(const Tensor & self, const Tensor & index, Tensor & out); // {"schema": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor take(const Tensor & self, const Tensor & index); // {"schema": "aten::take(Tensor self, Tensor index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & take_along_dim_out(const Tensor & self, const Tensor & indices, c10::optional dim, Tensor & out); // {"schema": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor take_along_dim(const Tensor & self, const Tensor & indices, c10::optional dim); // {"schema": "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & index_select_out(const Tensor & self, int64_t dim, const Tensor & index, Tensor & out); // {"schema": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index); // {"schema": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & index_select_out(const Tensor & self, Dimname dim, const Tensor & index, Tensor & out); // {"schema": "aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor index_select(const Tensor & self, Dimname dim, const Tensor & index); // {"schema": "aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor", "dispatch": "False", "default": "True"} +Tensor index_select_backward(const Tensor & grad, IntArrayRef self_sizes, int64_t dim, const Tensor & index); // {"schema": "aten::index_select_backward(Tensor grad, int[] self_sizes, int dim, Tensor index) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & masked_select_out(const Tensor & self, const Tensor & mask, Tensor & out); // {"schema": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor masked_select(const Tensor & self, const Tensor & mask); // {"schema": "aten::masked_select(Tensor self, Tensor mask) -> Tensor", "dispatch": "True", "default": "False"} +Tensor masked_select_backward(const Tensor & grad, const Tensor & input, const Tensor & mask); // {"schema": "aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nonzero_out(const Tensor & self, Tensor & out); // {"schema": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nonzero(const Tensor & self); // {"schema": "aten::nonzero(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::vector nonzero_numpy(const Tensor & self); // {"schema": "aten::nonzero_numpy(Tensor self) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor argwhere(const Tensor & self); // {"schema": "aten::argwhere(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & gather_out(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad, Tensor & out); // {"schema": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor gather_backward(const Tensor & grad, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & gather_out(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad, Tensor & out); // {"schema": "aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor gather(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad); // {"schema": "aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad); // {"schema": "aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & addcmul_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value, Tensor & out); // {"schema": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & addcdiv_out(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value, Tensor & out); // {"schema": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, const Scalar & value); // {"schema": "aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor cross_entropy_loss(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, double label_smoothing); // {"schema": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, float label_smoothing=0.0) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple triangular_solve_out(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular, Tensor & X, Tensor & M); // {"schema": "aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) 
cloned_coefficient)", "dispatch": "True", "default": "False"} +::std::tuple triangular_solve(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular); // {"schema": "aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)", "dispatch": "True", "default": "True"} +void _linalg_check_errors(const Tensor & info, c10::string_view api_name, bool is_matrix); // {"schema": "aten::_linalg_check_errors(Tensor info, str api_name, *, bool is_matrix) -> ()", "dispatch": "True", "default": "True"} +Tensor & linalg_solve_triangular_out(const Tensor & self, const Tensor & B, bool upper, bool left, bool unitriangular, Tensor & out); // {"schema": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linalg_solve_triangular(const Tensor & self, const Tensor & B, bool upper, bool left, bool unitriangular); // {"schema": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor linalg_vander(const Tensor & x, c10::optional N); // {"schema": "aten::linalg_vander(Tensor x, *, int? N=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple symeig_out(const Tensor & self, bool eigenvectors, bool upper, Tensor & e, Tensor & V); // {"schema": "aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "True", "default": "True"} +::std::tuple symeig(const Tensor & self, bool eigenvectors, bool upper); // {"schema": "aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "True", "default": "True"} +::std::tuple _symeig_helper(const Tensor & self, bool eigenvectors, bool upper); // {"schema": "aten::_symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple svd_out(const Tensor & self, bool some, bool compute_uv, Tensor & U, Tensor & S, Tensor & V); // {"schema": "aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)", "dispatch": "False", "default": "True"} +::std::tuple svd(const Tensor & self, bool some, bool compute_uv); // {"schema": "aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)", "dispatch": "False", "default": "True"} +Tensor swapaxes(const Tensor & self, int64_t axis0, int64_t axis1); // {"schema": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor & swapaxes_(Tensor & self, int64_t axis0, int64_t axis1); // {"schema": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor swapdims(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", "dispatch": "False", "default": "True"} +Tensor & swapdims_(Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::swapdims_(Tensor(a!) 
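
linalg_solve_triangular solves A X = B while exploiting A's triangular structure; note that `upper` has no default in this schema, so it must always be passed. An illustrative sketch (A is made invertible by adding the identity):

#include <ATen/ATen.h>
#include <iostream>

void tri_solve_demo() {
  at::Tensor A = at::tril(at::rand({3, 3})) + at::eye(3); // invertible lower-triangular
  at::Tensor B = at::rand({3, 2});
  at::Tensor X = at::linalg_solve_triangular(A, B, /*upper=*/false);
  std::cout << (at::mm(A, X) - B).abs().max() << "\n";    // residual ~ 0
}
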
self, int dim0, int dim1) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & cholesky_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor cholesky(const Tensor & self, bool upper); // {"schema": "aten::cholesky(Tensor self, bool upper=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cholesky_solve_out(const Tensor & self, const Tensor & input2, bool upper, Tensor & out); // {"schema": "aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor cholesky_solve(const Tensor & self, const Tensor & input2, bool upper); // {"schema": "aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper); // {"schema": "aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor", "dispatch": "True", "default": "False"} +Tensor cholesky_inverse(const Tensor & self, bool upper); // {"schema": "aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & cholesky_inverse_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple qr_out(const Tensor & self, bool some, Tensor & Q, Tensor & R); // {"schema": "aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", "dispatch": "False", "default": "True"} +::std::tuple qr(const Tensor & self, bool some); // {"schema": "aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)", "dispatch": "False", "default": "True"} +::std::tuple geqrf_out(const Tensor & self, Tensor & a, Tensor & tau); // {"schema": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)", "dispatch": "True", "default": "False"} +::std::tuple geqrf(const Tensor & self); // {"schema": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)", "dispatch": "True", "default": "False"} +Tensor orgqr(const Tensor & self, const Tensor & input2); // {"schema": "aten::orgqr(Tensor self, Tensor input2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & orgqr_out(const Tensor & self, const Tensor & input2, Tensor & out); // {"schema": "aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & ormqr_out(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose, Tensor & out); // {"schema": "aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) 
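
cholesky factors a symmetric positive-definite matrix (lower triangle by default, upper=False), and cholesky_solve reuses the factor to solve linear systems cheaply. A sketch:

#include <ATen/ATen.h>
#include <iostream>

void cholesky_demo() {
  at::Tensor A = at::rand({3, 3});
  at::Tensor spd = at::mm(A, A.t()) + at::eye(3) * 3.0; // SPD by construction
  at::Tensor L = at::cholesky(spd);                     // lower factor: spd = L L^T
  at::Tensor b = at::rand({3, 1});
  at::Tensor x = at::cholesky_solve(b, L);              // solves spd x = b
  std::cout << (at::mm(spd, x) - b).abs().max() << "\n";
}
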
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose); // {"schema": "aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _lu_with_info(const Tensor & self, bool pivot, bool check_errors); // {"schema": "aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor LU, Tensor pivots, Tensor info)", "dispatch": "False", "default": "True"} +Tensor & lu_solve_out(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots, Tensor & out); // {"schema": "aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor lu_solve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots); // {"schema": "aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple lu_unpack(const Tensor & LU_data, const Tensor & LU_pivots, bool unpack_data, bool unpack_pivots); // {"schema": "aten::lu_unpack(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True) -> (Tensor P, Tensor L, Tensor U)", "dispatch": "True", "default": "True"} +::std::tuple lu_unpack_out(const Tensor & LU_data, const Tensor & LU_pivots, bool unpack_data, bool unpack_pivots, Tensor & P, Tensor & L, Tensor & U); // {"schema": "aten::lu_unpack.out(Tensor LU_data, Tensor LU_pivots, bool unpack_data=True, bool unpack_pivots=True, *, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", "dispatch": "True", "default": "False"} +Tensor & multinomial_out(const Tensor & self, int64_t num_samples, bool replacement, c10::optional generator, Tensor & out); // {"schema": "aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, c10::optional generator); // {"schema": "aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & lgamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & lgamma_(Tensor & self); // {"schema": "aten::lgamma_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor lgamma(const Tensor & self); // {"schema": "aten::lgamma(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & digamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor digamma(const Tensor & self); // {"schema": "aten::digamma(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polygamma_out(int64_t n, const Tensor & self, Tensor & out); // {"schema": "aten::polygamma.out(int n, Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor polygamma(int64_t n, const Tensor & self); // {"schema": "aten::polygamma(int n, Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & polygamma_(Tensor & self, int64_t n); // {"schema": "aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor erfinv(const Tensor & self); // {"schema": "aten::erfinv(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & erfinv_(Tensor & self); // {"schema": "aten::erfinv_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & erfinv_out(const Tensor & self, Tensor & out); // {"schema": "aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor i0(const Tensor & self); // {"schema": "aten::i0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & i0_(Tensor & self); // {"schema": "aten::i0_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor sign(const Tensor & self); // {"schema": "aten::sign(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & sign_(Tensor & self); // {"schema": "aten::sign_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & sign_out(const Tensor & self, Tensor & out); // {"schema": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor signbit(const Tensor & self); // {"schema": "aten::signbit(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & signbit_out(const Tensor & self, Tensor & out); // {"schema": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor dist(const Tensor & self, const Tensor & other, const Scalar & p); // {"schema": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & atan2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & atan2_(Tensor & self, const Tensor & other); // {"schema": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor atan2(const Tensor & self, const Tensor & other); // {"schema": "aten::atan2(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor arctan2(const Tensor & self, const Tensor & other); // {"schema": "aten::arctan2(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & arctan2_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & arctan2_(Tensor & self, const Tensor & other); // {"schema": "aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & lerp_out(const Tensor & self, const Tensor & end, const Scalar & weight, Tensor & out); // {"schema": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & lerp_out(const Tensor & self, const Tensor & end, const Tensor & weight, Tensor & out); // {"schema": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor lerp(const Tensor & self, const Tensor & end, const Scalar & weight); // {"schema": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor lerp(const Tensor & self, const Tensor & end, const Tensor & weight); // {"schema": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & histc_out(const Tensor & self, int64_t bins, const Scalar & min, const Scalar & max, Tensor & out); // {"schema": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor histc(const Tensor & self, int64_t bins, const Scalar & min, const Scalar & max); // {"schema": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple histogram_out(const Tensor & self, const Tensor & bins, const c10::optional & weight, bool density, Tensor & hist, Tensor & bin_edges); // {"schema": "aten::histogram.bins_tensor_out(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram(const Tensor & self, const Tensor & bins, const c10::optional & weight, bool density); // {"schema": "aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram_out(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density, Tensor & hist, Tensor & bin_edges); // {"schema": "aten::histogram.bin_ct_out(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) hist, Tensor(b!) bin_edges) -> (Tensor(a!) hist, Tensor(b!) bin_edges)", "dispatch": "True", "default": "False"} +::std::tuple histogram(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges)", "dispatch": "True", "default": "False"} +::std::vector _histogramdd_bin_edges(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_bin_edges(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor[]", "dispatch": "True", "default": "False"} +Tensor _histogramdd_from_bin_cts(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_from_bin_cts(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _histogramdd_from_bin_tensors(const Tensor & self, TensorList bins, const c10::optional & weight, bool density); // {"schema": "aten::_histogramdd_from_bin_tensors(Tensor self, Tensor[] bins, *, Tensor? 
weight=None, bool density=False) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple> histogramdd(const Tensor & self, IntArrayRef bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd(Tensor self, int[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +::std::tuple> histogramdd(const Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd.int_bins(Tensor self, int bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +::std::tuple> histogramdd(const Tensor & self, TensorList bins, c10::optional> range, const c10::optional & weight, bool density); // {"schema": "aten::histogramdd.TensorList_bins(Tensor self, Tensor[] bins, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor[] bin_edges)", "dispatch": "False", "default": "True"} +Tensor & fmod_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fmod(const Tensor & self, const Scalar & other); // {"schema": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmod_(Tensor & self, const Scalar & other); // {"schema": "aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fmod_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fmod(const Tensor & self, const Tensor & other); // {"schema": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmod_(Tensor & self, const Tensor & other); // {"schema": "aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hypot_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hypot(const Tensor & self, const Tensor & other); // {"schema": "aten::hypot(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hypot_(Tensor & self, const Tensor & other); // {"schema": "aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & igamma_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::igamma.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor igamma(const Tensor & self, const Tensor & other); // {"schema": "aten::igamma(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & igamma_(Tensor & self, const Tensor & other); // {"schema": "aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & igammac_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::igammac.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor igammac(const Tensor & self, const Tensor & other); // {"schema": "aten::igammac(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & igammac_(Tensor & self, const Tensor & other); // {"schema": "aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nextafter_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nextafter(const Tensor & self, const Tensor & other); // {"schema": "aten::nextafter(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nextafter_(Tensor & self, const Tensor & other); // {"schema": "aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor remainder(const Tensor & self, const Scalar & other); // {"schema": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & remainder_(Tensor & self, const Scalar & other); // {"schema": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & remainder_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor remainder(const Tensor & self, const Tensor & other); // {"schema": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & remainder_(Tensor & self, const Tensor & other); // {"schema": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor remainder(const Scalar & self, const Tensor & other); // {"schema": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "False"} +Tensor min(const Tensor & self); // {"schema": "aten::min(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fmin(const Tensor & self, const Tensor & other); // {"schema": "aten::fmin(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmin_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max(const Tensor & self); // {"schema": "aten::max(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor fmax(const Tensor & self, const Tensor & other); // {"schema": "aten::fmax(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fmax_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor maximum(const Tensor & self, const Tensor & other); // {"schema": "aten::maximum(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & maximum_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max(const Tensor & self, const Tensor & other); // {"schema": "aten::max.other(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & max_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor minimum(const Tensor & self, const Tensor & other); // {"schema": "aten::minimum(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & minimum_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & min_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor min(const Tensor & self, const Tensor & other); // {"schema": "aten::min.other(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor quantile(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & quantile_out(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor quantile(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & quantile_out(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanquantile(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanquantile_out(const Tensor & self, const Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nanquantile(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation); // {"schema": "aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", "dispatch": "False", "default": "True"} +Tensor & nanquantile_out(const Tensor & self, double q, c10::optional dim, bool keepdim, c10::string_view interpolation, Tensor & out); // {"schema": "aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple sort_out(const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "True"} +::std::tuple sort_out(const Tensor & self, c10::optional stable, int64_t dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple sort(const Tensor & self, int64_t dim, bool descending); // {"schema": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple sort(const Tensor & self, c10::optional stable, int64_t dim, bool descending); // {"schema": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +::std::tuple sort_out(const Tensor & self, Dimname dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple sort_out(const Tensor & self, c10::optional stable, Dimname dim, bool descending, Tensor & values, Tensor & indices); // {"schema": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "False", "default": "True"} +::std::tuple sort(const Tensor & self, Dimname dim, bool descending); // {"schema": "aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +::std::tuple sort(const Tensor & self, c10::optional stable, Dimname dim, bool descending); // {"schema": "aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", "dispatch": "False", "default": "True"} +Tensor & msort_out(const Tensor & self, Tensor & out); // {"schema": "aten::msort.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor msort(const Tensor & self); // {"schema": "aten::msort(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argsort(const Tensor & self, int64_t dim, bool descending); // {"schema": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor argsort(const Tensor & self, bool stable, int64_t dim, bool descending); // {"schema": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor argsort(const Tensor & self, Dimname dim, bool descending); // {"schema": "aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple topk_out(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor & values, Tensor & indices); // {"schema": "aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", "dispatch": "True", "default": "False"} +::std::tuple topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted); // {"schema": "aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)", "dispatch": "True", "default": "True"} +Tensor all(const Tensor & self); // {"schema": "aten::all(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & all_out(const Tensor & self, Tensor & out); // {"schema": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor any(const Tensor & self); // {"schema": "aten::any(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & any_out(const Tensor & self, Tensor & out); // {"schema": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & renorm_out(const Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm, Tensor & out); // {"schema": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor renorm(const Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm); // {"schema": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & renorm_(Tensor & self, const Scalar & p, int64_t dim, const Scalar & maxnorm); // {"schema": "aten::renorm_(Tensor(a!) 
+Tensor unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step); // {"schema": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)", "dispatch": "True", "default": "False"}
+Tensor unfold_backward(const Tensor & grad_in, IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step); // {"schema": "aten::unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> Tensor", "dispatch": "True", "default": "False"}
+bool equal(const Tensor & self, const Tensor & other); // {"schema": "aten::equal(Tensor self, Tensor other) -> bool", "dispatch": "True", "default": "False"}
+Tensor & pow_out(const Tensor & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor pow(const Tensor & self, const Tensor & exponent); // {"schema": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & pow_out(const Scalar & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor pow(const Scalar & self, const Tensor & exponent); // {"schema": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & pow_out(const Tensor & self, const Scalar & exponent, Tensor & out); // {"schema": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor pow(const Tensor & self, const Scalar & exponent); // {"schema": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & pow_(Tensor & self, const Scalar & exponent); // {"schema": "aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & pow_(Tensor & self, const Tensor & exponent); // {"schema": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & float_power_out(const Tensor & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::float_power.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor float_power(const Tensor & self, const Tensor & exponent); // {"schema": "aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & float_power_out(const Scalar & self, const Tensor & exponent, Tensor & out); // {"schema": "aten::float_power.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor float_power(const Scalar & self, const Tensor & exponent); // {"schema": "aten::float_power.Scalar(Scalar self, Tensor exponent) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & float_power_out(const Tensor & self, const Scalar & exponent, Tensor & out); // {"schema": "aten::float_power.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor float_power(const Tensor & self, const Scalar & exponent); // {"schema": "aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & float_power_(Tensor & self, const Scalar & exponent); // {"schema": "aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & float_power_(Tensor & self, const Tensor & exponent); // {"schema": "aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & normal_(Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal_functional(const Tensor & self, double mean, double std, c10::optional generator); // {"schema": "aten::normal_functional(Tensor self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & normal_out(const Tensor & mean, double std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(const Tensor & mean, double std, c10::optional generator); // {"schema": "aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & normal_out(double mean, const Tensor & std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(double mean, const Tensor & std, c10::optional generator); // {"schema": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & normal_out(const Tensor & mean, const Tensor & std, c10::optional generator, Tensor & out); // {"schema": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor normal(const Tensor & mean, const Tensor & std, c10::optional generator); // {"schema": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor normal(double mean, double std, IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & normal_out(double mean, double std, IntArrayRef size, c10::optional generator, Tensor & out); // {"schema": "aten::normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor alias(const Tensor & self); // {"schema": "aten::alias(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +void _amp_foreach_non_finite_check_and_unscale_(TensorList self, Tensor & found_inf, const Tensor & inv_scale); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()", "dispatch": "True", "default": "False"} +Tensor & _amp_update_scale_(Tensor & self, Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); // {"schema": "aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sub_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_mul.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_div.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, const Scalar & scalar); // {"schema": "aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sub_(TensorList self, TensorList other, const Scalar & alpha); // {"schema": "aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, TensorList other); // {"schema": "aten::_foreach_mul.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, TensorList other); // 
{"schema": "aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, TensorList other); // {"schema": "aten::_foreach_div.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, TensorList other); // {"schema": "aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_add(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_add_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sub(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sub_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_div(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_div.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_div_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_div_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_mul(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_mul.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_mul_(TensorList self, ArrayRef scalars); // {"schema": "aten::_foreach_mul_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_exp(TensorList self); // {"schema": "aten::_foreach_exp(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_zero_(TensorList self); // {"schema": "aten::_foreach_zero_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +void _foreach_exp_(TensorList self); // {"schema": "aten::_foreach_exp_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sqrt(TensorList self); // {"schema": "aten::_foreach_sqrt(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sqrt_(TensorList self); // {"schema": "aten::_foreach_sqrt_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_abs(TensorList self); // {"schema": "aten::_foreach_abs(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_abs_(TensorList self); // {"schema": "aten::_foreach_abs_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_acos(TensorList self); // {"schema": "aten::_foreach_acos(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_acos_(TensorList self); // {"schema": "aten::_foreach_acos_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_asin(TensorList self); // {"schema": "aten::_foreach_asin(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_asin_(TensorList self); // {"schema": 
"aten::_foreach_asin_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_atan(TensorList self); // {"schema": "aten::_foreach_atan(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_atan_(TensorList self); // {"schema": "aten::_foreach_atan_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_ceil(TensorList self); // {"schema": "aten::_foreach_ceil(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_ceil_(TensorList self); // {"schema": "aten::_foreach_ceil_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_cos(TensorList self); // {"schema": "aten::_foreach_cos(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_cos_(TensorList self); // {"schema": "aten::_foreach_cos_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_cosh(TensorList self); // {"schema": "aten::_foreach_cosh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_cosh_(TensorList self); // {"schema": "aten::_foreach_cosh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_erf(TensorList self); // {"schema": "aten::_foreach_erf(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_erf_(TensorList self); // {"schema": "aten::_foreach_erf_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_erfc(TensorList self); // {"schema": "aten::_foreach_erfc(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_erfc_(TensorList self); // {"schema": "aten::_foreach_erfc_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_expm1(TensorList self); // {"schema": "aten::_foreach_expm1(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_expm1_(TensorList self); // {"schema": "aten::_foreach_expm1_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_floor(TensorList self); // {"schema": "aten::_foreach_floor(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_floor_(TensorList self); // {"schema": "aten::_foreach_floor_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log(TensorList self); // {"schema": "aten::_foreach_log(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log_(TensorList self); // {"schema": "aten::_foreach_log_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log10(TensorList self); // {"schema": "aten::_foreach_log10(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log10_(TensorList self); // {"schema": "aten::_foreach_log10_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log1p(TensorList self); // {"schema": "aten::_foreach_log1p(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log1p_(TensorList self); // {"schema": "aten::_foreach_log1p_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_log2(TensorList self); // {"schema": "aten::_foreach_log2(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_log2_(TensorList self); // {"schema": "aten::_foreach_log2_(Tensor(a!)[] 
self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_neg(TensorList self); // {"schema": "aten::_foreach_neg(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_neg_(TensorList self); // {"schema": "aten::_foreach_neg_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_tan(TensorList self); // {"schema": "aten::_foreach_tan(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_tan_(TensorList self); // {"schema": "aten::_foreach_tan_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_tanh(TensorList self); // {"schema": "aten::_foreach_tanh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_tanh_(TensorList self); // {"schema": "aten::_foreach_tanh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sin(TensorList self); // {"schema": "aten::_foreach_sin(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sin_(TensorList self); // {"schema": "aten::_foreach_sin_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sinh(TensorList self); // {"schema": "aten::_foreach_sinh(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sinh_(TensorList self); // {"schema": "aten::_foreach_sinh_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_round(TensorList self); // {"schema": "aten::_foreach_round(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_round_(TensorList self); // {"schema": "aten::_foreach_round_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_lgamma(TensorList self); // {"schema": "aten::_foreach_lgamma(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_lgamma_(TensorList self); // {"schema": "aten::_foreach_lgamma_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_frac(TensorList self); // {"schema": "aten::_foreach_frac(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_frac_(TensorList self); // {"schema": "aten::_foreach_frac_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_reciprocal(TensorList self); // {"schema": "aten::_foreach_reciprocal(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_reciprocal_(TensorList self); // {"schema": "aten::_foreach_reciprocal_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_sigmoid(TensorList self); // {"schema": "aten::_foreach_sigmoid(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_sigmoid_(TensorList self); // {"schema": "aten::_foreach_sigmoid_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_trunc(TensorList self); // {"schema": "aten::_foreach_trunc(Tensor[] self) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_trunc_(TensorList self); // {"schema": "aten::_foreach_trunc_(Tensor(a!)[] self) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcdiv_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", 
"dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcmul_.Scalar(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcdiv_(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcdiv_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +void _foreach_addcmul_(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcmul_.ScalarList(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcdiv.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value); // {"schema": "aten::_foreach_addcmul.Scalar(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcdiv(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcdiv.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_addcmul(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef scalars); // {"schema": "aten::_foreach_addcmul.ScalarList(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars) -> Tensor[]", "dispatch": "True", "default": "False"} +::std::vector _foreach_maximum(TensorList self, TensorList other); // {"schema": "aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_maximum_(TensorList self, TensorList other); // {"schema": "aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_minimum(TensorList self, TensorList other); // {"schema": "aten::_foreach_minimum.List(Tensor[] self, Tensor[] other) -> Tensor[]", "dispatch": "True", "default": "False"} +void _foreach_minimum_(TensorList self, TensorList other); // {"schema": "aten::_foreach_minimum_.List(Tensor(a!)[] self, Tensor[] other) -> ()", "dispatch": "True", "default": "False"} +::std::vector _foreach_norm(TensorList self, const Scalar & ord); // {"schema": "aten::_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]", "dispatch": "True", "default": "False"} +Tensor bucketize(const Tensor & self, const Tensor & boundaries, bool out_int32, bool right); // {"schema": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & bucketize_out(const Tensor & self, const Tensor & boundaries, bool out_int32, bool right, Tensor & out); // {"schema": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor bucketize(const Scalar & self, const Tensor & boundaries, bool out_int32, bool right); // {"schema": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", "dispatch": "True", "default": "False"} +Tensor searchsorted(const Tensor & sorted_sequence, const Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); // {"schema": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _torch_cuda_cu_linker_symbol_op(const Tensor & self); // {"schema": "aten::_torch_cuda_cu_linker_symbol_op(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & searchsorted_out(const Tensor & sorted_sequence, const Tensor & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, Tensor & out); // {"schema": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor searchsorted(const Tensor & sorted_sequence, const Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter); // {"schema": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _convert_indices_from_coo_to_csr(const Tensor & self, int64_t size, bool out_int32); // {"schema": "aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _convert_indices_from_coo_to_csr_out(const Tensor & self, int64_t size, bool out_int32, Tensor & out); // {"schema": "aten::_convert_indices_from_coo_to_csr.out(Tensor self, int size, *, bool out_int32=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _convert_indices_from_csr_to_coo(const Tensor & crow_indices, const Tensor & col_indices, bool out_int32, bool transpose); // {"schema": "aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _convert_indices_from_csr_to_coo_out(const Tensor & crow_indices, const Tensor & col_indices, bool out_int32, bool transpose, Tensor & out); // {"schema": "aten::_convert_indices_from_csr_to_coo.out(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & mse_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & mse_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input); // {"schema": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", "dispatch": "True", "default": "False"} +Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & multi_margin_loss_out(const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction, Tensor & out); // {"schema": "aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multi_margin_loss(const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction); // {"schema": "aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & multi_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction, Tensor & grad_input); // {"schema": "aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Scalar & p, const Scalar & margin, const c10::optional & weight, int64_t reduction); // {"schema": "aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & multilabel_margin_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple multilabel_margin_loss_forward_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & output, Tensor & is_target); // {"schema": "aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) 
+::std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target)", "dispatch": "True", "default": "False"}
+Tensor & multilabel_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target, Tensor & grad_input); // {"schema": "aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target); // {"schema": "aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & nll_loss_out(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index, Tensor & out); // {"schema": "aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor nll_loss_nd(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index); // {"schema": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor nll_loss(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index); // {"schema": "aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"}
+::std::tuple<Tensor &,Tensor &> nll_loss_forward_out(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index, Tensor & output, Tensor & total_weight); // {"schema": "aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index); // {"schema": "aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)", "dispatch": "True", "default": "True"}
+Tensor & nll_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight, Tensor & grad_input); // {"schema": "aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); // {"schema": "aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & nll_loss2d_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, Tensor & out); // {"schema": "aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nll_loss2d(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index); // {"schema": "aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple nll_loss2d_forward_out(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, Tensor & output, Tensor & total_weight); // {"schema": "aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple nll_loss2d_forward(const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index); // {"schema": "aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)", "dispatch": "True", "default": "False"} +Tensor & nll_loss2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight, Tensor & grad_input); // {"schema": "aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); // {"schema": "aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & smooth_l1_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & out); // {"schema": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction, double beta); // {"schema": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & smooth_l1_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & grad_input); // {"schema": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double beta); // {"schema": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & huber_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & out); // {"schema": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor huber_loss(const Tensor & self, const Tensor & target, int64_t reduction, double delta); // {"schema": "aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & huber_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & grad_input); // {"schema": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor huber_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double delta); // {"schema": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & soft_margin_loss_out(const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out); // {"schema": "aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & soft_margin_loss_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input); // {"schema": "aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); // {"schema": "aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_out(const Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, Tensor & out); // {"schema": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor elu(const Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale); // {"schema": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_backward_out(const Tensor & grad_output, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, bool is_result, const Tensor & self_or_result, Tensor & grad_input); // {"schema": "aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor elu_backward(const Tensor & grad_output, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale, bool is_result, const Tensor & self_or_result); // {"schema": "aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & elu_(Tensor & self, const Scalar & alpha, const Scalar & scale, const Scalar & input_scale); // {"schema": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor glu(const Tensor & self, int64_t dim); // {"schema": "aten::glu(Tensor self, int dim=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & glu_backward_out(const Tensor & grad_output, const Tensor & self, int64_t dim, Tensor & grad_input); // {"schema": "aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim); // {"schema": "aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor glu_jvp(const Tensor & glu, const Tensor & x, const Tensor & dx, int64_t dim); // {"schema": "aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor glu_backward_jvp(const Tensor & grad_x, const Tensor & grad_glu, const Tensor & x, const Tensor & dgrad_glu, const Tensor & dx, int64_t dim); // {"schema": "aten::glu_backward_jvp(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardsigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardsigmoid(const Tensor & self); // {"schema": "aten::hardsigmoid(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardsigmoid_(Tensor & self); // {"schema": "aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hardsigmoid_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::hardsigmoid_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardsigmoid_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & hardtanh_out(const Tensor & self, const Scalar & min_val, const Scalar & max_val, Tensor & out); // {"schema": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardtanh(const Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardtanh_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & min_val, const Scalar & max_val, Tensor & grad_input); // {"schema": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardtanh_(Tensor & self, const Scalar & min_val, const Scalar & max_val); // {"schema": "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & hardswish_out(const Tensor & self, Tensor & out); // {"schema": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardswish(const Tensor & self); // {"schema": "aten::hardswish(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & hardswish_(Tensor & self); // {"schema": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor hardswish_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & leaky_relu_out(const Tensor & self, const Scalar & negative_slope, Tensor & out); // {"schema": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor leaky_relu(const Tensor & self, const Scalar & negative_slope); // {"schema": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & leaky_relu_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & negative_slope, bool self_is_result, Tensor & grad_input); // {"schema": "aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, const Scalar & negative_slope, bool self_is_result); // {"schema": "aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & leaky_relu_(Tensor & self, const Scalar & negative_slope); // {"schema": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & log_sigmoid_out(const Tensor & self, Tensor & out); // {"schema": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor log_sigmoid(const Tensor & self); // {"schema": "aten::log_sigmoid(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple log_sigmoid_forward_out(const Tensor & self, Tensor & output, Tensor & buffer); // {"schema": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple log_sigmoid_forward(const Tensor & self); // {"schema": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)", "dispatch": "True", "default": "False"} +Tensor & log_sigmoid_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & buffer, Tensor & grad_input); // {"schema": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer); // {"schema": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & rrelu_with_noise_out(const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator, Tensor & out); // {"schema": "aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator); // {"schema": "aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? 
generator=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, bool self_is_result); // {"schema": "aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, c10::optional generator); // {"schema": "aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & softplus_out(const Tensor & self, const Scalar & beta, const Scalar & threshold, Tensor & out); // {"schema": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softplus(const Tensor & self, const Scalar & beta, const Scalar & threshold); // {"schema": "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softplus_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & beta, const Scalar & threshold, Tensor & grad_input); // {"schema": "aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, const Scalar & beta, const Scalar & threshold); // {"schema": "aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softshrink_out(const Tensor & self, const Scalar & lambd, Tensor & out); // {"schema": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softshrink(const Tensor & self, const Scalar & lambd); // {"schema": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & softshrink_backward_out(const Tensor & grad_output, const Tensor & self, const Scalar & lambd, Tensor & grad_input); // {"schema": "aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, const Scalar & lambd); // {"schema": "aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & adaptive_avg_pool2d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_avg_pool2d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor mkldnn_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & mkldnn_adaptive_avg_pool2d_out(const Tensor & self, IntArrayRef output_size, Tensor & out); // {"schema": "aten::mkldnn_adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor mkldnn_adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::mkldnn_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool2d(const Tensor & self, c10::SymIntArrayRef output_size); // {"schema": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & adaptive_avg_pool3d_out(const Tensor & self, IntArrayRef output_size, Tensor & out); // {"schema": "aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::_adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & adaptive_avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & grad_input); // {"schema": "aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self); // {"schema": "aten::_adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool2d_out(const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices); // {"schema": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & adaptive_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); // {"schema": "aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple adaptive_max_pool3d_out(const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices); // {"schema": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size); // {"schema": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & adaptive_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); // {"schema": "aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & out); // {"schema": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & grad_input); // {"schema": "aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? 
divisor_override) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & out); // {"schema": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override, Tensor & grad_input); // {"schema": "aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); // {"schema": "aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple fractional_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices); // {"schema": "aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple fractional_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); // {"schema": "aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & fractional_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); // {"schema": "aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple fractional_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices); // {"schema": "aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); // {"schema": "aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & fractional_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); // {"schema": "aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple max_pool2d_with_indices_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices); // {"schema": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "True"} +Tensor & max_pool2d_with_indices_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); // {"schema": "aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple max_pool3d_with_indices_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices); // {"schema": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "False"} +::std::tuple max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); // {"schema": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor & max_pool3d_with_indices_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input); // {"schema": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); // {"schema": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & max_unpool2d_out(const Tensor & self, const Tensor & indices, IntArrayRef output_size, Tensor & out); // {"schema": "aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size); // {"schema": "aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & max_unpool3d_out(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding, Tensor & out); // {"schema": "aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); // {"schema": "aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & reflection_pad1d_out(const Tensor & self, IntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding); // {"schema": "aten::reflection_pad1d(Tensor self, int[2] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & reflection_pad1d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); // {"schema": "aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & reflection_pad2d_out(const Tensor & self, IntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding); // {"schema": "aten::reflection_pad2d(Tensor self, int[4] padding) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & reflection_pad2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); // {"schema": "aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & reflection_pad3d_out(const Tensor & self, IntArrayRef padding, Tensor & out); // {"schema": "aten::reflection_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor reflection_pad3d(const Tensor & self, IntArrayRef padding); // {"schema": "aten::reflection_pad3d(Tensor self, int[6] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & reflection_pad3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input); // {"schema": "aten::reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor reflection_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); // {"schema": "aten::reflection_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & replication_pad1d_out(const Tensor & self, IntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor replication_pad1d(const Tensor & self, IntArrayRef padding); // {"schema": "aten::replication_pad1d(Tensor self, int[2] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & replication_pad1d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); // {"schema": "aten::replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & replication_pad2d_out(const Tensor & self, IntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor replication_pad2d(const Tensor & self, IntArrayRef padding); // {"schema": "aten::replication_pad2d(Tensor self, int[4] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & replication_pad2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); // {"schema": "aten::replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & replication_pad3d_out(const Tensor & self, IntArrayRef padding, Tensor & out); // {"schema": "aten::replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor replication_pad3d(const Tensor & self, IntArrayRef padding); // {"schema": "aten::replication_pad3d(Tensor self, int[6] padding) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & replication_pad3d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input); // {"schema": "aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); // {"schema": "aten::replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _pad_circular(const Tensor & self, IntArrayRef pad); // {"schema": "aten::_pad_circular(Tensor self, int[] pad) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _pad_enum(const Tensor & self, IntArrayRef pad, int64_t mode, c10::optional<double> value); // {"schema": "aten::_pad_enum(Tensor self, int[] pad, int mode, float? value=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor pad(const Tensor & self, IntArrayRef pad, c10::string_view mode, c10::optional<double> value); // {"schema": "aten::pad(Tensor self, int[] pad, str mode=\"constant\", float? value=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor upsample_linear1d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_linear1d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_linear1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_bilinear2d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_bilinear2d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_bilinear2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_bilinear2d_aa(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_bilinear2d_aa_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_bilinear2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_trilinear3d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_trilinear3d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_trilinear3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_bicubic2d(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_bicubic2d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_bicubic2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_bicubic2d_aa(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_bicubic2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_bicubic2d_aa_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_bicubic2d_aa_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_nearest1d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact1d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_nearest_exact1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_nearest1d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_nearest1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact1d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_nearest_exact1d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_nearest2d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact2d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_nearest_exact2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_nearest2d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_nearest2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact2d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_nearest_exact2d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor upsample_nearest3d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _upsample_nearest_exact3d(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_nearest_exact3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor upsample_nearest3d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::upsample_nearest3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor _upsample_nearest_exact3d_backward(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors); // {"schema": "aten::_upsample_nearest_exact3d_backward.vec(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & upsample_linear1d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, Tensor & out); // {"schema": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_linear1d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales); // {"schema": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_linear1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, Tensor & grad_input); // {"schema": "aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_linear1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales); // {"schema": "aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_bilinear2d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_bilinear2d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float?
scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_bilinear2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_bilinear2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bilinear2d_aa_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _upsample_bilinear2d_aa(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bilinear2d_aa_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_bilinear2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _upsample_bilinear2d_aa_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_bilinear2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_bicubic2d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_bicubic2d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_bicubic2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_bicubic2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_bicubic2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bicubic2d_aa_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::_upsample_bicubic2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _upsample_bicubic2d_aa(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_bicubic2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bicubic2d_aa_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_bicubic2d_aa_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _upsample_bicubic2d_aa_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_bicubic2d_aa_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_trilinear3d_out(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_trilinear3d(const Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_trilinear3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_trilinear3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest1d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, Tensor & out); // {"schema": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & _upsample_nearest_exact1d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, Tensor & out); // {"schema": "aten::_upsample_nearest_exact1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_nearest1d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales); // {"schema": "aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact1d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales); // {"schema": "aten::_upsample_nearest_exact1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, Tensor & grad_input); // {"schema": "aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & _upsample_nearest_exact1d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_nearest1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales); // {"schema": "aten::upsample_nearest1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact1d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales); // {"schema": "aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest2d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & _upsample_nearest_exact2d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::_upsample_nearest_exact2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_nearest2d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact2d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_nearest_exact2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & _upsample_nearest_exact2d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_nearest2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact2d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_nearest_exact2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest3d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & _upsample_nearest_exact3d_out(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out); // {"schema": "aten::_upsample_nearest_exact3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_nearest3d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact3d(const Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_nearest_exact3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & _upsample_nearest_exact3d_backward_out(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input); // {"schema": "aten::_upsample_nearest_exact3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor upsample_nearest3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor _upsample_nearest_exact3d_backward(const Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w); // {"schema": "aten::_upsample_nearest_exact3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float?
scales_w=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sigmoid_backward_out(const Tensor & grad_output, const Tensor & output, Tensor & grad_input); // {"schema": "aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output); // {"schema": "aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & logit_backward_out(const Tensor & grad_output, const Tensor & self, c10::optional<double> eps, Tensor & grad_input); // {"schema": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor logit_backward(const Tensor & grad_output, const Tensor & self, c10::optional<double> eps); // {"schema": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & tanh_backward_out(const Tensor & grad_output, const Tensor & output, Tensor & grad_input); // {"schema": "aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor tanh_backward(const Tensor & grad_output, const Tensor & output); // {"schema": "aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & slow_conv_transpose2d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor slow_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation); // {"schema": "aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & slow_conv_transpose3d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor slow_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation); // {"schema": "aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & thnn_conv2d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, Tensor & out); // {"schema": "aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding); // {"schema": "aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & _slow_conv2d_forward_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, Tensor & output); // {"schema": "aten::_slow_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _slow_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding); // {"schema": "aten::_slow_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> Tensor", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor &,Tensor &,Tensor &> _slow_conv2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias); // {"schema": "aten::_slow_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, *, Tensor(a!) grad_input, Tensor(b!) grad_weight, Tensor(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "False"}
+::std::tuple<Tensor,Tensor,Tensor> _slow_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, ::std::array<bool,3> output_mask); // {"schema": "aten::_slow_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", "dispatch": "True", "default": "False"}
+const Tensor & _conv_depthwise2d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor & out); // {"schema": "aten::_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); // {"schema": "aten::_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor conv_depthwise3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); // {"schema": "aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & slow_conv3d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, Tensor & out); // {"schema": "aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor slow_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding); // {"schema": "aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & slow_conv3d_forward_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, Tensor & output); // {"schema": "aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor slow_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding); // {"schema": "aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); // {"schema": "aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor slow_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); // {"schema": "aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor & col2im_out(const Tensor & self, c10::SymIntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out); // {"schema": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor col2im(const Tensor & self, c10::SymIntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); // {"schema": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor column_stack(TensorList tensors); // {"schema": "aten::column_stack(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & column_stack_out(TensorList tensors, Tensor & out); // {"schema": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & im2col_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out); // {"schema": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); // {"schema": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", "dispatch": "True", "default": "False"}
+Tensor isfinite(const Tensor & self); // {"schema": "aten::isfinite(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor isinf(const Tensor & self); // {"schema": "aten::isinf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+void record_stream(Tensor & self, Stream s); // {"schema": "aten::record_stream(Tensor(a!) self, Stream s) -> ()", "dispatch": "True", "default": "False"}
+Tensor isposinf(const Tensor & self); // {"schema": "aten::isposinf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & isposinf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor isneginf(const Tensor & self); // {"schema": "aten::isneginf(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & isneginf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor _add_batch_dim(const Tensor & self, int64_t batch_dim, int64_t level); // {"schema": "aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor _remove_batch_dim(const Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim); // {"schema": "aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor special_entr(const Tensor & self); // {"schema": "aten::special_entr(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_entr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_entr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor special_ndtri(const Tensor & self); // {"schema": "aten::special_ndtri(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_ndtri_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_ndtri.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor special_log_ndtr(const Tensor & self); // {"schema": "aten::special_log_ndtr(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_log_ndtr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor special_expm1(const Tensor & self); // {"schema": "aten::special_expm1(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_expm1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_exp2(const Tensor & self); // {"schema": "aten::special_exp2(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_exp2_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_psi(const Tensor & self); // {"schema": "aten::special_psi(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_psi_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_digamma(const Tensor & self); // {"schema": "aten::special_digamma(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_digamma_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_gammaln(const Tensor & self); // {"schema": "aten::special_gammaln(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_gammaln_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_gammaln.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_erf(const Tensor & self); // {"schema": "aten::special_erf(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_erf_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_erfc(const Tensor & self); // {"schema": "aten::special_erfc(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_erfc_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_erfcx(const Tensor & self); // {"schema": "aten::special_erfcx(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_erfcx_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfcx.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor special_erfinv(const Tensor & self); // {"schema": "aten::special_erfinv(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_erfinv_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_ndtr(const Tensor & self); // {"schema": "aten::special_ndtr(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_ndtr_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_xlog1py(const Tensor & self, const Tensor & other); // {"schema": "aten::special_xlog1py(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor special_xlog1py(const Scalar & self, const Tensor & other); // {"schema": "aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor special_xlog1py(const Tensor & self, const Scalar & other); // {"schema": "aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_xlog1py_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlog1py.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & special_xlog1py_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlog1py.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & special_xlog1py_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_xlog1py.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor special_xlogy(const Tensor & self, const Tensor & other); // {"schema": "aten::special_xlogy(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor special_xlogy(const Scalar & self, const Tensor & other); // {"schema": "aten::special_xlogy.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor special_xlogy(const Tensor & self, const Scalar & other); // {"schema": "aten::special_xlogy.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_xlogy_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlogy.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & special_xlogy_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_xlogy.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor & special_xlogy_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_xlogy.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_zeta(const Tensor & self, const Tensor & other); // {"schema": "aten::special_zeta(Tensor self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor special_zeta(const Scalar & self, const Tensor & other); // {"schema": "aten::special_zeta.self_scalar(Scalar self, Tensor other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor special_zeta(const Tensor & self, const Scalar & other); // {"schema": "aten::special_zeta.other_scalar(Tensor self, Scalar other) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_zeta_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_zeta.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor & special_zeta_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_zeta.self_scalar_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & special_zeta_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::special_zeta.other_scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor special_i0(const Tensor & self); // {"schema": "aten::special_i0(Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_i0e(const Tensor & self); // {"schema": "aten::special_i0e(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_i0e_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor special_i1(const Tensor & self); // {"schema": "aten::special_i1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_i1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor special_i1e(const Tensor & self); // {"schema": "aten::special_i1e(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & special_i1e_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_i1e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"}
+Tensor special_logit(const Tensor & self, c10::optional<double> eps); // {"schema": "aten::special_logit(Tensor self, float? eps=None) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_logit_out(const Tensor & self, c10::optional<double> eps, Tensor & out); // {"schema": "aten::special_logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"}
+Tensor special_polygamma(int64_t n, const Tensor & self); // {"schema": "aten::special_polygamma(int n, Tensor self) -> Tensor", "dispatch": "False", "default": "True"}
+Tensor & special_polygamma_out(int64_t n, const Tensor & self, Tensor & out); // {"schema": "aten::special_polygamma.out(int n, Tensor self, *, Tensor(a!)
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim); // {"schema": "aten::special_logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_logsumexp_out(const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out); // {"schema": "aten::special_logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_expit(const Tensor & self); // {"schema": "aten::special_expit(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_expit_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_sinc(const Tensor & self); // {"schema": "aten::special_sinc(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_sinc_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_sinc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_round(const Tensor & self, int64_t decimals); // {"schema": "aten::special_round(Tensor self, *, int decimals=0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_round_out(const Tensor & self, int64_t decimals, Tensor & out); // {"schema": "aten::special_round.out(Tensor self, *, int decimals=0, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_log1p(const Tensor & self); // {"schema": "aten::special_log1p(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_log1p_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::special_log_softmax(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammainc_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_gammainc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammainc(const Tensor & self, const Tensor & other); // {"schema": "aten::special_gammainc(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_gammaincc_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::special_gammaincc.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_gammaincc(const Tensor & self, const Tensor & other); // {"schema": "aten::special_gammaincc(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_multigammaln(const Tensor & self, int64_t p); // {"schema": "aten::special_multigammaln(Tensor self, int p) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_multigammaln_out(const Tensor & self, int64_t p, Tensor & out); // {"schema": "aten::special_multigammaln.out(Tensor self, int p, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor special_softmax(const Tensor & self, int64_t dim, c10::optional dtype); // {"schema": "aten::special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fft_fft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_hfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_hfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfft(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm); // {"schema": "aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ihfft_out(const Tensor & self, c10::optional n, int64_t dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ihfft.out(Tensor self, int? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fft2(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fft2_out(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifft2(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifft2_out(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfft2(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfft2_out(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfft2(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfft2_out(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfft2(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_hfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_hfft2_out(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_hfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfft2(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ihfft2(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_ihfft2_out(const Tensor & self, OptionalIntArrayRef s, IntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_ihfft2.out(Tensor self, int[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fftn(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_fftn_out(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_fftn.out(Tensor self, int[1]? s=None, int[1]? 
dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ifftn(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_ifftn_out(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_ifftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_rfftn(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_rfftn_out(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_rfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_irfftn(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & fft_irfftn_out(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, Tensor & out); // {"schema": "aten::fft_irfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_hfftn(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_hfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_hfftn_out(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_hfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_ihfftn(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm); // {"schema": "aten::fft_ihfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", "dispatch": "False", "default": "True"} +const Tensor & fft_ihfftn_out(const Tensor & self, OptionalIntArrayRef s, OptionalIntArrayRef dim, c10::optional norm, const Tensor & out); // {"schema": "aten::fft_ihfftn.out(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor fft_fftfreq(int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fft_fftfreq_out(int64_t n, double d, Tensor & out); // {"schema": "aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fft_rfftfreq(int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::fft_rfftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & fft_rfftfreq_out(int64_t n, double d, Tensor & out); // {"schema": "aten::fft_rfftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor fft_fftshift(const Tensor & self, OptionalIntArrayRef dim); // {"schema": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor fft_ifftshift(const Tensor & self, OptionalIntArrayRef dim); // {"schema": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_cholesky_ex(const Tensor & self, bool upper, bool check_errors); // {"schema": "aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_cholesky_ex_out(const Tensor & self, bool upper, bool check_errors, Tensor & L, Tensor & info); // {"schema": "aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)", "dispatch": "True", "default": "False"} +Tensor linalg_cholesky(const Tensor & self, bool upper); // {"schema": "aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cholesky_out(const Tensor & self, bool upper, Tensor & out); // {"schema": "aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cross(const Tensor & self, const Tensor & other, int64_t dim); // {"schema": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_cross_out(const Tensor & self, const Tensor & other, int64_t dim, Tensor & out); // {"schema": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lu_factor(const Tensor & A, bool pivot); // {"schema": "aten::linalg_lu_factor(Tensor A, *, bool pivot=True) -> (Tensor LU, Tensor pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_lu_factor_out(const Tensor & A, bool pivot, Tensor & LU, Tensor & pivots); // {"schema": "aten::linalg_lu_factor.out(Tensor A, *, bool pivot=True, Tensor(a!) LU, Tensor(b!) pivots) -> (Tensor(a!) LU, Tensor(b!) pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_lu_factor_ex(const Tensor & A, bool pivot, bool check_errors); // {"schema": "aten::linalg_lu_factor_ex(Tensor A, *, bool pivot=True, bool check_errors=False) -> (Tensor LU, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lu_factor_ex_out(const Tensor & A, bool pivot, bool check_errors, Tensor & LU, Tensor & pivots, Tensor & info); // {"schema": "aten::linalg_lu_factor_ex.out(Tensor A, *, bool pivot=True, bool check_errors=False, Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LU, Tensor(b!) pivots, Tensor(c!) 
info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lu(const Tensor & A, bool pivot); // {"schema": "aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lu_out(const Tensor & A, bool pivot, Tensor & P, Tensor & L, Tensor & U); // {"schema": "aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)", "dispatch": "True", "default": "False"} +Tensor linalg_lu_solve(const Tensor & LU, const Tensor & pivots, const Tensor & B, bool left, bool adjoint); // {"schema": "aten::linalg_lu_solve(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_lu_solve_out(const Tensor & LU, const Tensor & pivots, const Tensor & B, bool left, bool adjoint, Tensor & out); // {"schema": "aten::linalg_lu_solve.out(Tensor LU, Tensor pivots, Tensor B, *, bool left=True, bool adjoint=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple _linalg_det(const Tensor & A); // {"schema": "aten::_linalg_det(Tensor A) -> (Tensor result, Tensor LU, Tensor pivots)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_det_out(const Tensor & A, Tensor & result, Tensor & LU, Tensor & pivots); // {"schema": "aten::_linalg_det.result(Tensor A, *, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots)", "dispatch": "True", "default": "False"} +Tensor linalg_det(const Tensor & A); // {"schema": "aten::linalg_det(Tensor A) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_det_out(const Tensor & A, Tensor & out); // {"schema": "aten::linalg_det.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor det(const Tensor & self); // {"schema": "aten::det(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_ldl_factor_ex(const Tensor & self, bool hermitian, bool check_errors); // {"schema": "aten::linalg_ldl_factor_ex(Tensor self, *, bool hermitian=False, bool check_errors=False) -> (Tensor LD, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_ldl_factor_ex_out(const Tensor & self, bool hermitian, bool check_errors, Tensor & LD, Tensor & pivots, Tensor & info); // {"schema": "aten::linalg_ldl_factor_ex.out(Tensor self, *, bool hermitian=False, bool check_errors=False, Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info) -> (Tensor(a!) LD, Tensor(b!) pivots, Tensor(c!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_ldl_factor(const Tensor & self, bool hermitian); // {"schema": "aten::linalg_ldl_factor(Tensor self, *, bool hermitian=False) -> (Tensor LD, Tensor pivots)", "dispatch": "False", "default": "True"} +::std::tuple linalg_ldl_factor_out(const Tensor & self, bool hermitian, Tensor & LD, Tensor & pivots); // {"schema": "aten::linalg_ldl_factor.out(Tensor self, *, bool hermitian=False, Tensor(a!) LD, Tensor(b!) pivots) -> (Tensor(a!) LD, Tensor(b!) 
pivots)", "dispatch": "False", "default": "True"} +Tensor linalg_ldl_solve(const Tensor & LD, const Tensor & pivots, const Tensor & B, bool hermitian); // {"schema": "aten::linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_ldl_solve_out(const Tensor & LD, const Tensor & pivots, const Tensor & B, bool hermitian, Tensor & out); // {"schema": "aten::linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_lstsq(const Tensor & self, const Tensor & b, c10::optional rcond, c10::optional driver); // {"schema": "aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)", "dispatch": "True", "default": "True"} +::std::tuple linalg_lstsq_out(const Tensor & self, const Tensor & b, c10::optional rcond, c10::optional driver, Tensor & solution, Tensor & residuals, Tensor & rank, Tensor & singular_values); // {"schema": "aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)", "dispatch": "True", "default": "False"} +Tensor linalg_matmul(const Tensor & self, const Tensor & other); // {"schema": "aten::linalg_matmul(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::linalg_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_vecdot(const Tensor & x, const Tensor & y, int64_t dim); // {"schema": "aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_vecdot_out(const Tensor & x, const Tensor & y, int64_t dim, Tensor & out); // {"schema": "aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_exp(const Tensor & self); // {"schema": "aten::linalg_matrix_exp(Tensor self) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _linalg_slogdet(const Tensor & A); // {"schema": "aten::_linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet, Tensor LU, Tensor pivots)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_slogdet_out(const Tensor & A, Tensor & sign, Tensor & logabsdet, Tensor & LU, Tensor & pivots); // {"schema": "aten::_linalg_slogdet.sign(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots) -> (Tensor(a!) sign, Tensor(b!) logabsdet, Tensor(c!) LU, Tensor(d!) pivots)", "dispatch": "True", "default": "False"} +::std::tuple linalg_slogdet(const Tensor & A); // {"schema": "aten::linalg_slogdet(Tensor A) -> (Tensor sign, Tensor logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple linalg_slogdet_out(const Tensor & A, Tensor & sign, Tensor & logabsdet); // {"schema": "aten::linalg_slogdet.out(Tensor A, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) 
logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple slogdet(const Tensor & self); // {"schema": "aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)", "dispatch": "False", "default": "True"} +::std::tuple slogdet_out(const Tensor & self, Tensor & sign, Tensor & logabsdet); // {"schema": "aten::slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)", "dispatch": "False", "default": "True"} +Tensor logdet(const Tensor & self); // {"schema": "aten::logdet(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +::std::tuple linalg_eig(const Tensor & self); // {"schema": "aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "True", "default": "False"} +::std::tuple linalg_eig_out(const Tensor & self, Tensor & eigenvalues, Tensor & eigenvectors); // {"schema": "aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "True", "default": "False"} +Tensor linalg_eigvals(const Tensor & self); // {"schema": "aten::linalg_eigvals(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_eigvals_out(const Tensor & self, Tensor & out); // {"schema": "aten::linalg_eigvals.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple _linalg_eigh(const Tensor & A, c10::string_view UPLO, bool compute_v); // {"schema": "aten::_linalg_eigh(Tensor A, str UPLO=\"L\", bool compute_v=True) -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_eigh_out(const Tensor & A, c10::string_view UPLO, bool compute_v, Tensor & eigenvalues, Tensor & eigenvectors); // {"schema": "aten::_linalg_eigh.eigenvalues(Tensor A, str UPLO=\"L\", bool compute_v=True, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "True", "default": "False"} +::std::tuple linalg_eigh(const Tensor & self, c10::string_view UPLO); // {"schema": "aten::linalg_eigh(Tensor self, str UPLO=\"L\") -> (Tensor eigenvalues, Tensor eigenvectors)", "dispatch": "False", "default": "True"} +::std::tuple linalg_eigh_out(const Tensor & self, c10::string_view UPLO, Tensor & eigvals, Tensor & eigvecs); // {"schema": "aten::linalg_eigh.eigvals(Tensor self, str UPLO=\"L\", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)", "dispatch": "False", "default": "True"} +Tensor linalg_eigvalsh(const Tensor & self, c10::string_view UPLO); // {"schema": "aten::linalg_eigvalsh(Tensor self, str UPLO=\"L\") -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_eigvalsh_out(const Tensor & self, c10::string_view UPLO, Tensor & out); // {"schema": "aten::linalg_eigvalsh.out(Tensor self, str UPLO=\"L\", *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_householder_product(const Tensor & input, const Tensor & tau); // {"schema": "aten::linalg_householder_product(Tensor input, Tensor tau) -> Tensor", "dispatch": "True", "default": "False"} +Tensor & linalg_householder_product_out(const Tensor & input, const Tensor & tau, Tensor & out); // {"schema": "aten::linalg_householder_product.out(Tensor input, Tensor tau, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +::std::tuple linalg_inv_ex(const Tensor & A, bool check_errors); // {"schema": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple linalg_inv_ex_out(const Tensor & A, bool check_errors, Tensor & inverse, Tensor & info); // {"schema": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)", "dispatch": "True", "default": "False"} +Tensor linalg_inv(const Tensor & A); // {"schema": "aten::linalg_inv(Tensor A) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_inv_out(const Tensor & A, Tensor & out); // {"schema": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor inverse(const Tensor & self); // {"schema": "aten::inverse(Tensor self) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & inverse_out(const Tensor & self, Tensor & out); // {"schema": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor inner(const Tensor & self, const Tensor & other); // {"schema": "aten::inner(Tensor self, Tensor other) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & inner_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::inner.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor outer(const Tensor & self, const Tensor & vec2); // {"schema": "aten::outer(Tensor self, Tensor vec2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & outer_out(const Tensor & self, const Tensor & vec2, Tensor & out); // {"schema": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor ger(const Tensor & self, const Tensor & vec2); // {"schema": "aten::ger(Tensor self, Tensor vec2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & ger_out(const Tensor & self, const Tensor & vec2, Tensor & out); // {"schema": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_norm(const Tensor & self, const c10::optional & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor linalg_norm(const Tensor & self, c10::string_view ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_norm_out(const Tensor & self, const c10::optional & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & linalg_norm_out(const Tensor & self, c10::string_view ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_vector_norm(const Tensor & self, const Scalar & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_vector_norm_out(const Tensor & self, const Scalar & ord, OptionalIntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor linalg_matrix_norm(const Tensor & self, const Scalar & ord, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_norm_out(const Tensor & self, const Scalar & ord, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_matrix_norm.out(Tensor self, Scalar ord, int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_norm(const Tensor & self, c10::string_view ord, IntArrayRef dim, bool keepdim, c10::optional dtype); // {"schema": "aten::linalg_matrix_norm.str_ord(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_norm_out(const Tensor & self, c10::string_view ord, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::linalg_matrix_norm.str_ord_out(Tensor self, str ord='fro', int[] dim=[-2,-1], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple _linalg_svd(const Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver); // {"schema": "aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_svd_out(const Tensor & A, bool full_matrices, bool compute_uv, c10::optional driver, Tensor & U, Tensor & S, Tensor & Vh); // {"schema": "aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", "dispatch": "True", "default": "False"} +::std::tuple linalg_svd(const Tensor & A, bool full_matrices, c10::optional driver); // {"schema": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)", "dispatch": "False", "default": "True"} +::std::tuple linalg_svd_out(const Tensor & A, bool full_matrices, c10::optional driver, Tensor & U, Tensor & S, Tensor & Vh); // {"schema": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", "dispatch": "False", "default": "True"} +Tensor linalg_svdvals(const Tensor & A, c10::optional driver); // {"schema": "aten::linalg_svdvals(Tensor A, *, str? 
driver=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_svdvals_out(const Tensor & A, c10::optional driver, Tensor & out); // {"schema": "aten::linalg_svdvals.out(Tensor A, *, str? driver=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cond(const Tensor & self, const c10::optional & p); // {"schema": "aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cond_out(const Tensor & self, const c10::optional & p, Tensor & out); // {"schema": "aten::linalg_cond.out(Tensor self, Scalar? p=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_cond(const Tensor & self, c10::string_view p); // {"schema": "aten::linalg_cond.p_str(Tensor self, str p) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_cond_out(const Tensor & self, c10::string_view p, Tensor & out); // {"schema": "aten::linalg_cond.p_str_out(Tensor self, str p, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian); // {"schema": "aten::linalg_pinv.atol_rtol_tensor(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, const c10::optional & atol, const c10::optional & rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.atol_rtol_tensor_out(Tensor self, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor linalg_pinv(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian); // {"schema": "aten::linalg_pinv.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, double rcond, bool hermitian); // {"schema": "aten::linalg_pinv(Tensor self, float rcond, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor linalg_pinv(const Tensor & self, const Tensor & rcond, bool hermitian); // {"schema": "aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, double rcond, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.out(Tensor self, float rcond, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & linalg_pinv_out(const Tensor & self, const Tensor & rcond, bool hermitian, Tensor & out); // {"schema": "aten::linalg_pinv.out_rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple _linalg_solve_ex(const Tensor & A, const Tensor & B, bool left, bool check_errors); // {"schema": "aten::_linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor LU, Tensor pivots, Tensor info)", "dispatch": "True", "default": "True"} +::std::tuple _linalg_solve_ex_out(const Tensor & A, const Tensor & B, bool left, bool check_errors, Tensor & result, Tensor & LU, Tensor & pivots, Tensor & info); // {"schema": "aten::_linalg_solve_ex.result(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info) -> (Tensor(a!) result, Tensor(b!) LU, Tensor(c!) pivots, Tensor(d!) info)", "dispatch": "True", "default": "False"} +::std::tuple linalg_solve_ex(const Tensor & A, const Tensor & B, bool left, bool check_errors); // {"schema": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)", "dispatch": "False", "default": "True"} +::std::tuple linalg_solve_ex_out(const Tensor & A, const Tensor & B, bool left, bool check_errors, Tensor & result, Tensor & info); // {"schema": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)", "dispatch": "False", "default": "True"} +Tensor linalg_solve(const Tensor & A, const Tensor & B, bool left); // {"schema": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_solve_out(const Tensor & A, const Tensor & B, bool left, Tensor & out); // {"schema": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_tensorinv(const Tensor & self, int64_t ind); // {"schema": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_tensorinv_out(const Tensor & self, int64_t ind, Tensor & out); // {"schema": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_tensorsolve(const Tensor & self, const Tensor & other, OptionalIntArrayRef dims); // {"schema": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_tensorsolve_out(const Tensor & self, const Tensor & other, OptionalIntArrayRef dims, Tensor & out); // {"schema": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +::std::tuple linalg_qr(const Tensor & A, c10::string_view mode); // {"schema": "aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)", "dispatch": "True", "default": "True"} +::std::tuple linalg_qr_out(const Tensor & A, c10::string_view mode, Tensor & Q, Tensor & R); // {"schema": "aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) 
R)", "dispatch": "True", "default": "False"} +Tensor linalg_matrix_power(const Tensor & self, int64_t n); // {"schema": "aten::linalg_matrix_power(Tensor self, int n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_power_out(const Tensor & self, int64_t n, Tensor & out); // {"schema": "aten::linalg_matrix_power.out(Tensor self, int n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.atol_rtol_tensor(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & input, const c10::optional & atol, const c10::optional & rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.atol_rtol_tensor_out(Tensor input, *, Tensor? atol=None, Tensor? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.atol_rtol_float(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & self, c10::optional atol, c10::optional rtol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.atol_rtol_float_out(Tensor self, *, float? atol=None, float? rtol=None, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & self, double tol, bool hermitian); // {"schema": "aten::linalg_matrix_rank(Tensor self, float tol, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & self, double tol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.out(Tensor self, float tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_matrix_rank(const Tensor & input, const Tensor & tol, bool hermitian); // {"schema": "aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_matrix_rank_out(const Tensor & input, const Tensor & tol, bool hermitian, Tensor & out); // {"schema": "aten::linalg_matrix_rank.out_tol_tensor(Tensor input, Tensor tol, bool hermitian=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor linalg_multi_dot(TensorList tensors); // {"schema": "aten::linalg_multi_dot(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & linalg_multi_dot_out(TensorList tensors, Tensor & out); // {"schema": "aten::linalg_multi_dot.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor nested_to_padded_tensor(const Tensor & self, double padding, OptionalIntArrayRef output_size); // {"schema": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? 
output_size=None) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_serialization_subcmul(const Tensor & self, const Tensor & other, const Scalar & alpha); // {"schema": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_optional_intlist(const Tensor & values, OptionalIntArrayRef addends); // {"schema": "aten::_test_optional_intlist(Tensor values, int[]? addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_optional_filled_intlist(const Tensor & values, OptionalIntArrayRef addends); // {"schema": "aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_optional_floatlist(const Tensor & values, c10::optional> addends); // {"schema": "aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _test_string_default(const Tensor & dummy, c10::string_view a, c10::string_view b); // {"schema": "aten::_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_ambiguous_defaults(const Tensor & dummy, int64_t a, int64_t b); // {"schema": "aten::_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_ambiguous_defaults(const Tensor & dummy, int64_t a, c10::string_view b); // {"schema": "aten::_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b=\"2\") -> Tensor", "dispatch": "False", "default": "True"} +Tensor _test_warn_in_autograd(const Tensor & self); // {"schema": "aten::_test_warn_in_autograd(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch.fullcoverage(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch(const Tensor & self, bool b); // {"schema": "aten::_test_autograd_multiple_dispatch.ntonly(Tensor self, bool b) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch_view(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch_view(Tensor(a) self) -> Tensor(a)", "dispatch": "True", "default": "True"} +Tensor _test_autograd_multiple_dispatch_view_copy(const Tensor & self); // {"schema": "aten::_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor segment_reduce(const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & indices, const c10::optional & offsets, int64_t axis, bool unsafe, const c10::optional & initial); // {"schema": "aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _segment_reduce_backward(const Tensor & grad, const Tensor & output, const Tensor & data, c10::string_view reduce, const c10::optional & lengths, const c10::optional & offsets, int64_t axis, const c10::optional & initial); // {"schema": "aten::_segment_reduce_backward(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? 
initial=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor pad_sequence(TensorList sequences, bool batch_first, double padding_value); // {"schema": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor", "dispatch": "False", "default": "True"} +Tensor flatten_dense_tensors(TensorList tensors); // {"schema": "aten::flatten_dense_tensors(Tensor[] tensors) -> Tensor", "dispatch": "False", "default": "True"} +::std::vector unflatten_dense_tensors(const Tensor & flat, TensorList tensors); // {"schema": "aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> Tensor[]", "dispatch": "False", "default": "True"} +Tensor _nested_tensor_from_tensor_list(TensorList list, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); // {"schema": "aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _fw_primal_copy(const Tensor & self, int64_t level); // {"schema": "aten::_fw_primal_copy(Tensor self, int level) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _make_dual_copy(const Tensor & primal, const Tensor & tangent, int64_t level); // {"schema": "aten::_make_dual_copy(Tensor primal, Tensor tangent, int level) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_as_real_copy(const Tensor & self); // {"schema": "aten::view_as_real_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_as_complex_copy(const Tensor & self); // {"schema": "aten::view_as_complex_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _conj_copy(const Tensor & self); // {"schema": "aten::_conj_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _neg_view_copy(const Tensor & self); // {"schema": "aten::_neg_view_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor as_strided_copy(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset); // {"schema": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? 
storage_offset=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _sparse_broadcast_to_copy(const Tensor & self, IntArrayRef size); // {"schema": "aten::_sparse_broadcast_to_copy(Tensor self, int[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor diagonal_copy(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); // {"schema": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor expand_copy(const Tensor & self, c10::SymIntArrayRef size, bool implicit); // {"schema": "aten::expand_copy(Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor permute_copy(const Tensor & self, IntArrayRef dims); // {"schema": "aten::permute_copy(Tensor self, int[] dims) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _reshape_alias_copy(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::_reshape_alias_copy(Tensor self, SymInt[] size, SymInt[] stride) -> Tensor", "dispatch": "True", "default": "True"} +Tensor select_copy(const Tensor & self, int64_t dim, int64_t index); // {"schema": "aten::select_copy.int(Tensor self, int dim, int index) -> Tensor", "dispatch": "True", "default": "True"} +Tensor detach_copy(const Tensor & self); // {"schema": "aten::detach_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor slice_copy(const Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step); // {"schema": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", "dispatch": "True", "default": "True"} +::std::vector split_copy(const Tensor & self, int64_t split_size, int64_t dim); // {"schema": "aten::split_copy.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +::std::vector split_with_sizes_copy(const Tensor & self, IntArrayRef split_sizes, int64_t dim); // {"schema": "aten::split_with_sizes_copy(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self); // {"schema": "aten::squeeze_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor squeeze_copy(const Tensor & self, int64_t dim); // {"schema": "aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor t_copy(const Tensor & self); // {"schema": "aten::t_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor transpose_copy(const Tensor & self, int64_t dim0, int64_t dim1); // {"schema": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unsqueeze_copy(const Tensor & self, int64_t dim); // {"schema": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _indices_copy(const Tensor & self); // {"schema": "aten::_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor _values_copy(const Tensor & self); // {"schema": "aten::_values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor indices_copy(const Tensor & self); // {"schema": "aten::indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor values_copy(const Tensor & self); // {"schema": "aten::values_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} 
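The run of `*_copy` declarations above mirrors ATen's view operations (`slice`, `permute`, `squeeze`, `view`, and so on) with non-aliasing variants: where the view form returns a tensor that shares storage with its input, the `_copy` form materializes an independent result. A minimal libtorch sketch of the practical difference — illustrative only, not part of the diffed header, and assuming a C++ program linked against a libtorch build that matches these declarations:

#include <torch/torch.h>
#include <iostream>

int main() {
  torch::Tensor t = torch::arange(6).reshape({2, 3});

  // View variant: `v` aliases the storage of `t`.
  torch::Tensor v = t.slice(/*dim=*/1, /*start=*/0, /*end=*/2);

  // Copy variant declared above: `c` owns its own storage.
  torch::Tensor c = at::slice_copy(t, /*dim=*/1, /*start=*/0, /*end=*/2);

  v.add_(100);                  // writing through the view also mutates t
  std::cout << t << std::endl;  // shows the in-place update
  std::cout << c << std::endl;  // unchanged: slice_copy detached its result
}

The paired `.out` overloads listed below follow the same convention as the rest of this header: they take the destination tensor last in C++ while the schema string marks it as the mutated `Tensor(a!)` keyword argument.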
+Tensor crow_indices_copy(const Tensor & self); // {"schema": "aten::crow_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor col_indices_copy(const Tensor & self); // {"schema": "aten::col_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor ccol_indices_copy(const Tensor & self); // {"schema": "aten::ccol_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor row_indices_copy(const Tensor & self); // {"schema": "aten::row_indices_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +::std::vector unbind_copy(const Tensor & self, int64_t dim); // {"schema": "aten::unbind_copy.int(Tensor self, int dim=0) -> Tensor[]", "dispatch": "True", "default": "True"} +Tensor view_copy(const Tensor & self, c10::SymIntArrayRef size); // {"schema": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor", "dispatch": "True", "default": "True"} +Tensor view_copy(const Tensor & self, ScalarType dtype); // {"schema": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor", "dispatch": "True", "default": "True"} +Tensor unfold_copy(const Tensor & self, int64_t dimension, int64_t size, int64_t step); // {"schema": "aten::unfold_copy(Tensor self, int dimension, int size, int step) -> Tensor", "dispatch": "True", "default": "True"} +Tensor alias_copy(const Tensor & self); // {"schema": "aten::alias_copy(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & _fw_primal_copy_out(const Tensor & self, int64_t level, Tensor & out); // {"schema": "aten::_fw_primal_copy.out(Tensor self, int level, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _make_dual_copy_out(const Tensor & primal, const Tensor & tangent, int64_t level, Tensor & out); // {"schema": "aten::_make_dual_copy.out(Tensor primal, Tensor tangent, int level, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_as_real_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_as_complex_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _conj_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_conj_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _neg_view_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & as_strided_copy_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, Tensor & out); // {"schema": "aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_broadcast_to_copy_out(const Tensor & self, IntArrayRef size, Tensor & out); // {"schema": "aten::_sparse_broadcast_to_copy.out(Tensor self, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_copy_out(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & expand_copy_out(const Tensor & self, c10::SymIntArrayRef size, bool implicit, Tensor & out); // {"schema": "aten::expand_copy.out(Tensor self, SymInt[] size, *, bool implicit=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & permute_copy_out(const Tensor & self, IntArrayRef dims, Tensor & out); // {"schema": "aten::permute_copy.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _reshape_alias_copy_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::_reshape_alias_copy.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_copy_out(const Tensor & self, int64_t dim, int64_t index, Tensor & out); // {"schema": "aten::select_copy.int_out(Tensor self, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & detach_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::detach_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_copy_out(const Tensor & self, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void split_copy_out(const Tensor & self, int64_t split_size, int64_t dim, TensorList out); // {"schema": "aten::split_copy.Tensor_out(Tensor self, int split_size, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void split_with_sizes_copy_out(const Tensor & self, IntArrayRef split_sizes, int64_t dim, TensorList out); // {"schema": "aten::split_with_sizes_copy.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & squeeze_copy_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & t_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::t_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & transpose_copy_out(const Tensor & self, int64_t dim0, int64_t dim1, Tensor & out); // {"schema": "aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unsqueeze_copy_out(const Tensor & self, int64_t dim, Tensor & out); // {"schema": "aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_values_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & values_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::values_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & crow_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & col_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::col_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void unbind_copy_out(const Tensor & self, int64_t dim, TensorList out); // {"schema": "aten::unbind_copy.int_out(Tensor self, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & view_copy_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & view_copy_out(const Tensor & self, ScalarType dtype, Tensor & out); // {"schema": "aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & unfold_copy_out(const Tensor & self, int64_t dimension, int64_t size, int64_t step, Tensor & out); // {"schema": "aten::unfold_copy.out(Tensor self, int dimension, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & alias_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor to_padded_tensor(const Tensor & self, double padding, OptionalIntArrayRef output_size); // {"schema": "aten::to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_softmax_with_shape(const Tensor & self, const Tensor & query); // {"schema": "aten::_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _nested_tensor_layer_norm(const Tensor & self, const c10::optional & weight, const c10::optional & bias, double eps); // {"schema": "aten::_nested_tensor_layer_norm(Tensor self, Tensor? weight, Tensor? 
bias, float eps) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _transformer_encoder_layer_fwd(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type); // {"schema": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _native_multi_head_attention(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, bool need_weights, bool average_attn_weights, c10::optional mask_type); // {"schema": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_attention(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal); // {"schema": "aten::_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +::std::tuple _scaled_dot_product_attention_forward(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal); // {"schema": "aten::_scaled_dot_product_attention_forward(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _scaled_dot_product_attention_math(const Tensor & query, const Tensor & key, const Tensor & value, const c10::optional & attn_mask, double dropout_p, bool need_attn_weights, bool is_causal); // {"schema": "aten::_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? 
attn_mask=None, float dropout_p=0.0, bool need_attn_weights=False, bool is_causal=False) -> (Tensor, Tensor)", "dispatch": "False", "default": "True"} +Tensor _triton_scaled_dot_attention(const Tensor & q, const Tensor & k, const Tensor & v, double dropout_p); // {"schema": "aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor", "dispatch": "True", "default": "False"} +Tensor _triton_multi_head_attention(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask); // {"schema": "aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor", "dispatch": "True", "default": "False"} +Tensor special_airy_ai(const Tensor & x); // {"schema": "aten::special_airy_ai(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_airy_ai_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_airy_ai.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _flash_scaled_dot_product_attention(const Tensor & query, const Tensor & key, const Tensor & value, const Tensor & cum_seq_q, const Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal); // {"schema": "aten::_flash_scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor cum_seq_q, Tensor cum_seq_k, int max_q, int max_k, float dropout_p, bool is_causal) -> Tensor", "dispatch": "True", "default": "False"} +::std::tuple _transformer_decoder_only_layer_fwd(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional & mask, const c10::optional & incr_key, const c10::optional & incr_value); // {"schema": "aten::_transformer_decoder_only_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None) -> (Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +::std::tuple _native_decoder_only_multi_head_attention(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional & mask, const c10::optional & incr_key, const c10::optional & incr_value, bool need_weights, bool average_attn_weights); // {"schema": "aten::_native_decoder_only_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? 
incr_value=None, bool need_weights=True, bool average_attn_weights=True) -> (Tensor, Tensor, Tensor, Tensor)", "dispatch": "True", "default": "False"} +Tensor special_bessel_j0(const Tensor & self); // {"schema": "aten::special_bessel_j0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_j0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_j1(const Tensor & self); // {"schema": "aten::special_bessel_j1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_j1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_j1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_y0(const Tensor & self); // {"schema": "aten::special_bessel_y0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_y0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_y0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_bessel_y1(const Tensor & self); // {"schema": "aten::special_bessel_y1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_bessel_y1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_chebyshev_polynomial_t(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_t(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_chebyshev_polynomial_t(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_t_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_t_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_t_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_chebyshev_polynomial_u(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_u_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_u_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_u_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_chebyshev_polynomial_v(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_v_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_v_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_v_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Tensor & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Scalar & x, const Tensor & n); // {"schema": "aten::special_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_chebyshev_polynomial_w(const Tensor & x, const Scalar & n); // {"schema": "aten::special_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_w_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_chebyshev_polynomial_w_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_chebyshev_polynomial_w_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Tensor & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_h(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_h(const Scalar & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_h.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_hermite_polynomial_h(const Tensor & x, const Scalar & n); // {"schema": "aten::special_hermite_polynomial_h.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_hermite_polynomial_h_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_hermite_polynomial_h_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_hermite_polynomial_h_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_h.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Tensor & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_he(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_hermite_polynomial_he(const Scalar & x, const Tensor & n); // {"schema": "aten::special_hermite_polynomial_he.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_hermite_polynomial_he(const Tensor & x, const Scalar & n); // {"schema": "aten::special_hermite_polynomial_he.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_hermite_polynomial_he_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_hermite_polynomial_he_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_hermite_polynomial_he_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_hermite_polynomial_he.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Tensor & x, const Tensor & n); // {"schema": "aten::special_laguerre_polynomial_l(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_laguerre_polynomial_l(const Scalar & x, const Tensor & n); // {"schema": "aten::special_laguerre_polynomial_l.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_laguerre_polynomial_l(const Tensor & x, const Scalar & n); // {"schema": "aten::special_laguerre_polynomial_l.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_laguerre_polynomial_l_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_laguerre_polynomial_l_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_laguerre_polynomial_l_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_laguerre_polynomial_l.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Tensor & x, const Tensor & n); // {"schema": "aten::special_legendre_polynomial_p(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_legendre_polynomial_p(const Scalar & x, const Tensor & n); // {"schema": "aten::special_legendre_polynomial_p.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_legendre_polynomial_p(const Tensor & x, const Scalar & n); // {"schema": "aten::special_legendre_polynomial_p.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_legendre_polynomial_p_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_legendre_polynomial_p_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_legendre_polynomial_p_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_legendre_polynomial_p.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_modified_bessel_i0(const Tensor & self); // {"schema": "aten::special_modified_bessel_i0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_i0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_i1(const Tensor & self); // {"schema": "aten::special_modified_bessel_i1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_i1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_i1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_k0(const Tensor & self); // {"schema": "aten::special_modified_bessel_k0(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_k0_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_k0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_modified_bessel_k1(const Tensor & self); // {"schema": "aten::special_modified_bessel_k1(Tensor self) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_modified_bessel_k1_out(const Tensor & self, Tensor & out); // {"schema": "aten::special_modified_bessel_k1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_scaled_modified_bessel_k0(const Tensor & x); // {"schema": "aten::special_scaled_modified_bessel_k0(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_scaled_modified_bessel_k0_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_scaled_modified_bessel_k1(const Tensor & x); // {"schema": "aten::special_scaled_modified_bessel_k1(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_scaled_modified_bessel_k1_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor special_shifted_chebyshev_polynomial_t(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_t(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_t(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_t_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_t.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_u(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_u_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_u.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_v(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_v_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_v.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Tensor & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w(Tensor x, Tensor n) -> Tensor", "dispatch": "True", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Scalar & x, const Tensor & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.x_scalar(Scalar x, Tensor n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor special_shifted_chebyshev_polynomial_w(const Tensor & x, const Scalar & n); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.n_scalar(Tensor x, Scalar n) -> Tensor", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Tensor & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.out(Tensor x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Scalar & x, const Tensor & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.x_scalar_out(Scalar x, Tensor n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "False", "default": "True"} +Tensor & special_shifted_chebyshev_polynomial_w_out(const Tensor & x, const Scalar & n, Tensor & out); // {"schema": "aten::special_shifted_chebyshev_polynomial_w.n_scalar_out(Tensor x, Scalar n, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor special_spherical_bessel_j0(const Tensor & x); // {"schema": "aten::special_spherical_bessel_j0(Tensor x) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & special_spherical_bessel_j0_out(const Tensor & x, Tensor & out); // {"schema": "aten::special_spherical_bessel_j0.out(Tensor x, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "False"} +Tensor _foobar(const Tensor & self, bool arg1, bool arg2, bool arg3); // {"schema": "aten::_foobar(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True) -> Tensor", "dispatch": "True", "default": "False"} +void _fused_adam_(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional & grad_scale, const c10::optional & found_inf); // {"schema": "aten::_fused_adam_(Tensor(a!)[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> ()", "dispatch": "True", "default": "False"} +Tensor & _new_zeros_with_same_feature_meta_out(const Tensor & self, const Tensor & other, int64_t self_num_batch_dims, Tensor & out); // {"schema": "aten::_new_zeros_with_same_feature_meta.out(Tensor self, Tensor other, *, int self_num_batch_dims=0, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _cudnn_ctc_loss_out(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_cudnn_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _cudnn_rnn_flatten_weight_out(TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional, Tensor & out); // {"schema": "aten::_cudnn_rnn_flatten_weight.out(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, bool bidirectional, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _cudnn_rnn_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const c10::optional & weight_buf, const Tensor & hx, const c10::optional & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::_cudnn_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) 
out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"} +void _cudnn_rnn_backward_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2, TensorList out3); // {"schema": "aten::_cudnn_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, SymInt[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!)[] out3) -> ()", "dispatch": "True", "default": "True"} +Tensor & _cudnn_init_dropout_state_out(double dropout, bool train, int64_t dropout_seed, Tensor & out); // {"schema": "aten::_cudnn_init_dropout_state.out(float dropout, bool train, int dropout_seed, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _fused_dropout_out(const Tensor & self, double p, c10::optional generator, Tensor & out0, Tensor & out1); // {"schema": "aten::_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _masked_scale_out(const Tensor & self, const Tensor & mask, double scale, Tensor & out); // {"schema": "aten::_masked_scale.out(Tensor self, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_dropout_out(const Tensor & input, double p, c10::optional train, Tensor & out0, Tensor & out1); // {"schema": "aten::native_dropout.out(Tensor input, float p, bool? train, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & native_dropout_backward_out(const Tensor & grad_output, const Tensor & mask, double scale, Tensor & out); // {"schema": "aten::native_dropout_backward.out(Tensor grad_output, Tensor mask, float scale, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _conj_physical_out(const Tensor & self, Tensor & out); // {"schema": "aten::_conj_physical.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _add_relu_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & add_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & affine_grid_generator_out(const Tensor & theta, IntArrayRef size, bool align_corners, Tensor & out); // {"schema": "aten::affine_grid_generator.out(Tensor theta, int[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bartlett_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bartlett_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_batch_norm_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, const Tensor & mean, const Tensor & var, double eps, double output_scale, int64_t output_zero_point, Tensor & out); // {"schema": "aten::quantized_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bernoulli_out(const Tensor & self, const Tensor & p, c10::optional generator, Tensor & out); // {"schema": "aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor bernoulli(const Tensor & self, const Tensor & p, c10::optional generator); // {"schema": "aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & bernoulli_out(const Tensor & self, double p, c10::optional generator, Tensor & out); // {"schema": "aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & binary_cross_entropy_with_logits_out(const Tensor & self, const Tensor & target, const c10::optional & weight, const c10::optional & pos_weight, int64_t reduction, Tensor & out); // {"schema": "aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & bincount_out(const Tensor & self, const c10::optional & weights, int64_t minlength, Tensor & out); // {"schema": "aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & blackman_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::blackman_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & blackman_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::blackman_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & block_diag_out(TensorList tensors, Tensor & out); // {"schema": "aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & constant_pad_nd_out(const Tensor & self, IntArrayRef pad, const Scalar & value, Tensor & out); // {"schema": "aten::constant_pad_nd.out(Tensor self, int[] pad, Scalar value=0, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, Tensor & out); // {"schema": "aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, OptionalSymIntArrayRef bias_sizes, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & convolution_overrideable_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, Tensor & out); // {"schema": "aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple convolution_backward_overrideable_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, Tensor & out); // {"schema": "aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & conv_tbc_out(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad, Tensor & out); // {"schema": "aten::conv_tbc.out(Tensor self, Tensor weight, Tensor bias, int pad=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & copy_out(const Tensor & self, const Tensor & src, bool non_blocking, Tensor & out); // {"schema": "aten::copy.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _copy_from_out(const Tensor & self, const Tensor & dst, bool non_blocking, Tensor & out); // {"schema": "aten::_copy_from.out(Tensor self, Tensor dst, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _copy_from_and_resize_out(const Tensor & self, const Tensor & dst, Tensor & out); // {"schema": "aten::_copy_from_and_resize.out(Tensor self, Tensor dst, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & count_nonzero_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & count_nonzero_out(const Tensor & self, c10::optional dim, Tensor & out); // {"schema": "aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_affine_grid_generator_out(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, Tensor & out); // {"schema": "aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_affine_grid_generator_backward_out(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, Tensor & out); // {"schema": "aten::cudnn_affine_grid_generator_backward.out(Tensor grad, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple cudnn_batch_norm_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple cudnn_batch_norm_backward_out(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, const Tensor & reserveSpace, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::cudnn_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, Tensor reserveSpace, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_out(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, Tensor & out); // {"schema": "aten::cudnn_convolution.out(Tensor self, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_transpose_out(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, Tensor & out); // {"schema": "aten::cudnn_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mps_convolution_transpose_out(const Tensor & self, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, Tensor & out); // {"schema": "aten::_mps_convolution_transpose.out(Tensor self, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mps_convolution_transpose_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::mps_convolution_transpose_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_relu_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups, Tensor & out); // {"schema": "aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_convolution_add_relu_out(const Tensor & self, const Tensor & weight, const Tensor & z, const c10::optional & alpha, const c10::optional & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups, Tensor & out); // {"schema": "aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & cudnn_grid_sampler_out(const Tensor & self, const Tensor & grid, Tensor & out); // {"schema": "aten::cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple cudnn_grid_sampler_backward_out(const Tensor & self, const Tensor & grid, const Tensor & grad_output, Tensor & out0, Tensor & out1); // {"schema": "aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _ctc_loss_out(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity, Tensor & out0, Tensor & out1); // {"schema": "aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _ctc_loss_backward_out(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity, Tensor & out); // {"schema": "aten::_ctc_loss_backward.out(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diag_embed_out(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & div_out(const Tensor & self, const Scalar & other, c10::optional rounding_mode, Tensor & out); // {"schema": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_out(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse, Tensor & out); // {"schema": "aten::embedding.out(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_dense_backward_out(const Tensor & grad_output, const Tensor & indices, c10::SymInt num_weights, int64_t padding_idx, bool scale_grad_by_freq, Tensor & out); // {"schema": "aten::embedding_dense_backward.out(Tensor grad_output, Tensor indices, SymInt num_weights, int padding_idx, bool scale_grad_by_freq, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & embedding_renorm_out(const Tensor & self, const Tensor & indices, double max_norm, double norm_type, Tensor & out); // {"schema": "aten::embedding_renorm.out(Tensor self, Tensor indices, float max_norm, float norm_type, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor embedding_renorm(const Tensor & self, const Tensor & indices, double max_norm, double norm_type); // {"schema": "aten::embedding_renorm(Tensor self, Tensor indices, float max_norm, float norm_type) -> Tensor", "dispatch": "True", "default": "True"} +::std::tuple _embedding_bag_forward_only_out(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::_embedding_bag_forward_only.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +::std::tuple _embedding_bag_out(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional & per_sample_weights, bool include_last_offset, int64_t padding_idx, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::_embedding_bag.out(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +Tensor & _embedding_bag_dense_backward_out(const Tensor & grad, const Tensor & indices, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx, Tensor & out); // {"schema": "aten::_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _embedding_bag_per_sample_weights_backward_out(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode, int64_t padding_idx, Tensor & out); // {"schema": "aten::_embedding_bag_per_sample_weights_backward.out(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_out(IntArrayRef size, c10::optional names, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty.names_out(int[] size, *, Dimname[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_empty_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_empty_strided_out(const Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_full_out(const Tensor & self, c10::SymIntArrayRef size, const Scalar & fill_value, Tensor & out); // {"schema": "aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_zeros_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & new_ones_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _empty_affine_quantized_out(IntArrayRef size, double scale, int64_t zero_point, c10::optional memory_format, Tensor & out); // {"schema": "aten::_empty_affine_quantized.out(int[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _empty_per_channel_affine_quantized_out(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, c10::optional memory_format, Tensor & out); // {"schema": "aten::_empty_per_channel_affine_quantized.out(int[] size, *, Tensor scales, Tensor zero_points, int axis, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +const Tensor & resize_out(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format, const Tensor & out); // {"schema": "aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor resize(const Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format); // {"schema": "aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"} +const Tensor & _resize_output_out(const Tensor & self, IntArrayRef size, Device device, const Tensor & out); // {"schema": "aten::_resize_output.out(Tensor self, int[] size, Device device, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _resize_output(const Tensor & self, IntArrayRef size, Device device); // {"schema": "aten::_resize_output(Tensor self, int[] size, Device device) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & empty_quantized_out(IntArrayRef size, const Tensor & qtensor, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty_quantized.out(int[] size, Tensor qtensor, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & empty_strided_out(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::empty_strided.out(SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fill_out(const Tensor & self, const Scalar & value, Tensor & out); // {"schema": "aten::fill.Scalar_out(Tensor self, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & fill_out(const Tensor & self, const Tensor & value, Tensor & out); // {"schema": "aten::fill.Tensor_out(Tensor self, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & full_out(IntArrayRef size, const Scalar & fill_value, c10::optional names, Tensor & out); // {"schema": "aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & full_like_out(const Tensor & self, const Scalar & fill_value, c10::optional memory_format, Tensor & out); // {"schema": "aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & from_file_out(c10::string_view filename, c10::optional shared, c10::optional size, Tensor & out); // {"schema": "aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & grid_sampler_2d_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::grid_sampler_2d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple grid_sampler_2d_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::grid_sampler_2d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _grid_sampler_2d_cpu_fallback_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::_grid_sampler_2d_cpu_fallback.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & grid_sampler_3d_out(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, Tensor & out); // {"schema": "aten::grid_sampler_3d.out(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple grid_sampler_3d_backward_out(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array output_mask, Tensor & out0, Tensor & out1); // {"schema": "aten::grid_sampler_3d_backward.out(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners, bool[2] output_mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & hann_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hann_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, double alpha, Tensor & out); // {"schema": "aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hamming_window_out(int64_t window_length, bool periodic, double alpha, double beta, Tensor & out); // {"schema": "aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, Tensor & out); // {"schema": "aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, bool periodic, Tensor & out); // {"schema": "aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & kaiser_window_out(int64_t window_length, bool periodic, double beta, Tensor & out); // {"schema": "aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_out(const Tensor & input, const c10::optional & weight, const c10::optional & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple native_group_norm_backward_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_group_norm_backward.out(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & index_put_out(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, Tensor & out); // {"schema": "aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _index_put_impl_out(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe, Tensor & out); // {"schema": "aten::_index_put_impl.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor _index_put_impl(const Tensor & self, const c10::List> & indices, const Tensor & values, bool accumulate, bool unsafe); // {"schema": "aten::_index_put_impl(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor", "dispatch": "True", "default": "True"} +Tensor & isnan_out(const Tensor & self, Tensor & out); // {"schema": "aten::isnan.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_out(const Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional & weight, const c10::optional & bias, double eps, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple native_layer_norm_backward_out(const Tensor & grad_out, const Tensor & input, c10::SymIntArrayRef normalized_shape, const Tensor & mean, const Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_layer_norm_backward.out(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple linear_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & mkldnn_linear_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, Tensor & out); // {"schema": "aten::mkldnn_linear.out(Tensor self, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_linear_backward_input_out(IntArrayRef input_size, const Tensor & grad_output, const Tensor & weight, Tensor & out); // {"schema": "aten::mkldnn_linear_backward_input.out(int[] input_size, Tensor grad_output, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_linear_backward_weights_out(const Tensor & grad_output, const Tensor & input, const Tensor & weight, bool bias_defined, Tensor & out0, Tensor & out1); // {"schema": "aten::mkldnn_linear_backward_weights.out(Tensor grad_output, Tensor input, Tensor weight, bool bias_defined, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple mkldnn_linear_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::mkldnn_linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple matmul_backward_out(const Tensor & grad, const Tensor & self, const Tensor & other, ::std::array mask, Tensor & out0, Tensor & out1); // {"schema": "aten::matmul_backward.out(Tensor grad, Tensor self, Tensor other, bool[2] mask, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _aminmax_out(const Tensor & self, Tensor & out0, Tensor & out1); // {"schema": "aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _aminmax_out(const Tensor & self, int64_t dim, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _mps_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::_mps_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mps_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mps_max_pool2d_backward.out(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool2d_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool2d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool3d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_max_pool3d_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool1d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool1d.out(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & quantized_max_pool2d_out(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out); // {"schema": "aten::quantized_max_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & median_out(const Tensor & self, Tensor & out); // {"schema": "aten::median.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & nanmedian_out(const Tensor & self, Tensor & out); // {"schema": "aten::nanmedian.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mps_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, Tensor & out); // {"schema": "aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple mps_convolution_backward_out(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::mps_convolution_backward.out(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & mkldnn_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, Tensor & out); // {"schema": "aten::mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple miopen_batch_norm_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, const c10::optional & running_mean, const c10::optional & running_var, bool training, double exponential_average_factor, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::miopen_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple miopen_batch_norm_backward_out(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_var, double epsilon, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::miopen_batch_norm_backward.out(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & miopen_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & miopen_convolution_transpose_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_convolution_transpose.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & miopen_depthwise_convolution_out(const Tensor & self, const Tensor & weight, const c10::optional & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, Tensor & out); // {"schema": "aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple miopen_rnn_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const c10::optional & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"} +void miopen_rnn_backward_out(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const c10::optional & cx, const Tensor & output, const c10::optional & grad_output, const c10::optional & grad_hy, const c10::optional & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const c10::optional & dropout_state, const Tensor & reserve, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2, TensorList out3); // {"schema": "aten::miopen_rnn_backward.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2, Tensor(d!)[] out3) -> ()", "dispatch": "True", "default": "True"} +Tensor & _sparse_sparse_matmul_out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::_sparse_sparse_matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_mask_helper_out(const Tensor & t, const Tensor & mask_indices, Tensor & out); // {"schema": "aten::_sparse_mask_helper.out(Tensor t, Tensor mask_indices, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mul_out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_stats_out(const Tensor & input, double eps, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_stats.out(Tensor input, float eps, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_gather_stats_out(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, int64_t count, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_gather_stats.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_gather_stats_with_counts_out(const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & running_mean, const c10::optional & running_var, double momentum, double eps, const Tensor & counts, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple native_batch_norm_backward_out(const Tensor & grad_out, const Tensor & input, const c10::optional & weight, const c10::optional & running_mean, const c10::optional & running_var, const c10::optional & save_mean, const c10::optional & save_invstd, bool train, double eps, ::std::array output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::native_batch_norm_backward.out(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_backward_reduce_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, bool input_g, bool weight_g, bool bias_g, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::batch_norm_backward_reduce.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) 
out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"} +Tensor & batch_norm_backward_elemt_out(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const c10::optional & weight, const Tensor & mean_dy, const Tensor & mean_dy_xmu, const Tensor & count, Tensor & out); // {"schema": "aten::batch_norm_backward_elemt.out(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple batch_norm_update_stats_out(const Tensor & input, const c10::optional & running_mean, const c10::optional & running_var, double momentum, Tensor & out0, Tensor & out1); // {"schema": "aten::batch_norm_update_stats.out(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & _nnpack_spatial_convolution_out(const Tensor & input, const Tensor & weight, const c10::optional & bias, IntArrayRef padding, IntArrayRef stride, Tensor & out); // {"schema": "aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, int[2] padding, int[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ones_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::ones.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & ones_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _euclidean_dist_out(const Tensor & x1, const Tensor & x2, Tensor & out); // {"schema": "aten::_euclidean_dist.out(Tensor x1, Tensor x2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cdist_forward_out(const Tensor & x1, const Tensor & x2, double p, c10::optional compute_mode, Tensor & out); // {"schema": "aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _cdist_backward_out(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist, Tensor & out); // {"schema": "aten::_cdist_backward.out(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pdist_forward_out(const Tensor & self, double p, Tensor & out); // {"schema": "aten::_pdist_forward.out(Tensor self, float p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pdist_backward_out(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist, Tensor & out); // {"schema": "aten::_pdist_backward.out(Tensor grad, Tensor self, float p, Tensor pdist, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pixel_shuffle_out(const Tensor & self, int64_t upscale_factor, Tensor & out); // {"schema": "aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & pixel_unshuffle_out(const Tensor & self, int64_t downscale_factor, Tensor & out); // {"schema": "aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & channel_shuffle_out(const Tensor & self, int64_t groups, Tensor & out); // {"schema": "aten::channel_shuffle.out(Tensor self, int groups, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _pin_memory_out(const Tensor & self, c10::optional device, Tensor & out); // {"schema": "aten::_pin_memory.out(Tensor self, Device? device=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & scalar_tensor_out(const Scalar & s, Tensor & out); // {"schema": "aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::rand.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_out(IntArrayRef size, c10::optional generator, c10::optional names, Tensor & out); // {"schema": "aten::rand.generator_with_names_out(int[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rand_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_like_out(const Tensor & self, int64_t high, c10::optional memory_format, Tensor & out); // {"schema": "aten::randint_like.out(Tensor self, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randint_like_out(const Tensor & self, int64_t low, int64_t high, c10::optional memory_format, Tensor & out); // {"schema": "aten::randint_like.low_dtype_out(Tensor self, int low, int high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::randn.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_out(IntArrayRef size, c10::optional generator, c10::optional names, Tensor & out); // {"schema": "aten::randn.generator_with_names_out(int[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & randn_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & repeat_out(const Tensor & self, c10::SymIntArrayRef repeats, Tensor & out); // {"schema": "aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & repeat_interleave_out(const Tensor & repeats, c10::optional output_size, Tensor & out); // {"schema": "aten::repeat_interleave.Tensor_out(Tensor repeats, *, int? output_size=None, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_reshape_out(const Tensor & self, IntArrayRef shape, Tensor & out); // {"schema": "aten::_mkldnn_reshape.out(Tensor self, int[] shape, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & relu_out(const Tensor & self, Tensor & out); // {"schema": "aten::relu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & prelu_out(const Tensor & self, const Tensor & weight, Tensor & out); // {"schema": "aten::prelu.out(Tensor self, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple prelu_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & weight, Tensor & out0, Tensor & out1); // {"schema": "aten::prelu_backward.out(Tensor grad_output, Tensor self, Tensor weight, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & select_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t index, Tensor & out); // {"schema": "aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & celu_out(const Tensor & self, const Scalar & alpha, Tensor & out); // {"schema": "aten::celu.out(Tensor self, Scalar alpha=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_backward_out(const Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt start, SymInt end, SymInt step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & slice_scatter_out(const Tensor & self, const Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, Tensor & out); // {"schema": "aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & select_scatter_out(const Tensor & self, const Tensor & src, int64_t dim, int64_t index, Tensor & out); // {"schema": "aten::select_scatter.out(Tensor self, Tensor src, int dim, int index, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & diagonal_scatter_out(const Tensor & self, const Tensor & src, int64_t offset, int64_t dim1, int64_t dim2, Tensor & out); // {"schema": "aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & as_strided_scatter_out(const Tensor & self, const Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset, Tensor & out); // {"schema": "aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +void unsafe_split_out(const Tensor & self, int64_t split_size, int64_t dim, TensorList out); // {"schema": "aten::unsafe_split.Tensor_out(Tensor self, int split_size, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void unsafe_split_with_sizes_out(const Tensor & self, IntArrayRef split_sizes, int64_t dim, TensorList out); // {"schema": "aten::unsafe_split_with_sizes.out(Tensor self, int[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & sum_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple std_mean_out(const Tensor & self, OptionalIntArrayRef dim, c10::optional correction, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::std_mean.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & prod_out(const Tensor & self, c10::optional dtype, Tensor & out); // {"schema": "aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _mkldnn_transpose_out(const Tensor & self, int64_t dim0, int64_t dim1, Tensor & out); // {"schema": "aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & flip_out(const Tensor & self, IntArrayRef dims, Tensor & out); // {"schema": "aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & roll_out(const Tensor & self, IntArrayRef shifts, IntArrayRef dims, Tensor & out); // {"schema": "aten::roll.out(Tensor self, int[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rot90_out(const Tensor & self, int64_t k, IntArrayRef dims, Tensor & out); // {"schema": "aten::rot90.out(Tensor self, int k=1, int[] dims=[0,1], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _transform_bias_rescale_qkv_out(const Tensor & qkv, const Tensor & qkv_bias, int64_t num_heads, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_from_mask_out(const Tensor & t, const Tensor & mask, bool mask_check, Tensor & out); // {"schema": "aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_from_padded_out(const Tensor & padded, const Tensor & cpu_nested_shape_example, bool fuse_transform_0213, Tensor & out); // {"schema": "aten::_nested_from_padded.out(Tensor padded, Tensor cpu_nested_shape_example, bool fuse_transform_0213=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_size_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_size.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_tensor_strides_out(const Tensor & self, Tensor & out); // {"schema": "aten::_nested_tensor_strides.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_from_padded_and_nested_example_out(const Tensor & padded, const Tensor & nt_example, Tensor & out); // {"schema": "aten::_nested_from_padded_and_nested_example.out(Tensor padded, Tensor nt_example, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _nested_view_from_buffer_copy_out(const Tensor & self, const Tensor & nested_size, const Tensor & nested_strides, IntArrayRef offsets, Tensor & out); // {"schema": "aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, int[] offsets, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _trilinear_out(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim, Tensor & out); // {"schema": "aten::_trilinear.out(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple _unique_out(const Tensor & self, bool sorted, bool return_inverse, Tensor & out0, Tensor & out1); // {"schema": "aten::_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple unique_dim_out(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple unique_consecutive_out(const Tensor & self, bool return_inverse, bool return_counts, c10::optional dim, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple unique_dim_consecutive_out(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +::std::tuple _unique2_out(const Tensor & self, bool sorted, bool return_inverse, bool return_counts, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"} +Tensor & _unsafe_view_out(const Tensor & self, c10::SymIntArrayRef size, Tensor & out); // {"schema": "aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +::std::tuple var_mean_out(const Tensor & self, OptionalIntArrayRef dim, c10::optional correction, bool keepdim, Tensor & out0, Tensor & out1); // {"schema": "aten::var_mean.correction_out(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _weight_norm_interface_out(const Tensor & v, const Tensor & g, int64_t dim, Tensor & out0, Tensor & out1); // {"schema": "aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +::std::tuple _weight_norm_interface_backward_out(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim, Tensor & out0, Tensor & out1); // {"schema": "aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"} +Tensor & zeros_out(IntArrayRef size, c10::optional names, Tensor & out); // {"schema": "aten::zeros.names_out(int[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _efficientzerotensor_out(IntArrayRef size, Tensor & out); // {"schema": "aten::_efficientzerotensor.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & zeros_like_out(const Tensor & self, c10::optional memory_format, Tensor & out); // {"schema": "aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _standard_gamma_grad_out(const Tensor & self, const Tensor & output, Tensor & out); // {"schema": "aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _standard_gamma_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::_standard_gamma.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _dirichlet_grad_out(const Tensor & x, const Tensor & alpha, const Tensor & total, Tensor & out); // {"schema": "aten::_dirichlet_grad.out(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sample_dirichlet_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & poisson_out(const Tensor & self, c10::optional generator, Tensor & out); // {"schema": "aten::poisson.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & binomial_out(const Tensor & count, const Tensor & prob, c10::optional generator, Tensor & out); // {"schema": "aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & native_norm_out(const Tensor & self, const Scalar & p, Tensor & out); // {"schema": "aten::native_norm.out(Tensor self, Scalar p=2, *, Tensor(a!) 
out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & native_norm_out(const Tensor & self, const c10::optional & p, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::native_norm.ScalarOpt_dim_dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_sum_out(const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::_sparse_sum.dim_out(Tensor self, int[1] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_sum_backward_out(const Tensor & grad, const Tensor & self, IntArrayRef dim, Tensor & out); // {"schema": "aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_csr_sum_out(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_csr_prod_out(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype, Tensor & out); // {"schema": "aten::_sparse_csr_prod.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_sparse_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self, Tensor & out); // {"schema": "aten::_sparse_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_log_softmax_out(const Tensor & self, int64_t dim, bool half_to_float, Tensor & out); // {"schema": "aten::_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _sparse_log_softmax_backward_data_out(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self, Tensor & out); // {"schema": "aten::_sparse_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _spdiags_out(const Tensor & diagonals, const Tensor & offsets, IntArrayRef shape, c10::optional layout, Tensor & out); // {"schema": "aten::_spdiags.out(Tensor diagonals, Tensor offsets, int[] shape, Layout? layout=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const c10::optional & p, ScalarType dtype, Tensor & out); // {"schema": "aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & norm_out(const Tensor & self, const Scalar & p, Tensor & out); // {"schema": "aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) 
+Tensor & clone_out(const Tensor & self, c10::optional<MemoryFormat> memory_format, Tensor & out); // {"schema": "aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+const Tensor & resize_as_out(const Tensor & self, const Tensor & the_template, c10::optional<MemoryFormat> memory_format, const Tensor & out); // {"schema": "aten::resize_as.out(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor resize_as(const Tensor & self, const Tensor & the_template, c10::optional<MemoryFormat> memory_format); // {"schema": "aten::resize_as(Tensor self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor", "dispatch": "True", "default": "True"}
+const Tensor & resize_as_sparse_out(const Tensor & self, const Tensor & the_template, const Tensor & out); // {"schema": "aten::resize_as_sparse.out(Tensor self, Tensor the_template, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor resize_as_sparse(const Tensor & self, const Tensor & the_template); // {"schema": "aten::resize_as_sparse(Tensor self, Tensor the_template) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & zero_out(const Tensor & self, Tensor & out); // {"schema": "aten::zero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor zero(const Tensor & self); // {"schema": "aten::zero(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sub_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & rsub_out(const Tensor & self, const Tensor & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & rsub_out(const Tensor & self, const Scalar & other, const Scalar & alpha, Tensor & out); // {"schema": "aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _sparse_addmm_out(const Tensor & self, const Tensor & mat1, const Tensor & mat2, const Scalar & beta, const Scalar & alpha, Tensor & out); // {"schema": "aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & sparse_coo_tensor_out(IntArrayRef size, Tensor & out); // {"schema": "aten::sparse_coo_tensor.size_out(int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _sparse_coo_tensor_with_dims_out(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, Tensor & out); // {"schema": "aten::_sparse_coo_tensor_with_dims.out(int sparse_dim, int dense_dim, int[] size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _sparse_coo_tensor_with_dims_and_tensors_out(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, Tensor & out); // {"schema": "aten::_sparse_coo_tensor_with_dims_and_tensors.out(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+const Tensor & sparse_resize_out(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const Tensor & out); // {"schema": "aten::sparse_resize.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor sparse_resize(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", "dispatch": "True", "default": "True"}
+const Tensor & sparse_resize_and_clear_out(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim, const Tensor & out); // {"schema": "aten::sparse_resize_and_clear.out(Tensor self, int[] size, int sparse_dim, int dense_dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor sparse_resize_and_clear(const Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim); // {"schema": "aten::sparse_resize_and_clear(Tensor self, int[] size, int sparse_dim, int dense_dim) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & sparse_mask_out(const Tensor & self, const Tensor & mask, Tensor & out); // {"schema": "aten::sparse_mask.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _to_dense_out(const Tensor & self, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::_to_dense.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _coalesce_out(const Tensor & self, Tensor & out); // {"schema": "aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _coalesced_out(const Tensor & self, bool coalesced, Tensor & out); // {"schema": "aten::_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor _coalesced(const Tensor & self, bool coalesced); // {"schema": "aten::_coalesced(Tensor self, bool coalesced) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & copy_sparse_to_sparse_out(const Tensor & self, const Tensor & src, bool non_blocking, Tensor & out); // {"schema": "aten::copy_sparse_to_sparse.out(Tensor self, Tensor src, bool non_blocking=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor copy_sparse_to_sparse(const Tensor & self, const Tensor & src, bool non_blocking); // {"schema": "aten::copy_sparse_to_sparse(Tensor self, Tensor src, bool non_blocking=False) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & to_sparse_out(const Tensor & self, int64_t sparse_dim, Tensor & out); // {"schema": "aten::to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & to_sparse_out(const Tensor & self, Tensor & out); // {"schema": "aten::to_sparse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & to_sparse_csr_out(const Tensor & self, Tensor & out); // {"schema": "aten::to_sparse_csr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & to_sparse_csc_out(const Tensor & self, Tensor & out); // {"schema": "aten::to_sparse_csc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
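zero, resize_as, _coalesced and copy_sparse_to_sparse above are functional counterparts of long-standing mutating ops, generated so functionalization passes can replace in-place mutation with pure ops. A small sketch of the difference, assuming the functional wrapper is exposed as at::zero, as the declaration suggests:

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::ones({2, 3});
  at::Tensor z = at::zero(t); // functional form: returns a zeroed copy, t keeps its ones
  t.zero_();                  // classic in-place form: mutates t itself
}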
+Tensor & to_sparse_bsr_out(const Tensor & self, IntArrayRef blocksize, Tensor & out); // {"schema": "aten::to_sparse_bsr.out(Tensor self, int[2] blocksize, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & to_sparse_bsc_out(const Tensor & self, IntArrayRef blocksize, Tensor & out); // {"schema": "aten::to_sparse_bsc.out(Tensor self, int[2] blocksize, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & to_mkldnn_out(const Tensor & self, c10::optional<ScalarType> dtype, Tensor & out); // {"schema": "aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & mkldnn_reorder_conv2d_weight_out(const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, Tensor & out); // {"schema": "aten::mkldnn_reorder_conv2d_weight.out(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & mkldnn_reorder_conv3d_weight_out(const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, Tensor & out); // {"schema": "aten::mkldnn_reorder_conv3d_weight.out(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & quantize_per_tensor_dynamic_out(const Tensor & self, ScalarType dtype, bool reduce_range, Tensor & out); // {"schema": "aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & quantize_per_tensor_out(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & quantize_per_tensor_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+void quantize_per_tensor_out(TensorList tensors, const Tensor & scales, const Tensor & zero_points, ScalarType dtype, TensorList out); // {"schema": "aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+Tensor & quantize_per_channel_out(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype, Tensor & out); // {"schema": "aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & dequantize_out(const Tensor & self, Tensor & out); // {"schema": "aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+void dequantize_out(TensorList tensors, TensorList out); // {"schema": "aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+Tensor & q_per_channel_scales_out(const Tensor & self, Tensor & out); // {"schema": "aten::q_per_channel_scales.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & q_per_channel_zero_points_out(const Tensor & self, Tensor & out); // {"schema": "aten::q_per_channel_zero_points.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & int_repr_out(const Tensor & self, Tensor & out); // {"schema": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _make_per_tensor_quantized_tensor_out(const Tensor & self, double scale, int64_t zero_point, Tensor & out); // {"schema": "aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _make_per_channel_quantized_tensor_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, Tensor & out); // {"schema": "aten::_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> fake_quantize_per_tensor_affine_cachemask_out(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, const Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams.out(Tensor self, Tensor scale, Tensor zero_point, Tensor fake_quant_enabled, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+Tensor & _fake_quantize_learnable_per_tensor_affine_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, Tensor & out); // {"schema": "aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> fake_quantize_per_channel_affine_cachemask_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, Tensor & out0, Tensor & out1); // {"schema": "aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
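The quantize/dequantize declarations above add explicit-output forms of ops that have long existed functionally; a round-trip through the stable functional API looks like this (standard ATen calls, shown for orientation):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({4});
  // Affine-quantize to quint8 with fixed qparams, inspect raw ints, recover floats.
  at::Tensor q = at::quantize_per_tensor(x, /*scale=*/0.1, /*zero_point=*/10, at::kQUInt8);
  at::Tensor raw = at::int_repr(q);  // underlying uint8 values
  at::Tensor y = at::dequantize(q);  // approximately x, up to quantization error
}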
+Tensor & _fake_quantize_learnable_per_channel_affine_out(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, Tensor & out); // {"schema": "aten::_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> _fused_moving_avg_obs_fq_helper_out(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, Tensor & running_min, Tensor & running_max, Tensor & scale, Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, Tensor & out0, Tensor & out1); // {"schema": "aten::_fused_moving_avg_obs_fq_helper.out(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False, *, Tensor(e!) out0, Tensor(f!) out1) -> (Tensor(e!), Tensor(f!))", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor,Tensor> _fused_moving_avg_obs_fq_helper_functional(const Tensor & self, const Tensor & observer_on, const Tensor & fake_quant_on, const Tensor & running_min, const Tensor & running_max, const Tensor & scale, const Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant); // {"schema": "aten::_fused_moving_avg_obs_fq_helper_functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out)", "dispatch": "True", "default": "True"}
+Tensor & _to_copy_out(const Tensor & self, bool non_blocking, c10::optional<MemoryFormat> memory_format, Tensor & out); // {"schema": "aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &,Tensor &,Tensor &> _lstm_mps_out(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"}
+void lstm_mps_backward_out(const Tensor & grad_y, const c10::optional<Tensor> & grad_hy, const c10::optional<Tensor> & grad_cy, const Tensor & z_state, const Tensor & cell_state_fwd, const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, Tensor & out0, TensorList out1, TensorList out2); // {"schema": "aten::lstm_mps_backward.out(Tensor grad_y, Tensor? grad_hy, Tensor? grad_cy, Tensor z_state, Tensor cell_state_fwd, Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!)[] out1, Tensor(c!)[] out2) -> ()", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &> _thnn_fused_lstm_cell_out(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const c10::optional<Tensor> & input_bias, const c10::optional<Tensor> & hidden_bias, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &> _thnn_fused_lstm_cell_backward_impl_out(const c10::optional<Tensor> & grad_hy, const c10::optional<Tensor> & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_thnn_fused_lstm_cell_backward_impl.out(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> _thnn_fused_gru_cell_out(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const c10::optional<Tensor> & input_bias, const c10::optional<Tensor> & hidden_bias, Tensor & out0, Tensor & out1); // {"schema": "aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &,Tensor &,Tensor &> _thnn_fused_gru_cell_backward_out(const Tensor & grad_hy, const Tensor & workspace, bool has_bias, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3, Tensor & out4); // {"schema": "aten::_thnn_fused_gru_cell_backward.out(Tensor grad_hy, Tensor workspace, bool has_bias, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> _pack_padded_sequence_out(const Tensor & input, const Tensor & lengths, bool batch_first, Tensor & out0, Tensor & out1); // {"schema": "aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+Tensor & set_out(const Tensor & self, Storage source, Tensor & out); // {"schema": "aten::set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor set(const Tensor & self, Storage source); // {"schema": "aten::set.source_Storage(Tensor self, Storage source) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & set_out(const Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, Tensor & out); // {"schema": "aten::set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
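Each "schema" comment carries the exact (name, overload) pair the dispatcher registers, so any of these ops can be located at runtime; a minimal lookup sketch using the public dispatcher API:

#include <ATen/core/dispatch/Dispatcher.h>
#include <iostream>

int main() {
  // Look up the out-variant overload of aten::_pack_padded_sequence declared above.
  auto handle = c10::Dispatcher::singleton()
                    .findSchemaOrThrow("aten::_pack_padded_sequence", "out");
  std::cout << handle.schema() << "\n"; // prints the registered FunctionSchema
}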
+Tensor set(const Tensor & self, Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); // {"schema": "aten::set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & set_out(const Tensor & self, const Tensor & source, Tensor & out); // {"schema": "aten::set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor set(const Tensor & self, const Tensor & source); // {"schema": "aten::set.source_Tensor(Tensor self, Tensor source) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & set_out(const Tensor & self, Tensor & out); // {"schema": "aten::set.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor set(const Tensor & self); // {"schema": "aten::set(Tensor self) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & lift_out(const Tensor & self, Tensor & out); // {"schema": "aten::lift.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & lift_fresh_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & masked_fill_out(const Tensor & self, const Tensor & mask, const Scalar & value, Tensor & out); // {"schema": "aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & masked_fill_out(const Tensor & self, const Tensor & mask, const Tensor & value, Tensor & out); // {"schema": "aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & masked_scatter_out(const Tensor & self, const Tensor & mask, const Tensor & source, Tensor & out); // {"schema": "aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _masked_softmax_out(const Tensor & self, const Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type, Tensor & out); // {"schema": "aten::_masked_softmax.out(Tensor self, Tensor mask, int? dim=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _masked_softmax_backward_out(const Tensor & grad_output, const Tensor & output, const Tensor & mask, c10::optional<int64_t> dim, Tensor & out); // {"schema": "aten::_masked_softmax_backward.out(Tensor grad_output, Tensor output, Tensor mask, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & put_out(const Tensor & self, const Tensor & index, const Tensor & source, bool accumulate, Tensor & out); // {"schema": "aten::put.out(Tensor self, Tensor index, Tensor source, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & index_fill_out(const Tensor & self, int64_t dim, const Tensor & index, const Scalar & value, Tensor & out); // {"schema": "aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & index_fill_out(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value, Tensor & out); // {"schema": "aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bitwise_and_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bitwise_or_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bitwise_xor_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & __lshift___out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & __lshift___out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bitwise_left_shift_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & __rshift___out(const Tensor & self, const Scalar & other, Tensor & out); // {"schema": "aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & __rshift___out(const Tensor & self, const Tensor & other, Tensor & out); // {"schema": "aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & bitwise_right_shift_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & random_out(const Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::random.from_out(Tensor self, int from, int? to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor random(const Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<Generator> generator); // {"schema": "aten::random.from(Tensor self, int from, int? to, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & random_out(const Tensor & self, int64_t to, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::random.to_out(Tensor self, int to, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor random(const Tensor & self, int64_t to, c10::optional<Generator> generator); // {"schema": "aten::random.to(Tensor self, int to, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
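The Scalar_Tensor overloads above mirror Python expressions like `5 & t`, where the scalar is the left operand; assuming the functional Scalar_Tensor overload is exposed in the at:: namespace, usage looks like:

#include <ATen/ATen.h>

int main() {
  at::Tensor m = at::tensor({1, 2, 3}, at::kInt);
  at::Tensor r = at::bitwise_and(5, m); // scalar-first overload; the .Scalar_Tensor_out
                                        // declarations above add explicit-output forms
}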
+Tensor & random_out(const Tensor & self, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::random.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor random(const Tensor & self, c10::optional<Generator> generator); // {"schema": "aten::random(Tensor self, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & uniform_out(const Tensor & self, double from, double to, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::uniform.out(Tensor self, float from=0, float to=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor uniform(const Tensor & self, double from, double to, c10::optional<Generator> generator); // {"schema": "aten::uniform(Tensor self, float from=0, float to=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & cauchy_out(const Tensor & self, double median, double sigma, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::cauchy.out(Tensor self, float median=0, float sigma=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor cauchy(const Tensor & self, double median, double sigma, c10::optional<Generator> generator); // {"schema": "aten::cauchy(Tensor self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & log_normal_out(const Tensor & self, double mean, double std, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor log_normal(const Tensor & self, double mean, double std, c10::optional<Generator> generator); // {"schema": "aten::log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & exponential_out(const Tensor & self, double lambd, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::exponential.out(Tensor self, float lambd=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor exponential(const Tensor & self, double lambd, c10::optional<Generator> generator); // {"schema": "aten::exponential(Tensor self, float lambd=1, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & geometric_out(const Tensor & self, double p, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::geometric.out(Tensor self, float p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor geometric(const Tensor & self, double p, c10::optional<Generator> generator); // {"schema": "aten::geometric(Tensor self, float p, *, Generator? generator=None) -> Tensor", "dispatch": "True", "default": "True"}
+Tensor & tril_indices_out(int64_t row, int64_t col, int64_t offset, Tensor & out); // {"schema": "aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & triu_indices_out(int64_t row, int64_t col, int64_t offset, Tensor & out); // {"schema": "aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
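uniform, cauchy, log_normal, exponential and geometric above are functional/out forms of the classic in-place RNG methods; the in-place originals remain the familiar entry point:

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::empty({3, 3});
  t.uniform_(0.0, 1.0);           // in-place draw from U(0, 1)
  t.exponential_(/*lambd=*/1.0);  // in-place draw from Exp(1)
}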
+Tensor & trace_out(const Tensor & self, Tensor & out); // {"schema": "aten::trace.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> _symeig_helper_out(const Tensor & self, bool eigenvectors, bool upper, Tensor & out0, Tensor & out1); // {"schema": "aten::_symeig_helper.out(Tensor self, bool eigenvectors, bool upper, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+Tensor & _cholesky_solve_helper_out(const Tensor & self, const Tensor & A, bool upper, Tensor & out); // {"schema": "aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & dist_out(const Tensor & self, const Tensor & other, const Scalar & p, Tensor & out); // {"schema": "aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+void _histogramdd_bin_edges_out(const Tensor & self, IntArrayRef bins, c10::optional<ArrayRef<double>> range, const c10::optional<Tensor> & weight, bool density, TensorList out); // {"schema": "aten::_histogramdd_bin_edges.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+Tensor & _histogramdd_from_bin_cts_out(const Tensor & self, IntArrayRef bins, c10::optional<ArrayRef<double>> range, const c10::optional<Tensor> & weight, bool density, Tensor & out); // {"schema": "aten::_histogramdd_from_bin_cts.out(Tensor self, int[] bins, *, float[]? range=None, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _histogramdd_from_bin_tensors_out(const Tensor & self, TensorList bins, const c10::optional<Tensor> & weight, bool density, Tensor & out); // {"schema": "aten::_histogramdd_from_bin_tensors.out(Tensor self, Tensor[] bins, *, Tensor? weight=None, bool density=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & remainder_out(const Scalar & self, const Tensor & other, Tensor & out); // {"schema": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & argsort_out(const Tensor & self, bool stable, int64_t dim, bool descending, Tensor & out); // {"schema": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & unfold_backward_out(const Tensor & grad_in, IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, Tensor & out); // {"schema": "aten::unfold_backward.out(Tensor grad_in, int[] input_sizes, int dim, int size, int step, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & normal_out(const Tensor & self, double mean, double std, c10::optional<Generator> generator, Tensor & out); // {"schema": "aten::normal.out(Tensor self, float mean=0, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+void _amp_foreach_non_finite_check_and_unscale_out(TensorList self, Tensor & found_inf, const Tensor & inv_scale, TensorList out); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+::std::tuple<::std::vector<Tensor>,Tensor> _amp_foreach_non_finite_check_and_unscale(TensorList self, const Tensor & found_inf, const Tensor & inv_scale); // {"schema": "aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)", "dispatch": "True", "default": "True"}
+Tensor & _amp_update_scale_out(const Tensor & self, Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, Tensor & out); // {"schema": "aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor,Tensor> _amp_update_scale(const Tensor & self, const Tensor & growth_tracker, const Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); // {"schema": "aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)", "dispatch": "True", "default": "True"}
+void _foreach_add_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sub_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_mul_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_mul.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_div_out(TensorList self, const Scalar & scalar, TensorList out); // {"schema": "aten::_foreach_div.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_add_out(TensorList self, TensorList other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sub_out(TensorList self, TensorList other, const Scalar & alpha, TensorList out); // {"schema": "aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_mul_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_mul.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_div_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_div.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_add_out(TensorList self, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sub_out(TensorList self, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
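The _amp_* entries back torch.cuda.amp's gradient unscaling; a sketch via the long-standing in-place variant (the kernel is implemented for CUDA tensors in practice; shapes and scale here are illustrative):

#include <ATen/ATen.h>
#include <vector>

int main() {
  std::vector<at::Tensor> grads = {at::randn({3}), at::randn({3})};
  at::Tensor found_inf = at::zeros({1});
  at::Tensor inv_scale = at::full({1}, 1.0 / 1024.0);
  // Multiplies every grad by inv_scale; sets found_inf to 1.0 if any inf/nan is seen.
  at::_amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale);
}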
+void _foreach_div_out(TensorList self, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_div.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_mul_out(TensorList self, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_mul.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_exp_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_exp.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_zero_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_zero.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+::std::vector<Tensor> _foreach_zero(TensorList self); // {"schema": "aten::_foreach_zero(Tensor[] self) -> Tensor[] self_out", "dispatch": "True", "default": "True"}
+void _foreach_sqrt_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sqrt.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_abs_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_abs.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_acos_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_asin_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_asin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_atan_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_ceil_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_ceil.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_cos_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_cos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_cosh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_cosh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_erf_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_erf.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_erfc_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_erfc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_expm1_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_expm1.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_floor_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_log_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_log10_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log10.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_log1p_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log1p.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_log2_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_neg_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_neg.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_tan_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_tan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_tanh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_tanh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sin_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sin.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sinh_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_round_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_lgamma_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_lgamma.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_frac_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_frac.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_reciprocal_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_reciprocal.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_sigmoid_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_sigmoid.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_trunc_out(TensorList self, TensorList out); // {"schema": "aten::_foreach_trunc.out(Tensor[] self, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value, TensorList out); // {"schema": "aten::_foreach_addcdiv.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, const Scalar & value, TensorList out); // {"schema": "aten::_foreach_addcmul.Scalar_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_addcdiv_out(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_addcdiv.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+void _foreach_addcmul_out(TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef<Scalar> scalars, TensorList out); // {"schema": "aten::_foreach_addcmul.ScalarList_out(Tensor[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar[] scalars, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
-> ()", "dispatch": "True", "default": "True"} +void _foreach_maximum_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_minimum_out(TensorList self, TensorList other, TensorList out); // {"schema": "aten::_foreach_minimum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +void _foreach_norm_out(TensorList self, const Scalar & ord, TensorList out); // {"schema": "aten::_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"} +Tensor & bucketize_out(const Scalar & self, const Tensor & boundaries, bool out_int32, bool right, Tensor & out); // {"schema": "aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _torch_cuda_cu_linker_symbol_op_out(const Tensor & self, Tensor & out); // {"schema": "aten::_torch_cuda_cu_linker_symbol_op.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & searchsorted_out(const Tensor & sorted_sequence, const Scalar & self, bool out_int32, bool right, c10::optional side, const c10::optional & sorter, Tensor & out); // {"schema": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_jvp_out(const Tensor & glu, const Tensor & x, const Tensor & dx, int64_t dim, Tensor & out); // {"schema": "aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & glu_backward_jvp_out(const Tensor & grad_x, const Tensor & grad_glu, const Tensor & x, const Tensor & dgrad_glu, const Tensor & dx, int64_t dim, Tensor & out); // {"schema": "aten::glu_backward_jvp.out(Tensor grad_x, Tensor grad_glu, Tensor x, Tensor dgrad_glu, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & hardswish_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & rrelu_with_noise_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & noise, const Scalar & lower, const Scalar & upper, bool training, bool self_is_result, Tensor & out); // {"schema": "aten::rrelu_with_noise_backward.out(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & mkldnn_adaptive_avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::mkldnn_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"} +Tensor & _adaptive_avg_pool2d_out(const Tensor & self, c10::SymIntArrayRef output_size, Tensor & out); // {"schema": "aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) 
+Tensor & _adaptive_avg_pool2d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::_adaptive_avg_pool2d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _adaptive_avg_pool3d_out(const Tensor & self, IntArrayRef output_size, Tensor & out); // {"schema": "aten::_adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _adaptive_avg_pool3d_backward_out(const Tensor & grad_output, const Tensor & self, Tensor & out); // {"schema": "aten::_adaptive_avg_pool3d_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_linear1d_out(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_linear1d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_linear1d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_linear1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_bilinear2d_out(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_bilinear2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_bilinear2d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_bilinear2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bilinear2d_aa_out(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_bilinear2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bilinear2d_aa_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_bilinear2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_trilinear3d_out(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_trilinear3d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_trilinear3d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_trilinear3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_bicubic2d_out(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_bicubic2d.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_bicubic2d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_bicubic2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bicubic2d_aa_out(const Tensor & input, OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_bicubic2d_aa.vec_out(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_bicubic2d_aa_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_bicubic2d_aa_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, bool align_corners, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest1d_out(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_nearest1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_nearest_exact1d_out(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_nearest_exact1d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest1d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_nearest1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_nearest_exact1d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_nearest_exact1d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest2d_out(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_nearest2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_nearest_exact2d_out(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_nearest_exact2d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest2d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_nearest2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_nearest_exact2d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_nearest_exact2d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest3d_out(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_nearest3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_nearest_exact3d_out(const Tensor & input, OptionalSymIntArrayRef output_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_nearest_exact3d.vec_out(Tensor input, SymInt[]? output_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & upsample_nearest3d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::upsample_nearest3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _upsample_nearest_exact3d_backward_out(const Tensor & grad_output, OptionalSymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<ArrayRef<double>> scale_factors, Tensor & out); // {"schema": "aten::_upsample_nearest_exact3d_backward.vec_out(Tensor grad_output, SymInt[]? output_size, SymInt[] input_size, float[]? scale_factors, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &> _slow_conv2d_backward_out(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, ::std::array<bool,3> output_mask, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_slow_conv2d_backward.output_mask_out(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"}
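All of the .vec overloads above take either an explicit output_size or a scale_factors list, with the other left unset; in user code they are reached through torch::nn::functional::interpolate, e.g.:

#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 3, 8, 8});
  // Doubles the spatial size via aten::upsample_nearest2d.vec under the hood.
  auto y = F::interpolate(x, F::InterpolateFuncOptions()
                                 .size(std::vector<int64_t>{16, 16})
                                 .mode(torch::kNearest));
}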
+Tensor & conv_depthwise3d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & out); // {"schema": "aten::conv_depthwise3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & slow_conv_dilated2d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_dilated2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & slow_conv_dilated3d_out(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & out); // {"schema": "aten::slow_conv_dilated3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & isinf_out(const Tensor & self, Tensor & out); // {"schema": "aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & linalg_matrix_exp_out(const Tensor & self, Tensor & out); // {"schema": "aten::linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _test_optional_intlist_out(const Tensor & values, OptionalIntArrayRef addends, Tensor & out); // {"schema": "aten::_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _test_optional_filled_intlist_out(const Tensor & values, OptionalIntArrayRef addends, Tensor & out); // {"schema": "aten::_test_optional_filled_intlist.out(Tensor values, int[2]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _test_optional_floatlist_out(const Tensor & values, c10::optional<ArrayRef<double>> addends, Tensor & out); // {"schema": "aten::_test_optional_floatlist.out(Tensor values, float[]? addends, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _test_warn_in_autograd_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_warn_in_autograd.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _test_autograd_multiple_dispatch_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_autograd_multiple_dispatch.fullcoverage_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _test_autograd_multiple_dispatch_view_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & segment_reduce_out(const Tensor & data, c10::string_view reduce, const c10::optional<Tensor> & lengths, const c10::optional<Tensor> & indices, const c10::optional<Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<Scalar> & initial, Tensor & out); // {"schema": "aten::segment_reduce.out(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, Tensor? offsets=None, int axis=0, bool unsafe=False, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _segment_reduce_backward_out(const Tensor & grad, const Tensor & output, const Tensor & data, c10::string_view reduce, const c10::optional<Tensor> & lengths, const c10::optional<Tensor> & offsets, int64_t axis, const c10::optional<Scalar> & initial, Tensor & out); // {"schema": "aten::_segment_reduce_backward.out(Tensor grad, Tensor output, Tensor data, str reduce, *, Tensor? lengths=None, Tensor? offsets=None, int axis=0, Scalar? initial=None, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _nested_tensor_from_tensor_list_out(TensorList list, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, Tensor & out); // {"schema": "aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & ccol_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::ccol_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & row_indices_copy_out(const Tensor & self, Tensor & out); // {"schema": "aten::row_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & to_padded_tensor_out(const Tensor & self, double padding, OptionalIntArrayRef output_size, Tensor & out); // {"schema": "aten::to_padded_tensor.out(Tensor self, float padding, int[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _nested_tensor_layer_norm_out(const Tensor & self, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, double eps, Tensor & out); // {"schema": "aten::_nested_tensor_layer_norm.out(Tensor self, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _transformer_encoder_layer_fwd_out(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional<Tensor> & mask, c10::optional<int64_t> mask_type, Tensor & out); // {"schema": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &> _native_multi_head_attention_out(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional<Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type, Tensor & out0, Tensor & out1); // {"schema": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))", "dispatch": "True", "default": "True"}
+Tensor & _triton_scaled_dot_attention_out(const Tensor & q, const Tensor & k, const Tensor & v, double dropout_p, Tensor & out); // {"schema": "aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+Tensor & _triton_multi_head_attention_out(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional<Tensor> & mask, Tensor & out); // {"schema": "aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &> _transformer_decoder_only_layer_fwd_out(const Tensor & src, int64_t embed_dim, int64_t num_heads, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const Tensor & norm_weight_1, const Tensor & norm_bias_1, const Tensor & norm_weight_2, const Tensor & norm_bias_2, const Tensor & ffn_weight_1, const Tensor & ffn_bias_1, const Tensor & ffn_weight_2, const Tensor & ffn_bias_2, const c10::optional<Tensor> & mask, const c10::optional<Tensor> & incr_key, const c10::optional<Tensor> & incr_value, Tensor & out0, Tensor & out1, Tensor & out2); // {"schema": "aten::_transformer_decoder_only_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))", "dispatch": "True", "default": "True"}
+::std::tuple<Tensor &,Tensor &,Tensor &,Tensor &> _native_decoder_only_multi_head_attention_out(const Tensor & query, const Tensor & key, const Tensor & value, int64_t embed_dim, int64_t num_head, const Tensor & qkv_weight, const Tensor & qkv_bias, const Tensor & proj_weight, const Tensor & proj_bias, const c10::optional<Tensor> & mask, const c10::optional<Tensor> & incr_key, const c10::optional<Tensor> & incr_value, bool need_weights, bool average_attn_weights, Tensor & out0, Tensor & out1, Tensor & out2, Tensor & out3); // {"schema": "aten::_native_decoder_only_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, Tensor? incr_key=None, Tensor? incr_value=None, bool need_weights=True, bool average_attn_weights=True, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))", "dispatch": "True", "default": "True"}
+Tensor & _foobar_out(const Tensor & self, bool arg1, bool arg2, bool arg3, Tensor & out); // {"schema": "aten::_foobar.out(Tensor self, bool arg1=True, bool arg2=True, *, bool arg3=True, Tensor(a!) out) -> Tensor(a!)", "dispatch": "True", "default": "True"}
+void _fused_adam_out(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<Tensor> & grad_scale, const c10::optional<Tensor> & found_inf, TensorList out); // {"schema": "aten::_fused_adam.out(Tensor[] self, Tensor(b!)[] grads, Tensor(c!)[] exp_avgs, Tensor(d!)[] exp_avg_sqs, Tensor(e!)[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None, Tensor(a!)[] out) -> ()", "dispatch": "True", "default": "True"}
+::std::tuple<::std::vector<Tensor>,::std::vector<Tensor>,::std::vector<Tensor>,::std::vector<Tensor>,::std::vector<Tensor>> _fused_adam(TensorList self, TensorList grads, TensorList exp_avgs, TensorList exp_avg_sqs, TensorList max_exp_avg_sqs, TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<Tensor> & grad_scale, const c10::optional<Tensor> & found_inf); // {"schema": "aten::_fused_adam(Tensor[] self, Tensor[] grads, Tensor[] exp_avgs, Tensor[] exp_avg_sqs, Tensor[] max_exp_avg_sqs, Tensor[] state_steps, *, float lr, float beta1, float beta2, float weight_decay, float eps, bool amsgrad, bool maximize, Tensor? grad_scale=None, Tensor? found_inf=None) -> (Tensor[] self_out, Tensor[] grads_out, Tensor[] exp_avgs_out, Tensor[] exp_avg_sqs_out, Tensor[] max_exp_avg_sqs_out)", "dispatch": "True", "default": "True"}
diff --git a/voice_bridge/torch/include/ATen/SavedTensorHooks.h b/voice_bridge/torch/include/ATen/SavedTensorHooks.h
new file mode 100644
index 0000000000000000000000000000000000000000..af821cb908c6a6675d21c3543809f9668ffccb64
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/SavedTensorHooks.h
@@ -0,0 +1,52 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+#include <c10/util/Optional.h>
+#include <c10/util/python_stub.h>
+#include <stack>
+#include <string>
+
+#include <utility>
+
+namespace at {
+
+namespace impl {
+
+struct TORCH_API SavedTensorDefaultHooksTLS {
+  // PyObject is defined in c10/util/python_stub.h
+  std::stack<std::pair<PyObject*, PyObject*>> stack;
+
+  // See NOTE: [Disabling SavedTensorDefaultHooks] for context
+  // NOTE: [disabled_error_message invariant]
+  // disabled_error_message is nullopt IFF Saved Tensor hooks is enabled
+  // We did this for efficiency (so we didn't have to keep a separate bool
+  // around)
+  c10::optional<std::string> disabled_error_message;
+};
+
+} // namespace impl
+
+struct TORCH_API SavedTensorDefaultHooks {
+  static void push_hooks(PyObject* pack_hook, PyObject* unpack_hook);
+  static void pop_hooks();
+  static std::pair<PyObject*, PyObject*> get_hooks();
+  static void lazy_initialize();
+  static std::stack<std::pair<PyObject*, PyObject*>> get_stack();
+  static void set_stack(std::stack<std::pair<PyObject*, PyObject*>>);
+
+  static const impl::SavedTensorDefaultHooksTLS& get_tls_state();
+  static void set_tls_state(const impl::SavedTensorDefaultHooksTLS& tls);
+
+  // NOTE: [Disabling SavedTensorDefaultHooks]
+  // A developer of a PyTorch feature may choose to disable SavedTensorDefault
+  // hooks, especially if their feature does not work with it. If they are
+  // disabled, then the following will raise an error:
+  // - Attempting to push_hooks
+  // - calling disable(message) with a non-zero stack (from get_stack) size
+  static void disable(const std::string& error_message);
+  static void enable();
+  static bool is_enabled();
+  static const c10::optional<std::string>& get_disabled_error_message();
+};
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/Scalar.h b/voice_bridge/torch/include/ATen/Scalar.h
new file mode 100644
index 0000000000000000000000000000000000000000..e12557428f15674e4382983c07de64c3e43e8af0
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/Scalar.h
@@ -0,0 +1,3 @@
+#pragma once
+
+#include <c10/core/Scalar.h>
diff --git a/voice_bridge/torch/include/ATen/ScalarOps.h b/voice_bridge/torch/include/ATen/ScalarOps.h
new file mode 100644
index 0000000000000000000000000000000000000000..6fb98a51d890988efcff24ba9688de97dce5a131
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ScalarOps.h
@@ -0,0 +1,75 @@
+#pragma once
+
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/scalar_tensor.h>
+#endif
+
+namespace at {
+namespace detail {
+// When filling a number to 1-element CPU tensor, we want to skip
+// everything but manipulate data ptr directly.
+// Ideally this fast pass should be implemented in TensorIterator,
+// but we also want to skip compute_types which is not avoidable
+// in TensorIterator for now.
+Tensor& scalar_fill(Tensor& self, const Scalar& value);
+TORCH_API Tensor scalar_tensor_static(
+    const Scalar& s,
+    c10::optional<ScalarType> dtype_opt,
+    c10::optional<Device> device_opt);
+} // namespace detail
+} // namespace at
+
+// This is in the c10 namespace because we use ADL to find the functions in it.
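+// Editor's note: an illustrative usage sketch for scalar_to_tensor below
+// (not part of the original header):
+//   at::Tensor t = c10::scalar_to_tensor(c10::Scalar(2.5));
+//   // t is a 0-dim CPU tensor of dtype kDouble; integral scalars map to
+//   // kLong, booleans to kBool, and complex scalars to kComplexDouble.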
+namespace c10 {
+
+// FIXME: this should be (and was) Scalar::toTensor, but there is currently no
+// way to implement this without going through Derived Types (which are not part
+// of core).
+inline at::Tensor scalar_to_tensor(
+    const Scalar& s,
+    const Device device = at::kCPU) {
+  // This is the fast track we have for CPU scalar tensors.
+  if (device == at::kCPU) {
+    if (s.isFloatingPoint()) {
+      return at::detail::scalar_tensor_static(s, at::kDouble, at::kCPU);
+    } else if (s.isComplex()) {
+      return at::detail::scalar_tensor_static(s, at::kComplexDouble, at::kCPU);
+    } else if (s.isBoolean()) {
+      return at::detail::scalar_tensor_static(s, at::kBool, at::kCPU);
+    } else {
+      AT_ASSERT(s.isIntegral(false));
+      return at::detail::scalar_tensor_static(s, at::kLong, at::kCPU);
+    }
+  }
+  if (s.isFloatingPoint()) {
+    return at::scalar_tensor(s, at::device(device).dtype(at::kDouble));
+  } else if (s.isBoolean()) {
+    return at::scalar_tensor(s, at::device(device).dtype(at::kBool));
+  } else if (s.isComplex()) {
+    return at::scalar_tensor(s, at::device(device).dtype(at::kComplexDouble));
+  } else {
+    AT_ASSERT(s.isIntegral(false));
+    return at::scalar_tensor(s, at::device(device).dtype(at::kLong));
+  }
+}
+
+} // namespace c10
+
+namespace at {
+namespace native {
+
+inline Tensor wrapped_scalar_tensor(
+    const Scalar& scalar,
+    const Device device = at::kCPU) {
+  auto tensor = scalar_to_tensor(scalar, device);
+  tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
+  return tensor;
+}
+
+} // namespace native
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ScalarType.h b/voice_bridge/torch/include/ATen/ScalarType.h
new file mode 100644
index 0000000000000000000000000000000000000000..2181250740e23808f06e63660f50ca887169bcb1
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ScalarType.h
@@ -0,0 +1,4 @@
+#pragma once
+#include <ATen/core/ATenGeneral.h> // for BC reasons
+#include <c10/core/Backend.h>
+#include <c10/core/ScalarType.h>
diff --git a/voice_bridge/torch/include/ATen/SequenceNumber.h b/voice_bridge/torch/include/ATen/SequenceNumber.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e3b074d13e373e9666a8c512dd486c21c701c6a
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/SequenceNumber.h
@@ -0,0 +1,16 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+#include <cstdint>
+
+namespace at {
+
+// A simple thread local enumeration, used to link forward and backward pass
+// ops and is used by autograd and observers framework
+namespace sequence_number {
+
+TORCH_API uint64_t peek();
+TORCH_API uint64_t get_and_increment();
+
+} // namespace sequence_number
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/SmallVector.h b/voice_bridge/torch/include/ATen/SmallVector.h
new file mode 100644
index 0000000000000000000000000000000000000000..fabfa44190c727c9fdf9aa034d042559da1b621d
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/SmallVector.h
@@ -0,0 +1,2 @@
+#pragma once
+#include <c10/util/SmallVector.h>
diff --git a/voice_bridge/torch/include/ATen/SparseCsrTensorImpl.h b/voice_bridge/torch/include/ATen/SparseCsrTensorImpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..12ef1de24ff7e0a31db34e362e8c00e0bc7b6aac
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/SparseCsrTensorImpl.h
@@ -0,0 +1,179 @@
+#pragma once
+
+#include <ATen/Tensor.h>
+#include <c10/core/TensorImpl.h>
+#include <c10/util/Exception.h>
+namespace at {
+
+// Struct implementing a sparse CSR tensor. It uses three 1-D tensors for
+// denoting the data: `crow_indices_`, `col_indices_` and `values_`.
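+// As a concrete illustration (editor's sketch, not part of the original
+// header), the 3x4 matrix
+//   [[0, 2, 0, 0],
+//    [3, 0, 0, 4],
+//    [0, 0, 5, 0]]
+// is stored as
+//   crow_indices_ = [0, 1, 3, 4]  // size(0) + 1 entries
+//   col_indices_  = [1, 0, 3, 2]  // nnz() entries
+//   values_       = [2, 3, 4, 5]  // nnz() entries
+//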
+// The `crow_indices_` tensor is an integer tensor of shape `(size(0) + 1)`
+// that represents the compressed row indices of the CSR tensor. The
+// `col_indices_` tensor is an integer tensor of shape `(nnz())`
+// that explicitly stores the column indices of each value of the sparse
+// tensor. The `values_` tensor can be of any pytorch-supported data type
+// and has shape `(nnz())`.
+//
+// Since the main advantage of the CSR format over the COO format is speed of
+// computation, care must be taken to facilitate smooth interfacing of
+// these data structures with optimized libraries such as MKL and MAGMA.
+// Since the MKL interface for pytorch currently uses indexing with int32
+// type, it is important to make sure that the `crow_indices` and `col_indices`
+// are of type int32 when calling MKL routines such as SPMM or SPMV.
+//
+// If not calling MKL, it should be alright to use 64 bit integer tensors
+// for indexing.
+struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
+  Tensor crow_indices_;
+  Tensor col_indices_;
+  Tensor values_;
+  Layout layout_;
+
+ public:
+  explicit SparseCsrTensorImpl(
+      at::DispatchKeySet,
+      at::Device device,
+      Layout layout,
+      const caffe2::TypeMeta);
+
+  void resize_(int64_t nnz, IntArrayRef size);
+  void resize_and_clear_(int64_t sparse_dim, IntArrayRef size);
+  void resize_as_sparse_compressed_tensor_(const Tensor& src);
+  void set_member_tensors(
+      const Tensor& crow_indices,
+      const Tensor& col_indices,
+      const Tensor& values,
+      IntArrayRef size);
+
+  const Tensor& compressed_indices() const {
+    return crow_indices_;
+  }
+  const Tensor& plain_indices() const {
+    return col_indices_;
+  }
+  const Tensor& values() const {
+    return values_;
+  }
+  int nnz() {
+    return col_indices_.size(-1);
+  }
+
+  inline int64_t batch_dim() const noexcept {
+    return crow_indices_.dim() - 1;
+  }
+
+  inline int64_t sparse_dim() const noexcept {
+    return 2;
+  }
+
+  inline int64_t dense_dim() const noexcept {
+    return values_.dim() - batch_dim() - block_dim() - 1;
+  }
+
+ private:
+  inline int64_t block_dim() const noexcept {
+    return (layout_ == kSparseBsr || layout_ == kSparseBsc ? 2 : 0);
+  }
+
+ protected:
+  IntArrayRef strides_custom() const override;
+  SymIntArrayRef sym_strides_custom() const override;
+  bool is_contiguous_custom(MemoryFormat) const override;
+
+ public:
+  void set_size(int64_t dim, int64_t new_size) override;
+  void set_stride(int64_t dim, int64_t new_stride) override;
+  void set_storage_offset(int64_t storage_offset) override;
+  Layout layout_impl() const override {
+    return layout_;
+  }
+  void set_layout(Layout layout) {
+    switch (layout) {
+      case kSparseCsr:
+      case kSparseCsc:
+      case kSparseBsr:
+      case kSparseBsc:
+        layout_ = layout;
+        break;
+      default:
+        TORCH_CHECK(false, "unsupported layout ", layout);
+    }
+  }
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+        key_set(), device(), layout_impl(), dtype());
+    copy_tensor_metadata(
+        /*src_impl=*/this,
+        /*dest_impl=*/impl.get(),
+        /*version_counter=*/version_counter,
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+        key_set(), device(), layout_impl(), dtype());
+    copy_tensor_metadata(
+        /*src_impl=*/this,
+        /*dest_impl=*/impl.get(),
+        /*version_counter=*/std::move(version_counter),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+ private:
+  explicit SparseCsrTensorImpl(
+      at::DispatchKeySet key_set,
+      const caffe2::TypeMeta data_type,
+      at::Tensor crow_indices,
+      at::Tensor col_indices,
+      at::Tensor values,
+      at::Layout layout);
+
+  const char* tensorimpl_type_name() const override;
+
+  /**
+   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
+   * storage_offset) from one TensorImpl to another TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
+   * [ TensorImpl Shallow-Copying ].
+   */
+  static void copy_tensor_metadata(
+      const SparseCsrTensorImpl* src_sparse_impl,
+      SparseCsrTensorImpl* dest_sparse_impl,
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) {
+    TensorImpl::copy_tensor_metadata(
+        src_sparse_impl,
+        dest_sparse_impl,
+        version_counter,
+        allow_tensor_metadata_change);
+
+    // Sparse-specific fields
+    dest_sparse_impl->crow_indices_ = src_sparse_impl->compressed_indices();
+    dest_sparse_impl->col_indices_ = src_sparse_impl->plain_indices();
+    dest_sparse_impl->values_ = src_sparse_impl->values();
+    dest_sparse_impl->layout_ = src_sparse_impl->layout_impl();
+  }
+};
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/SparseCsrTensorUtils.h b/voice_bridge/torch/include/ATen/SparseCsrTensorUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..aec49f2bb2b3049876030ebf730fb0fa2ac4c84c
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/SparseCsrTensorUtils.h
@@ -0,0 +1,275 @@
+#pragma once
+
+#include <ATen/SparseCsrTensorImpl.h>
+#include <ATen/SparseTensorImpl.h>
+#include <ATen/SparseTensorUtils.h>
+#include <ATen/core/Tensor.h>
+
+#define AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(LAYOUT, NAME, ...) \
+  [&] { \
+    const auto& the_layout = LAYOUT; \
+    switch (the_layout) { \
+      case kSparseCsr: \
+      case kSparseCsc: \
+      case kSparseBsr: \
+      case kSparseBsc: \
+        return __VA_ARGS__(); \
+      default: \
+        AT_ERROR( \
+            #NAME, \
+            " expected sparse compressed tensor layout but got ", \
+            the_layout); \
+    } \
+  }()
+
+#define AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( \
+    LAYOUT, NAME, ROW_DIM_ACTION, COLUMN_DIM_ACTION) \
+  [&]() { \
+    const auto& the_layout = LAYOUT; \
+    switch (the_layout) { \
+      case kSparseCsr: \
+      case kSparseBsr: \
+        return (ROW_DIM_ACTION)(); \
+      case kSparseCsc: \
+      case kSparseBsc: \
+        return (COLUMN_DIM_ACTION)(); \
+      default: \
+        AT_ERROR( \
+            #NAME, \
+            " expected sparse compressed tensor layout but got ", \
+            the_layout); \
+    } \
+  }()
+
+#define AT_DISPATCH_PLAIN_SPARSE_COMPRESSED_LAYOUTS( \
+    LAYOUT, NAME, NO_BLOCK_ACTION, BLOCK_ACTION) \
+  [&]() { \
+    const auto& the_layout = LAYOUT; \
+    switch (the_layout) { \
+      case kSparseCsr: \
+      case kSparseCsc: \
+        return (NO_BLOCK_ACTION)(); \
+      case kSparseBsr: \
+      case kSparseBsc: \
+        return (BLOCK_ACTION)(); \
+      default: \
+        AT_ERROR( \
+            #NAME, \
+            " expected sparse compressed tensor layout but got ", \
+            the_layout); \
+    } \
+  }()
+
+#define AT_DISPATCH_SPARSE_ROW_COMPRESSED_LAYOUTS( \
+    LAYOUT, NAME, ROW_DIM_ACTION) \
+  [&]() { \
+    const auto& the_layout = LAYOUT; \
+    switch (the_layout) { \
+      case kSparseCsr: \
+      case kSparseBsr: \
+        return (ROW_DIM_ACTION)(); \
+      default: \
+        AT_ERROR( \
+            #NAME, \
+            " expected sparse row compressed tensor layout but got ", \
+            the_layout); \
+    } \
+  }()
+
+#define AT_DISPATCH_SPARSE_COL_COMPRESSED_LAYOUTS( \
+    LAYOUT, NAME, COL_DIM_ACTION) \
+  [&]() { \
+    const auto& the_layout = LAYOUT; \
+    switch (the_layout) { \
+      case kSparseCsc: \
+      case kSparseBsc: \
+        return (COL_DIM_ACTION)(); \
+      default: \
+        AT_ERROR( \
+            #NAME, \
+            " expected sparse column compressed tensor layout but got ", \
+            the_layout); \
+    } \
+  }()
+
+#define AT_DISPATCH_SPARSE_COMPRESSED_NONBLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \
+  [&]() { \
+    const auto& the_layout = LAYOUT; \
+    switch (the_layout) { \
+      case kSparseCsr: \
+      case kSparseCsc: \
+        return (ACTION)(); \
+      default: \
+        AT_ERROR( \
+            #NAME, \
+            " expected sparse compressed (non-block) tensor layout but got ", \
+            the_layout); \
+    } \
+  }()
+
+#define AT_DISPATCH_SPARSE_COMPRESSED_BLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \
+  [&]() { \
+    const auto& the_layout = LAYOUT; \
+    switch (the_layout) { \
+      case kSparseBsr: \
+      case kSparseBsc: \
+        return (ACTION)(); \
+      default: \
+        AT_ERROR( \
+            #NAME, \
+            " expected sparse compressed block tensor layout but got ", \
+            the_layout); \
+    } \
+  }()
+
+namespace at {
+namespace sparse_csr {
+
+using SparseCsrTensor = Tensor;
+
+inline SparseCsrTensorImpl* get_sparse_csr_impl(const SparseCsrTensor& self) {
+  AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(
+      self.layout(), "get_sparse_csr_impl", [&] {});
+  return static_cast<SparseCsrTensorImpl*>(self.unsafeGetTensorImpl());
+}
+
+inline std::string layoutToString(
+    Layout layout,
+    bool upper = false,
+    bool lower = false) {
+  switch (layout) {
+    case kSparseCsr:
+      return (upper ? "CSR" : (lower ? "csr" : "Csr"));
+    case kSparseCsc:
+      return (upper ? "CSC" : (lower ? "csc" : "Csc"));
+    case kSparseBsr:
+      return (upper ? "BSR" : (lower ? "bsr" : "Bsr"));
+    case kSparseBsc:
"bsc" : "Bsc")); + default: + TORCH_CHECK(false, "Not a sparse compressed layout:", layout); + return ""; + } +} + +inline bool isCompressedRow(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, "isCompressedRow", [&] { return true; }, [&] { return false; }); +} + +inline bool isCompressedColumn(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, + "isCompressedColumn", + [&] { return false; }, + [&] { return true; }); +} + +inline std::string compressedIndicesName(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, + "compressedIndicesName", + [&] { return "crow_indices"; }, + [&] { return "ccol_indices"; }); +} + +inline std::string plainIndicesName(Layout layout) { + return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( + layout, + "plainIndicesName", + [&] { return "col_indices"; }, + [&] { return "row_indices"; }); +} + +inline std::string compressedDimName(Layout layout) { + switch (layout) { + case kSparseCsr: + return "row"; + case kSparseCsc: + return "column"; + case kSparseBsr: + return "row block"; + case kSparseBsc: + return "column block"; + default: + TORCH_CHECK(false, "Not a sparse compressed layout:", layout); + return ""; + } +} + +inline std::string plainDimName(Layout layout) { + switch (layout) { + case kSparseCsr: + return "column"; + case kSparseCsc: + return "row"; + case kSparseBsr: + return "column block"; + case kSparseBsc: + return "row block"; + default: + TORCH_CHECK(false, "Not a sparse compressed layout:", layout); + return ""; + } +} + +inline int rowDimension(Layout layout, IntArrayRef size) { + return size.size() - (isCompressedRow(layout) ? 2 : 1); +} + +inline int columnDimension(Layout layout, IntArrayRef size) { + return size.size() - (isCompressedColumn(layout) ? 2 : 1); +} + +inline int compressedDimension( + Layout layout, + IntArrayRef size, + size_t dense_ndim = 0) { + return size.size() - dense_ndim - (isCompressedRow(layout) ? 2 : 1); +} + +inline int plainDimension( + Layout layout, + IntArrayRef size, + size_t dense_ndim = 0) { + return size.size() - dense_ndim - (isCompressedRow(layout) ? 
+  return size.size() - dense_ndim - (isCompressedRow(layout) ? 1 : 2);
+}
+
+inline int64_t numBatchDimensions(Tensor const& self) {
+  return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+      self.layout(),
+      "numBatchDimensions",
+      [&self] { return self.crow_indices().dim() - 1; },
+      [&self] { return self.ccol_indices().dim() - 1; });
+}
+
+inline std::pair<Tensor, Tensor> getCompressedPlainIndices(Tensor const& self) {
+  return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS(
+      self.layout(),
+      "getCompressedPlainIndices",
+      [&self] {
+        return std::make_pair(self.crow_indices(), self.col_indices());
+      },
+      [&self] {
+        return std::make_pair(self.ccol_indices(), self.row_indices());
+      });
+}
+
+inline Layout flip_compressed_layout(Layout layout) {
+  switch (layout) {
+    case kSparseCsr:
+      return kSparseCsc;
+    case kSparseCsc:
+      return kSparseCsr;
+    case kSparseBsr:
+      return kSparseBsc;
+    case kSparseBsc:
+      return kSparseBsr;
+    default:
+      TORCH_CHECK(false, "Not a sparse compressed layout:", layout);
+      return kSparseCsr;
+  }
+}
+
+} // namespace sparse_csr
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/SparseTensorImpl.h b/voice_bridge/torch/include/ATen/SparseTensorImpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..c36d89be5b6106732c1035ad3b9fbaed64ced59c
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/SparseTensorImpl.h
@@ -0,0 +1,383 @@
+#pragma once
+
+#include <ATen/Tensor.h>
+#include <c10/core/TensorImpl.h>
+#include <c10/util/Exception.h>
+#include <c10/util/irange.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/empty.h>
+#endif
+
+namespace at {
+struct TORCH_API SparseTensorImpl : public TensorImpl {
+  // Stored in COO format, indices + values.
+
+  // INVARIANTS:
+  // sparse_dim: range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
+  // dense_dim : range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
+  // _indices.shape: dimensionality: 2, shape: (sparse_dim, nnz)
+  // _values.shape: dimensionality: 1 + dense_dim. shape: (nnz,
+  // shape[sparse_dim:])
+
+  int64_t sparse_dim_ = 0; // number of sparse dimensions
+  int64_t dense_dim_ = 0; // number of dense dimensions
+
+  Tensor indices_; // always a LongTensor
+  Tensor values_;
+
+  // A sparse tensor is 'coalesced' if every index occurs at most once in
+  // the indices tensor, and the indices are in sorted order. (This means
+  // that it is very easy to convert a coalesced tensor to CSR format: you
+  // need only compute CSR format indices.)
+  //
+  // Most math operations can only be performed on coalesced sparse tensors,
+  // because many algorithms proceed by merging two sorted lists (of indices).
+  bool coalesced_ = false;
+
+  // compute_numel with integer multiplication overflow check, see gh-57542
+  void refresh_numel() {
+    TensorImpl::safe_refresh_numel();
+  }
+
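+  // Editor's note: an illustrative sketch of the COO invariants above (not
+  // part of the original header). A sparse 3x4 matrix with the two non-zero
+  // entries (0, 1) = 2 and (2, 3) = 5 has sparse_dim_ = 2, dense_dim_ = 0 and
+  //   indices_ = [[0, 2],
+  //               [1, 3]]   // shape (sparse_dim, nnz) = (2, 2)
+  //   values_  = [2, 5]     // shape (nnz,) = (2,)
+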
+ public:
+  // Public for now...
+  explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);
+
+  void release_resources() override;
+
+  int64_t nnz() const {
+    return values_.size(0);
+  }
+  int64_t sparse_dim() const {
+    return sparse_dim_;
+  }
+  int64_t dense_dim() const {
+    return dense_dim_;
+  }
+  bool coalesced() const {
+    return coalesced_;
+  }
+  Tensor indices() const {
+    return indices_;
+  }
+  Tensor values() const {
+    return values_;
+  }
+
+  void set_size(int64_t dim, int64_t new_size) override;
+  void set_stride(int64_t dim, int64_t new_stride) override;
+  void set_storage_offset(int64_t storage_offset) override;
+
+#ifdef DEBUG
+  bool has_storage() const override;
+#endif
+
+  // WARNING: This function does NOT preserve invariants of sparse_dim/dense_dim
+  // with respect to indices and values
+  void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
+    TORCH_CHECK(
+        allow_tensor_metadata_change(),
+        "raw_resize_ ",
+        err_msg_tensor_metadata_change_not_allowed);
+    TORCH_CHECK(
+        !has_symbolic_sizes_strides_,
+        "raw_resize_ called on tensor with symbolic shape")
+    sizes_and_strides_.set_sizes(size);
+    sparse_dim_ = sparse_dim;
+    dense_dim_ = dense_dim;
+    refresh_numel();
+  }
+
+  // NOTE: This function preserves invariants of sparse_dim/dense_dim with
+  // respect to indices and values.
+  //
+  // NOTE: This function supports the following cases:
+  // 1. When we keep the number of dense dimensions unchanged, and NOT shrinking
+  // the size of any of the dense dimensions.
+  // 2. When we keep the number of sparse dimensions unchanged, and NOT
+  // shrinking the size of any of the sparse dimensions.
+  // 3. When the sparse tensor has zero nnz, in which case we are free to change
+  // the shapes of both its sparse and dense dimensions.
+  //
+  // This function DOESN'T support (and will throw an error) the following
+  // cases:
+  // 1. When we attempt to change the number of sparse dimensions on a non-empty
+  // sparse tensor (such an operation will invalidate the indices stored).
+  // 2. When we attempt to change the number of dense dimensions on a non-empty
+  // sparse tensor (such an operation will behave differently from an equivalent
+  // dense tensor's resize method, and for API consistency we don't support it).
+  // 3. When we attempt to shrink the size of any of the dense dimensions on a
+  // non-empty sparse tensor (such an operation will behave differently from an
+  // equivalent dense tensor's resize method, and for API consistency we don't
+  // support it).
+  // 4. When we attempt to shrink the size of any of the sparse dimensions on a
+  // non-empty sparse tensor (this could make some of the stored indices
+  // out-of-bound and thus unsafe).
+  // An illustrative sketch of these rules follows the function body below.
+  void resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
+    TORCH_CHECK(
+        allow_tensor_metadata_change(),
+        "resize_ ",
+        err_msg_tensor_metadata_change_not_allowed);
+    TORCH_CHECK(
+        !has_symbolic_sizes_strides_,
+        "resize_ called on tensor with symbolic shape")
+    TORCH_CHECK(
+        sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
+        "number of dimensions must be sparse_dim (",
+        sparse_dim,
+        ") + dense_dim (",
+        dense_dim,
+        "), but got ",
+        size.size());
+    if (nnz() > 0) {
+      auto alt_options_msg =
+          "You could try the following options:\n\
+1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\
+2. If you need to resize this tensor, you have the following options:\n\
+    1. For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\
+    2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor.";
+
+      TORCH_CHECK(
+          sparse_dim == sparse_dim_,
+          "changing the number of sparse dimensions (from ",
+          sparse_dim_,
+          " to ",
+          sparse_dim,
+          ") on a non-empty sparse tensor is not supported.\n",
+          alt_options_msg);
+
+      TORCH_CHECK(
+          dense_dim == dense_dim_,
+          "changing the number of dense dimensions (from ",
+          dense_dim_,
+          " to ",
+          dense_dim,
+          ") on a non-empty sparse tensor is not supported.\n",
+          alt_options_msg);
+
+      bool shrinking_sparse_dims = false;
+      bool shrinking_dense_dim = false;
+      auto sparse_size_original = sizes().slice(0, sparse_dim);
+      auto sparse_size_new = size.slice(0, sparse_dim);
+      for (const auto i : c10::irange(sparse_dim)) {
+        if (sparse_size_new[i] < sparse_size_original[i]) {
+          shrinking_sparse_dims = true;
+          break;
+        }
+      }
+      auto dense_size_original = sizes().slice(sparse_dim);
+      auto dense_size_new = size.slice(sparse_dim);
+      for (const auto i : c10::irange(dense_dim)) {
+        if (dense_size_new[i] < dense_size_original[i]) {
+          shrinking_dense_dim = true;
+          break;
+        }
+      }
+
+      TORCH_CHECK(
+          !shrinking_sparse_dims,
+          "shrinking the size of sparse dimensions (from ",
+          sparse_size_original,
+          " to ",
+          sparse_size_new,
+          ") on a non-empty sparse tensor is not supported.\n",
+          alt_options_msg);
+
+      TORCH_CHECK(
+          !shrinking_dense_dim,
+          "shrinking the size of dense dimensions (from ",
+          dense_size_original,
+          " to ",
+          dense_size_new,
+          ") on a non-empty sparse tensor is not supported.\n",
+          alt_options_msg);
+    }
+
+    IntArrayRef sizes_and_strides = sizes_and_strides_.sizes_arrayref();
+    const bool size_equals_sizes = std::equal(
+        size.begin(),
+        size.end(),
+        sizes_and_strides.begin(),
+        sizes_and_strides.end());
+    if ((!size_equals_sizes) || (sparse_dim != sparse_dim_) ||
+        (dense_dim != dense_dim_)) {
+      auto nnz = values().size(0);
+      std::vector<int64_t> values_size = {nnz};
+      auto dense_size = size.slice(sparse_dim);
+      values_size.insert(
+          values_size.end(), dense_size.begin(), dense_size.end());
+      values_.resize_(values_size);
+      indices_.resize_({sparse_dim, nnz});
+    }
+
+    if (!size_equals_sizes) {
+      sizes_and_strides_.set_sizes(size);
+    }
+    sparse_dim_ = sparse_dim;
+    dense_dim_ = dense_dim;
+    refresh_numel();
+  }
+
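+  // Editor's note: an illustrative sketch of the resize_ rules above (not
+  // part of the original header). For a non-empty sparse tensor with
+  // sparse_dim_ = 2, dense_dim_ = 1 and shape (4, 4, 3):
+  //   resize_(2, 1, {5, 6, 3});  // OK: sparse dims grow, dense dim unchanged
+  //   resize_(2, 1, {4, 4, 2});  // error: shrinks a dense dimension
+  //   resize_(3, 0, {4, 4, 3});  // error: changes the number of sparse dims
+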
+  // NOTE: this function will resize the sparse tensor and also set `indices`
+  // and `values` to empty.
+  void resize_and_clear_(
+      int64_t sparse_dim,
+      int64_t dense_dim,
+      IntArrayRef size) {
+    TORCH_CHECK(
+        allow_tensor_metadata_change(),
+        "resize_and_clear_ ",
+        err_msg_tensor_metadata_change_not_allowed);
+    TORCH_CHECK(
+        !has_symbolic_sizes_strides_,
+        "resize_and_clear_ called on tensor with symbolic shape")
+    TORCH_CHECK(
+        sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
+        "number of dimensions must be sparse_dim (",
+        sparse_dim,
+        ") + dense_dim (",
+        dense_dim,
+        "), but got ",
+        size.size());
+
+    sizes_and_strides_.set_sizes(size);
+    sparse_dim_ = sparse_dim;
+    dense_dim_ = dense_dim;
+
+    auto empty_indices = at::empty({sparse_dim, 0}, indices().options());
+    std::vector<int64_t> values_size = {0};
+    auto dense_size = sizes().slice(sparse_dim);
+    values_size.insert(values_size.end(), dense_size.begin(), dense_size.end());
+    auto empty_values = at::empty(values_size, values().options());
+    set_indices_and_values_unsafe(empty_indices, empty_values);
+    refresh_numel();
+  }
+
+  void set_coalesced(bool coalesced) {
+    TORCH_CHECK(
+        allow_tensor_metadata_change(),
+        "set_coalesced ",
+        err_msg_tensor_metadata_change_not_allowed);
+    coalesced_ = coalesced;
+  }
+
+  // NOTE: this function is only used internally and not exposed to Python
+  // frontend
+  void set_nnz_and_narrow(int64_t new_nnz) {
+    TORCH_CHECK(
+        allow_tensor_metadata_change(),
+        "set_nnz_and_narrow ",
+        err_msg_tensor_metadata_change_not_allowed);
+    AT_ASSERT(new_nnz <= nnz());
+    indices_ = indices_.narrow(1, 0, new_nnz);
+    values_ = values_.narrow(0, 0, new_nnz);
+    if (new_nnz < 2) {
+      coalesced_ = true;
+    }
+  }
+
+  // Takes indices and values and directly puts them into the sparse tensor, no
+  // copy. NOTE: this function is unsafe because it doesn't check whether any
+  // indices are out of boundaries of `sizes`, so it should ONLY be used where
+  // we know that the indices are guaranteed to be within bounds. This used to
+  // be called THSTensor_(_move) NB: This used to be able to avoid a refcount
+  // bump, but I was too lazy to make it happen
+  void set_indices_and_values_unsafe(
+      const Tensor& indices,
+      const Tensor& values);
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
+    copy_tensor_metadata(
+        /*src_impl=*/this,
+        /*dest_impl=*/impl.get(),
+        /*version_counter=*/version_counter,
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
+    copy_tensor_metadata(
+        /*src_impl=*/this,
+        /*dest_impl=*/impl.get(),
+        /*version_counter=*/std::move(version_counter),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
+  /**
+   * Shallow-copies data from another TensorImpl into this TensorImpl.
+   *
+   * For why this function doesn't check this TensorImpl's
+   * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
+    AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
+    auto sparse_impl = static_cast<const SparseTensorImpl*>(impl.get());
+    copy_tensor_metadata(
+        /*src_impl=*/sparse_impl,
+        /*dest_impl=*/this,
+        /*version_counter=*/version_counter(),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
+    refresh_numel();
+  }
+
+ private:
+  explicit SparseTensorImpl(
+      at::DispatchKeySet,
+      const caffe2::TypeMeta,
+      at::Tensor indices,
+      at::Tensor values);
+
+  /**
+   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
+   * storage_offset) from one TensorImpl to another TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
+   * [ TensorImpl Shallow-Copying ].
+   */
+  static void copy_tensor_metadata(
+      const SparseTensorImpl* src_sparse_impl,
+      SparseTensorImpl* dest_sparse_impl,
+      const c10::VariableVersion& version_counter,
+      bool allow_tensor_metadata_change) {
+    TensorImpl::copy_tensor_metadata(
+        src_sparse_impl,
+        dest_sparse_impl,
+        version_counter,
+        allow_tensor_metadata_change);
+
+    // Sparse-specific fields
+    dest_sparse_impl->sparse_dim_ = src_sparse_impl->sparse_dim();
+    dest_sparse_impl->dense_dim_ = src_sparse_impl->dense_dim();
+    dest_sparse_impl->indices_ = src_sparse_impl->indices();
+    dest_sparse_impl->values_ = src_sparse_impl->values();
+    dest_sparse_impl->coalesced_ = src_sparse_impl->coalesced();
+  }
+
+  const char* tensorimpl_type_name() const override;
+};
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/SparseTensorUtils.h b/voice_bridge/torch/include/ATen/SparseTensorUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..31c061400e6257b18de3e5085fd2fdf4e54e7889
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/SparseTensorUtils.h
@@ -0,0 +1,123 @@
+#pragma once
+
+#include <ATen/SparseTensorImpl.h>
+#include <ATen/core/Tensor.h>
+#include <c10/util/irange.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#else
+#include <ATen/ops/empty.h>
+#endif
+
+namespace at {
+namespace sparse {
+
+// Just for documentary purposes
+using SparseTensor = Tensor;
+using SparseType = Type;
+
+// This is an internal utility function for getting at the SparseTensorImpl,
+// so that we can write sparse tensor specific accessors for special fields
+// in SparseTensor. You should only use this for writing low level
+// setters/getters for SparseTensorImpl fields; otherwise, you should use
+// the low level setters/getters that were implemented using this.
+//
+// This may be called repeatedly, so make sure it's pretty cheap.
+inline SparseTensorImpl* get_sparse_impl(const SparseTensor& self) {
+  TORCH_INTERNAL_ASSERT(
+      self.is_sparse(), "_internal_get_SparseTensorImpl: not a sparse tensor");
+  return static_cast<SparseTensorImpl*>(self.unsafeGetTensorImpl());
+}
+
+// Takes indices and values and directly puts them into the sparse tensor, no
+// copy. This used to be called THSTensor_(_move)
+inline void alias_into_sparse(
+    const SparseTensor& self,
+    const Tensor& indices,
+    const Tensor& values) {
+  get_sparse_impl(self)->set_indices_and_values_unsafe(indices, values);
+}
+
+// Take indices and values and makes a (data) copy of them to put into the
+// sparse indices/values.
+// This used to be called THSTensor_(_set)
+inline void copy_into_sparse(
+    const SparseTensor& self,
+    const Tensor& indices,
+    const Tensor& values,
+    bool non_blocking) {
+  alias_into_sparse(
+      self,
+      indices.to(self._indices().options(), non_blocking, /*copy=*/true),
+      values.to(self._values().options(), non_blocking, /*copy=*/true));
+}
+
+// TODO: put this into the public API
+inline bool is_same_tensor(const Tensor& lhs, const Tensor& rhs) {
+  return lhs.unsafeGetTensorImpl() == rhs.unsafeGetTensorImpl();
+}
+
+inline bool is_same_density(const SparseTensor& self, const SparseTensor& src) {
+  return self.sparse_dim() == src.sparse_dim() &&
+      self.dense_dim() == src.dense_dim();
+}
+
+// Give us a new values tensor, with the same dimensionality
+// as 'values' but with a new number of non-zero elements.
+// TODO: Expose this for real in ATen, some day?
+// NB: Doesn't preserve data.
+inline Tensor new_values_with_size_of(const Tensor& values, int64_t nnz) {
+  std::vector<int64_t> size = values.sizes().vec();
+  size[0] = nnz;
+  return at::empty(size, values.options());
+}
+
+// NOTE [ Flatten Sparse Indices ]
+// This helper function flattens a sparse indices tensor (a Tensor) into a 1D
+// indices tensor. E.g.,
+//   input = [[2, 4, 0],
+//            [3, 1, 10]]
+//   full_size = [2, 12]
+//   output = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 10 ] = [27, 49, 10]
+//
+// In other words, assuming that each `indices[i, :]` is a valid index into a
+// tensor `t` of shape `full_size`, this returns the corresponding indices into
+// the flattened tensor `t.reshape( prod(full_size[:indices.size(0)]), -1 )`.
+// If force_clone is true, the result will be forced to be a clone of self.
+TORCH_API Tensor flatten_indices(
+    const Tensor& indices,
+    IntArrayRef full_size,
+    bool force_clone = false);
+
+// Flatten sparse tensor's indices from nD to 1D, similar to NOTE [ Flatten
+// Sparse Indices ], except this one allows partial flatten: only flatten on
+// specified dims. Note that the flatten indices might be uncoalesced if
+// dims_to_flatten.size() < sparse_dim. Also if input indices is already
+// coalesced, the flattened indices will also be sorted.
+//
+// args:
+//   indices: sparse tensor indices
+//   sizes: sparse tensor sizes
+//   dims_to_flatten: a list of dim index to flatten
+//
+// Ex1:
+//   indices = [[2, 4, 0],
+//              [3, 1, 3]]
+//   sizes = [2, 12]
+//   dims_to_flatten = [0, 1]
+//   new_indices = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 3 ] = [27, 49, 3]
+//
+// Ex2:
+//   dims_to_flatten = [1]
+//   new_indices = [ 3, 1, 3 ] # uncoalesced
+TORCH_API Tensor flatten_indices_by_dims(
+    const Tensor& indices,
+    const IntArrayRef& sizes,
+    const IntArrayRef& dims_to_flatten);
+
+// Find the CSR representation for a row `indices` from the COO format
+TORCH_API Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz);
+
+} // namespace sparse
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/autocast_mode.h b/voice_bridge/torch/include/ATen/autocast_mode.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5e88a0b88f12e11cae990d51a14aa2908607ad1
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/autocast_mode.h
@@ -0,0 +1,235 @@
+#pragma once
+
+#include <ATen/ATen.h>
+
+namespace at {
+namespace autocast {
+
+TORCH_API bool is_enabled();
+TORCH_API void set_enabled(bool enabled);
+TORCH_API void clear_cache();
+TORCH_API int increment_nesting();
+TORCH_API int decrement_nesting();
+TORCH_API bool is_cpu_enabled();
+TORCH_API void set_cpu_enabled(bool enabled);
+TORCH_API at::ScalarType get_autocast_gpu_dtype();
+TORCH_API at::ScalarType get_autocast_cpu_dtype();
+TORCH_API void set_autocast_gpu_dtype(at::ScalarType dtype);
+TORCH_API void set_autocast_cpu_dtype(at::ScalarType dtype);
+TORCH_API bool is_xpu_enabled();
+TORCH_API void set_xpu_enabled(bool enabled);
+TORCH_API at::ScalarType get_autocast_xpu_dtype();
+TORCH_API void set_autocast_xpu_dtype(at::ScalarType dtype);
+TORCH_API bool is_autocast_cache_enabled();
+TORCH_API void set_autocast_cache_enabled(bool enabled);
+
+namespace {
+bool is_autocast_eligible(const Tensor& tensor, DeviceType device_type) {
+  switch (device_type) {
+    case DeviceType::CUDA:
+      return (tensor.is_cuda() || tensor.is_xla()) &&
+          tensor.is_floating_point();
+    case DeviceType::CPU:
+      return (tensor.is_cpu() || tensor.is_mkldnn()) &&
+          tensor.is_floating_point();
+    case DeviceType::XPU:
+      return tensor.is_xpu() && tensor.is_floating_point();
+    default:
+      return false;
+  }
+}
+} // namespace
+
+inline DispatchKey get_autocast_dispatch_key_from_device_type(
+    DeviceType device_type) {
+  switch (device_type) {
+    case DeviceType::CUDA:
+      return DispatchKey::Autocast;
+    case DeviceType::CPU:
+      return DispatchKey::AutocastCPU;
+    case DeviceType::XPU:
+      return DispatchKey::AutocastXPU;
+    default:
+      throw std::runtime_error(
+          "unknown device type for autocast in get_autocast_dispatch_key_from_device_type");
+  }
+}
+
+inline at::ScalarType get_lower_precision_fp_from_device_type(
+    DeviceType device_type) {
+  switch (device_type) {
+    case DeviceType::CUDA:
+      return get_autocast_gpu_dtype();
+    case DeviceType::CPU:
+      return get_autocast_cpu_dtype();
+    case DeviceType::XPU:
+      return get_autocast_xpu_dtype();
+    default:
+      throw std::runtime_error(
+          "unknown device type for autocast in get_lower_precision_fp_from_device_type");
+  }
+}
+
+/********************************************************************
+Logic to extract the promote type from any Tensor or TensorList args.
+********************************************************************/
+
+// Overload to catch Tensor args.
+// If nextArg is floating-point, compare its scalar_type with our
+// current best guess for the promote type, and update if necessary.
+inline at::ScalarType prioritize(
+    at::ScalarType current,
+    const Tensor& nextArg,
+    DeviceType device_type = DeviceType::CUDA) {
+  if (current == at::kDouble) {
+    AT_ERROR("promote type is double in at::autocast::prioritize");
+    return current;
+  }
+  at::ScalarType lower_precision_fp =
+      get_lower_precision_fp_from_device_type(device_type);
+  if (is_autocast_eligible(nextArg, device_type)) {
+    auto next = nextArg.scalar_type();
+    if (next == at::kDouble) {
+      return current; // ignores double tensors
+    } else if (current == at::kFloat || next == at::kFloat) {
+      return at::kFloat; // prioritizes float over lower_precision_fp
+    } else if (current == lower_precision_fp && next == lower_precision_fp) {
+      return lower_precision_fp;
+    } else {
+      AT_ERROR("Unexpected floating ScalarType in at::autocast::prioritize");
+      return current;
+    }
+  } else {
+    return current;
+  }
+}
+
+// Overload to catch TensorList args (for e.g. cat, stack).
+// Reuses the overload above to process each Tensor in the list.
+inline at::ScalarType prioritize(
+    at::ScalarType current,
+    const TensorList& list,
+    DeviceType device_type = DeviceType::CUDA) {
+  for (const auto& tensor : list) {
+    current = prioritize(current, tensor, device_type);
+  }
+  return current;
+}
+
+// Template to catch non-Tensor args (no-op that returns current best guess)
+template <typename T>
+inline at::ScalarType prioritize(
+    at::ScalarType current,
+    T nextArg,
+    DeviceType device_type = DeviceType::CUDA) {
+  return current;
+}
+
+// Overload for the tail case.
+inline at::ScalarType promote_type(
+    at::ScalarType current,
+    DeviceType device_type) {
+  return current;
+}
+
+// Unpack args and determine if incoming lower_precision_fp tensors need to be
+// promoted to float32. Non-Tensor arguments are ignored.
+template <typename Arg0, typename... Args>
+inline at::ScalarType promote_type(
+    at::ScalarType current,
+    DeviceType device_type,
+    Arg0 arg0,
+    Args... args) {
+  auto new_current = prioritize(current, arg0, device_type);
+  return promote_type(new_current, device_type, args...);
+}
+
+/****************************************************
+Logic to apply cached casting to any Tensor argument.
+****************************************************/
+inline bool is_eligible(
+    const Tensor& arg,
+    DeviceType device_type = DeviceType::CUDA) {
+  return (
+      arg.defined() && is_autocast_eligible(arg, device_type) &&
+      (arg.scalar_type() != at::kDouble));
+}
+
+// Overload to catch Tensor args
+TORCH_API Tensor cached_cast(
+    at::ScalarType to_type,
+    const Tensor& arg,
+    DeviceType device_type = DeviceType::CUDA);
+
+// Overload to process optional<Tensor>
+inline c10::optional<Tensor> cached_cast(
+    at::ScalarType to_type,
+    const c10::optional<Tensor>& arg,
+    DeviceType device_type = DeviceType::CUDA) {
+  if (arg.has_value()) {
+    return cached_cast(to_type, *arg, device_type);
+  } else {
+    return c10::nullopt;
+  }
+}
+
+// Overload to process TensorLists
+inline std::vector<Tensor> cached_cast(
+    at::ScalarType to_type,
+    const TensorList& arg,
+    DeviceType device_type = DeviceType::CUDA) {
+  std::vector<Tensor> vec;
+  vec.reserve(arg.size());
+  for (const auto& t : arg) {
+    vec.push_back(cached_cast(to_type, t, device_type));
+  }
+  return vec;
+}
+
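+// Editor's note: an illustrative sketch of the promotion logic above (not
+// part of the original header). Assuming the CUDA autocast dtype is kHalf,
+// for an op called with one kHalf and one kFloat tensor:
+//   promote_type(at::kHalf, DeviceType::CUDA, half_arg, float_arg)
+// returns at::kFloat, and cached_cast(at::kFloat, half_arg) upcasts the
+// kHalf argument; kDouble tensors are ignored by prioritize and left as-is.
+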
+// Template to catch non-Tensor args.
+template <typename T>
+inline T cached_cast(
+    at::ScalarType to_type,
+    T arg,
+    DeviceType device_type = DeviceType::CUDA) {
+  return arg;
+}
+
+/*******************************************************
+Logic to flip an output dtype flag.
+Keep it simple for now by assuming only one such flag is
+present in the argument list. If I ever need a function
+with more than one flag I'll figure out something else.
+The policy is:
+If the user has explicitly specified a dtype, respect it.
+Otherwise, set it to the autocast type.
+********************************************************/
+
+// Overload to catch dtype flags
+c10::optional<ScalarType> inline set_opt_dtype(
+    at::ScalarType to_type,
+    const c10::optional<ScalarType>& dtype) {
+  return dtype.has_value() ? dtype : to_type;
+}
+
+// Template to catch other args
+template <typename T>
+inline T set_opt_dtype(at::ScalarType to_type, T arg) {
+  return arg;
+}
+
+template <typename... Args>
+inline bool firstarg_is_eligible(const Tensor& arg, Args... args) {
+  return is_eligible(arg);
+}
+
+template <typename... Args>
+inline at::ScalarType type_from_firstarg(
+    at::ScalarType to_type,
+    const Tensor& arg,
+    Args... args) {
+  return (is_eligible(arg) ? to_type : arg.scalar_type());
+}
+
+} // namespace autocast
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ceil_div.h b/voice_bridge/torch/include/ATen/ceil_div.h
new file mode 100644
index 0000000000000000000000000000000000000000..4564b1d39c0aeab290a3e61622de4bf635ed3992
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ceil_div.h
@@ -0,0 +1,23 @@
+#pragma once
+#include <c10/macros/Macros.h>
+
+namespace at {
+
+/**
+   Computes ceil(a / b)
+*/
+template <typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
+C10_ALWAYS_INLINE C10_HOST_DEVICE T ceil_div(T a, T b) {
+  return (a + b - 1) / b;
+}
+
+/**
+   Computes ceil(a / b) * b; i.e., rounds up `a` to the next highest
+   multiple of b
+*/
+template <typename T>
+C10_ALWAYS_INLINE C10_HOST_DEVICE T round_up(T a, T b) {
+  return ceil_div(a, b) * b;
+}
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/code_template.h b/voice_bridge/torch/include/ATen/code_template.h
new file mode 100644
index 0000000000000000000000000000000000000000..c84165e67ec338e81b0274f31863b09f06d596c2
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/code_template.h
@@ -0,0 +1,248 @@
+#pragma once
+
+#include <c10/util/irange.h>
+
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace at {
+namespace jit {
+
+// A template environment is a mapping from template variable names, e.g.,
+// identifier (corresponding to $identifier) to their expansions.
+//
+// This template environment supports storing strings, numbers and lists
+// of strings, and can be chained together (so that lookup proceeds in
+// the top level environment, and then recurses into a parent
+// environment if the key is not found.)
+struct TemplateEnv {
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  TemplateEnv() : parent(nullptr) {}
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  TemplateEnv(TemplateEnv& parent) : parent(&parent) {}
+
+  using string_list = std::vector<std::string>;
+
+  // Add a string 'v' to the map at key 'k'.
+  void s(const std::string& k, const std::string& v) {
+    strings_[k] = v;
+    lists_.erase(k);
+  }
+
+  // Add a number 'v' to the map at key 'k'
+  template <typename T>
+  void d(const std::string& k, const T& v) {
+    strings_[k] = c10::to_string(v);
+    lists_.erase(k);
+  }
+
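+  // Editor's note: an illustrative usage sketch (not part of the original
+  // header):
+  //   at::jit::TemplateEnv env;
+  //   env.s("name", "relu");      // $name expands to "relu"
+  //   env.v("args", {"a", "b"});  // $args expands to a comma-separated list
+  //   std::string src =
+  //       at::jit::CodeTemplate("void $name($args);").format(env);
+  //   // src == "void relu(a, b);"
+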
+ const std::string& s(const std::string& k) const { + if (strings_.count(k) == 0) { + if (parent) { + return parent->s(k); + } + notFound(k); + } + return strings_.at(k); + } + + // Store a list of strings 'v' in the map at 'k'. + void v(const std::string& k, const string_list& v) { + lists_[k] = v; + strings_.erase(k); + } + + // Retrieve a list of strings stored at 'k' from the map. + // Raises an exception if the key is not found. + const string_list& v(const std::string& k) const { + if (lists_.count(k) == 0) { + if (parent) { + return parent->v(k); + } + notFound(k); + } + return lists_.at(k); + } + + // Test if a string 'k' is a string (as opposed to a list.) + bool keyIsString(const std::string& k) const { + if (strings_.count(k) > 0) + return true; + if (lists_.count(k) > 0) + return false; + if (parent) + return parent->keyIsString(k); + notFound(k); + } + + private: + [[noreturn]] void notFound(const std::string& k) const { + std::stringstream ss; + ss << "key not found: " << k; + throw std::logic_error(ss.str()); + } + + std::unordered_map strings_; + std::unordered_map lists_; + TemplateEnv* parent; +}; + +/* +# Match $identifier or ${identifier} and replace with the value in env. +# If this identifier is at the beginning of whitespace on a line +# and its value is a list then it is treated as +# block substitution by indenting all lines of all elements. +# If the identifier is on a line starting with non-whitespace and a list +# then it is comma separated. ${,foo} will insert a comma before the list +# if this list is not empty and ${foo,} will insert one after. +*/ +struct CodeTemplate { + /* implicit */ CodeTemplate(std::string t) : template_text(std::move(t)) {} + + std::string format(const TemplateEnv& env) const { + std::stringstream out; + size_t pos = 0; + size_t indent = 0; + bool all_whitespace = true; + while (pos < template_text.size()) { + char c = template_text[pos]; + if (c == '$') { + std::stringstream kss; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool comma_before; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + bool comma_after; + size_t new_pos = parseKey(pos, kss, comma_before, comma_after); + std::string k = kss.str(); + bool is_string = env.keyIsString(k); + if (all_whitespace) { + if (is_string) + emitStringWithIndents(out, indent, env.s(k)); + else + emitLinesIndented(out, indent, env.v(k)); + } else { + if (is_string) + out << env.s(k); + else + emitCommaSeparatedList(out, env.v(k), comma_before, comma_after); + } + all_whitespace = false; + pos = new_pos; + } else { + out << c; + if (!isspace(c)) + all_whitespace = false; + indent++; + if (c == '\n') { + indent = 0; + all_whitespace = true; + } + pos++; + } + } + return out.str(); + } + + private: + using string_list = std::vector; + char charAt(size_t p) const { + if (p >= template_text.size()) + throw std::logic_error("EOS found in key"); + return template_text[p]; + } + size_t parseKey( + size_t pos, + std::ostream& k, + bool& comma_before, + bool& comma_after) const { + comma_before = false; + comma_after = false; + pos++; + if (charAt(pos) == '{') { + pos++; + if (charAt(pos) == ',') { + comma_before = true; + pos++; + } + pos = parseIdent(pos, k); + if (charAt(pos) == ',') { + comma_after = true; + pos++; + } + if (charAt(pos) != '}') + throw std::logic_error("missing terminating '}'"); + pos++; + return pos; + } else { + return parseIdent(pos, k); + } + } + size_t parseIdent(size_t pos, std::ostream& k) const { + while (pos < template_text.size() && + 
(isalnum(template_text[pos]) || template_text[pos] == '_')) { + k << template_text[pos]; + pos++; + } + return pos; + } + void emitCommaSeparatedList( + std::ostream& out, + const string_list& strings, + bool comma_before, + bool comma_after) const { + if (comma_before && strings.size() > 0) + out << ", "; + for (const auto i : c10::irange(strings.size())) { + if (i > 0) + out << ", "; + out << strings[i]; + } + if (comma_after && strings.size() > 0) + out << ", "; + } + // These indentation functions follow the convention that they never emit + // leading or trailing newlines when the input string does not have leading + // or trailing newlines. It's the responsibility of the calling function + // to indent correctly in the context. + void emitIndent(std::ostream& out, size_t indent) const { + for (const auto i : c10::irange(indent)) { + (void)i; // Suppress unused variable warning + out << " "; + } + } + void emitStringWithIndents( + std::ostream& out, + size_t indent, + const std::string& str) const { + for (auto c : str) { + out << c; + if (c == '\n') { + emitIndent(out, indent); + } + } + } + void emitLinesIndented( + std::stringstream& out, + size_t indent, + const string_list& strings) const { + for (const auto i : c10::irange(strings.size())) { + if (i > 0) + emitIndent(out, indent); + emitStringWithIndents(out, indent, strings[i]); + if (i + 1 != strings.size()) + out << "\n"; + } + } + std::string template_text; +}; + +static inline std::string format(const std::string& fmt, TemplateEnv& env) { + return CodeTemplate(fmt).format(env); +} + +} // namespace jit +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/ATenGeneral.h b/voice_bridge/torch/include/ATen/core/ATenGeneral.h new file mode 100644 index 0000000000000000000000000000000000000000..9b787a2163e87c903ce0bd034b424eb1773c644d --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ATenGeneral.h @@ -0,0 +1,3 @@ +#pragma once + +#include diff --git a/voice_bridge/torch/include/ATen/core/ATenOpList.h b/voice_bridge/torch/include/ATen/core/ATenOpList.h new file mode 100644 index 0000000000000000000000000000000000000000..1419376a9017db4c7ca788816bd0c9a6d65a82fc --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ATenOpList.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace c10 { +struct OperatorName; +} + +namespace at { + +// check if an op is a custom op (i.e. 
did not come from native_functions.yaml) +TORCH_API bool is_custom_op(const c10::OperatorName& opName); +} diff --git a/voice_bridge/torch/include/ATen/core/ATen_fwd.h b/voice_bridge/torch/include/ATen/core/ATen_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..63d576797251c46f13c1e4b5211d7e09ee771ec5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ATen_fwd.h @@ -0,0 +1,48 @@ +#pragma once +#include + +// Forward declarations of core ATen types used in dispatch functions +namespace c10 { + +template +class optional; +template +class List; +template +class IListRef; +class Stream; +class Scalar; +class SymInt; +class SymIntList; +struct Storage; +struct TensorOptions; +template +class ArrayRef; +template +class OptionalArrayRef; + +} // namespace c10 + +namespace at { + +class Tensor; +class OptionalTensorRef; +struct Dimname; +struct Generator; +using TensorList = c10::ArrayRef; +using ITensorListRef = c10::IListRef; +using IOptTensorListRef = c10::IListRef; +using DimnameList = c10::ArrayRef; +using IntArrayRef = c10::ArrayRef; +using OptionalIntArrayRef = c10::OptionalArrayRef; +using OptionalSymIntArrayRef = c10::OptionalArrayRef; + +using c10::Stream; +using c10::Storage; +using c10::QScheme; +using c10::Scalar; +using c10::SymInt; +using c10::SymIntList; +using c10::TensorOptions; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/ATen_pch.h b/voice_bridge/torch/include/ATen/core/ATen_pch.h new file mode 100644 index 0000000000000000000000000000000000000000..10b5b53b933b1873965db978089f849b176bf9b2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ATen_pch.h @@ -0,0 +1,175 @@ +// This global header must not depend on native_functions.yaml or +// incremental builds will be next to useless +#pragma push_macro("TORCH_ASSERT_NO_OPERATORS") +#define TORCH_ASSERT_NO_OPERATORS + +// This macro doesn't work if defined after the first time inttypes.h +// is included, so won't work anywhere if not defined here. +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif +#include + +// This list of headers was generated using a script that finds +// high-impact headers and then manually tweaked to remove OS specific +// or duplicate headers (e.g. and ) and to remove +// "impl" headers (e.g BFloat16-inl.h or complex_math.h in c10). + +// To generate the initial list: +// 1. Build pytorch from scratch with all build caching disabled +// 2. Generate a build trace with ninjatracing (https://github.com/nico/ninjatracing) +// $ ninjatracing /path/to/pytorch/build/.ninja_log > trace_all.json +// 3. Run pch_gen.py from https://github.com/peterbell10/build_analysis/ +// $ python pch_gen.py --threshold .80 --target torch_cpu --build_dir /path/to/pytorch/build --trace trace_all.json +// Where the threshold can be tweaked until c10 and some of ATen +// core are included but TORCH_ASSERT_NO_OPERATORS still passes. 
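// ---------------------------------------------------------------------------
// [Editor's example, not part of the patch] Usage sketch for TemplateEnv and
// CodeTemplate from ATen/code_template.h above. `demo_code_template` is a
// hypothetical caller. `$name` and `$n` expand from string/number bindings;
// `${args}` holds a list, which expands comma-separated when it appears
// mid-line (i.e. after non-whitespace on the line).
#include <ATen/code_template.h>
#include <iostream>

void demo_code_template() {
  at::jit::TemplateEnv env;
  env.s("name", "my_kernel");                        // string binding
  env.d("n", 4);                                     // number, stored as "4"
  env.v("args", {"float* out", "const float* in"});  // list binding
  at::jit::CodeTemplate t("void $name(${args}) { /* n = $n */ }");
  std::cout << t.format(env) << "\n";
  // prints: void my_kernel(float* out, const float* in) { /* n = 4 */ }
}
// ---------------------------------------------------------------------------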
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#pragma pop_macro("TORCH_ASSERT_NO_OPERATORS") diff --git a/voice_bridge/torch/include/ATen/core/Array.h b/voice_bridge/torch/include/ATen/core/Array.h new file mode 100644 index 0000000000000000000000000000000000000000..300ae51cef6b9044b1060376b19f7278fcf18a94 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Array.h @@ -0,0 +1,39 @@ +#pragma once + +// A fixed-size array type usable from both host and +// device code. + +#include +#include + +namespace at { namespace detail { + +template +struct Array { + T data[size_]; + + C10_HOST_DEVICE T operator[](int i) const { + return data[i]; + } + C10_HOST_DEVICE T& operator[](int i) { + return data[i]; + } +#if defined(USE_ROCM) + C10_HOST_DEVICE Array() = default; + C10_HOST_DEVICE Array(const Array&) = default; + C10_HOST_DEVICE Array& operator=(const Array&) = default; +#else + Array() = default; + Array(const Array&) = default; + Array& operator=(const Array&) = default; +#endif + static constexpr int size(){return size_;} + // Fill the array with x. 
+ C10_HOST_DEVICE Array(T x) { + for (int i = 0; i < size_; i++) { + data[i] = x; + } + } +}; + +}} diff --git a/voice_bridge/torch/include/ATen/core/Backtrace.h b/voice_bridge/torch/include/ATen/core/Backtrace.h new file mode 100644 index 0000000000000000000000000000000000000000..ac728968750297227c1be4aa3e444557c1899b03 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Backtrace.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/voice_bridge/torch/include/ATen/core/CheckMemoryFormat.h b/voice_bridge/torch/include/ATen/core/CheckMemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..3d1712a2ff19b159beaba48064bfd37b1587673f --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/CheckMemoryFormat.h @@ -0,0 +1,25 @@ +#include + +namespace c10 { namespace impl { + +inline c10::optional +check_tensor_options_and_extract_memory_format( + const TensorOptions& options, + c10::optional memory_format) { + TORCH_CHECK( + options.requires_grad_opt() == c10::nullopt || + options.requires_grad_opt().value() == false, + "Operators taking TensorOptions cannot take a TensorOptions with " + "options.requires_grad set as true. This isn't implemented yet."); + TORCH_CHECK( + !(options.has_memory_format() && memory_format.has_value()), + "Cannot set memory_format both in TensorOptions and explicit argument; please delete " + "the redundant setter."); + if (memory_format.has_value()) { + return memory_format; + } else { + return options.memory_format_opt(); + } +} + +}} // namespace impl namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/DeprecatedTypeProperties.h b/voice_bridge/torch/include/ATen/core/DeprecatedTypeProperties.h new file mode 100644 index 0000000000000000000000000000000000000000..a6298683ef75de5e6177f7efb269e3dddcf2f8b6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/DeprecatedTypeProperties.h @@ -0,0 +1,135 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + + +namespace at { + +class Tensor; + +// This class specifies a Backend and a ScalarType. Currently, it primarily +// serves as a replacement return value for Tensor::type(). Previously, +// Tensor::type() returned Type&, but we are changing Type to not be +// dtype-specific. 
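// ---------------------------------------------------------------------------
// [Editor's example, not part of the patch] A hedged sketch for
// check_tensor_options_and_extract_memory_format from CheckMemoryFormat.h
// above; `resolve_format` is a hypothetical caller, not a PyTorch API. The
// helper throws if `options` carries requires_grad=true, or if a memory
// format is supplied both inside `options` and as the explicit argument;
// otherwise it returns whichever of the two was set.
#include <ATen/core/CheckMemoryFormat.h>

c10::optional<c10::MemoryFormat> resolve_format(
    const c10::TensorOptions& options,
    c10::optional<c10::MemoryFormat> explicit_format) {
  return c10::impl::check_tensor_options_and_extract_memory_format(
      options, explicit_format);
}
// ---------------------------------------------------------------------------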
+class TORCH_API DeprecatedTypeProperties { + public: + DeprecatedTypeProperties(Backend backend, ScalarType scalar_type) + : backend_(backend), scalar_type_(scalar_type) {} + + Backend backend() const { + return backend_; + } + + Layout layout() const { + return layout_from_backend(backend_); + } + + bool is_sparse() const { + return layout_from_backend(backend()) == kSparse; + } + + bool is_sparse_csr() const { + return layout_from_backend(backend()) == kSparseCsr; + } + + DeviceType device_type() const { + return backendToDeviceType(backend_); + } + + bool is_cuda() const { + return backendToDeviceType(backend_) == kCUDA; + } + + ScalarType scalarType() const { + return scalar_type_; + } + + caffe2::TypeMeta typeMeta() const { + return scalarTypeToTypeMeta(scalar_type_); + } + + bool operator==(const DeprecatedTypeProperties& other) const { + return backend_ == other.backend() && scalar_type_ == other.scalarType(); + } + + bool operator!=(const DeprecatedTypeProperties& other) const { + return !(*this == other); + } + + std::string toString() const { + std::string base_str; + if (backend_ == Backend::Undefined || scalar_type_ == ScalarType::Undefined) { + base_str = "UndefinedType"; + } else { + base_str = std::string(at::toString(backend_)) + at::toString(scalar_type_) + "Type"; + } + return base_str; + } + + DeprecatedTypeProperties & toBackend(Backend b) const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + b, scalar_type_); + } + + DeprecatedTypeProperties & toScalarType(ScalarType s) const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + backend_, s); + } + + DeprecatedTypeProperties & cpu() const { + return toBackend(Backend::CPU); + } + + DeprecatedTypeProperties & cuda() const { + return toBackend(Backend::CUDA); + } + + DeprecatedTypeProperties & hip() const { + return toBackend(Backend::HIP); + } + + /// Constructs the `TensorOptions` from a type and a `device_index`. + TensorOptions options(int16_t device_index = -1) const { + return TensorOptions().dtype(typeMeta()) + .device(device_type(), static_cast(device_index)) + .layout(layout()); + } + + /// Constructs the `TensorOptions` from a type and a Device. Asserts that + /// the device type matches the device type of the type. 
+ TensorOptions options(c10::optional device_opt) const { + if (!device_opt.has_value()) { + return options(-1); + } else { + Device device = device_opt.value(); + AT_ASSERT(device.type() == device_type()); + return options(device.index()); + } + } + + operator TensorOptions() const { + return options(); + } + + int64_t id() const { + return static_cast(backend()) * + static_cast(ScalarType::NumOptions) + + static_cast(scalarType()); + } + + Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const; + Storage unsafeStorageFromTH(void * th_pointer, bool retain) const; + Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional to_device={}) const; + + private: + Backend backend_; + ScalarType scalar_type_; +}; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h b/voice_bridge/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h new file mode 100644 index 0000000000000000000000000000000000000000..a21f1abbe97f4327005d10be3358076858c3d09d --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h @@ -0,0 +1,31 @@ +#pragma once + +// In order to preserve bc, we make DeprecatedTypeProperties instances unique +// just like they are for Type. + +#include +#include + +namespace at { + +class DeprecatedTypeProperties; + +struct TORCH_API DeprecatedTypePropertiesDeleter { + void operator()(DeprecatedTypeProperties * ptr); +}; + +class TORCH_API DeprecatedTypePropertiesRegistry { + public: + DeprecatedTypePropertiesRegistry(); + + DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) const; + +private: + std::unique_ptr registry + [static_cast(Backend::NumOptions)] + [static_cast(ScalarType::NumOptions)]; +}; + +TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry(); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/Dict.h b/voice_bridge/torch/include/ATen/core/Dict.h new file mode 100644 index 0000000000000000000000000000000000000000..7ae106b6618cf360a9098d180ecf35db5d5def81 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Dict.h @@ -0,0 +1,390 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { +struct IValue; +template class Dict; +struct Type; + +namespace impl { + +using valid_dict_key_types = guts::typelist::typelist< + int64_t, + std::string, + double, + c10::complex, + bool, + at::Tensor +>; +} + +namespace detail { + +struct DictKeyHash { + size_t operator()(const IValue& ivalue) const; +}; + +struct DictKeyEqualTo { + bool operator()(const IValue& lhs, const IValue& rhs) const; +}; + +struct DictImpl final : public c10::intrusive_ptr_target { + using dict_map_type = ska_ordered::order_preserving_flat_hash_map; + struct DictElementTypes final { + TypePtr keyType; + TypePtr valueType; + }; + + explicit DictImpl(dict_map_type dict_, DictElementTypes elementTypes_) + : dict(std::move(dict_)) + , elementTypes(std::move(elementTypes_)) {} + dict_map_type dict; + + DictElementTypes elementTypes; + + intrusive_ptr copy() const; + friend TORCH_API bool operator==(const DictImpl& lhs, const DictImpl& rhs); +}; + +} + +namespace impl { +template class DictIterator; + +/** + * A reference to an entry in the Dict. + * Use the `key()` and `value()` methods to read the element. 
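 * [Editor's note, not in the original header] For illustration, iterating a
 * c10::Dict yields these entry references:
 *
 *   for (const auto& entry : dict) {
 *     auto k = entry.key();    // decoded Key
 *     auto v = entry.value();  // decoded Value
 *   }
 *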
+ */ +template +class DictEntryRef final { +public: + explicit DictEntryRef(Iterator iterator) + : iterator_(std::move(iterator)) {} + + decltype(auto) key() const { + return iterator_->first.template to(); + } + + decltype(auto) value() const { + return iterator_->second.template to(); + } + + template + void setValue(Value_&& value) const { + static_assert(std::is_constructible::value, "Wrong type for the value argument of setValue()"); + iterator_->second = Value(std::forward(value)); + } + +private: + // allow copying and moving, but only our friends (i.e. the Dict class) can do + // it. Copying/moving this reference wrapper would be too ambiguous to allow it + // in the public API. + DictEntryRef(const DictEntryRef&) = default; + DictEntryRef& operator=(const DictEntryRef&) = default; + DictEntryRef(DictEntryRef&&) noexcept = default; + DictEntryRef& operator=(DictEntryRef&& rhs) & noexcept = default; + + Iterator iterator_; + friend class DictIterator; + friend class Dict; +}; + +// this wraps map_type::iterator to make sure user code can't rely +// on it being the type of the underlying map. +template +class DictIterator final : public std::iterator> { +public: + explicit DictIterator() = default; + ~DictIterator() = default; + + DictIterator(const DictIterator& rhs): entryRef_(rhs.entryRef_) {} + DictIterator(DictIterator&& rhs) noexcept: entryRef_(std::move(rhs.entryRef_)) {} + DictIterator& operator=(const DictIterator& rhs) { + entryRef_ = rhs.entryRef_; + return *this; + } + DictIterator& operator=(DictIterator&& rhs) noexcept { + entryRef_ = std::move(rhs.entryRef_); + return *this; + } + + DictIterator& operator++() { + ++entryRef_.iterator_; + return *this; + } + + DictIterator operator++(int) { + DictIterator copy(*this); + ++*this; + return copy; + } + + const DictEntryRef& operator*() const { + return entryRef_; + } + + const DictEntryRef* operator->() const { + return &entryRef_; + } + + friend typename std::iterator>::difference_type operator-(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.entryRef_.iterator_ - rhs.entryRef_.iterator_; + } + +private: + explicit DictIterator(Iterator iterator): entryRef_(std::move(iterator)) {} + + const Iterator& get_iterator_() const { + return entryRef_.iterator_; + } + + friend bool operator==(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() == rhs.get_iterator_(); + } + + friend bool operator!=(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() != rhs.get_iterator_(); + } + + friend bool operator<(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() < rhs.get_iterator_(); + } + + friend bool operator<=(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() <= rhs.get_iterator_(); + } + + friend bool operator>(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() > rhs.get_iterator_(); + } + + friend bool operator>=(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() >= rhs.get_iterator_(); + } + + DictEntryRef entryRef_; + + friend class DictIterator; + friend class Dict; +}; + +template Dict toTypedDict(Dict dict); +template Dict toGenericDict(Dict dict); +} + +/** + * An object of this class stores a map from Key to Value. + * + * This is a pointer type. 
After a copy, both Dicts + * will share the same storage: + * + * > Dict a; + * > Dict b = a; + * > b.insert(3, "three"); + * > ASSERT("three" == a.at(3)); + * + * We use this class in the PyTorch kernel API because that + * allows us to do optimizations and switch out the underlying + * map implementation without breaking backwards compatibility + * for the kernel API. + */ +template +class Dict final { +private: + static_assert((std::is_same::value && std::is_same::value) || guts::typelist::contains::value, "Invalid Key type for Dict. We only support int64_t, double, bool, and string."); + + // impl_ stores the underlying map as a ska_ordered::order_preserving_flat_hash_map. + // We intentionally don't offer conversion from/to + // order_preserving_flat_hash_map, return references to it or something like that, + // because such operations would get expensive if we switch out + // the actual map implementation. + // This is an intrusive_ptr because Dict is a pointer type. + // Invariant: This will never be a nullptr, there will always be a valid + // DictImpl. + c10::intrusive_ptr impl_; + + explicit Dict(c10::intrusive_ptr&& impl); + friend struct IValue; + template friend Dict impl::toTypedDict(Dict); + template friend Dict impl::toGenericDict(Dict); + +public: + using key_type = Key; + using mapped_type = Value; + using size_type = typename detail::DictImpl::dict_map_type::size_type; + using iterator = impl::DictIterator; + + /** + * Creates an empty dict. + */ + explicit Dict(); + + /** + * Create a generic dict with runtime type information. + * This only works for c10::impl::GenericDict and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + explicit Dict(TypePtr keyType, TypePtr valueType); + + ~Dict() = default; + + Dict(const Dict&) = default; + Dict& operator=(const Dict&) = default; + + /** + * Create a new Dict pointing to a deep copy of the same data. + * The Dict returned is a new dict with separate storage. + * Changes in it are not reflected in the original dict or vice versa. + */ + Dict copy() const; + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + iterator begin() const; + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + iterator end() const; + + /** + * Checks if the container has no elements. + */ + bool empty() const; + + /** + * Returns the number of elements in the container. + */ + size_type size() const; + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. May also invalidate past-the-end iterators. + */ + void clear() const; + + /** + * Inserts element(s) into the container, if the container doesn't already contain an element with an equivalent key. + * May invalidate any references, pointers, or iterators referring to contained elements. + * + * @return A pair consisting of an iterator to the inserted element (or to the element that prevented the insertion) and a bool denoting whether the insertion took place. + */ + template + std::pair insert(Key_&& key, Value_&& value) const; + + /** + * If an element with the given key already exists, it is overwritten with the given value. + * Otherwise, a new element with the given key and value are inserted. 
+ * May invalidate any references, pointers, or iterators referring to contained elements. + * + * @return The bool component is true if the insertion took place and false if the assignment took place. The iterator component is pointing at the element that was inserted or updated. + */ + template + std::pair insert_or_assign(Key_&& key, Value_&& value) const; + + /** + * Removes the element pointed to by iter. + * May invalidate any references, pointers, or iterators referring to contained elements. + * The iterator iter must be valid and dereferenceable. Thus the end() iterator (which is valid, but is not dereferenceable) cannot be used as a value for iter. + */ + void erase(iterator iter) const; + + /** + * Removes the element with the given key, if it exists. + * May invalidate any references, pointers, or iterators referring to contained elements. + * + * @return The number of elements removed. This is either '1' if an element with the key existed, or '0' if it didn't. + */ + C10_NODISCARD size_t erase(const Key& key) const; + + /** + * Returns the mapped value of the element with key equivalent to key. + * If no such element exists, an exception of type std::out_of_range is thrown. + */ + Value at(const Key& key) const; + + /** + * Finds an element with key equivalent to key. + * + * @return Iterator to an element with key equivalent to key. + * If no such element is found, past-the-end (see end()) iterator is returned. + */ + iterator find(const Key& key) const; + + /** + * Checks if there is an element with key equivalent to key in the container. + * + * @return true if there is such an element, otherwise false. + */ + bool contains(const Key& key) const; + + /** + * Increase the capacity so that at least count elements can be stored without + * having to reallocate or rehash. + */ + void reserve(size_type count) const; + + /** + * Value equality comparison. This function implements Python-like semantics for + * equality: two dicts with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + template + friend bool operator==( + const Dict& lhs, + const Dict& rhs); + template + friend bool operator!=( + const Dict& lhs, + const Dict& rhs); + + /** + * Identity comparison. Returns true if and only if `rhs` represents the same + * Dict object as `this`. + */ + bool is(const Dict& rhs) const; + + // private API for now because the return type will change to TypePtr + // instead of optional once types are mandatory. + TypePtr keyType() const; + TypePtr valueType() const; + + // [unsafe set type] + // These functions mutate the tagged type of this dictionary in place. + // There is no checking that the members of the dictionary are instances + // of the new types, nor is there a check that other IValues which + // hold references to this dictionary have the right static type. + // This functionality is used only in the unpickler, where at + // creation type the real type of the dictionary is unknown, but + // then later recovered from the static type information of the + // unpickled object. + void unsafeSetKeyType(TypePtr t); + void unsafeSetValueType(TypePtr t); +}; + +namespace impl { +// GenericDict is how IValue stores dicts. It is, however, not part of the +// public API. Kernels should use Dicts with concrete Key, Value types instead +// (maybe except for some internal prim ops). 
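// ---------------------------------------------------------------------------
// [Editor's example, not part of the patch] A short usage sketch for the
// c10::Dict API documented above; `demo_dict` is a hypothetical caller.
#include <ATen/core/Dict.h>
#include <string>

void demo_dict() {
  c10::Dict<int64_t, std::string> dict;
  dict.insert(3, "three");            // no-op if key 3 already exists
  dict.insert_or_assign(3, "THREE");  // overwrites the existing value
  if (dict.contains(3)) {
    std::string v = dict.at(3);       // "THREE"
  }
  c10::Dict<int64_t, std::string> alias = dict;  // shares storage (pointer type)
  alias.insert(4, "four");                       // visible through `dict` too
  c10::Dict<int64_t, std::string> deep = dict.copy();  // independent storage
}
// ---------------------------------------------------------------------------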
+using GenericDict = Dict; + +} +} + +namespace torch { + template using Dict = c10::Dict; +} + +#include // IWYU pragma: keep diff --git a/voice_bridge/torch/include/ATen/core/Dict_inl.h b/voice_bridge/torch/include/ATen/core/Dict_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..69f6791d91cace6637d772b7d65dda769a7c9f4b --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Dict_inl.h @@ -0,0 +1,209 @@ +#pragma once + +#include +#include + +namespace c10 { +namespace detail { +inline bool DictKeyEqualTo::operator()(const IValue& lhs, const IValue& rhs) const { + if (lhs.isTensor() && rhs.isTensor()) { + // for tensors, we compare only by identity (following how it's done in Python). + return lhs.is(rhs); + } + // Otherwise, we first compare by identity for efficiency, then by value (see: + // [container equality]) + return _fastEqualsForContainer(lhs, rhs); +} +} + +template decltype(auto) getTypePtr(); +std::string toString(const Type& type); + +namespace impl { + +template +Dict toTypedDict(GenericDict dict) { + TORCH_INTERNAL_ASSERT(*getTypePtr() == *dict.impl_->elementTypes.keyType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr()), ", ", toString(*getTypePtr()), ">. Key types mismatch."); + TORCH_INTERNAL_ASSERT(*getTypePtr() == *dict.impl_->elementTypes.valueType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr()), ", ", toString(*getTypePtr()), ">. Value types mismatch."); + + return Dict(std::move(dict.impl_)); +} + +template +GenericDict toGenericDict(Dict dict) { + return GenericDict(std::move(dict.impl_)); +} +} + +namespace detail { + +inline size_t DictKeyHash::operator()(const IValue& ivalue) const { + if (ivalue.isInt()) { + return std::hash()(ivalue.toInt()); + } else if (ivalue.isString()) { + return std::hash()(ivalue.toStringView()); + } else if (ivalue.isDouble()) { + return std::hash()(ivalue.toDouble()); + } else if (ivalue.isComplexDouble()) { + return c10::hash>()(ivalue.toComplexDouble()); + } else if (ivalue.isBool()) { + return std::hash()(ivalue.toBool()); + } else if (ivalue.isTensor()) { + return std::hash()(ivalue.toTensor().unsafeGetTensorImpl()); + } else if (ivalue.isDevice()) { + return std::hash()(ivalue.toDevice()); + } else { + throw std::runtime_error( + "Can't hash IValues with tag '" + ivalue.tagKind() + "'"); + } +} + +inline intrusive_ptr DictImpl::copy() const { + return make_intrusive(dict, elementTypes); +} + +} + +template +Dict::Dict() + :Dict(make_intrusive( + detail::DictImpl::dict_map_type(), + detail::DictImpl::DictElementTypes{getTypePtr(), getTypePtr()})) { + static_assert(!std::is_same::value, "This constructor is not valid for Dict. Please use c10::impl::GenericDict(keyType, valueType) instead."); + static_assert(!std::is_same::value, "This constructor is not valid for Dict<_, IValue>. 
Please use c10::impl::GenericDict(keyType, valueType) instead."); +} + +template +Dict::Dict(TypePtr keyType, TypePtr valueType) +: Dict(make_intrusive( + detail::DictImpl::dict_map_type(), + detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) { + static_assert(std::is_same::value, "This constructor is only valid for c10::impl::GenericDict."); + static_assert(std::is_same::value, "This constructor is only valid for c10::impl::GenericDict."); +} + +template +Dict::Dict(c10::intrusive_ptr&& impl): impl_(std::move(impl)) {} + +template +Dict Dict::copy() const { + return Dict(impl_->copy()); +} + +template +typename Dict::iterator Dict::begin() const { + return iterator{impl_->dict.begin()}; +} + +template +typename Dict::iterator Dict::end() const { + return iterator{impl_->dict.end()}; +} + +template +bool Dict::empty() const { + return impl_->dict.empty(); +} + +template +typename Dict::size_type Dict::size() const { + return impl_->dict.size(); +} + +template +void Dict::clear() const { + impl_->dict.clear(); +} + +template +template +std::pair::iterator, bool> Dict::insert(Key_&& key, Value_&& value) const { + static_assert(std::is_constructible::value, "Wrong type for the key argument of Dict::insert"); + static_assert(std::is_constructible::value, "Wrong type for the value argument of Dict::insert"); + auto inserted = impl_->dict.insert(std::pair{ + Key(std::forward(key)), + Value(std::forward(value))}); + return {iterator{inserted.first}, inserted.second}; +} + +template +template +std::pair::iterator, bool> Dict::insert_or_assign(Key_&& key, Value_&& value) const { + static_assert(std::is_constructible::value, "Wrong type for the key argument of Dict::insert_or_assign"); + static_assert(std::is_constructible::value, "Wrong type for the value argument of Dict::insert_or_assign"); + auto inserted = impl_->dict.insert_or_assign( + Key(std::forward(key)), + Value(std::forward(value))); + return {iterator{inserted.first}, inserted.second}; +} + +template +void Dict::erase(iterator iter) const { + impl_->dict.erase(iter.entryRef_.iterator_); +} + +template +C10_NODISCARD size_t Dict::erase(const Key& key) const { + return impl_->dict.erase(key); +} + +template +Value Dict::at(const Key& key) const { + return impl_->dict.at(key).template to(); +} + +template +typename Dict::iterator Dict::find(const Key& key) const { + return iterator{impl_->dict.find(key)}; +} + +template +bool Dict::contains(const Key& key) const { + return end() != find(key); +} + +template +void Dict::reserve(size_type count) const { + impl_->dict.reserve(count); +} + +template +TypePtr Dict::keyType() const { + return impl_->elementTypes.keyType; +} + +template +TypePtr Dict::valueType() const { + return impl_->elementTypes.valueType; +} +template +void Dict::unsafeSetKeyType(TypePtr t) { + impl_->elementTypes.keyType = std::move(t); +} + +template +void Dict::unsafeSetValueType(TypePtr t) { + impl_->elementTypes.valueType = std::move(t); +} + +template +bool operator==(const Dict& lhs, const Dict& rhs) { + // Dicts with the same identity trivially compare equal. 
+ if (lhs.impl_ == rhs.impl_) { + return true; + } + + // Otherwise compare the values + return *lhs.impl_ == *rhs.impl_; +} + +template +bool operator!=(const Dict& lhs, const Dict& rhs) { + return !(lhs == rhs); +} + +template +bool Dict::is(const Dict& rhs) const { + return this->impl_ == rhs.impl_; +} +} diff --git a/voice_bridge/torch/include/ATen/core/DimVector.h b/voice_bridge/torch/include/ATen/core/DimVector.h new file mode 100644 index 0000000000000000000000000000000000000000..576b9e142ebf17d048262763c888845a7d0386e8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/DimVector.h @@ -0,0 +1,13 @@ +#pragma once +#include + +namespace at { + +// Re-declaring 'DimVector' type and size inside 'at' namespace. +// This is done to avoid modifying every use into their 'c10' +// equivalent. + +using c10::kDimVectorStaticSize; +using c10::DimVector; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/Dimname.h b/voice_bridge/torch/include/ATen/core/Dimname.h new file mode 100644 index 0000000000000000000000000000000000000000..e53db14732c8952d5803105683fec8c4a78a51ac --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Dimname.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +enum class NameType: uint8_t { BASIC, WILDCARD }; + +struct TORCH_API Dimname { + static Dimname fromSymbol(Symbol name); + static Dimname wildcard(); + static bool isValidName(const std::string& name); + + NameType type() const { return type_; } + Symbol symbol() const { return name_; } + + bool isBasic() const { return type_ == NameType::BASIC; } + bool isWildcard() const { return type_ == NameType::WILDCARD; } + + bool matches(Dimname other) const; + c10::optional unify(Dimname other) const; + + private: + Dimname(Symbol name) + : name_(name), type_(NameType::BASIC) {} + Dimname(Symbol name, NameType type) + : name_(name), type_(type) {} + + Symbol name_; + NameType type_; +}; + +using DimnameList = c10::ArrayRef; + +TORCH_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname); + +inline bool operator==(const Dimname& lhs, const Dimname& rhs) { + return lhs.symbol() == rhs.symbol(); +} + +inline bool operator!=(const Dimname& lhs, const Dimname& rhs) { + return !(lhs == rhs); +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/DistributionsHelper.h b/voice_bridge/torch/include/ATen/core/DistributionsHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..1ef6fb0f3c2e4581c73fcaaedf54b98829fc6cae --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/DistributionsHelper.h @@ -0,0 +1,345 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/** + * Distributions kernel adapted from THRandom.cpp + * The kernels try to follow std::random distributions signature + * For instance: in ATen + * auto gen = at::detail::createCPUGenerator(); + * at::uniform_real_distribution uniform(0, 1); + * auto sample = uniform(gen.get()); + * + * vs std::random + * + * std::mt19937 gen; + * std::uniform_real_distribution uniform(0, 1); + * auto sample = uniform(gen); + */ + + +namespace at { +namespace { + +/** + * Samples a discrete uniform distribution in the range [base, base+range) of type T + */ +template +struct uniform_int_from_to_distribution { + + C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) { + range_ = range; + base_ = base; + } + + template + C10_HOST_DEVICE inline T operator()(RNG 
generator) { + if (( + std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value) && range_ >= 1ULL << 32) + { + return transformation::uniform_int_from_to(generator->random64(), range_, base_); + } else { + return transformation::uniform_int_from_to(generator->random(), range_, base_); + } + } + + private: + uint64_t range_; + int64_t base_; +}; + +/** + * Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)] + */ +template +struct uniform_int_full_range_distribution { + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + return transformation::uniform_int_full_range(generator->random64()); + } + +}; + +/** + * Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types + * and [0, 2^mantissa] for floating-point types. + */ +template +struct uniform_int_distribution { + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + if (std::is_same::value || std::is_same::value) { + return transformation::uniform_int(generator->random64()); + } else { + return transformation::uniform_int(generator->random()); + } + } + +}; + +/** + * Samples a uniform distribution in the range [from, to) of type T + */ +template +struct uniform_real_distribution { + + C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) { + TORCH_CHECK_IF_NOT_ON_CUDA(from <= to); + TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits::max()); + from_ = from; + to_ = to; + } + + template + C10_HOST_DEVICE inline dist_acctype operator()(RNG generator){ + if(std::is_same::value) { + return transformation::uniform_real(generator->random64(), from_, to_); + } else { + return transformation::uniform_real(generator->random(), from_, to_); + } + } + + private: + T from_; + T to_; +}; + +// The SFINAE checks introduced in #39816 looks overcomplicated and must revisited +// https://github.com/pytorch/pytorch/issues/40052 +#define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member) \ +template \ +struct has_member_##member \ +{ \ + typedef char yes; \ + typedef long no; \ + template static yes test(decltype(&U::member)); \ + template static no test(...); \ + static constexpr bool value = sizeof(test(0)) == sizeof(yes); \ +} + +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample); +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample); +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample); +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample); + +#define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE) \ + \ +template ::value && \ + has_member_set_next_##TYPE##_normal_sample::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \ + if (generator->next_##TYPE##_normal_sample()) { \ + *ret = *(generator->next_##TYPE##_normal_sample()); \ + generator->set_next_##TYPE##_normal_sample(c10::optional()); \ + return true; \ + } \ + return false; \ +} \ + \ +template ::value || \ + !has_member_set_next_##TYPE##_normal_sample::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \ + return false; \ +} \ + \ +template ::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \ + generator->set_next_##TYPE##_normal_sample(cache); \ +} \ + \ +template ::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline void 
maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \ +} + +DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double); +DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float); + +/** + * Samples a normal distribution using the Box-Muller method + * Takes mean and standard deviation as inputs + * Note that Box-muller method returns two samples at a time. + * Hence, we cache the "next" sample in the CPUGeneratorImpl class. + */ +template +struct normal_distribution { + + C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in); + mean = mean_in; + stdv = stdv_in; + } + + template + C10_HOST_DEVICE inline dist_acctype operator()(RNG generator){ + dist_acctype ret; + // return cached values if available + if (std::is_same::value) { + if (maybe_get_next_double_normal_sample(generator, &ret)) { + return transformation::normal(ret, mean, stdv); + } + } else { + if (maybe_get_next_float_normal_sample(generator, &ret)) { + return transformation::normal(ret, mean, stdv); + } + } + // otherwise generate new normal values + uniform_real_distribution uniform(0.0, 1.0); + const dist_acctype u1 = uniform(generator); + const dist_acctype u2 = uniform(generator); + const dist_acctype r = ::sqrt(static_cast(-2.0) * ::log(static_cast(1.0)-u2)); + const dist_acctype theta = static_cast(2.0) * c10::pi * u1; + if (std::is_same::value) { + maybe_set_next_double_normal_sample(generator, r * ::sin(theta)); + } else { + maybe_set_next_float_normal_sample(generator, r * ::sin(theta)); + } + ret = r * ::cos(theta); + return transformation::normal(ret, mean, stdv); + } + + private: + T mean; + T stdv; +}; + +template +struct DiscreteDistributionType { using type = float; }; + +template <> struct DiscreteDistributionType { using type = double; }; + +/** + * Samples a bernoulli distribution given a probability input + */ +template +struct bernoulli_distribution { + + C10_HOST_DEVICE inline bernoulli_distribution(T p_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1); + p = p_in; + } + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return transformation::bernoulli(uniform(generator), p); + } + + private: + T p; +}; + +/** + * Samples a geometric distribution given a probability input + */ +template +struct geometric_distribution { + + C10_HOST_DEVICE inline geometric_distribution(T p_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1); + p = p_in; + } + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return transformation::geometric(uniform(generator), p); + } + + private: + T p; +}; + +/** + * Samples an exponential distribution given a lambda input + */ +template +struct exponential_distribution { + + C10_HOST_DEVICE inline exponential_distribution(T lambda_in) { + lambda = lambda_in; + } + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return transformation::exponential(uniform(generator), lambda); + } + + private: + T lambda; +}; + +/** + * Samples a cauchy distribution given median and sigma as inputs + */ +template +struct cauchy_distribution { + + C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) { + median = median_in; + sigma = sigma_in; + } + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return 
transformation::cauchy(uniform(generator), median, sigma); + } + + private: + T median; + T sigma; +}; + +/** + * Samples a lognormal distribution + * Takes mean and standard deviation as inputs + * Outputs two samples at a time + */ +template +struct lognormal_distribution { + + C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0); + mean = mean_in; + stdv = stdv_in; + } + + template + C10_HOST_DEVICE inline T operator()(RNG generator){ + normal_distribution normal(mean, stdv); + return transformation::log_normal(normal(generator)); + } + + private: + T mean; + T stdv; +}; +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/Formatting.h b/voice_bridge/torch/include/ATen/core/Formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..6dcfc6c7b3cd151f6a4c6d9e6b30b31fc6fd362d --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Formatting.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include +#include + +namespace c10 { +TORCH_API std::ostream& operator<<(std::ostream& out, Backend b); +TORCH_API std::ostream& operator<<(std::ostream & out, Scalar s); +TORCH_API std::string toString(Scalar s); +} +namespace at { + +TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t); +TORCH_API std::ostream& print( + std::ostream& stream, + const Tensor& tensor, + int64_t linesize); +static inline std::ostream& operator<<(std::ostream & out, const Tensor & t) { + return print(out,t,80); +} +TORCH_API void print(const Tensor & t, int64_t linesize=80); +} diff --git a/voice_bridge/torch/include/ATen/core/Generator.h b/voice_bridge/torch/include/ATen/core/Generator.h new file mode 100644 index 0000000000000000000000000000000000000000..60323f3d3a000af0bbeb7ad5daf019e8a6af42aa --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Generator.h @@ -0,0 +1,184 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// For the record I don't think this is a correct pimpl idiom. +// Including Impl header in interface header defeats the purpose +// because you can't change Impl private members without forcing +// everything that included the interface to rebuild. +// Impl should be forward-declared in the interface header instead. +#include + +/** + * Note [Generator] + * ~~~~~~~~~~~~~~~~ + * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm to + * generate a seemingly random sequence of numbers, that may be later be used in creating + * a random distribution. Such an engine almost always maintains a state and requires a + * seed to start off the creation of random numbers. Often times, users have + * found it beneficial to be able to explicitly create, retain, and destroy + * PRNG states and also be able to have control over the seed value. + * + * A Generator in ATen gives users the ability to read, write and modify a PRNG engine. + * For instance, it does so by letting users seed a PRNG engine, fork the state of the + * engine, etc. + * + * By default, there is one generator per device, and a device's generator is + * lazily created. A user can use the torch.Generator() api to create their own generator. + */ + +/** + * Note [Acquire lock when using random generators] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Generator and its derived classes are NOT thread-safe. 
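 * [Editor's aside, not in the original header] A hedged sketch of the locking
 * convention described in this note, using the CPU generator factory declared
 * in ATen/CPUGeneratorImpl.h:
 *
 *   auto gen = at::detail::createCPUGenerator();
 *   {
 *     std::lock_guard<std::mutex> lock(gen.mutex());
 *     gen.set_current_seed(42);  // mutation guarded by the public mutex
 *   }
 *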
Please note that most of the + * places where we have inserted locking for generators are historically based, and we + * haven't actually checked that everything is truly thread safe (and it probably isn't). + * Please use the public mutex_ when using any methods from these classes, except for the + * read-only methods. You can learn about the usage by looking into the unittests + * (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used lock_guard. + * + * TODO: Look into changing the threading semantics of Generators in ATen (e.g., making + * them non-thread safe and instead making the generator state splittable, to accommodate + * forks into other threads). + */ + +namespace at { + +class Tensor; + +struct TORCH_API Generator { + Generator() {} + + explicit Generator(c10::intrusive_ptr gen_impl) + : impl_(std::move(gen_impl)) { + if (impl_.get() == nullptr) { + throw std::runtime_error("GeneratorImpl with nullptr is not supported"); + } + } + + bool operator==(const Generator& rhs) const { + return this->impl_ == rhs.impl_; + } + + bool operator!=(const Generator& rhs) const { + return !((*this) == rhs); + } + + bool defined() const { + return static_cast(impl_); + } + + c10::GeneratorImpl* unsafeGetGeneratorImpl() const { + return impl_.get(); + } + + c10::GeneratorImpl* unsafeReleaseGeneratorImpl() { + return impl_.release(); + } + + const c10::intrusive_ptr& getIntrusivePtr() const { + return impl_; + } + + void set_current_seed(uint64_t seed) { impl_->set_current_seed(seed); } + + uint64_t current_seed() const { return impl_->current_seed(); } + + uint64_t seed() { return impl_->seed(); } + + // Implementation not inlined to prevent cycle reference between + // `ATen/core/Generator.h` and `ATen/core/Tensor.h` + void set_state(const at::Tensor& new_state); + + at::Tensor get_state() const; + + std::mutex& mutex() { + return impl_->mutex_; + } + + DispatchKeySet key_set() const { + return impl_->key_set(); + } + + Device device() const { return impl_->device(); } + + inline void set_pyobj(PyObject* pyobj) const noexcept { + impl_->set_pyobj(pyobj); + } + + inline PyObject* pyobj() const noexcept { + return impl_->pyobj(); + } + + template + T* get() const { return static_cast(impl_.get()); } + + Generator clone() const { + return Generator(impl_->clone()); + } + + private: + c10::intrusive_ptr impl_; +}; + +template +Generator make_generator(Args&&... args) { + return Generator(c10::make_intrusive(std::forward(args)...)); +} + +/** + * Utility function to static cast input Generator* to + * the backend generator type (CPU/CUDAGeneratorImpl etc.) + */ +template +static inline T * check_generator(c10::optional gen) { + TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt"); + TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed"); + TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'"); + return gen->get(); +} + +/** + * Utility function used in tensor implementations, which + * supplies the default generator to tensors, if an input generator + * is not supplied. The input Generator* is also static casted to + * the backend generator type (CPU/CUDAGeneratorImpl etc.) + */ +template +static inline T* get_generator_or_default(const c10::optional& gen, const Generator& default_gen) { + return gen.has_value() && gen->defined() ? 
check_generator(gen) : check_generator(default_gen); +} + +namespace detail { + +/** + * Helper function for checking the validity of new random generator + * state. Right now following conditions are checked: + * + * - The new state tensor must be a torch.ByteTensor + * - Data of the new state tensor must be contiguous + */ +static inline void check_rng_state(const c10::TensorImpl& new_state) { + TORCH_CHECK_TYPE( + new_state.layout() == kStrided && new_state.device().type() == kCPU && new_state.dtype() == kByte, + "RNG state must be a torch.ByteTensor" + ); + + TORCH_CHECK(new_state.is_contiguous(), "RNG state must be contiguous"); +} + +} // namespace detail + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/IListRef.h b/voice_bridge/torch/include/ATen/core/IListRef.h new file mode 100644 index 0000000000000000000000000000000000000000..0b0ff67b02e2d2ff28be4fa0e3dc288099d47112 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/IListRef.h @@ -0,0 +1,625 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +/* + * [Note: IListRef] + * Wrapper around different API containers (e.g. boxed and unboxed). + * + * What is it? + * =========== + * It is a tagged union of both boxed and unboxed API containers. + * Working implementations: + * + * - `IListRef` + * - `IListRef` + * + * Note that `IListRef` is a view type. Meaning that it won't own the + * tensors it holds. It's intended to be used only as argument parameters. + * Specifically, where these 2 worlds overlap. + * + * What is this for? + * ================= + * Historically, PyTorch has maintained 2 different APIs: the unboxed + * (called from C++ API and Python eager mode) and boxed APIs (called + * from the TorchScript JIT, mobile interpreter, and boxed fallbacks). + * + * Calling unboxed kernels from the boxed "world" and vice-versa may + * result in non-negligible overhead. Lists are one of those types: + * + * - Boxed world: `c10::List` + * - Unboxed world: `c10::ArrayRef` + * + * In this context, `c10::IListRef` solves this problem by wrapping those + * 2 container types, so that we don't need to convert from one to + * the other. + * + * (see https://github.com/pytorch/pytorch/issues/66328) + * + * What does it do? + * ================ + * This container wraps around the different tagged containers + * (currently, only boxed and unboxed), without incurring in extra + * overhead for converting from one to another. It does so while + * exposing usual container methods, which dispatch to corresponding + * implementations. + * + * While it works with different container types, it introduces + * overhead for repeatedly calling member functions (since those will + * get dispatched, again). Therefore, you should only use it to iterate + * through the list up to one time. If you need to do more complex things, + * call `materialize()` first. + * + * Adding support for a new Tag + * ============================ + * Suppose we want to add a new tag: `Chest`. Here are the steps + * we would have to go through: + * + * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`. + * + * #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \ + * ... + * _(Chest, ##__VA_ARGS__) + * + * 2. Add type aliases, union members, and constructors. + * + * template + * class IListRef { + * ... + * using chest_type = + * typename detail::IListRefTagImpl::list_type; + * ... + * IListRef(...) : tag_(IListRefTag::Chest) { + * ... + * } + * ... + * union Payload { + * ... + * chest_type chest; + * ... 
+ * }; + * ... + * }; + * + * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's + * preferable to make the default implementation work for `T = Tensor` + * (both `Unboxed` and `Boxed` do it). + * + * template + * class IListRefTagImplBase { + * public: + * using elem_type = ListElemT; + * using list_type = ChestContainer; + * + * static const list_type& unwrap(const IListRef& ilist) { ... } + * + * static typename list_type::const_iterator& unwrap( + * IListRefIterator& it) { ... } + * + * static const typename list_type::const_iterator& unwrap( + * const IListRefIterator& it) { ... } + * + * static IListRefConstRef iterator_get( + * const typename list_type::const_iterator& it) { ... } + * } + * + * 4. Add an specialization for each of the already supported types. + * Finally, for consistency, add them to the tracking list. + * (see [Note: IListRefTagImpl Specializations]) + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * Adding support for a new Type + * ============================= + * Suppose we want to add support for a new type: `Matrix`. + * Here are the steps we would have to go through: + * + * 1. Add an specialization for each of the existing tags. + * For consistency, add them to the tracking list. + * (see [Note: IListRefTagImpl Specializations]) + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * Common Problems + * =============== + * 1. One of `IListRef(Iterator)` methods are failing to compile. + * + * That may be happening because the container type you added + * is not compatible with the code written for that method. If + * that's true, then you might have to transform that code into + * a static method call (see `List::operator[]` method). + * + * 2. Can't make `IListRefIterator::operator*` return a const-reference. + * + * First, keep in mind that we assume that boxed containers will + * have to deal with `IValue` (e.g. `c10::List`). In this context, + * what may be happening is that `IValue` doesn't store internally + * your type `T`. Instead, it constructs a type new `T` everytime + * you try to get `T` for it (see `IListRef`). + */ + +namespace c10 { +template +class IListRef; + +/* + * Applies arbitrary macros to each `IListRefTag`. + */ +#define TORCH_ILISTREF_FORALL_TAGS(_, ...) \ + _(Unboxed, ##__VA_ARGS__) \ + _(Boxed, ##__VA_ARGS__) \ + _(Materialized, ##__VA_ARGS__) + +/* + * Defines a "switch-case" for `TAG`. Inside, it executes `BODY`, + * while bringing to scope: + * + * - `ImplT`: the implementation class for `TAG` + * - `this_`: the result of unwrapping `this` + */ +#define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY) \ + case c10::IListRefTag::TAG: { \ + using ImplT = c10::detail::IListRefTagImpl; \ + auto& this_ = ImplT::unwrap(*this); \ + BODY \ + } break; + +/* + * Dispatches the unwrap call, depending on `TAG`, followed by + * the execution of `BODY`. It aborts if `TAG` is not a `IListRefTag`. + * + * This macro is useful because it allows us to handle different + * types (that correspond to different tags) to be implemented + * only once. We can do it even when the implementation of the + * different tags aren't syntatically the same, by dispatching + * it to a function (e.g. `ImplT::(this_)`). 
+ */ +#define TORCH_ILISTREF_UNWRAP(TAG, BODY) \ + switch (TAG) { \ + TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \ + break; \ + default: \ + TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); \ + } + +enum class IListRefTag { +#define DEFINE_TAG(tag, ...) tag, + TORCH_ILISTREF_FORALL_TAGS(DEFINE_TAG) +#undef DEFINE_TAG + None +}; + +namespace detail { +/* + * Type alias that specifies whether we return a reference or a copy of `T`. + * + * What is this for? + * ================= + * Since values in the boxed world are represented by an `IValue`, we also + * depend on whether it can be converted to a const-reference (`Tensor`) or + * has to create a new copy of `T` (`OptionalTensorRef`). + */ +template +using IListRefConstRef = typename ivalue_to_const_ref_overload_return::type; + +/* + * Interface that implements key functions for each `IListRefTag` type. + * + * What is this for? + * ================= + * Given an `IListRef(Iterator)`, some methods have to be implemented + * differently for each `TAG`. Therefore, the methods inside this class + * are used as dispatch targets for the different `IListRefTag` values. + * + * You should create a specialization of this class for each possible + * combination of `IListRefTag` type (except `None`) and element types + * (e.g. `Tensor`). + * + * What does it do? + * ================ + * 1. defines static methods to be used as dispatch targets by both + * `IListRef` and `IListRefIterator` (see the implementation of + * `IListRefTagImplBase`). + * + * 2. defines the `elem_type` and `list_type` aliases that will be + * used in the definition of `IListRef`. In general, we should do + * so by inheriting from `IListRefTagImplBase`. + * + * [Note: IListRefTagImpl Specialization] + * ====================================== + * For `IListRef(Iterator)`: + * - + * - + * - + * + * For `IListRef(Iterator)`: + * - + * - + * - + */ +template +class IListRefTagImpl {}; + +/* + * Base implementation of `IListRefTagImpl` methods. + * + * What is this for? + * ================= + * This should make adding specializations for new types easier. For + * example, one should be able to add a new type just by making its + * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`. + * + * You should create a partial specialization for this class only if + * you introduce a new `IListRefTag`. The idea being that there is one + * default implementation for each possible value of `IListRefTag`. + * + * What does it do? + * ================ + * 1. defines `elem_type` as an alias to `ListElemT`. + * + * 2. defines `list_type` as an alias to the default container type + * that will hold a collection of `elem_type`. The idea being that + * all types tagged as `TAG` will have `list_type` as their container, + * with different `elem_type`. + * + * 3. defines the default implementation for each of the methods that + * are supposed to be defined on `IListRefTagImpl` specializations. + * + * 4. inheriting from `IListRefTagImplBase` also means + * that the payload of the type `IListRef` will be of type `list_type` + * when it is tagged as `TAG`. + */ +template +class IListRefTagImplBase {}; + +/* + * Materialized container for `IListRef`. + * + * What is this for? + * ================= + * Container that groups `T` references together. This exchanges the + * overhead of every method call from `IListRef` for a dynamic allocation.
+ * + * You should use this container instead of `IListRef` if: + * + * - You are going to iterate the list more than once + * - You need to repeatedly access arbitrary elements (using `operator[]`) + * + * What does it do? + * ================ + * Removes the reference (&) from the type, and wraps it into a + * `std::reference_wrapper`. If `IListRefConstRef` is not a + * reference type, then it's left unchanged. + */ +template +using _MaterializedIListRefElem = typename std::conditional< + std::is_reference::value, + typename std::reference_wrapper::type>, + T>::type; + +template +using MaterializedIListRefElem = _MaterializedIListRefElem>; + +template +using MaterializedIListRef = std::vector>; + +} // namespace detail + +/* + * Iterator for `IListRef`. + * + * What is it? + * =========== + * Currently, a `std::bidirectional_iterator` that wraps the iterator + * types defined for each of the `IListRefTag`. + * + * One should be able to use it as if it were the unwrapped + * iterators themselves. + * + * What does it do? + * ================ + * Similarly to `IListRef`, this is a wrapper class. Specifically, it + * wraps each container's `const_iterator` type alias. So, for example, + * given that the container for `IListRefTag::Boxed` is `c10::List`, this + * iterator will wrap a `c10::List::const_iterator`. + * + * [Note: MSVC Iterator Debug] + * =========================== + * The MSVC `vector::iterator` implementation (used in the boxed variant) + * makes it so that this union's destructor, copy-constructor (assignment), and + * move-constructor (assignment) are implicitly deleted. + * + * Therefore, we need to define them explicitly, as needed. What follows is a + * list of places where these are needed, and the reason: + * + * - `Payload` destructor: + * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2. + * + * - `IListRefIterator` destructor: + * same as above. However, we also need to call the variant + * destructor explicitly. + * + * - `IListRefIterator` copy-constructor: + * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different + * from 0. + */ +template +class IListRefIterator : public std::iterator { + private: +#define DEFINE_FRIEND_CLASS(TAG, ...)
\ + friend class detail::IListRefTagImpl; \ + friend class detail::IListRefTagImplBase< \ + IListRefTag::TAG, \ + T, \ + typename detail::IListRefTagImpl::elem_type>; + TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS) +#undef DEFINE_FRIEND_CLASS + + public: + using unboxed_iterator_type = typename detail:: + IListRefTagImpl::list_type::const_iterator; + using boxed_iterator_type = typename detail:: + IListRefTagImpl::list_type::const_iterator; + using materialized_iterator_type = + typename detail::MaterializedIListRef::const_iterator; + + IListRefIterator() : tag_(IListRefTag::None) {} + +#if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL != 0 + // See [Note: MSVC Iterator Debug] + IListRefIterator(const IListRefIterator& iterator) + : tag_(iterator.tag_) { + switch (tag_) { + case IListRefTag::Boxed: + payload_.boxed_iterator = iterator.payload_.boxed_iterator; + break; + case IListRefTag::Unboxed: + payload_.unboxed_iterator = iterator.payload_.unboxed_iterator; + break; + case IListRefTag::Materialized: + payload_.materialized_iterator = iterator.payload_.materialized_iterator; + break; + default: + TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); + } + } +#endif + +#if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL == 2 + // See [Note: MSVC Iterator Debug] + ~IListRefIterator() noexcept(false) { + switch (tag_) { + case IListRefTag::Boxed: + payload_.boxed_iterator.~boxed_iterator_type(); + break; + case IListRefTag::Unboxed: + payload_.unboxed_iterator.~unboxed_iterator_type(); + break; + case IListRefTag::Materialized: + payload_.materialized_iterator.~materialized_iterator_type(); + break; + default: + TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); + } + } +#endif + + IListRefIterator(boxed_iterator_type boxed) : tag_(IListRefTag::Boxed) { + payload_.boxed_iterator = boxed; + } + + IListRefIterator(unboxed_iterator_type unboxed) : tag_(IListRefTag::Unboxed) { + payload_.unboxed_iterator = unboxed; + } + + IListRefIterator(materialized_iterator_type materialized) : tag_(IListRefTag::Materialized) { + payload_.materialized_iterator = materialized; + } + + detail::IListRefConstRef operator*() const { + TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::iterator_get(this_); }); + } + + IListRefIterator& operator++() { + TORCH_ILISTREF_UNWRAP(tag_, { ++this_; }); + return *this; + } + + IListRefIterator operator++(int) { + auto old = *this; + TORCH_ILISTREF_UNWRAP(tag_, { ++this_; }); + return old; + } + + IListRefIterator& operator--() { + TORCH_ILISTREF_UNWRAP(tag_, { --this_; }); + return *this; + } + + IListRefIterator operator--(int) { + auto old = *this; + TORCH_ILISTREF_UNWRAP(tag_, { --this_; }); + return old; + } + + bool operator==(const IListRefIterator& rhs) const { + if (tag_ != rhs.tag_) { + return false; + } + TORCH_ILISTREF_UNWRAP(tag_, { + auto& rhs_it = ImplT::unwrap(rhs); + return this_ == rhs_it; + }); + } + + bool operator!=(const IListRefIterator& rhs) const { + return !(*this == rhs); + } + + private: + union Payload { + boxed_iterator_type boxed_iterator; + unboxed_iterator_type unboxed_iterator; + materialized_iterator_type materialized_iterator; + void* _init_ptr; + Payload() : _init_ptr(nullptr) {} +#if defined(_MSC_VER) + // See [Note: MSVC Iterator Debug] + ~Payload() {} +#endif + }; + + Payload payload_; + IListRefTag tag_; +}; + +/* + * See [Note: IListRef] + */ +template +class IListRef { + private: +#define DEFINE_FRIEND_CLASS(TAG, ...) 
\ + friend class detail::IListRefTagImpl; \ + friend class detail::IListRefTagImplBase< \ + IListRefTag::TAG, \ + T, \ + typename detail::IListRefTagImpl::elem_type>; + TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS) +#undef DEFINE_FRIEND_CLASS + + public: + using unboxed_type = + typename detail::IListRefTagImpl::list_type; + using boxed_type = + typename detail::IListRefTagImpl::list_type; + using materialized_type = + typename detail::MaterializedIListRef; + + using iterator = IListRefIterator; + using const_iterator = IListRefIterator; + using reverse_iterator = std::reverse_iterator; + using value_type = typename iterator::value_type; + + IListRef() : tag_(IListRefTag::None) {} + + IListRef(const boxed_type& boxed) : tag_(IListRefTag::Boxed) { + payload_.boxed = &boxed; + } + + IListRef(const unboxed_type& unboxed) : tag_(IListRefTag::Unboxed) { + payload_.unboxed = unboxed; + } + + IListRef(const std::initializer_list& list) : tag_(IListRefTag::Unboxed) { + payload_.unboxed = at::ArrayRef(list); + } + + template < + typename... UnboxedConstructorArgs, + typename = std::enable_if_t< + std::is_constructible::value>> + IListRef(UnboxedConstructorArgs&&... args) : tag_(IListRefTag::Unboxed) { + payload_.unboxed = unboxed_type(std::forward(args)...); + } + + IListRef(const materialized_type& materialized) : tag_(IListRefTag::Materialized) { + payload_.materialized = &materialized; + } + + size_t size() const { + TORCH_ILISTREF_UNWRAP(tag_, { return this_.size(); }); + } + + bool empty() const { + return size() == 0; + } + + iterator begin() const { + TORCH_ILISTREF_UNWRAP(tag_, { return this_.begin(); }); + } + + iterator end() const { + TORCH_ILISTREF_UNWRAP(tag_, { return this_.end(); }); + } + + detail::IListRefConstRef front() const { + TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::front(this_); }); + } + + /* + * Materializes the `IListRef` into a `std::vector`. + * + * This should be used when one wishes to either: + * + * - iterate over the list more than once: each `IListRefIterator` + * member function call has to go through a switch, introducing + * non-negligible overhead + * + * - randomly access an arbitrary element using `operator[]`: + * same reason as above + */ + detail::MaterializedIListRef materialize() const { + if (isMaterialized()) { + return toMaterialized(); + } + + detail::MaterializedIListRef materialized; + materialized.reserve(size()); + for (const auto& t : *this) { + materialized.emplace_back(t); + } + return materialized; + } + +#define DEFINE_CHECK(TAG, ...) \ + bool is##TAG() const { \ + return tag_ == IListRefTag::TAG; \ + } + TORCH_ILISTREF_FORALL_TAGS(DEFINE_CHECK); +#undef DEFINE_CHECK + + bool isNone() const { + return tag_ == IListRefTag::None; + } + +#define DEFINE_CASTING(TAG, ...) 
\ + const typename detail::IListRefTagImpl::list_type& \ + to##TAG() const { \ + TORCH_INTERNAL_ASSERT(is##TAG()); \ + return detail::IListRefTagImpl::unwrap(*this); \ + } + TORCH_ILISTREF_FORALL_TAGS(DEFINE_CASTING); +#undef DEFINE_CASTING + + private: + union Payload { + const boxed_type* boxed; + unboxed_type unboxed; + const materialized_type* materialized; + Payload() : boxed(nullptr) {} + ~Payload() {} + }; + + Payload payload_; + IListRefTag tag_; +}; + +} // namespace c10 + +#include diff --git a/voice_bridge/torch/include/ATen/core/IListRef_inl.h b/voice_bridge/torch/include/ATen/core/IListRef_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..534272f69b64fd6ecb5d6a562ee2a5fda5691e12 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/IListRef_inl.h @@ -0,0 +1,201 @@ +#pragma once + +#include +#include + +namespace at { +class Tensor; +class OptionalTensorRef; +} + +namespace c10 { +namespace detail { + +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Unboxed`. + */ +template +class IListRefTagImplBase { + public: + using elem_type = ListElemT; + using list_type = ArrayRef; + + /* + * These `unwrap` static methods unwraps the inner containers out + * of `IListRef` (and `IListRefIterator`). They are required when + * the macro `TORCH_ILISTREF_UNWRAP` is called. + */ + static const list_type& unwrap(const IListRef& ilist) { + return ilist.payload_.unboxed; + } + + static typename list_type::const_iterator& unwrap(IListRefIterator& it) { + return it.payload_.unboxed_iterator; + } + + static const typename list_type::const_iterator& unwrap( + const IListRefIterator& it) { + return it.payload_.unboxed_iterator; + } + + /* + * We have these function (besides the `unwrap`s above) because the + * implementation for both `IListRef::operator[]` and `IListRefIterator::operator*` + * weren't syntatically equal for the existing tags at the time + * (`Unboxed` and `Boxed`). + */ + static IListRefConstRef front(const list_type& lst) { + return lst.front(); + } + + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + return *it; + } +}; + +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Boxed`. + */ +template +class IListRefTagImplBase { + public: + using elem_type = ListElemT; + using list_type = List; + + static const list_type& unwrap(const IListRef& ilist) { + return *ilist.payload_.boxed; + } + + static typename list_type::const_iterator& unwrap(IListRefIterator& it) { + return it.payload_.boxed_iterator; + } + + static const typename list_type::const_iterator& unwrap( + const IListRefIterator& it) { + return it.payload_.boxed_iterator; + } + + static IListRefConstRef front(const list_type& lst) { + return lst[0]; + } + + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + return (*it).get().toTensor(); + } +}; + +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Materialized`. 
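+ * (Here, the unwrapped payload is the `std::vector` of reference wrappers + * built by `materialize()`, so `iterator_get` can simply dereference the + * iterator.)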
+ */ +template +class IListRefTagImplBase> { + public: + using elem_type = MaterializedIListRefElem; + using list_type = MaterializedIListRef; + + static const list_type& unwrap(const IListRef& ilist) { + return *ilist.payload_.materialized; + } + + static typename list_type::const_iterator& unwrap(IListRefIterator& it) { + return it.payload_.materialized_iterator; + } + + static const typename list_type::const_iterator& unwrap( + const IListRefIterator& it) { + return it.payload_.materialized_iterator; + } + + static IListRefConstRef front(const list_type& lst) { + return lst[0]; + } + + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + return *it; + } +}; + +/* + * [Note: ITensorListRef] + * Specializations necessary for `IListRef` type. + * + * Since the default implementations are usually done with supporting + * `Tensor` in mind, we only have to inherit from the base implementations. + */ +template <> +class IListRefTagImpl + : public IListRefTagImplBase {}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase {}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase< + IListRefTag::Materialized, + at::Tensor, + MaterializedIListRefElem> {}; + +/* + * [Note: IOptTensorListRef] + * Specializations necessary for `IListRef` type. + * + * We can't get an `at::OptionalTensorRef` directly from an instance of + * `List>` (the type that corresponds to the boxed world). + * + * So, the default implementation won't help us. Thus, we have to implement + * this method ourselves. + */ +template <> +class IListRefTagImpl + : public IListRefTagImplBase {}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase> { + + public: + /* + * Given an instance of the types corresponding to the `Boxed` tag, we override + * the default implementation, so that we can return a `at::OptionalTensorRef`. + */ + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + const auto& ivalue = (*it).get(); + if (!ivalue.isNone()) { + const auto& tensor = ivalue.toTensor(); + return (tensor.defined()) ? tensor : at::OptionalTensorRef{}; + } + return {}; + } +}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase< + IListRefTag::Materialized, + at::OptionalTensorRef, + MaterializedIListRefElem> {}; + +} // namespace detail +} // namespace c10 + +namespace at { + +// [Note: ITensorListRef] +using ITensorListRef = c10::IListRef; +using ITensorListRefIterator = c10::IListRefIterator; +using MaterializedITensorListRef = c10::detail::MaterializedIListRef; +// [Note: IOptTensorListRef] +using IOptTensorListRef = c10::IListRef; +using IOptTensorListRefIterator = c10::IListRefIterator; +using MaterializedIOptTensorListRef = c10::detail::MaterializedIListRef; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/LegacyTypeDispatch.h b/voice_bridge/torch/include/ATen/core/LegacyTypeDispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3efe5e0f7b87b66a1cf22ef1d5fb0f93e2f78494 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/LegacyTypeDispatch.h @@ -0,0 +1,111 @@ +#pragma once + +// The legacy mechanism for dispatching operators in ATen is a Type +// object, which is essentially a giant virtual dispatch table +// for every operation we support dynamically dispatching over. +// +// This has been deprecated in favor of ATenDispatch, and in the future, +// c10 dispatcher. 
+// TODO: Clean up what remains here + +#include + +namespace at { + +// A RAII, thread local (!) guard that will disable dispatch to variable +// handler. +// +// NOTE [ Treating Variables as non-Variables in type dispatch ] +// +// What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes +// dispatches on ATen functions to go to the non-variable implementation, +// bypassing autograd handling (and also profiling and tracing). +// +// To understand why this guard exists, it's helpful to understand the history +// behind how Variable was implemented. Previously, Variables were implemented +// as a wrapper on Tensors; so the act of processing a Variable involved +// unwrapping the underlying Tensor, and then calling the underlying base +// operation on /that/ tensor. +// +// However, after the Variable/Tensor merge, there is no concept of unwrapping +// a tensor anymore. If you just call the operation on the same variable +// again inside your VariableType handler, you'll dispatch back to +// VariableType, which is not what we want. +// +// The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which +// when enabled will cause `legacyTensorType()` and `getType()` to always return +// non-Variable type, even if the tensor being called on is a variable. + +/* Note [AutoDispatchBelowAutograd] + * AutoDispatchBelowAutograd is **INTERNAL ONLY**: it should only be used + * for kernel implementations and customized C++ kernels. + * If you are looking for a guard to run a workload in inference mode, please use + * the c10::InferenceMode RAII, which is the user-facing API. + * In the past, AutoDispatchBelowAutograd (or its old version, AutoNonVariableTypeMode) + * was used in user code for inference-only workloads; this was at risk of + * silently producing wrong results in some edge cases. For example: + * ``` + * torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true); + * torch::Tensor out = s * s; + * { + * at::AutoDispatchBelowAutograd guard; + * s.add_(1); // Skips version bump on `s`. + * } + * // WRONG GRADIENT! s.grad() is now computed using the value of `s` after the + * // in-place update. + * out.backward(torch::ones_like(out)); + * ``` + * Users should use `c10::InferenceMode` here so that it'll properly throw an + * error saying "one of the variables needed for gradient computation has been modified." + */ +struct TORCH_API AutoDispatchBelowAutograd { + AutoDispatchBelowAutograd() : + autograd_guard_(c10::autograd_dispatch_keyset) { + } + + // disable all autograd dispatch keys + c10::impl::ExcludeDispatchKeyGuard autograd_guard_; +}; + +// TODO: AutoNonVariableTypeMode should be removed in release 1.10. +struct TORCH_API AutoNonVariableTypeMode { + AutoNonVariableTypeMode(bool enabled = true) : + autograd_guard_(c10::autograd_dispatch_keyset) { + TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. " + "For kernel implementations please use AutoDispatchBelowADInplaceOrView instead. " + "If you are looking for a user facing API to enable running your inference-only " + "workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code " + "is at risk of producing silently wrong results in some edge cases. 
" + "See Note [AutoDispatchBelowAutograd] for more details."); + TORCH_INTERNAL_ASSERT(enabled); + } + + // disable all autograd dispatch keys + c10::impl::ExcludeDispatchKeyGuard autograd_guard_; +}; + +struct TORCH_API AutoDispatchSkipFunctionalize { + AutoDispatchSkipFunctionalize() : + dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) { + } + c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; +}; + +/* Note [AutoDispatchBelowADInplaceOrView] + * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode + * before we split inplace & view ops out of VariableType kernel. + * Note this guard is used in VariableType kernels for functional ops + * as well as ADInplaceOrView kernels for inplace/view ops to enforce the + * Invariant: + * Once you are in VariableType/ADInplaceOrView kernel for an op, + * you never go back to a kernel on same dispatch key until + * you finish the current op. + */ +struct TORCH_API AutoDispatchBelowADInplaceOrView { + AutoDispatchBelowADInplaceOrView() : + dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) { + } + // disable Autograd & ADInplaceOrView dispatch keys + c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; +}; +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/List.h b/voice_bridge/torch/include/ATen/core/List.h new file mode 100644 index 0000000000000000000000000000000000000000..fe75bf37cb7fab9c14a2dc969dae25d2f39c9de5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/List.h @@ -0,0 +1,492 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +class Tensor; +} +namespace c10 { +struct IValue; +template class List; +struct Type; + +namespace detail { + +struct ListImpl final : public c10::intrusive_ptr_target { + using list_type = std::vector; + + explicit ListImpl(list_type list_, TypePtr elementType_) + : list(std::move(list_)) + , elementType(std::move(elementType_)) {} + + list_type list; + + TypePtr elementType; + + intrusive_ptr copy() const { + return make_intrusive(list, elementType); + } + friend TORCH_API bool operator==(const ListImpl& lhs, const ListImpl& rhs); +}; +} + +namespace impl { + +template class ListIterator; + +template class ListElementReference; + +template +void swap(ListElementReference&& lhs, ListElementReference&& rhs); + +template +bool operator==(const ListElementReference& lhs, const T& rhs); + +template +bool operator==(const T& lhs, const ListElementReference& rhs); + +template +struct ListElementConstReferenceTraits { + // In the general case, we use IValue::to(). + using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return::type; +}; + +// There is no to() overload for c10::optional. 
+template<> +struct ListElementConstReferenceTraits> { + using const_reference = c10::optional>; +}; + +template +class ListElementReference final { +public: + operator std::conditional_t< + std::is_reference::type>::value, + const T&, + T>() const; + + ListElementReference& operator=(T&& new_value) &&; + + ListElementReference& operator=(const T& new_value) &&; + + // assigning another ref to this assigns the underlying value + ListElementReference& operator=(ListElementReference&& rhs) &&; + + const IValue& get() const& { + return *iterator_; + } + + friend void swap(ListElementReference&& lhs, ListElementReference&& rhs); + +private: + ListElementReference(Iterator iter) + : iterator_(iter) {} + + ListElementReference(const ListElementReference&) = delete; + ListElementReference& operator=(const ListElementReference&) = delete; + + // allow moving, but only our friends (i.e. the List class) can move us + ListElementReference(ListElementReference&&) noexcept = default; + ListElementReference& operator=(ListElementReference&& rhs) & noexcept { + iterator_ = std::move(rhs.iterator_); + return *this; + } + + friend class List; + friend class ListIterator; + + Iterator iterator_; +}; + +// this wraps vector::iterator to make sure user code can't rely +// on it being the type of the underlying vector. +template +class ListIterator final : public std::iterator< + std::random_access_iterator_tag, + T, + std::ptrdiff_t, + T*, + ListElementReference> { + public: + explicit ListIterator() = default; + ~ListIterator() = default; + + ListIterator(const ListIterator&) = default; + ListIterator(ListIterator&&) noexcept = default; + ListIterator& operator=(const ListIterator&) = default; + ListIterator& operator=(ListIterator&&) = default; + + ListIterator& operator++() { + ++iterator_; + return *this; + } + + ListIterator operator++(int) { + ListIterator copy(*this); + ++*this; + return copy; + } + + ListIterator& operator--() { + --iterator_; + return *this; + } + + ListIterator operator--(int) { + ListIterator copy(*this); + --*this; + return copy; + } + + ListIterator& operator+=(typename List::size_type offset) { + iterator_ += offset; + return *this; + } + + ListIterator& operator-=(typename List::size_type offset) { + iterator_ -= offset; + return *this; + } + + ListIterator operator+(typename List::size_type offset) const { + return ListIterator{iterator_ + offset}; + } + + ListIterator operator-(typename List::size_type offset) const { + return ListIterator{iterator_ - offset}; + } + + friend typename std::iterator::difference_type operator-(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ - rhs.iterator_; + } + + ListElementReference operator*() const { + return {iterator_}; + } + + ListElementReference operator[](typename List::size_type offset) const { + return {iterator_ + offset}; + } + +private: + explicit ListIterator(Iterator iterator): iterator_(std::move(iterator)) {} + + Iterator iterator_; + + friend bool operator==(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ == rhs.iterator_; + } + + friend bool operator!=(const ListIterator& lhs, const ListIterator& rhs) { + return !(lhs == rhs); + } + + friend bool operator<(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ < rhs.iterator_; + } + + friend bool operator<=(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ <= rhs.iterator_; + } + + friend bool operator>(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ 
> rhs.iterator_; + } + + friend bool operator>=(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ >= rhs.iterator_; + } + + friend class ListIterator; + friend class List; +}; + +template List toTypedList(List list); +template List toList(List&& list); +template List toList(const List& list); +const IValue* ptr_to_first_element(const List& list); +} + +/** + * An object of this class stores a list of values of type T. + * + * This is a pointer type. After a copy, both Lists + * will share the same storage: + * + * > List a; + * > List b = a; + * > b.push_back("three"); + * > ASSERT("three" == a.get(0)); + * + * We use this class in the PyTorch kernel API instead of + * std::vector, because that allows us to do optimizations + * and switch out the underlying list implementation without + * breaking backwards compatibility for the kernel API. + */ +template +class List final { +private: + // This is an intrusive_ptr because List is a pointer type. + // Invariant: This will never be a nullptr, there will always be a valid + // ListImpl. + c10::intrusive_ptr impl_; + + using internal_reference_type = impl::ListElementReference; + using internal_const_reference_type = typename impl::ListElementConstReferenceTraits::const_reference; + +public: + using value_type = T; + using size_type = typename c10::detail::ListImpl::list_type::size_type; + using iterator = impl::ListIterator; + using const_iterator = impl::ListIterator; + using reverse_iterator = impl::ListIterator; + + /** + * Constructs an empty list. + */ + explicit List(); + + /** + * Constructs a list with some initial values. + * Example: + * List a({2, 3, 4}); + */ + List(std::initializer_list initial_values); + explicit List(ArrayRef initial_values); + + /** + * Create a generic list with runtime type information. + * This only works for c10::impl::GenericList and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + explicit List(TypePtr elementType); + + List(const List&) = default; + List& operator=(const List&) = default; + + /** + * Create a new List pointing to a deep copy of the same data. + * The List returned is a new list with separate storage. + * Changes in it are not reflected in the original list or vice versa. + */ + List copy() const; + + /** + * Returns the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + */ + value_type get(size_type pos) const; + + /** + * Moves out the element at the specified location pos and returns it, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * The list contains an invalid element at position pos afterwards. Any operations + * on it before re-setting it are invalid. + */ + value_type extract(size_type pos) const; + + /** + * Returns a reference to the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * + * You cannot store the reference, but you can read it and assign new values to it: + * + * List list = ...; + * list[2] = 5; + * int64_t v = list[1]; + */ + internal_const_reference_type operator[](size_type pos) const; + + internal_reference_type operator[](size_type pos); + + /** + * Assigns a new value to the element at location pos. 
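+ * + * Example (illustrative sketch): + * + * > List<int64_t> list({1, 2, 3}); + * > list.set(1, 20); + * > ASSERT(20 == list.get(1));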
+ */ + void set(size_type pos, const value_type& value) const; + + /** + * Assigns a new value to the element at location pos. + */ + void set(size_type pos, value_type&& value) const; + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + iterator begin() const; + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + iterator end() const; + + /** + * Checks if the container has no elements. + */ + bool empty() const; + + /** + * Returns the number of elements in the container + */ + size_type size() const; + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + void reserve(size_type new_cap) const; + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. + */ + void clear() const; + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + iterator insert(iterator pos, const T& value) const; + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + iterator insert(iterator pos, T&& value) const; + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + template + iterator emplace(iterator pos, Args&&... value) const; + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void push_back(const T& value) const; + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void push_back(T&& value) const; + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void append(List lst) const; + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + template + void emplace_back(Args&&... args) const; + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + iterator erase(iterator pos) const; + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. 
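+ * + * Example (illustrative sketch): + * + * > List<int64_t> list({1, 2, 3, 4}); + * > list.erase(list.begin(), list.begin() + 2); // list now holds {3, 4}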
+ */ + iterator erase(iterator first, iterator last) const; + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void pop_back() const; + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional default-inserted elements are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void resize(size_type count) const; + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional copies of value are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void resize(size_type count, const T& value) const; + + /** + * Value equality comparison. This function implements Python-like semantics for + * equality: two lists with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + template + friend bool operator==(const List& lhs, const List& rhs); + + template + friend bool operator!=(const List& lhs, const List& rhs); + + /** + * Identity comparison. Returns true if and only if `rhs` represents the same + * List object as `this`. + */ + bool is(const List& rhs) const; + + std::vector vec() const; + + /** + * Returns the number of Lists currently pointing to this same list. + * If this is the only instance pointing to this list, returns 1. + */ + // TODO Test use_count + size_t use_count() const; + + TypePtr elementType() const; + + // See [unsafe set type] for why this exists. + void unsafeSetElementType(TypePtr t); + +private: + explicit List(c10::intrusive_ptr&& elements); + explicit List(const c10::intrusive_ptr& elements); + friend struct IValue; + template friend List impl::toTypedList(List); + template friend List impl::toList(List&&); + template friend List impl::toList(const List&); + friend const IValue* impl::ptr_to_first_element(const List& list); +}; + +namespace impl { +// GenericList is how IValue stores lists. It is, however, not part of the +// public API. Kernels should use Lists with concrete types instead +// (maybe except for some internal prim ops). +using GenericList = List; + +inline const IValue* ptr_to_first_element(const GenericList& list) { + return &list.impl_->list[0]; +} + +} +} + +namespace torch { + template using List = c10::List; +} + +#include // IWYU pragma: keep diff --git a/voice_bridge/torch/include/ATen/core/List_inl.h b/voice_bridge/torch/include/ATen/core/List_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..e8acb89bf3cb586b2807de1ea9a6ec63b2687bd6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/List_inl.h @@ -0,0 +1,352 @@ +#pragma once + +#include +#include + +namespace c10 { + +template decltype(auto) getTypePtr(); +std::string toString(const Type& type); + +template +List::List(c10::intrusive_ptr&& elements) +: impl_(std::move(elements)) {} + +template +List::List(const c10::intrusive_ptr& elements) +: impl_(elements) {} + +template +List::List() +: List(make_intrusive( + typename c10::detail::ListImpl::list_type(), + getTypePtr())) { + static_assert(!std::is_same::value, "This constructor is not valid for List. 
Please use c10::impl::GenericList(elementType) instead."); +} + +template +List::List(ArrayRef values) +: List(make_intrusive( + typename c10::detail::ListImpl::list_type(), + getTypePtr())) { + static_assert(!std::is_same::value, "This constructor is not valid for List. Please use c10::impl::GenericList(elementType)."); + impl_->list.reserve(values.size()); + for (const T& element : values) { + impl_->list.push_back(element); + } +} + +template +List::List(std::initializer_list initial_values) +: List(ArrayRef(initial_values)) { + static_assert(!std::is_same::value, "This constructor is not valid for List. Please use c10::impl::GenericList(elementType)."); +} + +template +List::List(TypePtr elementType) +: List(make_intrusive( + typename c10::detail::ListImpl::list_type(), + std::move(elementType))) { + static_assert(std::is_same::value || std::is_same>::value, + "This constructor is only valid for c10::impl::GenericList or List."); +} + +namespace impl { +template +List toTypedList(impl::GenericList list) { + // If there's other instances of the list (i.e. list.use_count() > 1), then we have to be invariant + // because upcasting would allow people to add types into the new list that would break the old list. + // However, if there aren't any other instances of this list (i.e. list.use_count() == 1), then we can + // allow upcasting. This can be a perf improvement since we can cast List to List> + // without having to copy it. This is also used to provide backwards compatibility with some old models + // that serialized the index arguments to aten::index, aten::index_put, aten::index_put_ and aten::index_put_impl_ + // as List before we changed that argument to be List>. When deserializing, we + // have list.use_count() == 1 and can deserialize the List directly as List>. + TORCH_CHECK(*list.impl_->elementType == *getTypePtr() + || (list.use_count() == 1 && list.impl_->elementType->isSubtypeOf(*getTypePtr())) + , "Tried to cast a List<", toString(*list.impl_->elementType), "> to a List<", toString(*getTypePtr()), ">. 
Types mismatch."); + return List(std::move(list.impl_)); +} + +template +impl::GenericList toList(List&& list) { + return GenericList(std::move(list.impl_)); +} +template +impl::GenericList toList(const List& list) { + return GenericList(list.impl_); +} +} + +template +List List::copy() const { + return List(impl_->copy()); +} + +namespace detail { + template + T list_element_to(T element) { + return element; + } + template + T list_element_to(const IValue& element) { + return element.template to(); + } + template + T list_element_to(IValue&& element) { + return std::move(element).template to(); + } + template + struct ListElementFrom { + static IValue from(const T& element) { + return element; + } + static IValue from(T&& element) { + return std::move(element); + } + }; + template<> + struct ListElementFrom { + static const IValue& from(const IValue& element) { + return element; + } + static IValue&& from(IValue&& element) { + return std::move(element); + } + }; +} + +namespace impl { + +template +ListElementReference::operator std::conditional_t< + std::is_reference::type>::value, + const T&, + T>() const { + return iterator_->template to(); +} + +template +ListElementReference& ListElementReference::operator=(T&& new_value) && { + *iterator_ = c10::detail::ListElementFrom::from(std::move(new_value)); + return *this; +} + +template +ListElementReference& ListElementReference::operator=(const T& new_value) && { + *iterator_ = c10::detail::ListElementFrom::from(std::move(new_value)); + return *this; +} + +template +ListElementReference& ListElementReference::operator=(ListElementReference&& rhs) && { + *iterator_ = *rhs.iterator_; + return *this; +} + +template +void swap(ListElementReference&& lhs, ListElementReference&& rhs) { + std::swap(*lhs.iterator_, *rhs.iterator_); +} + +template +bool operator==(const ListElementReference& lhs, const T& rhs) { + T lhs_tmp = lhs; + return lhs_tmp == rhs; +} + +template +inline bool operator==(const T& lhs, const ListElementReference& rhs) { + return rhs == lhs; +} + +template +inline typename ListElementConstReferenceTraits::const_reference +list_element_to_const_ref(const IValue& element) { + return element.template to(); +} + +template<> +inline typename ListElementConstReferenceTraits>::const_reference +list_element_to_const_ref>(const IValue& element) { + return element.toOptionalStringRef(); +} + +} // namespace impl + +template +void List::set(size_type pos, const value_type& value) const { + impl_->list.at(pos) = c10::detail::ListElementFrom::from(value); +} + +template +void List::set(size_type pos, value_type&& value) const { + impl_->list.at(pos) = c10::detail::ListElementFrom::from(std::move(value)); +} + +template +typename List::value_type List::get(size_type pos) const { + return c10::detail::list_element_to(impl_->list.at(pos)); +} + +template +typename List::internal_const_reference_type List::operator[](size_type pos) const { + return c10::impl::list_element_to_const_ref(impl_->list.at(pos)); +} + +template +typename List::internal_reference_type List::operator[](size_type pos) { + static_cast(impl_->list.at(pos)); // Throw the exception if it is out of range. 
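+ // The returned ListElementReference wraps an iterator into the underlying + // IValue vector; assigning through it writes back into the list.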
+ return {impl_->list.begin() + pos}; +} + +template +typename List::value_type List::extract(size_type pos) const { + auto& elem = impl_->list.at(pos); + auto result = c10::detail::list_element_to(std::move(elem)); + // Reset the list element to a T() instead of None to keep it correctly typed + elem = c10::detail::ListElementFrom::from(T{}); + return result; +} + +template +typename List::iterator List::begin() const { + return iterator(impl_->list.begin()); +} + +template +typename List::iterator List::end() const { + return iterator(impl_->list.end()); +} + +template +bool List::empty() const { + return impl_->list.empty(); +} + +template +typename List::size_type List::size() const { + return impl_->list.size(); +} + +template +void List::reserve(size_type new_cap) const { + impl_->list.reserve(new_cap); +} + +template +void List::clear() const { + impl_->list.clear(); +} + +template +typename List::iterator List::insert(iterator pos, const T& value) const { + return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom::from(value)) }; +} + +template +typename List::iterator List::insert(iterator pos, T&& value) const { + return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom::from(std::move(value))) }; +} + +template +template +typename List::iterator List::emplace(iterator pos, Args&&... value) const { + // TODO Use list_element_from? + return iterator { impl_->list.emplace(pos.iterator_, std::forward(value)...) }; +} + +template +void List::push_back(const T& value) const { + impl_->list.push_back(c10::detail::ListElementFrom::from(value)); +} + +template +void List::push_back(T&& value) const { + impl_->list.push_back(c10::detail::ListElementFrom::from(std::move(value))); +} + +template +void List::append(List b) const { + if (b.use_count() == 1) { + impl_->list.insert(impl_->list.end(), make_move_iterator(b.impl_->list.begin()), make_move_iterator(b.impl_->list.end())); + } else { + impl_->list.insert(impl_->list.end(), b.impl_->list.begin(), b.impl_->list.end()); + } +} + +template +template +void List::emplace_back(Args&&... args) const { + // TODO Use list_element_from? + impl_->list.push_back(T(std::forward(args)...)); +} + +template +typename List::iterator List::erase(iterator pos) const { + return iterator { impl_->list.erase(pos.iterator_) }; +} + +template +typename List::iterator List::erase(iterator first, iterator last) const { + return iterator { impl_->list.erase(first.iterator_, last.iterator_) }; +} + +template +void List::pop_back() const { + impl_->list.pop_back(); +} + +template +void List::resize(size_type count) const { + impl_->list.resize(count, T{}); +} + +template +void List::resize(size_type count, const T& value) const { + impl_->list.resize(count, value); +} + +template +bool operator==(const List& lhs, const List& rhs) { + // Lists with the same identity trivially compare equal. + if (lhs.impl_ == rhs.impl_) { + return true; + } + + // Otherwise, just compare values directly. 
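+ // (This delegates to ListImpl's operator==, presumably an element-wise + // IValue comparison.)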
+ return *lhs.impl_ == *rhs.impl_; +} + +template +bool operator!=(const List& lhs, const List& rhs) { + return !(lhs == rhs); +} + +template +bool List::is(const List& rhs) const { + return this->impl_ == rhs.impl_; +} + +template +std::vector List::vec() const { + std::vector result(begin(), end()); + return result; +} + +template +size_t List::use_count() const { + return impl_.use_count(); +} + +template +TypePtr List::elementType() const { + return impl_->elementType; +} + +template +void List::unsafeSetElementType(TypePtr t) { + impl_->elementType = std::move(t); +} +} diff --git a/voice_bridge/torch/include/ATen/core/MT19937RNGEngine.h b/voice_bridge/torch/include/ATen/core/MT19937RNGEngine.h new file mode 100644 index 0000000000000000000000000000000000000000..68b9c0c7e64c469379fd90a5b9a6c0451045c693 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/MT19937RNGEngine.h @@ -0,0 +1,195 @@ +#pragma once + +#include + +// define constants like M_PI and C keywords for MSVC +#ifdef _MSC_VER +#ifndef _USE_MATH_DEFINES +#define _USE_MATH_DEFINES +#endif +#include +#endif + +#include +#include +#include + +namespace at { + +constexpr int MERSENNE_STATE_N = 624; +constexpr int MERSENNE_STATE_M = 397; +constexpr uint32_t MATRIX_A = 0x9908b0df; +constexpr uint32_t UMASK = 0x80000000; +constexpr uint32_t LMASK = 0x7fffffff; + +/** + * Note [Mt19937 Engine implementation] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Originally implemented in: + * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c + * and modified with C++ constructs. Moreover the state array of the engine + * has been modified to hold 32 bit uints instead of 64 bits. + * + * Note that we reimplemented mt19937 instead of using std::mt19937 because, + * at::mt19937 turns out to be faster in the pytorch codebase. PyTorch builds with -O2 + * by default and following are the benchmark numbers (benchmark code can be found at + * https://github.com/syed-ahmed/benchmark-rngs): + * + * with -O2 + * Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s + * Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s + * Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s + * Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s + * + * std::mt19937 is faster when used in conjunction with std::uniform_real_distribution, + * however we can't use std::uniform_real_distribution because of this bug: + * http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used + * std::uniform_real_distribution and filtered out the 1's, it is a different algorithm + * than what's in pytorch currently and that messes up the tests in tests_distributions.py. + * The other option, using std::mt19937 with at::uniform_real_distribution is a tad bit slower + * than at::mt19937 with at::uniform_real_distribution and hence, we went with the latter. + * + * Copyright notice: + * A C-program for MT19937, with initialization improved 2002/2/10. + * Coded by Takuji Nishimura and Makoto Matsumoto. + * This is a faster version by taking Shawn Cokus's optimization, + * Matthe Bellew's simplification, Isaku Wada's real version. + * + * Before using, initialize the state by using init_genrand(seed) + * or init_by_array(init_key, key_length). + * + * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Any feedback is very welcome. + * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html + * email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space) + */ + +/** + * mt19937_data_pod is used to get POD data in and out + * of mt19937_engine. Used in torch.get_rng_state and + * torch.set_rng_state functions. + */ +struct mt19937_data_pod { + uint64_t seed_; + int left_; + bool seeded_; + uint32_t next_; + std::array state_; +}; + +class mt19937_engine { +public: + + inline explicit mt19937_engine(uint64_t seed = 5489) { + init_with_uint32(seed); + } + + inline mt19937_data_pod data() const { + return data_; + } + + inline void set_data(mt19937_data_pod data) { + data_ = data; + } + + inline uint64_t seed() const { + return data_.seed_; + } + + inline bool is_valid() { + if ((data_.seeded_ == true) + && (data_.left_ > 0 && data_.left_ <= MERSENNE_STATE_N) + && (data_.next_ <= MERSENNE_STATE_N)) { + return true; + } + return false; + } + + inline uint32_t operator()() { + uint32_t y; + + if (--(data_.left_) == 0) { + next_state(); + } + y = *(data_.state_.data() + data_.next_++); + y ^= (y >> 11); + y ^= (y << 7) & 0x9d2c5680; + y ^= (y << 15) & 0xefc60000; + y ^= (y >> 18); + + return y; + } + +private: + mt19937_data_pod data_; + + inline void init_with_uint32(uint64_t seed) { + data_.seed_ = seed; + data_.seeded_ = true; + data_.state_[0] = seed & 0xffffffff; + for (const auto j : c10::irange(1, MERSENNE_STATE_N)) { + data_.state_[j] = (1812433253 * (data_.state_[j-1] ^ (data_.state_[j-1] >> 30)) + j); + } + data_.left_ = 1; + data_.next_ = 0; + } + + inline uint32_t mix_bits(uint32_t u, uint32_t v) { + return (u & UMASK) | (v & LMASK); + } + + inline uint32_t twist(uint32_t u, uint32_t v) { + return (mix_bits(u,v) >> 1) ^ (v & 1 ? 
MATRIX_A : 0); + } + + inline void next_state() { + uint32_t* p = data_.state_.data(); + data_.left_ = MERSENNE_STATE_N; + data_.next_ = 0; + + for(int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; p++) { + *p = p[MERSENNE_STATE_M] ^ twist(p[0], p[1]); + } + + for(int j = MERSENNE_STATE_M; --j; p++) { + *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], p[1]); + } + + *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], data_.state_[0]); + } + +}; + +typedef mt19937_engine mt19937; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/NamedTensor.h b/voice_bridge/torch/include/ATen/core/NamedTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..73a0d7d02551b40b37a4632034c9d555e3dac088 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/NamedTensor.h @@ -0,0 +1,140 @@ +#pragma once + +#include +#include +#include + +namespace at { + +class TensorBase; + +// XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen. +// Due to the c10/ATen library split, TensorImpl cannot depend on Dimname, +// so we have a couple of workarounds. +// +// In the long term, we'll move Dimname to c10 and everything in this file +// can be refactored out. The main blocker for that is that "c10::Symbol" +// actually exists outside of c10 and needs to be moved in. + +// TensorImpl has a unique_ptr field. +// XXX: Ideally we would just put optional> into TensorImpl. +// +// This class has an important invariant: there must be at least ONE +// non-wildcard +struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface { + // This enum is to remind people that the invariant on constructors is that + // the list of dimnames must have at least one non-wildcard + enum HAS_NON_WILDCARD { + HasNonWildcard + }; + + explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names) + : names_(names.vec()) { + check_invariants(); + } + explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector&& names) + : names_(std::move(names)) { + check_invariants(); + } + + std::unique_ptr clone() const override { + return std::make_unique(HasNonWildcard, names_); + } + + DimnameList names() const { return names_; } + + // Used for an assertion in TensorImpl.h + int64_t slow_dim() const override { + return names_.size(); + } + + void check_invariants() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); })); + } + + void set_names(HAS_NON_WILDCARD, DimnameList new_names) { + TORCH_INTERNAL_ASSERT(new_names.size() == names_.size()); + std::copy(new_names.begin(), new_names.end(), names_.begin()); + check_invariants(); + } + + void set_names(HAS_NON_WILDCARD, std::vector&& new_names) { + TORCH_INTERNAL_ASSERT(new_names.size() == names_.size()); + names_ = std::move(new_names); + check_invariants(); + } + + // INVARIANT: at least one Dimname is non-WILDCARD + std::vector names_; +}; + +// When NamesMode is disabled, then all operations ignore tensors' names fields. +// Concretely speaking, all tensors are treated as having nullopt names. +struct TORCH_API NamesMode { + static bool is_enabled(); + static void set_enabled(bool enabled); +}; + + +// A RAII, thread local (!) guard that enables or disables names upon +// construction, and sets it back to the original value upon destruction. 
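+// +// Typical usage (illustrative sketch): +// +// { +// at::NoNamesGuard guard; +// // names are ignored for any op run in this scope +// } // the previous NamesMode state is restored by the destructor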
+struct TORCH_API NoNamesGuard { + NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) { + NamesMode::set_enabled(false); + } + ~NoNamesGuard() { + if (initialized) { + reset(); + } + } + void reset() { + TORCH_INTERNAL_ASSERT(initialized); + NamesMode::set_enabled(prev_mode); + } + private: + bool prev_mode; + bool initialized; +}; + +void check_names_valid_for(const TensorBase& tensor, DimnameList names); +void check_names_valid_for(size_t tensor_dim, DimnameList names); + +// Sets the names of `tensor` to be `names`. +TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional names); +TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector&& names, bool validate_names); + +constexpr size_t kMaxNamedTensorDim = 64; + +DimnameList default_names(size_t len); + +namespace impl { + +// Some helper functions on TensorImpl. Useful for working with names in TH. +// XXX: Ideally these would exist as methods on TensorImpl +TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional names, bool validate_names); +TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector&& names, bool validate_names); + +void check_names_valid_for(TensorImpl* impl, DimnameList names); + +// Returns true if the tensor's names exist and are not all 'None'. +// Returns false if the tensor's names don't exist (were not allocated), +// or if all names are 'None'. +// We treat not-allocated-names the same as allocated names that are all 'None'. +TORCH_API bool has_names(const TensorImpl* impl); + +// Returns the names of the tensor's dimensions. +// Unnamed tensors are treated as having 'None' in all dimension; this method +// would return a DimnameList of all 'None's for an unnamed tensor. +TORCH_API DimnameList get_names(const TensorImpl* impl); + +// This is more of an implementation detail; one should use impl::get_names / +// Tensor::names() whenever possible because it provides a cleaner API. +// Returns the names of the tensor if they have been allocated; returns nullopt +// instead if the haven't been. The names of a tensor are not allocated if a +// tensor is constructed with names=None. +TORCH_API c10::optional get_opt_names(const TensorImpl* impl); + +} // namespace impl + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/PhiloxRNGEngine.h b/voice_bridge/torch/include/ATen/core/PhiloxRNGEngine.h new file mode 100644 index 0000000000000000000000000000000000000000..c6536d29e7982751ee4641dd118d210fe0bf9fec --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/PhiloxRNGEngine.h @@ -0,0 +1,237 @@ +#pragma once + +// define constants like M_PI and C keywords for MSVC +#ifdef _MSC_VER +#define _USE_MATH_DEFINES +#include +#endif + +#include + +#ifdef __CUDACC__ +#include +#endif + +#include +#include +#include +#include +#include + +namespace at { + +// typedefs for holding vector data +namespace detail { + +typedef at::detail::Array UINT4; +typedef at::detail::Array UINT2; +typedef at::detail::Array DOUBLE2; +typedef at::detail::Array FLOAT2; + +} // namespace detail + +/** + * Note [Philox Engine implementation] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Originally implemented in PyTorch's fusion compiler + * Refer to: http://www.thesalmons.org/john/random123/papers/random123sc11.pdf + * for details regarding the engine. + * + * Note that currently this implementation of the philox engine is not used + * anywhere except for tests in cpu_generator_test.cpp. 
However, this engine + * will replace curandStatePhilox4_32_10_t in the future. + * + * The philox engine takes a seed value, a subsequence + * for starting the generation and an offset for the subsequence. + * Think of this engine as an algorithm producing a huge array. We are + * parallelizing this array by partitioning the huge array and assigning + * a thread index to each partition. In other words, each seed value + * (there are 2^64 possible seed values) gives a sub-array of size + * 2^128 (each element in that array is a 128-bit number). The reasoning + * behind the array being of size 2^128 is that there are 2^64 possible + * thread index values and there is an array of size 2^64 for each of + * those thread indices. Hence 2^64 * 2^64 = 2^128 for each seed value. + * + * In short, this generator can produce 2^64 (seed values) * 2^128 (number + * of elements in an array given by a seed value) = 2^192 values. + * + * Arguments: + * seed: Seed values could be any number from 0 to 2^64-1. + * subsequence: Subsequence is just the cuda thread indexing with: + * - blockIdx.x * blockDim.x + threadIdx.x + * offset: The offset variable in PhiloxEngine decides how many 128-bit + * random numbers to skip (i.e. how many groups of 4, 32-bit numbers to skip) + * and hence really decides the total number of randoms that can be achieved + * for the given subsequence. + */ + +class philox_engine { +public: + + C10_HOST_DEVICE inline explicit philox_engine(uint64_t seed = 67280421310721, + uint64_t subsequence = 0, + uint64_t offset = 0) { + + reset_state(seed, subsequence); + incr_n(offset); + } + + C10_HOST_DEVICE inline void reset_state(uint64_t seed = 67280421310721, + uint64_t subsequence = 0) { + key_[0] = static_cast<uint32_t>(seed); + key_[1] = static_cast<uint32_t>(seed >> 32); + counter_ = detail::UINT4(0); + counter_[2] = static_cast<uint32_t>(subsequence); + counter_[3] = static_cast<uint32_t>(subsequence >> 32); + STATE = 0; + } + + /** + * Produces a unique 32-bit pseudo random number on every invocation. Bookkeeps state to avoid waste. + */ + C10_HOST_DEVICE inline uint32_t operator()(int32_t n_rounds = 10) { // 10 here to preserve back-compat behavior + if(STATE == 0) { + detail::UINT4 counter = counter_; + detail::UINT2 key = key_; + output_ = rand(counter, key, n_rounds); + incr(); + } + uint32_t ret = output_[STATE]; + STATE = (STATE + 1) & 3; + return ret; + } + + inline float randn(uint32_t n_rounds) { + #ifdef __CUDA_ARCH__ + AT_ASSERT(false, "Unsupported invocation of randn on CUDA"); + #endif + reset_state(); // Reset state for randn - a little wasteful, but easier to ensure correctness. + detail::UINT4 counter = counter_; + detail::UINT2 key = key_; + detail::UINT4 i = rand(counter, key, n_rounds); + detail::FLOAT2 prenorm; + prenorm[0] = 1 - uint32_to_uniform_float(i[0]); // uint32_to_uniform_float returns [0,1), we need (0,1] to avoid passing 0 to log.
+ prenorm[1] = 1 - uint32_to_uniform_float(i[1]); + detail::FLOAT2 ret = normalize_pair_uniform(prenorm); + return ret[0]; + } + + /** + * Function that skips N 128-bit numbers in a subsequence + */ + C10_HOST_DEVICE inline void incr_n(uint64_t n) { + uint32_t nlo = static_cast<uint32_t>(n); + uint32_t nhi = static_cast<uint32_t>(n >> 32); + counter_[0] += nlo; + // if overflow in x has occurred, carry over to nhi + if (counter_[0] < nlo) { + nhi++; + // if overflow in nhi has occurred during carry over, + // propagate that overflow to y and exit to increment z + // otherwise return + counter_[1] += nhi; + if(nhi != 0) { + if (nhi <= counter_[1]) { + return; + } + } + } else { + // if overflow in y has occurred during addition, + // exit to increment z + // otherwise return + counter_[1] += nhi; + if (nhi <= counter_[1]) { + return; + } + } + if (++counter_[2]) + return; + ++counter_[3]; + } + + /** + * Function that skips one 128-bit number in a subsequence + */ + C10_HOST_DEVICE inline void incr() { + if (++counter_[0]) + return; + if (++counter_[1]) + return; + if (++counter_[2]) { + return; + } + ++counter_[3]; + } + +private: + detail::UINT4 counter_; + detail::UINT4 output_; + detail::UINT2 key_; + uint32_t STATE; + + C10_HOST_DEVICE inline uint32_t mulhilo32(uint32_t a, uint32_t b, + uint32_t *result_high) { + #ifdef __CUDA_ARCH__ + *result_high = __umulhi(a, b); + return a*b; + #else + const uint64_t product = static_cast<uint64_t>(a) * b; + *result_high = static_cast<uint32_t>(product >> 32); + return static_cast<uint32_t>(product); + #endif + } + + C10_HOST_DEVICE inline detail::UINT4 single_round(detail::UINT4 ctr, detail::UINT2 in_key) { + uint32_t hi0; + uint32_t hi1; + uint32_t lo0 = mulhilo32(kPhiloxSA, ctr[0], &hi0); + uint32_t lo1 = mulhilo32(kPhiloxSB, ctr[2], &hi1); + detail::UINT4 ret; + ret[0] = hi1 ^ ctr[1] ^ in_key[0]; + ret[1] = lo1; + ret[2] = hi0 ^ ctr[3] ^ in_key[1]; + ret[3] = lo0; + return ret; + } + + C10_HOST_DEVICE constexpr float uint32_to_uniform_float(uint32_t value) { + // maximum value such that `MAX_INT * scale < 1.0` (with float rounding) + constexpr float scale = 4.6566127342e-10; + return static_cast<float>(value & 0x7FFFFFFF) * scale; + } + + C10_HOST_DEVICE inline detail::UINT4 rand(detail::UINT4& counter, detail::UINT2& key, uint32_t n_rounds) { + for (uint32_t round = 0; round < (n_rounds - 1); round++) { + counter = single_round(counter, key); + key[0] += (kPhilox10A); key[1] += (kPhilox10B); + } + return single_round(counter, key); + } + + inline detail::FLOAT2 normalize_pair_uniform(detail::FLOAT2 in) { + // TODO(voz) We use std:: below, and thus need a separate impl for CUDA.
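+ // Box-Muller transform: given independent uniforms u1 in (0,1] and
+ // u2 in [0,1), mag = sqrt(-2*ln(u1)) and angle 2*pi*u2 yield a pair of
+ // independent standard normal samples (mag*cos(angle), mag*sin(angle));
+ // only the first of the pair is returned by randn above.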
+ float u1 = in[0]; + float u2 = in[1]; + + constexpr float two_pi = 2.0 * M_PI; + + float mag = std::sqrt(-2.0 * std::log(u1)); + + detail::FLOAT2 ret; + + ret[0] = mag * std::cos(two_pi * u2); + ret[1] = mag * std::sin(two_pi * u2); + return ret; + } + + + static const uint32_t kPhilox10A = 0x9E3779B9; + static const uint32_t kPhilox10B = 0xBB67AE85; + static const uint32_t kPhiloxSA = 0xD2511F53; + static const uint32_t kPhiloxSB = 0xCD9E8D57; +}; + +typedef philox_engine Philox4_32; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/PythonFallbackKernel.h b/voice_bridge/torch/include/ATen/core/PythonFallbackKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..f38bdd2ada90a99a231df4193a72c8bd44e699d9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/PythonFallbackKernel.h @@ -0,0 +1,28 @@ +#pragma once +#include <c10/core/impl/LocalDispatchKeySet.h> + +namespace at { +namespace impl { + +struct TORCH_API RestorePythonTLSSnapshot { + RestorePythonTLSSnapshot(); + ~RestorePythonTLSSnapshot(); + +private: + c10::impl::LocalDispatchKeySet saved_; + c10::impl::ForceDispatchKeyGuard guard_; +}; + + +// RAII guard to make working with the above TLS safer. +struct TORCH_API MaybeSetTLSOnEntryGuard { +public: + MaybeSetTLSOnEntryGuard(); + ~MaybeSetTLSOnEntryGuard(); + +private: + bool value_set_; +}; + +} // namespace impl +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/QuantizerBase.h b/voice_bridge/torch/include/ATen/core/QuantizerBase.h new file mode 100644 index 0000000000000000000000000000000000000000..922ea8a38f50d02749a336f7ef8211b2da502066 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/QuantizerBase.h @@ -0,0 +1,83 @@ +#pragma once + +#include <c10/core/QScheme.h> +#include <c10/core/ScalarType.h> +#include <c10/util/intrusive_ptr.h> + +namespace at { + +class Tensor; +struct QTensorImpl; +struct Quantizer; +using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&; +using QuantizerPtr = c10::intrusive_ptr<Quantizer>; + +/** + * Quantizer is the class for storing all the information + * that's necessary to perform quantize and dequantize + * operation. + * + * We might have different types of quantization schemes and this is + * the base class for all quantizers. + * + * QTensorImpl will hold a pointer to Quantizer so that we can support + * different quantization schemes on Tensor. + * + * For example, the most common quantization scheme, Affine Quantization, + * requires scale and zero_point as parameters; we store scale and zero_point + * inside the instance and use them to quantize a float Tensor or + * dequantize a quantized Tensor. + * + * When you add new types of leaf Quantizer class, please also + * make sure to add a corresponding QScheme enum since + * they should have a one-to-one mapping. + * + * Note about intrusive_ptr: + * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensors can + * share the same Quantizer. Quantizer should be immutable. + */ +struct TORCH_API Quantizer : public c10::intrusive_ptr_target { + const ScalarType scalar_type_; + explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {} + virtual ~Quantizer(); + + // Copied from torch/csrc/jit/ir/scope.h + QuantizerPtr intrusive_from_this() { + c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer + // from a raw `this` pointer + // so we need to bump the refcount + // to account for this ownership + return c10::intrusive_ptr<Quantizer>::reclaim(this); + } + + /** + * Each concrete Quantizer type should have a unique QScheme type.
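+ * (For example, an affine per-tensor quantizer should report
+ * c10::kPerTensorAffine as its QScheme.)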
+ */ + virtual QScheme qscheme() const = 0; + + ScalarType scalar_type() const { + return scalar_type_; + } + + /** + * quantize a float Tensor into a quantized Tensor. + */ + virtual Tensor quantize(const Tensor& t) = 0; + + /** + * dequantize a quantized Tensor into a float Tensor. + */ + virtual Tensor dequantize(const Tensor& t) = 0; + + /** + * dequantize a quantized Tensor into a float Tensor, out= variant + */ + virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0; + + /** + * Compare against `other` for equality. + */ + virtual bool equalTo(QuantizerPtr other) const = 0; +}; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/Range.h b/voice_bridge/torch/include/ATen/core/Range.h new file mode 100644 index 0000000000000000000000000000000000000000..2bf6b2b73ac4d4c178ac0388e9b45e262e506b86 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Range.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +namespace at { + +struct Range { + Range(int64_t begin, int64_t end) + : begin(begin) + , end(end) {} + + int64_t size() const { return end - begin; } + + Range operator/(int64_t divisor) { + return Range(begin / divisor, end / divisor); + } + + int64_t begin; + int64_t end; +}; + +std::ostream& operator<<(std::ostream& out, const Range& range); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/Reduction.h b/voice_bridge/torch/include/ATen/core/Reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..23c6ea3cabefb2ee72f4aea61af4b10f4e769975 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Reduction.h @@ -0,0 +1,16 @@ +#pragma once + +namespace at { +namespace Reduction { + +// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py +// These constants control the reduction behavior of loss functions. 
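+// For example, a loss implementation typically dispatches on this value.
+// Illustrative sketch (`apply_reduction` is a hypothetical helper, not part
+// of this header):
+//
+//   at::Tensor apply_reduction(const at::Tensor& losses, int64_t reduction) {
+//     switch (reduction) {
+//       case at::Reduction::None: return losses;
+//       case at::Reduction::Mean: return losses.mean();
+//       case at::Reduction::Sum:  return losses.sum();
+//       default: TORCH_CHECK(false, "unsupported reduction: ", reduction);
+//     }
+//   }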
+// Ideally, this would be a scoped enum, but jit doesn't support that +enum Reduction { + None, // Do not reduce + Mean, // (Possibly weighted) mean of losses + Sum, // Sum losses + END +}; +} // namespace Reduction +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/Scalar.h b/voice_bridge/torch/include/ATen/core/Scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..a14b48f0120cbfc23c45db14dd363b0c88c59a2c --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Scalar.h @@ -0,0 +1 @@ +#include diff --git a/voice_bridge/torch/include/ATen/core/ScalarType.h b/voice_bridge/torch/include/ATen/core/ScalarType.h new file mode 100644 index 0000000000000000000000000000000000000000..eb30ee86f737a3544e727c42f72147fdb64a5e3b --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ScalarType.h @@ -0,0 +1 @@ +#include diff --git a/voice_bridge/torch/include/ATen/core/Tensor.h b/voice_bridge/torch/include/ATen/core/Tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..5293928a827df7afcb9c3ff6c974a244bdb63bd8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/Tensor.h @@ -0,0 +1,74 @@ +#pragma once + +#include +#include + +namespace at { +class TORCH_API OptionalTensorRef { + public: + OptionalTensorRef() = default; + + ~OptionalTensorRef() { + ref_.unsafeReleaseTensorImpl(); + } + + OptionalTensorRef(const TensorBase& src) + : ref_(Tensor::unsafe_borrow_t{}, src) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(src.defined()); + } + + OptionalTensorRef(const OptionalTensorRef& rhs) + : ref_(Tensor::unsafe_borrow_t{}, rhs.ref_) {} + + OptionalTensorRef& operator=(OptionalTensorRef rhs) { + std::swap(ref_, rhs.ref_); + return *this; + } + + bool has_value() const { + return ref_.defined(); + } + + const Tensor& getTensorRef() const & { + return ref_; + } + + const Tensor& operator*() const & { + return ref_; + } + + const Tensor* operator->() const & { + return &ref_; + } + + operator bool() const { + return ref_.defined(); + } + + private: + Tensor ref_; +}; + +template +auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t { + // Return the grad argument in case of a hook with void return type to have an + // std::function with Tensor return type + static_assert(std::is_same::value, + "Expected hook to return void"); + return _register_hook([fn=std::forward(hook)](const TensorBase& grad_base) { + OptionalTensorRef grad(grad_base); + fn(*grad); + return Tensor(); + }); +} + +template +auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t { + return _register_hook([fn=std::forward(hook)](const TensorBase& grad_base) { + OptionalTensorRef grad(grad_base); + Tensor ret = fn(*grad); + return TensorBase(std::move(ret)); + }); +} + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/TensorAccessor.h b/voice_bridge/torch/include/ATen/core/TensorAccessor.h new file mode 100644 index 0000000000000000000000000000000000000000..fea6c09f262fe945d762fb700812db83a782f97b --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/TensorAccessor.h @@ -0,0 +1,272 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +// The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor +// is used to enable the __restrict__ keyword/modifier for the data +// passed to cuda. 
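+// For example, on the host (illustrative sketch; `t` is assumed to be a
+// 2-d float CPU tensor):
+//
+//   at::Tensor t = at::zeros({3, 4});
+//   auto a = t.accessor<float, 2>();   // TensorAccessor<float, 2>
+//   for (int64_t i = 0; i < a.size(0); i++) {
+//     for (int64_t j = 0; j < a.size(1); j++) {
+//       a[i][j] = 1.0f;                // strided element access, no dispatch
+//     }
+//   }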
+template +struct DefaultPtrTraits { + typedef T* PtrType; +}; + +#if defined(__CUDACC__) || defined(__HIPCC__) +template +struct RestrictPtrTraits { + typedef T* __restrict__ PtrType; +}; +#endif + +// TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors. +// For CUDA tensors it is used in device code (only). This means that we restrict ourselves +// to functions and types available there (e.g. IntArrayRef isn't). + +// The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers. +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class TensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST_DEVICE TensorAccessorBase( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : data_(data_), sizes_(sizes_), strides_(strides_) {} + C10_HOST IntArrayRef sizes() const { + return IntArrayRef(sizes_,N); + } + C10_HOST IntArrayRef strides() const { + return IntArrayRef(strides_,N); + } + C10_HOST_DEVICE index_t stride(index_t i) const { + return strides_[i]; + } + C10_HOST_DEVICE index_t size(index_t i) const { + return sizes_[i]; + } + C10_HOST_DEVICE PtrType data() { + return data_; + } + C10_HOST_DEVICE const PtrType data() const { + return data_; + } +protected: + PtrType data_; + const index_t* sizes_; + const index_t* strides_; +}; + +// The `TensorAccessor` is typically instantiated for CPU `Tensor`s using +// `Tensor.accessor()`. +// For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only +// indexing on the device uses `TensorAccessor`s. +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class TensorAccessor : public TensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST_DEVICE TensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : TensorAccessorBase(data_,sizes_,strides_) {} + + C10_HOST_DEVICE TensorAccessor operator[](index_t i) { + return TensorAccessor(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1); + } + + C10_HOST_DEVICE const TensorAccessor operator[](index_t i) const { + return TensorAccessor(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1); + } +}; + +template class PtrTraits, typename index_t> +class TensorAccessor : public TensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST_DEVICE TensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : TensorAccessorBase(data_,sizes_,strides_) {} + C10_HOST_DEVICE T & operator[](index_t i) { + // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) + return this->data_[this->strides_[0]*i]; + } + C10_HOST_DEVICE const T & operator[](index_t i) const { + return this->data_[this->strides_[0]*i]; + } +}; + + +// GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used on for CUDA `Tensor`s on the host +// and as +// In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host) +// in order to transfer them on the device when calling kernels. +// On the device, indexing of multidimensional tensors gives to `TensorAccessor`s. +// Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__. +// Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available +// on the device, so those functions are host only. 
+template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class GenericPackedTensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + C10_HOST GenericPackedTensorAccessorBase( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : data_(data_) { + std::copy(sizes_, sizes_ + N, std::begin(this->sizes_)); + std::copy(strides_, strides_ + N, std::begin(this->strides_)); + } + + // if index_t is not int64_t, we want to have an int64_t constructor + template ::value>::type> + C10_HOST GenericPackedTensorAccessorBase( + PtrType data_, + const source_index_t* sizes_, + const source_index_t* strides_) + : data_(data_) { + for (const auto i : c10::irange(N)) { + this->sizes_[i] = sizes_[i]; + this->strides_[i] = strides_[i]; + } + } + + C10_HOST_DEVICE index_t stride(index_t i) const { + return strides_[i]; + } + C10_HOST_DEVICE index_t size(index_t i) const { + return sizes_[i]; + } + C10_HOST_DEVICE PtrType data() { + return data_; + } + C10_HOST_DEVICE const PtrType data() const { + return data_; + } +protected: + PtrType data_; + index_t sizes_[N]; + index_t strides_[N]; + C10_HOST void bounds_check_(index_t i) const { + TORCH_CHECK_INDEX( + 0 <= i && i < index_t{N}, + "Index ", + i, + " is not within bounds of a tensor of dimension ", + N); + } +}; + +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + // if index_t is not int64_t, we want to have an int64_t constructor + template ::value>::type> + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const source_index_t* sizes_, + const source_index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + C10_DEVICE TensorAccessor operator[](index_t i) { + index_t* new_sizes = this->sizes_ + 1; + index_t* new_strides = this->strides_ + 1; + return TensorAccessor(this->data_ + this->strides_[0]*i, new_sizes, new_strides); + } + + C10_DEVICE const TensorAccessor operator[](index_t i) const { + const index_t* new_sizes = this->sizes_ + 1; + const index_t* new_strides = this->strides_ + 1; + return TensorAccessor(this->data_ + this->strides_[0]*i, new_sizes, new_strides); + } + + /// Returns a PackedTensorAccessor of the same dimension after transposing the + /// two dimensions given. Does not actually move elements; transposition is + /// made by permuting the size/stride arrays. If the dimensions are not valid, + /// asserts. 
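+ /// Example (illustrative; `t` is assumed to be a 2-d CUDA tensor):
+ /// @code
+ /// auto acc  = t.packed_accessor32<float, 2>(); // logical shape [R][C]
+ /// auto accT = acc.transpose(0, 1);             // logical shape [C][R]
+ /// // In device code, accT[c][r] refers to the same element as acc[r][c].
+ /// @endcode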
+ C10_HOST GenericPackedTensorAccessor transpose( + index_t dim1, + index_t dim2) const { + this->bounds_check_(dim1); + this->bounds_check_(dim2); + GenericPackedTensorAccessor result( + this->data_, this->sizes_, this->strides_); + std::swap(result.strides_[dim1], result.strides_[dim2]); + std::swap(result.sizes_[dim1], result.sizes_[dim2]); + return result; + } +}; + +template class PtrTraits, typename index_t> +class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + // if index_t is not int64_t, we want to have an int64_t constructor + template ::value>::type> + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const source_index_t* sizes_, + const source_index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + C10_DEVICE T & operator[](index_t i) { + return this->data_[this->strides_[0] * i]; + } + C10_DEVICE const T& operator[](index_t i) const { + return this->data_[this->strides_[0]*i]; + } + + // Same as in the general N-dimensional case, but note that in the + // 1-dimensional case the returned PackedTensorAccessor will always be an + // identical copy of the original + C10_HOST GenericPackedTensorAccessor transpose( + index_t dim1, + index_t dim2) const { + this->bounds_check_(dim1); + this->bounds_check_(dim2); + return GenericPackedTensorAccessor( + this->data_, this->sizes_, this->strides_); + } +}; + + +// Can't put this directly into the macro function args because of commas +#define AT_X GenericPackedTensorAccessor + +// Old name for `GenericPackedTensorAccessor` +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X) + +#undef AT_X + +template class PtrTraits = DefaultPtrTraits> +using PackedTensorAccessor32 = GenericPackedTensorAccessor; + +template class PtrTraits = DefaultPtrTraits> +using PackedTensorAccessor64 = GenericPackedTensorAccessor; +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/TensorBase.h b/voice_bridge/torch/include/ATen/core/TensorBase.h new file mode 100644 index 0000000000000000000000000000000000000000..8ec5670664af2651725ce801f6f7b18a0b885e6e --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/TensorBase.h @@ -0,0 +1,945 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { +class Scalar; +} + +namespace torch { namespace autograd { + +struct Node; + +}} // namespace torch::autograd + +namespace at { + +class Tensor; +class TensorBase; + +// Convert Tensor to TensorBase without any need to include Tensor.h +TORCH_API const TensorBase& get_tensor_base(const Tensor& t); + +namespace impl { +inline bool variable_excluded_from_dispatch() { +#ifdef C10_MOBILE + // Please read the comment in `VariableFallbackKernel.cpp` about the background of this change. + return true; +#else + return c10::impl::tls_local_dispatch_key_set().excluded_.isSupersetOf(c10::autograd_dispatch_keyset); +#endif +} + +} + +// NOTE: [Tensor vs. TensorBase] +// +// Tensor, being the central data structure in PyTorch, gets used and +// it's header included almost everywhere. 
Unfortunately this means +// every time an operator signature is updated or changed in +// native_functions.yaml, you (and every other PyTorch developer) need +// to recompile all of ATen and it's dependencies. +// +// TensorBase aims to break up these header dependencies, and improve +// incremental build times for all PyTorch developers. TensorBase +// represents a reference counted handle to TensorImpl, exactly the +// same as Tensor. However, TensorBase doesn't have code generated +// methods in it's API and thus no dependence on native_functions.yaml. +// +// Usage tips +// ---------- +// - You can `#define TORCH_ASSERT_NO_OPERATORS` at the top of a .cpp +// or .cu file to ensure it has no header dependencies on +// native_functions.yaml (direct or indirect). +// - Tensor inherits from TensorBase, so functions taking +// `const TensorBase &` are callable with Tensor as well. +// - TensorBase can be converted to tensor with `Tensor(tensor_base)`, +// but this requires a reference-count bump. OptionalTensorRef on +// the other hand can materialize a `const Tensor &` without +// touching the reference-count. +class TORCH_API TensorBase { + public: + struct unsafe_borrow_t { explicit unsafe_borrow_t() = default; }; + + protected: + // Create a Tensor with a +0 reference count. Special care must be + // taken to avoid decrementing this reference count at destruction + // time. Intended to support MaybeOwnedTraits. + explicit TensorBase(unsafe_borrow_t, const TensorBase& rhs) + : impl_(c10::intrusive_ptr::reclaim(rhs.impl_.get())) {} + friend MaybeOwnedTraits; + + public: + TensorBase() = default; + // This constructor should not be used by end users and is an implementation + // detail invoked by autogenerated code. + explicit TensorBase( + c10::intrusive_ptr tensor_impl) + : impl_(std::move(tensor_impl)) { + if (impl_.get() == nullptr) { + throw std::runtime_error("TensorImpl with nullptr is not supported"); + } + } + TensorBase(const TensorBase&) = default; + TensorBase(TensorBase&&) = default; + + public: + // Creates a new wrapper from TensorImpl. Intentionally a free method because + // it should be used with care. Checks necessary invariants + static TensorBase wrap_tensor_impl( + c10::intrusive_ptr tensor_impl) { + TensorBase r(std::move(tensor_impl)); + r.enforce_invariants(); + return r; + } + + int64_t dim() const { + return impl_->dim(); + } + int64_t storage_offset() const { + return impl_->storage_offset(); + } + + TensorBase contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const { + if (is_contiguous(memory_format)) { + return *this; + } else { + return __dispatch_contiguous(memory_format); + } + } + + /// Should be used if *this can reasonably be expected to be contiguous and + /// performance is important. + /// Compared to contiguous, it saves a reference count + /// increment/decrement if *this is already contiguous, at the cost + /// in all cases of an extra pointer of stack usage, an extra branch + /// to access, and an extra branch at destruction time. + c10::MaybeOwned expect_contiguous( + MemoryFormat memory_format=MemoryFormat::Contiguous) const &; + + // Use .contiguous() instead. Trying to borrow from a prvalue + // will only lead to trouble and dangling references. 
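+ // A typical lvalue use (sketch): keep the MaybeOwned alive for as long as
+ // the contiguous view is needed.
+ //
+ //   c10::MaybeOwned<TensorBase> c = t.expect_contiguous();
+ //   const TensorBase& view = *c;  // borrowed if `t` was already contiguous,
+ //                                 // owned (fresh contiguous copy) otherwise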
+ c10::MaybeOwned expect_contiguous( + MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete; + + const TensorBase& fill_(const c10::Scalar& scalar) const; + const TensorBase& zero_() const; + + TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + + bool is_complex() const { + return at::isComplexType(this->scalar_type()); + } + + bool is_floating_point() const { + return at::isFloatingType(this->scalar_type()); + } + + bool is_signed() const { + return at::isSignedType(this->scalar_type()); + } + + c10::SymInt sym_size(int64_t dim) const { + return impl_->sym_size(dim); + } + + c10::SymInt sym_stride(int64_t dim) const { + const auto sizes = this->sym_strides(); + const auto ndim = static_cast(sizes.size()); + // false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping) + return sizes[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)]; + + } + + int64_t size(int64_t dim) const { + return impl_->size(dim); + } + + int64_t stride(int64_t dim) const { + const auto strides = this->strides(); + const auto ndim = static_cast(strides.size()); + // false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping) + return strides[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)]; + } + + TensorImpl * unsafeGetTensorImpl() const { + return impl_.get(); + } + TensorImpl * unsafeReleaseTensorImpl() { + return impl_.release(); + } + const c10::intrusive_ptr& getIntrusivePtr() const { + return impl_; + } + + c10::intrusive_ptr unsafeReleaseIntrusivePtr() { + return std::move(impl_); + } + + bool defined() const { + return impl_; + } + + void reset() { + impl_.reset(); + } + + TensorBase& operator=(const TensorBase& x) & { + impl_ = x.impl_; + return *this; + }; + TensorBase& operator=(TensorBase&& x) & { + impl_ = std::move(x.impl_); + return *this; + } + + // Ban assignment to rvalues, since at::Tensor (weirdly) performs a deep copy here + TensorBase& operator=(const TensorBase&) && = delete; + TensorBase& operator=(TensorBase&&) && = delete; + + bool is_same(const TensorBase& other) const noexcept { + return impl_ == other.impl_; + } + size_t use_count() const noexcept { + return impl_.use_count(); + } + size_t weak_use_count() const noexcept { + return impl_.weak_use_count(); + } + + std::string toString() const; + + IntArrayRef sizes() const { + return impl_->sizes(); + } + c10::SymIntArrayRef sym_sizes() const { + return impl_->sym_sizes(); + } + c10::SymIntArrayRef sym_strides() const { + return impl_->sym_strides(); + } + IntArrayRef strides() const { + return impl_->strides(); + } + // See impl::get_opt_names in ATen/NamedTensor.h for docs. + c10::optional opt_names() const { + return impl::get_opt_names(unsafeGetTensorImpl()); + } + // See impl::get_names in ATen/NamedTensor.h for docs. + DimnameList names() const { + return impl::get_names(unsafeGetTensorImpl()); + } + int64_t ndimension() const { + return dim(); + } + + bool is_contiguous(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const { + return impl_->is_contiguous(memory_format); + } + + bool is_non_overlapping_and_dense() const { + return impl_->is_non_overlapping_and_dense(); + } + + at::MemoryFormat suggest_memory_format( + bool channels_last_strides_exact_match = false) const { + // Setting channels_last_strides_exact_match to true forces function to + // check 0,1 - sized dimension strides. 
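+ // For example, a 4-d strided tensor whose strides match
+ // get_channels_last_strides_2d(sizes()) is reported as
+ // MemoryFormat::ChannelsLast; non-strided layouts and everything else
+ // fall back to MemoryFormat::Contiguous.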
+ if (layout() == at::kStrided) { + if (impl_->is_strides_like_channels_last()) { + if (!channels_last_strides_exact_match || + get_channels_last_strides_2d(sizes()) == strides()) { + return at::MemoryFormat::ChannelsLast; + } + } + else if (impl_->is_strides_like_channels_last_3d()) { + if (!channels_last_strides_exact_match || + get_channels_last_strides_3d(sizes()) == strides()) { + return at::MemoryFormat::ChannelsLast3d; + } + } + } + return at::MemoryFormat::Contiguous; + } + + // Total bytes consumed by the "view" of elements of the array. Does not + // include size of metadata. The number reported here does not necessarily + // correspond to the true physical memory consumed by a tensor; instead, + // it reports the memory the tensor would take *if* it were contiguous. + // Defined to be numel() * itemsize() + size_t nbytes() const { + TORCH_CHECK(layout () != at::kSparse, + "nbytes is not defined for sparse tensors. If you want the size of the constituent " \ + "tensors, add the nbytes of the indices and values. If you want the size of the " \ + "equivalent dense tensor, multiply numel() by element_size()"); + return impl_->numel() * impl_->itemsize(); + } + + c10::SymInt sym_nbytes() const { + TORCH_CHECK(layout () != at::kSparse, + "nbytes is not defined for sparse tensors. If you want the size of the constituent " \ + "tensors, add the nbytes of the indices and values. If you want the size of the " \ + "equivalent dense tensor, multiply numel() by element_size()"); + return impl_->sym_numel() * impl_->itemsize(); + } + + int64_t numel() const { + return impl_->numel(); + } + + c10::SymInt sym_numel() const { + return impl_->sym_numel(); + } + + c10::SymInt sym_storage_offset() const { + return impl_->sym_storage_offset(); + } + + // Length of one array element in bytes. This is the traditional + // Numpy naming. + size_t itemsize() const { + return impl_->itemsize(); + } + + // Same as itemsize(). This is the PyTorch naming. + int64_t element_size() const { + return static_cast(impl_->itemsize()); + } + + DispatchKeySet key_set() const { + return impl_->key_set(); + } + ScalarType scalar_type() const { + return typeMetaToScalarType(impl_->dtype()); + } + bool has_storage() const { + return defined() && impl_->has_storage(); + } + const Storage& storage() const { + return impl_->storage(); + } + bool is_alias_of(const at::TensorBase& other) const{ + return impl_->storage().is_alias_of(other.storage()); + } + + inline bool _is_zerotensor() const { + return impl_->_is_zerotensor(); + } + + inline void _set_zero(bool zero) const { + impl_->_set_zero(zero); + } + + inline bool is_conj() const { + return impl_->is_conj(); + } + + // sets the conjugate bit of a tensor. + // NOTE: Conjugate bit is supposed to be a read-only field. Only change this, if you are sure + // that's what you want. Changing this might lead to incorrect behavior since conjugation is + // a lazy operation and we rely on this bit to determine if a conjugation needs to be materialized. + inline void _set_conj(bool conjugate) const { + impl_->_set_conj(conjugate); + } + + inline bool is_neg() const { + return impl_->is_neg(); + } + + // sets the negative bit of a tensor. + // NOTE: Negative bit is supposed to be a read-only field. Only change this, if you are sure + // that's what you want. Changing this might lead to incorrect behavior since we rely on this + // bit to determine if a negation needs to be materialized. 
+ inline void _set_neg(bool negative) const { + impl_->_set_neg(negative); + } + + /// Returns a `Tensor`'s layout. + Layout layout() const { + return impl_->layout(); + } + + /// Returns a `Tensor`'s dtype (`TypeMeta`). + caffe2::TypeMeta dtype() const { + return impl_->dtype(); + } + + /// Returns a `Tensor`'s device. + inline Device device() const { + return impl_->device(); + } + + /// Returns a `Tensor`'s device index. + int64_t get_device() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->get_device(); + } + + /// Returns if a `Tensor` has CPU backend. + bool is_cpu() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_cpu(); + } + + /// Returns if a `Tensor` has CUDA backend. + bool is_cuda() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_cuda(); + } + + /// Returns if a `Tensor` has IPU backend. + bool is_ipu() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_ipu(); + } + + /// Returns if a `Tensor` has XPU backend. + bool is_xpu() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_xpu(); + } + + /// Returns if a `Tensor` has XLA backend. + bool is_xla() const { + return impl_->is_xla(); + } + + /// Returns if a `Tensor` has HPU backend. + bool is_hpu() const { + return impl_->is_hpu(); + } + + /// Returns if a `Tensor` has Lazy backend. + bool is_lazy() const { + return impl_->is_lazy(); + } + + /// Returns if a `Tensor` has HIP backend. + bool is_hip() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_hip(); + } + + /// Returns if a `Tensor` has VE backend. + bool is_ve() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_ve(); + } + + /// Returns if a `Tensor` has sparse backend. + bool is_sparse() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_sparse(); + } + + /// Returns is a `Tensor` has a sparse CSR backend. + bool is_sparse_csr() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_sparse_csr(); + } + + /// Returns if a `Tensor` is mkldnn tensor. + bool is_mkldnn() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_mkldnn(); + } + + /// Returns if a `Tensor` is mps tensor. + bool is_mps() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_mps(); + } + + /// Returns if a `Tensor` is ort tensor. + bool is_ort() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_ort(); + } + + /// Returns if a `Tensor` is vulkan tensor. + bool is_vulkan() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_vulkan(); + } + + /// Returns if a `Tensor` is metal tensor. + bool is_metal() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_metal(); + } + + /// Returns if a `Tensor` has quantized backend. + bool is_quantized() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_quantized(); + } + + /// Returns if a `Tensor` is a meta tensor. Meta tensors can + /// also have other designations. + bool is_meta() const { + return impl_->is_meta(); + } + + /// Returns if a `Tensor` is an inference tensor. 
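+ /// (An inference tensor is one created while c10::InferenceMode was
+ /// enabled; it has no version counter and cannot participate in autograd.)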
+ bool is_inference() const { + return impl_->is_inference(); + } + + // Returns if a `Tensor` is a NestedTensor. + bool is_nested() const { + return impl_->is_nested(); + } + + /// If a tensor is a quantized tensor, returns its quantizer + /// TODO: it's not in native_functions.yaml yet as it's not exposed to python + QuantizerPtr quantizer() const; + + /// Returns if a `Tensor` has any dimension names + bool has_names() const { + // If a user is using unnamed tensors, then we can short-circuit right here. + // Otherwise, impl::has_names attempts to retrieve names. + if (!impl_->has_named_tensor_meta()) { + return false; + } + return impl::has_names(unsafeGetTensorImpl()); + } + + /// Returns a `Tensor`'s dimension names data structure + const NamedTensorMeta* get_named_tensor_meta() const { + return static_cast(impl_->named_tensor_meta()); + } + + NamedTensorMeta* get_named_tensor_meta() { + return static_cast(impl_->named_tensor_meta()); + } + + /// Returns the `TensorOptions` corresponding to this `Tensor`. Defined in + /// TensorOptions.h. + TensorOptions options() const { + return TensorOptions().dtype(dtype()) + .device(device()) + .layout(layout()); + } + + void* data_ptr() const { + return this->unsafeGetTensorImpl()->data(); + } + + template + T * data_ptr() const; + + // Purposely not defined here to avoid inlining + void print() const; + + // Return a `TensorAccessor` for CPU `Tensor`s. You have to specify scalar type and + // dimension. + template + TensorAccessor accessor() const& { + static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr()"); + TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim()); + return TensorAccessor(data_ptr(),sizes().data(),strides().data()); + } + template + TensorAccessor accessor() && = delete; + + // Return a `GenericPackedTensorAccessor` for CUDA `Tensor`s. You have to specify scalar type and + // dimension. You can optionally specify RestrictPtrTraits as a template parameter to + // cast the data pointer to a __restrict__ pointer. + // In order to use this, your CUDA kernel has to take a corresponding GenericPackedTensorAccessor + // as an argument. 
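+ // For example (illustrative sketch; `scale_kernel` is hypothetical):
+ //
+ //   __global__ void scale_kernel(at::PackedTensorAccessor32<float, 2> a,
+ //                                float s) {
+ //     int r = blockIdx.x, c = threadIdx.x;
+ //     if (r < a.size(0) && c < a.size(1)) {
+ //       a[r][c] *= s;
+ //     }
+ //   }
+ //   // host side: scale_kernel<<<rows, cols>>>(
+ //   //     t.packed_accessor32<float, 2>(), 2.0f);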
+ template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + GenericPackedTensorAccessor generic_packed_accessor() const& { + static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr()"); + TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim()); + return GenericPackedTensorAccessor(static_cast::PtrType>(data_ptr()),sizes().data(),strides().data()); + } + template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + GenericPackedTensorAccessor generic_packed_accessor() && = delete; + + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor32 packed_accessor32() const& { + TORCH_CHECK( + impl_->numel() <= + static_cast(std::numeric_limits::max()), + "numel needs to be smaller than int32_t max; otherwise, please use packed_accessor64"); + return generic_packed_accessor(); + } + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor32 packed_accessor32() && = delete; + + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor64 packed_accessor64() const& { + return generic_packed_accessor(); + } + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor64 packed_accessor64() && = delete; + + // ~~~~~ Autograd API ~~~~~ + + /// \fn bool is_leaf() const; + /// + /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention. + /// + /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were + /// created by the user. This means that they are not the result of an operation and so + /// `grad_fn()` is `nullptr`. + /// + /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`. + /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`. + /// + /// Example: + /// @code + /// auto a = torch::rand(10, torch::requires_grad()); + /// std::cout << a.is_leaf() << std::endl; // prints `true` + /// + /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA); + /// std::cout << b.is_leaf() << std::endl; // prints `false` + /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor + /// + /// auto c = torch::rand(10, torch::requires_grad()) + 2; + /// std::cout << c.is_leaf() << std::endl; // prints `false` + /// // c was created by the addition operation + /// + /// auto d = torch::rand(10).cuda(); + /// std::cout << d.is_leaf() << std::endl; // prints `true` + /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine) + /// + /// auto e = torch::rand(10).cuda().requires_grad_(); + /// std::cout << e.is_leaf() << std::endl; // prints `true` + /// // e requires gradients and has no operations creating it + /// + /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true)); + /// std::cout << f.is_leaf() << std::endl; // prints `true` + /// // f requires grad, has no operation creating it + /// @endcode + + /// \fn void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; + /// + /// Computes the gradient of current tensor with respect to graph leaves. + /// + /// The graph is differentiated using the chain rule. If the tensor is + /// non-scalar (i.e. its data has more than one element) and requires + /// gradient, the function additionally requires specifying ``gradient``. 
+ /// It should be a tensor of matching type and location, that contains + /// the gradient of the differentiated function w.r.t. this Tensor. + /// + /// This function accumulates gradients in the leaves - you might need to + /// zero them before calling it. + /// + /// \param gradient Gradient w.r.t. the + /// tensor. If it is a tensor, it will be automatically converted + /// to a Tensor that does not require grad unless ``create_graph`` is True. + /// None values can be specified for scalar Tensors or ones that + /// don't require grad. If a None value would be acceptable then + /// this argument is optional. + /// \param retain_graph If ``false``, the graph used to compute + /// the grads will be freed. Note that in nearly all cases setting + /// this option to True is not needed and often can be worked around + /// in a much more efficient way. Defaults to the value of + /// ``create_graph``. + /// \param create_graph If ``true``, graph of the derivative will + /// be constructed, allowing to compute higher order derivative + /// products. Defaults to ``false``. + /// \param inputs Inputs w.r.t. which the gradient will be accumulated into + /// ``at::Tensor::grad``. All other Tensors will be ignored. If not + /// provided, the gradient is accumulated into all the leaf Tensors + /// that were used to compute the current tensor. + /// When inputs are provided and a given input is not a leaf, + /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients). + /// It is an implementation detail on which the user should not rely. + /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. + + /// \fn Tensor detach() const; + /// + /// Returns a new Tensor, detached from the current graph. + /// The result will never require gradient. + + /// \fn Tensor & detach_() const; + /// + /// Detaches the Tensor from the graph that created it, making it a leaf. + /// Views cannot be detached in-place. + + /// \fn void retain_grad() const; + /// + /// Enables this Tensor to have their :attr:`grad` populated during + /// :func:`backward`. This is a no-op for leaf tensors. + + /// \fn bool retains_grad() const; + /// + /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be + /// populated during :func:`backward`, ``false`` otherwise. + + const TensorBase& set_requires_grad(bool requires_grad) const { + impl_->set_requires_grad(requires_grad); + return *this; + } + bool requires_grad() const { + return impl_->requires_grad(); + } + + // The Forward AD API functions below are low level and are not to be used by end + // users who should use the API provided in torch/csrc/autograd.h + + /// This function returns the forward gradient for this Tensor at the given level. + const Tensor& _fw_grad(uint64_t level) const { + return impl_->_fw_grad(level, *this); + } + + /// This function can be used to set the value of the forward grad. + /// Note that the given new_grad might not be used directly if it has different + /// metadata (size/stride/storage offset) compared to this Tensor. 
In that case, + /// new_grad content will be copied into a new Tensor + void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const { + impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op); + } + + /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended + /// to be used from functions that need to access the `Variable`'s equivalent `Tensor` + /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`). + /// + /// One notable difference with the legacy `.data()` function is that changes to the + /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset) + /// will not update the original `Variable`, due to the fact that this function + /// shallow-copies the `Variable`'s underlying TensorImpl. + at::TensorBase tensor_data() const; + + /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data` + /// in Python, which create a new `Variable` that shares the same storage and + /// tensor metadata with the original `Variable`, but with a completely new + /// autograd history. + /// + /// NOTE: If we change the tensor metadata (e.g. sizes / strides / + /// storage / storage_offset) of a variable created from `var.variable_data()`, those + /// changes will not update the original variable `var`. In `.variable_data()`, we set + /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal, + /// in order to prevent users from changing metadata of `var.variable_data()` + /// and expecting the original variable `var` to also be updated. + at::TensorBase variable_data() const; + + // Gradient Node and Edges + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + /// Gets the gradient function of the `Variable`. If this is a leaf variable, + /// the pointer returned will be null. + /// + /// For View Variables: + /// Gets the up-to-date grad_fn. If the shared data or base was modified, we + /// re-create the grad_fn to express the up-to-date view relationship between + /// this and the base Variable. + const std::shared_ptr& grad_fn() const; + + // Hooks + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + using hook_return_void_t = std::enable_if_t>::value, unsigned>; + template + using hook_return_var_t = std::enable_if_t, TensorBase>::value, unsigned>; + + /// Registers a backward hook. + /// + /// The hook will be called every time a gradient with respect to the Tensor is computed. + /// The hook should have one of the following signature: + /// ``` + /// hook(TensorBase grad) -> TensorBase + /// ``` + /// ``` + /// hook(TensorBase grad) -> void + /// ``` + /// The hook should not modify its argument, but it can optionally return a new gradient + /// which will be used in place of `grad`. + /// + /// This function returns the index of the hook in the list which can be used to remove hook. 
+ /// + /// Example: + /// @code + /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad()); + /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient + /// v.backward(torch::tensor({1., 2., 3.})); + /// // This prints: + /// // ``` + /// // 2 + /// // 4 + /// // 6 + /// // [ CPUFloatType{3} ] + /// // ``` + /// std::cout << v.grad() << std::endl; + /// v.remove_hook(h); // removes the hook + /// @endcode + template + hook_return_void_t register_hook(T&& hook) const; + template + hook_return_var_t register_hook(T&& hook) const; + +protected: + unsigned _register_hook(std::function hook) const; + +public: + + /// Remove hook at given position + void remove_hook(unsigned pos) const; + + // Variable methods + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + bool is_leaf() const; + + int64_t output_nr() const; + + void set_data(const TensorBase & new_data) const; + + TensorBase data() const; + + int64_t _version() const; + + void retain_grad() const; + + bool retains_grad() const; + + const TensorBase& requires_grad_(bool _requires_grad=true) const; + + // View Variables + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + /// Returns true if this `Variable` is a view of another `Variable`. + bool is_view() const; + + /// Returns the `Variable` that this `Variable` is a view of. If this + /// `Variable` is not a view, throw a `std::runtime_error`. + const TensorBase& _base() const; + + // Miscellaneous + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + const std::string& name() const; + +protected: + void enforce_invariants(); + c10::intrusive_ptr impl_; + +private: + TensorBase __dispatch_contiguous(c10::MemoryFormat) const; +}; + +inline int64_t get_device(const TensorBase& self) { + return self.get_device(); +} + +template +auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_void_t { + // Return the grad argument in case of a hook with void return type to have an + // std::function with Tensor return type + static_assert(std::is_same::value, + "Expected hook to return void"); + return _register_hook([fn=std::forward(hook)](const TensorBase& grad) { + fn(grad); + return TensorBase(); + }); +} + +template +auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_var_t { + return _register_hook(std::move(hook)); +} + +namespace detail { +// Helper creator for Tensor class which doesn't requires the users to pass +// in an intrusive_ptr instead it just converts the argument passed to +// requested intrusive_ptr type. +template +TensorBase make_tensor_base(Args&&... args) { + return TensorBase(c10::make_intrusive(std::forward(args)...)); +} + +} // namespace detail + +static inline DispatchKey legacyExtractDispatchKey(const TensorBase& t) { + return legacyExtractDispatchKey(t.key_set()); +} + +} // namespace at + +namespace c10 { +template <> +struct MaybeOwnedTraits { + using owned_type = at::TensorBase; + using borrow_type = at::TensorBase; + + static borrow_type createBorrow(const owned_type& from) { + // NOTE: this can be implemented without the special + // unsafe_borrow_t Tensor constructor as + // + // return borrow_type(c10::intrusive_ptr::reclaim(from.unsafeGetTensorImpl())); + // + // but that hurts inlining due to the nullptr check in the + // Tensor(c10::intrusive_ptr<...>) constructor. 
We already know + // that from.impl_ isn't null because from is a valid Tensor, so + // we needn't do the check again. (using __builtin_assume can + // avoid this, but wouldn't be portable to MSVC.) + return borrow_type(borrow_type::unsafe_borrow_t{}, from); + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.unsafeReleaseTensorImpl(); + // See above note: this can be implemented with public API + // similarly to createBorrow(), but that would hurt inlining. + lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs); + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0. + } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type& /*borrow*/) { + return true; + } +}; + +template <> +struct ExclusivelyOwnedTraits : public c10::ExclusivelyOwnedTensorTraits {}; +} // namespace c10 + +namespace at { + +inline c10::MaybeOwned borrow_from_optional_tensor( + const c10::optional& opt) { + return opt.has_value() + ? c10::MaybeOwned::borrowed(*opt) + : c10::MaybeOwned::owned(c10::in_place); +} + +inline c10::MaybeOwned TensorBase::expect_contiguous(MemoryFormat memory_format) const & { + if (is_contiguous(memory_format)) { + return c10::MaybeOwned::borrowed(*this); + } else { + return c10::MaybeOwned::owned(__dispatch_contiguous(memory_format)); + } +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/core/TensorBody.h b/voice_bridge/torch/include/ATen/core/TensorBody.h new file mode 100644 index 0000000000000000000000000000000000000000..0116366d76da3878f36bd664e2d9118a7c358e4f --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/TensorBody.h @@ -0,0 +1,5506 @@ +#pragma once + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include + +namespace c10{ +template class List; +template class IListRef; +} +namespace at { +struct Generator; +struct Type; +class DeprecatedTypeProperties; +class Tensor; +} // namespace at +namespace at { +namespace indexing { +struct TensorIndex; +} // namespace indexing +} // namespace at + +namespace torch { namespace autograd { + +struct Node; + +}} // namespace torch::autograd + +namespace at { + +class OptionalTensorRef; +class Tensor; +using TensorList = ArrayRef; +using ITensorList = c10::IListRef; + +using Stream = c10::Stream; + +// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which +// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr. +// +// For example: +// +// void func(Tensor a) { +// Tensor b = a; +// ... +// } +// +// In this example, when we say Tensor b = a, we are creating a new object that points to the +// same underlying TensorImpl, and bumps its reference count. 
diff --git a/voice_bridge/torch/include/ATen/core/TensorBody.h b/voice_bridge/torch/include/ATen/core/TensorBody.h
new file mode 100644
index 0000000000000000000000000000000000000000..0116366d76da3878f36bd664e2d9118a7c358e4f
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/TensorBody.h
@@ -0,0 +1,5506 @@
+#pragma once
+
+#ifdef TORCH_ASSERT_NO_OPERATORS
+#error This change adds a dependency on native_functions.yaml, \
+  meaning the file will need to be re-compiled every time an operator \
+  is changed or added. Consider if your change would be better placed in \
+  another file, or if a more specific header might achieve the same goal. \
+  See NOTE: [Tensor vs. TensorBase]
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#include
+
+namespace c10{
+template <typename T> class List;
+template <typename T> class IListRef;
+}
+namespace at {
+struct Generator;
+struct Type;
+class DeprecatedTypeProperties;
+class Tensor;
+} // namespace at
+namespace at {
+namespace indexing {
+struct TensorIndex;
+} // namespace indexing
+} // namespace at
+
+namespace torch { namespace autograd {
+
+struct Node;
+
+}} // namespace torch::autograd
+
+namespace at {
+
+class OptionalTensorRef;
+class Tensor;
+using TensorList = ArrayRef<Tensor>;
+using ITensorList = c10::IListRef<Tensor>;
+
+using Stream = c10::Stream;
+
+// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
+// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
+//
+// For example:
+//
+// void func(Tensor a) {
+//   Tensor b = a;
+//   ...
+// }
+//
+// In this example, when we say Tensor b = a, we are creating a new object that points to the
+// same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the
+// destructor decrements the reference count by calling release() on the TensorImpl it points to.
+// The existing constructors, operator overloads, etc. take care to implement the correct semantics.
+//
+// Note that Tensor can also be NULL, i.e. it is not associated with any underlying TensorImpl, and
+// special care must be taken to handle this.
+class TORCH_API Tensor: public TensorBase {
+ protected:
+  // Create a Tensor with a +0 reference count. Special care must be
+  // taken to avoid decrementing this reference count at destruction
+  // time. Intended to support MaybeOwnedTraits.
+  explicit Tensor(unsafe_borrow_t, const TensorBase& rhs): TensorBase(unsafe_borrow_t{}, rhs) {}
+  friend MaybeOwnedTraits<Tensor>;
+  friend OptionalTensorRef;
+
+ public:
+  Tensor() = default;
+  // This constructor should not be used by end users and is an implementation
+  // detail invoked by autogenerated code.
+  explicit Tensor(
+      c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
+      : TensorBase(std::move(tensor_impl)) {}
+  Tensor(const Tensor &tensor) = default;
+  Tensor(Tensor &&tensor) = default;
+
+  // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount
+  explicit Tensor(const TensorBase &base): TensorBase(base) {}
+  /*implicit*/ Tensor(TensorBase &&base): TensorBase(std::move(base)) {}
+
+  // Creates a new wrapper from TensorImpl. Intentionally a free method because
+  // it should be used with care. Checks necessary invariants
+  static Tensor wrap_tensor_impl(
+      c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
+    return TensorBase::wrap_tensor_impl(std::move(tensor_impl));
+  }
+
+  Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const {
+    return TensorBase::contiguous(memory_format);
+  }
+
+  Tensor conj() const {
+    if (!this->is_complex()) {
+      return *this;
+    } else {
+      if (this->is_sparse()) {
+        return this->conj_physical();
+      }
+      return this->_conj();
+    }
+  }
+
+  // Aliased by Dimname overloads, so need explicit using
+  using TensorBase::size;
+  using TensorBase::sym_size;
+  using TensorBase::stride;
+
+  /// Should be used if *this can reasonably be expected to be contiguous and
+  /// performance is important.
+  /// Compared to contiguous, it saves a reference count
+  /// increment/decrement if *this is already contiguous, at the cost
+  /// in all cases of an extra pointer of stack usage, an extra branch
+  /// to access, and an extra branch at destruction time.
+  c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const &;
+
+  // Use .contiguous() instead. Trying to borrow from a prvalue Tensor
+  // will only lead to trouble and dangling references.
+  c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete;
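+  // A minimal sketch of the expect_contiguous() fast path described above
+  // (`t` and `use` are hypothetical):
+  //
+  //   c10::MaybeOwned<at::Tensor> c = t.expect_contiguous();
+  //   use(*c);  // borrowed when t was already contiguous, owned otherwise
+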
+  // The following overloads are very intriguing.  Consider the following
+  // program:
+  //
+  //    x[1] = 3;
+  //
+  // We would expect that the first entry of x is written to 3.  But how can we
+  // actually achieve this?  x[1] evaluates to a tensor...
+  //
+  // The answer is, using a ref-qualifier.  x[1] is an rvalue, which cannot be
+  // (profitably) assigned to in the traditional sense, so we overload
+  // assignment to mean, "Actually, copy 3 into the tensor data."  This is done
+  // with an rvalue-reference ref-qualified overload (the methods with && at the
+  // end of their type.)
+  //
+  // There's one more fly in the ointment: We also want
+  //
+  //    Tensor x = y;
+  //
+  // to work, and we want it NOT to copy.  So we need a traditional operator=
+  // overload.  But we MUST specify a mutable lvalue ref-qualifier, to
+  // disambiguate the traditional overload from the rvalue-reference
+  // ref-qualified overload.  Otherwise, it will be ambiguous, because
+  // a non ref-qualified method is eligible for all situations.
+
+  // Unfortunately, we have to write these constructors out manually
+  // to work around an MSVC bug:
+  //    error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
+  //    multiple versions of a defaulted special member functions are not allowed
+  // Tensor& operator=(const Tensor&) & = default;
+  // Tensor& operator=(Tensor&&) & = default;
+
+  // Also MSVC will wrongly issue the following warning with the aforementioned fix
+  //    warning C4522: 'at::Tensor': multiple assignment operators specified
+  // Let's just skip the warning.
+  //
+  // TODO: temporarily disabled
+
+  Tensor& operator=(const TensorBase& x) & {
+    impl_ = x.getIntrusivePtr();
+    return *this;
+  }
+  Tensor& operator=(TensorBase&& x) & {
+    impl_ = x.unsafeReleaseIntrusivePtr();
+    return *this;
+  }
+
+  Tensor& operator=(const Tensor &x) & {
+    return operator=(static_cast<const TensorBase&>(x));
+  }
+  Tensor& operator=(Tensor &&x) & {
+    return operator=(static_cast<TensorBase&&>(x));
+  }
+
+  Tensor& operator=(Scalar v) && {
+    return fill_(v);
+  }
+  Tensor& operator=(const Tensor &rhs) && {
+    return copy_(rhs);
+  }
+  Tensor& operator=(Tensor&& rhs) && {
+    return copy_(rhs);
+  }
+
+  C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
+  DeprecatedTypeProperties & type() const {
+    return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
+        dispatchKeyToBackend(legacyExtractDispatchKey(key_set())),
+        scalar_type());
+  }
+
+  Tensor toType(ScalarType t) const {
+    return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  // TODO: Deprecate me
+  Tensor toBackend(Backend b) const {
+    return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())")
+  bool is_variable() const noexcept {
+    return !at::impl::variable_excluded_from_dispatch();
+  }
+
+  template <typename T>
+  C10_DEPRECATED_MESSAGE("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")
+  T * data() const {
+    return data_ptr<T>();
+  }
+
+  template <typename T>
+  T item() const;
+
+  template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
+  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
+  GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const & {
+    return generic_packed_accessor<T,N,PtrTraits,index_t>();
+  }
+  template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
+  C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
+  GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() && = delete;
+
+  Tensor operator~() const {
+    return bitwise_not();
+  }
+  Tensor operator-() const {
+    return neg();
+  }
+  Tensor& operator+=(const Tensor & other) {
+    return add_(other);
+  }
+  Tensor& operator+=(Scalar other) {
+    return add_(other);
+  }
+  Tensor& operator-=(const Tensor & other) {
+    return sub_(other);
+  }
+  Tensor& operator-=(Scalar other) {
+    return sub_(other);
+  }
+  Tensor& operator*=(const Tensor & other) {
+    return mul_(other);
+  }
+  Tensor& operator*=(Scalar other) {
+    return mul_(other);
+  }
+  Tensor& operator/=(const Tensor & other) {
+    return div_(other);
+  }
+  Tensor& operator/=(Scalar other) {
+    return div_(other);
+  }
+  Tensor& operator&=(const Tensor & other) {
+    return bitwise_and_(other);
+  }
+  Tensor& operator|=(const Tensor & other) {
+    return bitwise_or_(other);
+  }
+  Tensor& operator^=(const Tensor & other) {
+    return bitwise_xor_(other);
+  }
+  Tensor operator[](Scalar index) const {
+    if (!index.isIntegral(false)) {
+      TORCH_CHECK_INDEX(false, "Can only index tensors with integral scalars");
+    }
+    return this->operator[](index.toLong());
+  }
+  Tensor operator[](Tensor index) const {
+    // These properties are checked in the Scalar constructor, but we already
+    // check them here to provide more useful diagnostics for the user.
+    if (!index.defined()) {
+      TORCH_CHECK_INDEX(false, "Can only index with tensors that are defined");
+    }
+    if (index.dim() != 0) {
+      TORCH_CHECK_INDEX(false,
+                        "Can only index with tensors that are scalars (zero-dim)");
+    }
+    // The Scalar(Tensor) constructor is explicit, so we need to call it.
+    return this->operator[](index.item<int64_t>());
+  }
+  Tensor operator[](int64_t index) const {
+    return select(0, index);
+  }
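+  // A short sketch of the ref-qualified overloads documented above: indexing
+  // yields an rvalue Tensor, so assigning to it writes through to the data,
+  // while ordinary initialization shares the TensorImpl without copying:
+  //
+  //   at::Tensor x = at::zeros({3});
+  //   x[1] = 3;          // rvalue operator=(Scalar) &&: fills the selected view
+  //   at::Tensor y = x;  // lvalue semantics: shares the TensorImpl, no data copy
+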
+
+  Tensor index(ArrayRef<at::indexing::TensorIndex> indices) const;
+  Tensor index(std::initializer_list<at::indexing::TensorIndex> indices) const;
+
+  Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const & rhs);
+  Tensor & index_put_(ArrayRef<at::indexing::TensorIndex> indices, const Scalar& v);
+  Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, Tensor const & rhs);
+  Tensor & index_put_(std::initializer_list<at::indexing::TensorIndex> indices, const Scalar& v);
+
+  Tensor cpu() const {
+    return to(options().device(DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  // TODO: The Python version also accepts arguments
+  Tensor cuda() const {
+    return to(options().device(DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  Tensor hip() const {
+    return to(options().device(DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  Tensor ve() const {
+    return to(options().device(DeviceType::VE), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  Tensor vulkan() const {
+    return to(options().device(DeviceType::Vulkan), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  Tensor metal() const {
+    return to(options().device(DeviceType::Metal), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  Tensor meta() const {
+    return to(options().device(DeviceType::Meta), /*non_blocking*/ false, /*copy*/ false);
+  }
+
+  // ~~~~~ Autograd API ~~~~~
+
+  /// \fn bool is_leaf() const;
+  ///
+  /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention.
+  ///
+  /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were
+  /// created by the user. This means that they are not the result of an operation and so
+  /// `grad_fn()` is `nullptr`.
+  ///
+  /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`.
+  /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`.
+  ///
+  /// Example:
+  /// @code
+  /// auto a = torch::rand(10, torch::requires_grad());
+  /// std::cout << a.is_leaf() << std::endl; // prints `true`
+  ///
+  /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
+  /// std::cout << b.is_leaf() << std::endl; // prints `false`
+  /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor
+  ///
+  /// auto c = torch::rand(10, torch::requires_grad()) + 2;
+  /// std::cout << c.is_leaf() << std::endl; // prints `false`
+  /// // c was created by the addition operation
+  ///
+  /// auto d = torch::rand(10).cuda();
+  /// std::cout << d.is_leaf() << std::endl; // prints `true`
+  /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
+  ///
+  /// auto e = torch::rand(10).cuda().requires_grad_();
+  /// std::cout << e.is_leaf() << std::endl; // prints `true`
+  /// // e requires gradients and has no operations creating it
+  ///
+  /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
+  /// std::cout << f.is_leaf() << std::endl; // prints `true`
+  /// // f requires grad, has no operation creating it
+  /// @endcode
+
+  /// \fn void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const;
+  ///
+  /// Computes the gradient of current tensor with respect to graph leaves.
+  ///
+  /// The graph is differentiated using the chain rule. If the tensor is
+  /// non-scalar (i.e. its data has more than one element) and requires
+  /// gradient, the function additionally requires specifying ``gradient``.
+  /// It should be a tensor of matching type and location, that contains
+  /// the gradient of the differentiated function w.r.t. this Tensor.
+  ///
+  /// This function accumulates gradients in the leaves - you might need to
+  /// zero them before calling it.
+  ///
+  /// \param gradient Gradient w.r.t. the
+  ///     tensor. If it is a tensor, it will be automatically converted
+  ///     to a Tensor that does not require grad unless ``create_graph`` is True.
+  ///     None values can be specified for scalar Tensors or ones that
+  ///     don't require grad. If a None value would be acceptable then
+  ///     this argument is optional.
+  /// \param retain_graph If ``false``, the graph used to compute
+  ///     the grads will be freed. Note that in nearly all cases setting
+  ///     this option to True is not needed and often can be worked around
+  ///     in a much more efficient way. Defaults to the value of
+  ///     ``create_graph``.
+  /// \param create_graph If ``true``, graph of the derivative will
+  ///     be constructed, allowing computation of higher order derivative
+  ///     products. Defaults to ``false``.
+  /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
+  ///     ``at::Tensor::grad``. All other Tensors will be ignored. If not
+  ///     provided, the gradient is accumulated into all the leaf Tensors
+  ///     that were used to compute the current tensor.
+  ///     When inputs are provided and a given input is not a leaf,
+  ///     the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients).
+  ///     It is an implementation detail on which the user should not rely.
+  ///     See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
+  void backward(const Tensor & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false, c10::optional<TensorList> inputs=c10::nullopt) const {
+    // NB: Adding this wrapper to _backward here because we'd like our
+    // 'backwards' api to accept the 'inputs' argument optionally. Since code gen
+    // currently does not support optional of TensorList our approach is to replace
+    // backward in native_functions.yaml with _backward and call it here instead.
+    if (inputs.has_value()) {
+      TORCH_CHECK(inputs.value().size() > 0, "'inputs' argument to backward cannot be empty")
+      this->_backward(inputs.value(), gradient, retain_graph, create_graph);
+    } else {
+      this->_backward({}, gradient, retain_graph, create_graph);
+    }
+  }
+
+  /// \fn Tensor detach() const;
+  ///
+  /// Returns a new Tensor, detached from the current graph.
+  /// The result will never require gradient.
+
+  /// \fn Tensor & detach_() const;
+  ///
+  /// Detaches the Tensor from the graph that created it, making it a leaf.
+  /// Views cannot be detached in-place.
+
+  /// \fn void retain_grad() const;
+  ///
+  /// Enables this Tensor to have its :attr:`grad` populated during
+  /// :func:`backward`. This is a no-op for leaf tensors.
+
+  /// \fn bool retains_grad() const;
+  ///
+  /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
+  /// populated during :func:`backward`, ``false`` otherwise.
+
+  const Tensor& set_requires_grad(bool requires_grad) const {
+    TensorBase::set_requires_grad(requires_grad);
+    return *this;
+  }
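+  // A hedged end-to-end sketch tying backward(), retain_grad() and grad()
+  // together (the printed values assume this exact program):
+  //
+  //   auto x = torch::tensor({2., 3.}, torch::requires_grad());
+  //   auto y = x * x;                     // non-leaf
+  //   y.retain_grad();                    // opt in to y.grad() being populated
+  //   y.backward(torch::ones_like(y));    // non-scalar output needs `gradient`
+  //   std::cout << x.grad() << std::endl; // prints 4 and 6 (d(x*x)/dx = 2x)
+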
+  /// Return a mutable reference to the gradient. This is conventionally
+  /// used as `t.grad() = x` to set a gradient to a completely new tensor.
+  /// Note that this function works with a non-const Tensor and is not
+  /// thread safe.
+  Tensor& mutable_grad() const {
+    return impl_->mutable_grad();
+  }
+
+  /// This function returns an undefined tensor by default and returns a defined tensor
+  /// the first time a call to `backward()` computes gradients for this Tensor.
+  /// The attribute will then contain the gradients computed and future calls
+  /// to `backward()` will accumulate (add) gradients into it.
+  const Tensor& grad() const {
+    const Tensor& maybe_grad = impl_->grad();
+    if (!is_leaf() && !retains_grad() && !maybe_grad.defined()) {
+      TORCH_WARN(
+        "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad "
+        "attribute won't be populated during autograd.backward(). If you indeed want the .grad "
+        "field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. "
+        "If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor "
+        "instead. See github.com/pytorch/pytorch/pull/30531 for more information.");
+    }
+    return maybe_grad;
+  }
+
+  // The Forward AD API functions below are low level and are not to be used by end
+  // users who should use the API provided in torch/csrc/autograd.h
+
+  /// This function returns the forward gradient for this Tensor at the given level.
+  const Tensor& _fw_grad(uint64_t level) const {
+    return impl_->_fw_grad(level, *this);
+  }
+
+  /// This function can be used to set the value of the forward grad.
+  /// Note that the given new_grad might not be used directly if it has different
+  /// metadata (size/stride/storage offset) compared to this Tensor. In that case,
+  /// new_grad content will be copied into a new Tensor
+  void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const {
+    impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op);
+  }
+
+
+  // STOP.  Thinking of adding a method here, which only makes use
+  // of other ATen methods?  Define it in native_functions.yaml.
+ + //example + //Tensor * add(Tensor & b); + void __dispatch__backward(at::TensorList inputs, const c10::optional & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false) const; + void __dispatch_set_data(const at::Tensor & new_data) const; + at::Tensor __dispatch_data() const; + bool __dispatch_is_leaf() const; + int64_t __dispatch_output_nr() const; + int64_t __dispatch__version() const; + at::Tensor & __dispatch_requires_grad_(bool requires_grad=true) const; + void __dispatch_retain_grad() const; + bool __dispatch_retains_grad() const; + at::Tensor _fw_primal(int64_t level) const; + at::Tensor & rename_(c10::optional names) const; + at::Tensor rename(c10::optional names) const; + at::Tensor align_to(at::DimnameList names) const; + at::Tensor align_to(at::DimnameList order, int64_t ellipsis_idx) const; + at::Tensor align_as(const at::Tensor & other) const; + at::Tensor refine_names(at::DimnameList names) const; + at::Tensor abs() const; + at::Tensor & abs_() const; + at::Tensor absolute() const; + at::Tensor & absolute_() const; + at::Tensor angle() const; + at::Tensor sgn() const; + at::Tensor & sgn_() const; + at::Tensor chalf(c10::optional memory_format=c10::nullopt) const; + at::Tensor _conj() const; + at::Tensor __dispatch_conj() const; + at::Tensor _conj_physical() const; + at::Tensor conj_physical() const; + at::Tensor & conj_physical_() const; + at::Tensor resolve_conj() const; + at::Tensor resolve_neg() const; + at::Tensor _neg_view() const; + at::Tensor acos() const; + at::Tensor & acos_() const; + at::Tensor arccos() const; + at::Tensor & arccos_() const; + at::Tensor add(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor & add_(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor add(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor & add_(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor addmv(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & addmv_(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor addr(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & addr_(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor all(int64_t dim, bool keepdim=false) const; + at::Tensor all(at::Dimname dim, bool keepdim=false) const; + bool allclose(const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const; + at::Tensor any(int64_t dim, bool keepdim=false) const; + at::Tensor any(at::Dimname dim, bool keepdim=false) const; + at::Tensor argmax(c10::optional dim=c10::nullopt, bool keepdim=false) const; + at::Tensor argmin(c10::optional dim=c10::nullopt, bool keepdim=false) const; + at::Tensor acosh() const; + at::Tensor & acosh_() const; + at::Tensor arccosh() const; + at::Tensor & arccosh_() const; + at::Tensor asinh() const; + at::Tensor & asinh_() const; + at::Tensor arcsinh() const; + at::Tensor & arcsinh_() const; + at::Tensor atanh() const; + at::Tensor & atanh_() const; + at::Tensor arctanh() const; + at::Tensor & arctanh_() const; + at::Tensor as_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor as_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional 
storage_offset=c10::nullopt) const; + const at::Tensor & as_strided_(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + const at::Tensor & as_strided__symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor asin() const; + at::Tensor & asin_() const; + at::Tensor arcsin() const; + at::Tensor & arcsin_() const; + at::Tensor atan() const; + at::Tensor & atan_() const; + at::Tensor arctan() const; + at::Tensor & arctan_() const; + at::Tensor baddbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & baddbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor bernoulli(c10::optional generator=c10::nullopt) const; + at::Tensor & bernoulli_(const at::Tensor & p, c10::optional generator=c10::nullopt) const; + at::Tensor & bernoulli_(double p=0.5, c10::optional generator=c10::nullopt) const; + at::Tensor bernoulli(double p, c10::optional generator=c10::nullopt) const; + at::Tensor bincount(const c10::optional & weights={}, int64_t minlength=0) const; + at::Tensor bitwise_not() const; + at::Tensor & bitwise_not_() const; + at::Tensor copysign(const at::Tensor & other) const; + at::Tensor & copysign_(const at::Tensor & other) const; + at::Tensor copysign(const at::Scalar & other) const; + at::Tensor & copysign_(const at::Scalar & other) const; + at::Tensor logical_not() const; + at::Tensor & logical_not_() const; + at::Tensor logical_xor(const at::Tensor & other) const; + at::Tensor & logical_xor_(const at::Tensor & other) const; + at::Tensor logical_and(const at::Tensor & other) const; + at::Tensor & logical_and_(const at::Tensor & other) const; + at::Tensor logical_or(const at::Tensor & other) const; + at::Tensor & logical_or_(const at::Tensor & other) const; + at::Tensor bmm(const at::Tensor & mat2) const; + at::Tensor broadcast_to(at::IntArrayRef size) const; + at::Tensor ceil() const; + at::Tensor & ceil_() const; + ::std::vector unsafe_chunk(int64_t chunks, int64_t dim=0) const; + ::std::vector chunk(int64_t chunks, int64_t dim=0) const; + ::std::vector tensor_split(int64_t sections, int64_t dim=0) const; + ::std::vector tensor_split(at::IntArrayRef indices, int64_t dim=0) const; + ::std::vector tensor_split(const at::Tensor & tensor_indices_or_sections, int64_t dim=0) const; + at::Tensor clamp(const c10::optional & min, const c10::optional & max=c10::nullopt) const; + at::Tensor clamp(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor & clamp_(const c10::optional & min, const c10::optional & max=c10::nullopt) const; + at::Tensor & clamp_(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor clamp_max(const at::Scalar & max) const; + at::Tensor clamp_max(const at::Tensor & max) const; + at::Tensor & clamp_max_(const at::Scalar & max) const; + at::Tensor & clamp_max_(const at::Tensor & max) const; + at::Tensor clamp_min(const at::Scalar & min) const; + at::Tensor clamp_min(const at::Tensor & min) const; + at::Tensor & clamp_min_(const at::Scalar & min) const; + at::Tensor & clamp_min_(const at::Tensor & min) const; + at::Tensor clip(const c10::optional & min, const c10::optional & max=c10::nullopt) const; + at::Tensor clip(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor & clip_(const c10::optional & min, const c10::optional & 
max=c10::nullopt) const; + at::Tensor & clip_(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor __dispatch_contiguous(at::MemoryFormat memory_format=MemoryFormat::Contiguous) const; + at::Tensor & copy_(const at::Tensor & src, bool non_blocking=false) const; + at::Tensor cos() const; + at::Tensor & cos_() const; + at::Tensor cosh() const; + at::Tensor & cosh_() const; + at::Tensor count_nonzero(at::IntArrayRef dim) const; + at::Tensor count_nonzero(c10::optional dim=c10::nullopt) const; + at::Tensor cov(int64_t correction=1, const c10::optional & fweights={}, const c10::optional & aweights={}) const; + at::Tensor corrcoef() const; + ::std::tuple cummax(int64_t dim) const; + ::std::tuple cummax(at::Dimname dim) const; + ::std::tuple cummin(int64_t dim) const; + ::std::tuple cummin(at::Dimname dim) const; + at::Tensor cumprod(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumprod_(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor cumprod(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumprod_(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor cumsum(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumsum_(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor cumsum(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumsum_(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) const; + at::Tensor diagflat(int64_t offset=0) const; + at::Tensor diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1) const; + at::Tensor diagonal(at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) const; + at::Tensor & fill_diagonal_(const at::Scalar & fill_value, bool wrap=false) const; + at::Tensor diff(int64_t n=1, int64_t dim=-1, const c10::optional & prepend={}, const c10::optional & append={}) const; + at::Tensor div(const at::Tensor & other) const; + at::Tensor & div_(const at::Tensor & other) const; + at::Tensor div(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor & div_(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor div(const at::Scalar & other) const; + at::Tensor & div_(const at::Scalar & other) const; + at::Tensor div(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor & div_(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor divide(const at::Tensor & other) const; + at::Tensor & divide_(const at::Tensor & other) const; + at::Tensor divide(const at::Scalar & other) const; + at::Tensor & divide_(const at::Scalar & other) const; + at::Tensor divide(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor & divide_(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor divide(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor & divide_(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor true_divide(const at::Tensor & other) const; + at::Tensor & true_divide_(const at::Tensor & other) const; + at::Tensor true_divide(const at::Scalar & other) const; + at::Tensor & true_divide_(const at::Scalar & other) const; + at::Tensor dot(const at::Tensor & tensor) const; + at::Tensor vdot(const at::Tensor & other) const; + at::Tensor new_empty(at::IntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_empty(at::IntArrayRef size, c10::optional dtype, 
c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_empty_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_empty_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) const; + at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) const; + at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) const; + at::Tensor new_full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) const; + at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_zeros(at::IntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_zeros(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_zeros_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_ones(at::IntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_ones(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_ones_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_ones_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + const at::Tensor & resize_(at::IntArrayRef size, c10::optional memory_format=c10::nullopt) const; + const at::Tensor & resize__symint(c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) const; + at::Tensor erf() const; + at::Tensor & erf_() const; + at::Tensor erfc() const; + at::Tensor & erfc_() const; + at::Tensor exp() const; + at::Tensor & exp_() const; + at::Tensor exp2() const; + at::Tensor & exp2_() const; + at::Tensor expm1() const; + at::Tensor & expm1_() const; + at::Tensor expand(at::IntArrayRef size, bool implicit=false) const; + at::Tensor expand_symint(c10::SymIntArrayRef size, bool implicit=false) const; + at::Tensor expand_as(const at::Tensor & other) const; + at::Tensor flatten(int64_t start_dim=0, int64_t end_dim=-1) const; + at::Tensor flatten(int64_t start_dim, int64_t end_dim, at::Dimname out_dim) const; + at::Tensor flatten(at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) const; + at::Tensor flatten(at::DimnameList dims, at::Dimname out_dim) const; + at::Tensor unflatten(int64_t dim, at::IntArrayRef sizes) const; + 
at::Tensor unflatten(at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) const; + at::Tensor & fill_(const at::Scalar & value) const; + at::Tensor & fill_(const at::Tensor & value) const; + at::Tensor floor() const; + at::Tensor & floor_() const; + at::Tensor floor_divide(const at::Tensor & other) const; + at::Tensor & floor_divide_(const at::Tensor & other) const; + at::Tensor floor_divide(const at::Scalar & other) const; + at::Tensor & floor_divide_(const at::Scalar & other) const; + at::Tensor frac() const; + at::Tensor & frac_() const; + at::Tensor gcd(const at::Tensor & other) const; + at::Tensor & gcd_(const at::Tensor & other) const; + at::Tensor lcm(const at::Tensor & other) const; + at::Tensor & lcm_(const at::Tensor & other) const; + at::Tensor index(const c10::List> & indices) const; + at::Tensor & index_copy_(int64_t dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor index_copy(int64_t dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor & index_copy_(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor index_copy(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor & index_put_(const c10::List> & indices, const at::Tensor & values, bool accumulate=false) const; + at::Tensor index_put(const c10::List> & indices, const at::Tensor & values, bool accumulate=false) const; + at::Tensor isclose(const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const; + at::Tensor isnan() const; + bool is_distributed() const; + bool __dispatch_is_floating_point() const; + bool __dispatch_is_complex() const; + bool __dispatch_is_conj() const; + bool __dispatch__is_zerotensor() const; + bool __dispatch_is_neg() const; + at::Tensor isreal() const; + bool is_nonzero() const; + bool is_same_size(const at::Tensor & other) const; + bool __dispatch_is_signed() const; + bool __dispatch_is_inference() const; + at::Tensor kron(const at::Tensor & other) const; + ::std::tuple kthvalue(int64_t k, int64_t dim=-1, bool keepdim=false) const; + ::std::tuple kthvalue(int64_t k, at::Dimname dim, bool keepdim=false) const; + at::Tensor nan_to_num(c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt) const; + at::Tensor & nan_to_num_(c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt) const; + at::Tensor ldexp(const at::Tensor & other) const; + at::Tensor & ldexp_(const at::Tensor & other) const; + at::Tensor log() const; + at::Tensor & log_() const; + at::Tensor log10() const; + at::Tensor & log10_() const; + at::Tensor log1p() const; + at::Tensor & log1p_() const; + at::Tensor log2() const; + at::Tensor & log2_() const; + at::Tensor logaddexp(const at::Tensor & other) const; + at::Tensor logaddexp2(const at::Tensor & other) const; + at::Tensor xlogy(const at::Tensor & other) const; + at::Tensor xlogy(const at::Scalar & other) const; + at::Tensor & xlogy_(const at::Tensor & other) const; + at::Tensor & xlogy_(const at::Scalar & other) const; + at::Tensor log_softmax(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor log_softmax(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor logcumsumexp(int64_t dim) const; + at::Tensor logcumsumexp(at::Dimname dim) const; + at::Tensor logsumexp(at::IntArrayRef dim, bool keepdim=false) const; + at::Tensor logsumexp(at::DimnameList dim, bool keepdim=false) const; + at::Tensor 
matmul(const at::Tensor & other) const; + at::Tensor matrix_power(int64_t n) const; + at::Tensor matrix_exp() const; + ::std::tuple aminmax(c10::optional dim=c10::nullopt, bool keepdim=false) const; + ::std::tuple max(int64_t dim, bool keepdim=false) const; + ::std::tuple max(at::Dimname dim, bool keepdim=false) const; + at::Tensor amax(at::IntArrayRef dim={}, bool keepdim=false) const; + at::Tensor mean(c10::optional dtype=c10::nullopt) const; + at::Tensor mean(at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor mean(at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor nanmean(at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor median() const; + ::std::tuple median(int64_t dim, bool keepdim=false) const; + ::std::tuple median(at::Dimname dim, bool keepdim=false) const; + at::Tensor nanmedian() const; + ::std::tuple nanmedian(int64_t dim, bool keepdim=false) const; + ::std::tuple nanmedian(at::Dimname dim, bool keepdim=false) const; + ::std::tuple min(int64_t dim, bool keepdim=false) const; + ::std::tuple min(at::Dimname dim, bool keepdim=false) const; + at::Tensor amin(at::IntArrayRef dim={}, bool keepdim=false) const; + at::Tensor mm(const at::Tensor & mat2) const; + ::std::tuple mode(int64_t dim=-1, bool keepdim=false) const; + ::std::tuple mode(at::Dimname dim, bool keepdim=false) const; + at::Tensor mul(const at::Tensor & other) const; + at::Tensor & mul_(const at::Tensor & other) const; + at::Tensor mul(const at::Scalar & other) const; + at::Tensor & mul_(const at::Scalar & other) const; + at::Tensor multiply(const at::Tensor & other) const; + at::Tensor & multiply_(const at::Tensor & other) const; + at::Tensor multiply(const at::Scalar & other) const; + at::Tensor & multiply_(const at::Scalar & other) const; + at::Tensor mv(const at::Tensor & vec) const; + at::Tensor mvlgamma(int64_t p) const; + at::Tensor & mvlgamma_(int64_t p) const; + at::Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const; + at::Tensor narrow_copy_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const; + at::Tensor narrow(int64_t dim, int64_t start, int64_t length) const; + at::Tensor narrow(int64_t dim, const at::Tensor & start, int64_t length) const; + at::Tensor permute(at::IntArrayRef dims) const; + at::Tensor movedim(at::IntArrayRef source, at::IntArrayRef destination) const; + at::Tensor movedim(int64_t source, int64_t destination) const; + at::Tensor moveaxis(at::IntArrayRef source, at::IntArrayRef destination) const; + at::Tensor moveaxis(int64_t source, int64_t destination) const; + at::Tensor numpy_T() const; + at::Tensor matrix_H() const; + at::Tensor mT() const; + at::Tensor mH() const; + at::Tensor adjoint() const; + bool is_pinned(c10::optional device=c10::nullopt) const; + at::Tensor pin_memory(c10::optional device=c10::nullopt) const; + at::Tensor pinverse(double rcond=1e-15) const; + at::Tensor rad2deg() const; + at::Tensor & rad2deg_() const; + at::Tensor deg2rad() const; + at::Tensor & deg2rad_() const; + at::Tensor ravel() const; + at::Tensor reciprocal() const; + at::Tensor & reciprocal_() const; + at::Tensor neg() const; + at::Tensor & neg_() const; + at::Tensor negative() const; + at::Tensor & negative_() const; + at::Tensor repeat(at::IntArrayRef repeats) const; + at::Tensor repeat_symint(c10::SymIntArrayRef repeats) const; + at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional 
dim=c10::nullopt, c10::optional output_size=c10::nullopt) const; + at::Tensor repeat_interleave(int64_t repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) const; + at::Tensor reshape(at::IntArrayRef shape) const; + at::Tensor reshape_symint(c10::SymIntArrayRef shape) const; + at::Tensor _reshape_alias(at::IntArrayRef size, at::IntArrayRef stride) const; + at::Tensor _reshape_alias_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const; + at::Tensor reshape_as(const at::Tensor & other) const; + at::Tensor round() const; + at::Tensor & round_() const; + at::Tensor round(int64_t decimals) const; + at::Tensor & round_(int64_t decimals) const; + at::Tensor relu() const; + at::Tensor & relu_() const; + at::Tensor prelu(const at::Tensor & weight) const; + ::std::tuple prelu_backward(const at::Tensor & grad_output, const at::Tensor & weight) const; + at::Tensor hardshrink(const at::Scalar & lambd=0.5) const; + at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Scalar & lambd) const; + at::Tensor rsqrt() const; + at::Tensor & rsqrt_() const; + at::Tensor select(at::Dimname dim, int64_t index) const; + at::Tensor select(int64_t dim, int64_t index) const; + at::Tensor sigmoid() const; + at::Tensor & sigmoid_() const; + at::Tensor logit(c10::optional eps=c10::nullopt) const; + at::Tensor & logit_(c10::optional eps=c10::nullopt) const; + at::Tensor sin() const; + at::Tensor & sin_() const; + at::Tensor sinc() const; + at::Tensor & sinc_() const; + at::Tensor sinh() const; + at::Tensor & sinh_() const; + at::Tensor detach() const; + at::Tensor & detach_() const; + int64_t size(at::Dimname dim) const; + at::Tensor slice(int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) const; + at::Tensor slice_symint(int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) const; + at::Tensor slice_scatter(const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) const; + at::Tensor slice_scatter_symint(const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) const; + at::Tensor select_scatter(const at::Tensor & src, int64_t dim, int64_t index) const; + at::Tensor diagonal_scatter(const at::Tensor & src, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) const; + at::Tensor as_strided_scatter(const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor as_strided_scatter_symint(const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor smm(const at::Tensor & mat2) const; + at::Tensor softmax(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor softmax(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + ::std::vector unsafe_split(int64_t split_size, int64_t dim=0) const; + ::std::vector split(int64_t split_size, int64_t dim=0) const; + ::std::vector split(at::IntArrayRef split_size, int64_t dim=0) const; + ::std::vector unsafe_split_with_sizes(at::IntArrayRef split_sizes, int64_t dim=0) const; + ::std::vector split_with_sizes(at::IntArrayRef split_sizes, int64_t dim=0) const; + ::std::vector hsplit(int64_t sections) const; + ::std::vector hsplit(at::IntArrayRef indices) const; + ::std::vector vsplit(int64_t sections) const; + ::std::vector vsplit(at::IntArrayRef indices) 
const; + ::std::vector dsplit(int64_t sections) const; + ::std::vector dsplit(at::IntArrayRef indices) const; + at::Tensor squeeze() const; + at::Tensor squeeze(int64_t dim) const; + at::Tensor squeeze(at::Dimname dim) const; + at::Tensor & squeeze_() const; + at::Tensor & squeeze_(int64_t dim) const; + at::Tensor & squeeze_(at::Dimname dim) const; + at::Tensor sspaddmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor stft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt) const; + at::Tensor stft(int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt) const; + at::Tensor istft(int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool center=true, bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional length=c10::nullopt, bool return_complex=false) const; + int64_t stride(at::Dimname dim) const; + at::Tensor sum(c10::optional dtype=c10::nullopt) const; + at::Tensor sum(at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor sum(at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor nansum(at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor sum_to_size(at::IntArrayRef size) const; + at::Tensor sqrt() const; + at::Tensor & sqrt_() const; + at::Tensor square() const; + at::Tensor & square_() const; + at::Tensor std(bool unbiased=true) const; + at::Tensor std(at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) const; + at::Tensor std(at::OptionalIntArrayRef dim, c10::optional correction, bool keepdim=false) const; + at::Tensor std(at::DimnameList dim, bool unbiased=true, bool keepdim=false) const; + at::Tensor std(at::DimnameList dim, c10::optional correction, bool keepdim=false) const; + at::Tensor prod(c10::optional dtype=c10::nullopt) const; + at::Tensor prod(int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor prod(at::Dimname dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor t() const; + at::Tensor & t_() const; + at::Tensor tan() const; + at::Tensor & tan_() const; + at::Tensor tanh() const; + at::Tensor & tanh_() const; + at::Tensor tile(at::IntArrayRef dims) const; + at::Tensor transpose(int64_t dim0, int64_t dim1) const; + at::Tensor transpose(at::Dimname dim0, at::Dimname dim1) const; + at::Tensor & transpose_(int64_t dim0, int64_t dim1) const; + at::Tensor flip(at::IntArrayRef dims) const; + at::Tensor fliplr() const; + at::Tensor flipud() const; + at::Tensor roll(at::IntArrayRef shifts, at::IntArrayRef dims={}) const; + at::Tensor rot90(int64_t k=1, at::IntArrayRef dims={0,1}) const; + at::Tensor _nested_tensor_size() const; + at::Tensor _nested_tensor_strides() const; + ::std::vector _nested_tensor_offsets() const; + at::Tensor trunc() const; + at::Tensor & trunc_() const; + at::Tensor fix() const; + at::Tensor & fix_() const; + at::Tensor type_as(const at::Tensor & other) const; + at::Tensor unsqueeze(int64_t dim) const; + 
at::Tensor & unsqueeze_(int64_t dim) const; + at::Tensor var(bool unbiased=true) const; + at::Tensor var(at::OptionalIntArrayRef dim, bool unbiased=true, bool keepdim=false) const; + at::Tensor var(at::OptionalIntArrayRef dim, c10::optional correction, bool keepdim=false) const; + at::Tensor var(at::DimnameList dim, bool unbiased=true, bool keepdim=false) const; + at::Tensor var(at::DimnameList dim, c10::optional correction, bool keepdim=false) const; + at::Tensor view_as(const at::Tensor & other) const; + at::Tensor where(const at::Tensor & condition, const at::Tensor & other) const; + at::Tensor norm(const c10::optional & p, at::ScalarType dtype) const; + at::Tensor norm(const at::Scalar & p=2) const; + at::Tensor norm(const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) const; + at::Tensor norm(const c10::optional & p, at::IntArrayRef dim, bool keepdim=false) const; + at::Tensor norm(const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) const; + at::Tensor norm(const c10::optional & p, at::DimnameList dim, bool keepdim=false) const; + ::std::tuple frexp() const; + at::Tensor clone(c10::optional memory_format=c10::nullopt) const; + at::Tensor positive() const; + const at::Tensor & resize_as_(const at::Tensor & the_template, c10::optional memory_format=c10::nullopt) const; + const at::Tensor & resize_as_sparse_(const at::Tensor & the_template) const; + at::Tensor & zero_() const; + at::Tensor sub(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor & sub_(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor sub(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor & sub_(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor subtract(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor & subtract_(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor subtract(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor & subtract_(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor heaviside(const at::Tensor & values) const; + at::Tensor & heaviside_(const at::Tensor & values) const; + at::Tensor addmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & addmm_(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor _addmm_activation(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) const; + const at::Tensor & sparse_resize_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const; + const at::Tensor & sparse_resize_and_clear_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const; + at::Tensor sparse_mask(const at::Tensor & mask) const; + at::Tensor to_dense(c10::optional dtype=c10::nullopt) const; + at::Tensor _to_dense(c10::optional dtype=c10::nullopt) const; + int64_t sparse_dim() const; + int64_t _dimI() const; + int64_t dense_dim() const; + int64_t _dimV() const; + int64_t _nnz() const; + at::Tensor coalesce() const; + bool is_coalesced() const; + at::Tensor _indices() const; + at::Tensor _values() const; + at::Tensor & _coalesced_(bool coalesced) const; + at::Tensor indices() const; + at::Tensor values() const; + at::Tensor crow_indices() const; + at::Tensor col_indices() const; + at::Tensor ccol_indices() const; + at::Tensor 
row_indices() const; + ::std::vector unbind(int64_t dim=0) const; + ::std::vector unbind(at::Dimname dim) const; + at::Tensor to_sparse(int64_t sparse_dim) const; + at::Tensor to_sparse() const; + at::Tensor to_sparse_csr() const; + at::Tensor to_sparse_csc() const; + at::Tensor to_sparse_bsr(at::IntArrayRef blocksize) const; + at::Tensor to_sparse_bsc(at::IntArrayRef blocksize) const; + at::Tensor to_mkldnn(c10::optional dtype=c10::nullopt) const; + at::Tensor dequantize() const; + double q_scale() const; + int64_t q_zero_point() const; + at::Tensor q_per_channel_scales() const; + at::Tensor q_per_channel_zero_points() const; + int64_t q_per_channel_axis() const; + at::Tensor int_repr() const; + at::QScheme qscheme() const; + at::Tensor _autocast_to_reduced_precision(bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) const; + at::Tensor _autocast_to_full_precision(bool cuda_enabled, bool cpu_enabled) const; + at::Tensor to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Tensor to(c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, bool copy, c10::optional memory_format) const; + at::Tensor to(at::Device device, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Tensor to(at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Tensor to(const at::Tensor & other, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Scalar item() const; + at::Tensor & set_(at::Storage source) const; + at::Tensor & set_(at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) const; + at::Tensor & set__symint(at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) const; + at::Tensor & set_(const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) const; + at::Tensor & set__symint(const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) const; + at::Tensor & set_(const at::Tensor & source) const; + at::Tensor & set_() const; + bool is_set_to(const at::Tensor & tensor) const; + at::Tensor & masked_fill_(const at::Tensor & mask, const at::Scalar & value) const; + at::Tensor masked_fill(const at::Tensor & mask, const at::Scalar & value) const; + at::Tensor & masked_fill_(const at::Tensor & mask, const at::Tensor & value) const; + at::Tensor masked_fill(const at::Tensor & mask, const at::Tensor & value) const; + at::Tensor & masked_scatter_(const at::Tensor & mask, const at::Tensor & source) const; + at::Tensor masked_scatter(const at::Tensor & mask, const at::Tensor & source) const; + at::Tensor view(at::IntArrayRef size) const; + at::Tensor view_symint(c10::SymIntArrayRef size) const; + at::Tensor view(at::ScalarType dtype) const; + at::Tensor & put_(const at::Tensor & index, const at::Tensor & source, bool accumulate=false) const; + at::Tensor put(const at::Tensor & index, const at::Tensor & source, bool accumulate=false) const; + at::Tensor & index_add_(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const; + at::Tensor index_add(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const; + at::Tensor 
index_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const; + at::Tensor & index_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) const; + at::Tensor index_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) const; + at::Tensor & index_fill_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor index_fill(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor & index_fill_(int64_t dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor index_fill(int64_t dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor & index_fill_(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor & index_fill_(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor index_fill(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor index_fill(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const; + at::Tensor scatter(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor scatter_add(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor & scatter_add_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) const; + at::Tensor & scatter_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) const; + at::Tensor & eq_(const at::Scalar & other) const; + at::Tensor & eq_(const at::Tensor & other) const; + at::Tensor bitwise_and(const at::Scalar & other) const; + at::Tensor bitwise_and(const at::Tensor & other) const; + at::Tensor & bitwise_and_(const at::Scalar & other) const; + at::Tensor & bitwise_and_(const at::Tensor & other) const; + at::Tensor __and__(const at::Scalar & other) const; + at::Tensor __and__(const at::Tensor & other) const; + at::Tensor & __iand__(const at::Scalar & other) const; + at::Tensor & __iand__(const at::Tensor & other) const; + at::Tensor bitwise_or(const at::Scalar & other) const; + at::Tensor bitwise_or(const at::Tensor & other) const; + at::Tensor & bitwise_or_(const 
at::Scalar & other) const; + at::Tensor & bitwise_or_(const at::Tensor & other) const; + at::Tensor __or__(const at::Scalar & other) const; + at::Tensor __or__(const at::Tensor & other) const; + at::Tensor & __ior__(const at::Scalar & other) const; + at::Tensor & __ior__(const at::Tensor & other) const; + at::Tensor bitwise_xor(const at::Scalar & other) const; + at::Tensor bitwise_xor(const at::Tensor & other) const; + at::Tensor & bitwise_xor_(const at::Scalar & other) const; + at::Tensor & bitwise_xor_(const at::Tensor & other) const; + at::Tensor __xor__(const at::Scalar & other) const; + at::Tensor __xor__(const at::Tensor & other) const; + at::Tensor & __ixor__(const at::Scalar & other) const; + at::Tensor & __ixor__(const at::Tensor & other) const; + at::Tensor __lshift__(const at::Scalar & other) const; + at::Tensor __lshift__(const at::Tensor & other) const; + at::Tensor & __ilshift__(const at::Scalar & other) const; + at::Tensor & __ilshift__(const at::Tensor & other) const; + at::Tensor bitwise_left_shift(const at::Tensor & other) const; + at::Tensor & bitwise_left_shift_(const at::Tensor & other) const; + at::Tensor bitwise_left_shift(const at::Scalar & other) const; + at::Tensor & bitwise_left_shift_(const at::Scalar & other) const; + at::Tensor __rshift__(const at::Scalar & other) const; + at::Tensor __rshift__(const at::Tensor & other) const; + at::Tensor & __irshift__(const at::Scalar & other) const; + at::Tensor & __irshift__(const at::Tensor & other) const; + at::Tensor bitwise_right_shift(const at::Tensor & other) const; + at::Tensor & bitwise_right_shift_(const at::Tensor & other) const; + at::Tensor bitwise_right_shift(const at::Scalar & other) const; + at::Tensor & bitwise_right_shift_(const at::Scalar & other) const; + at::Tensor & tril_(int64_t diagonal=0) const; + at::Tensor & triu_(int64_t diagonal=0) const; + at::Tensor & digamma_() const; + at::Tensor & lerp_(const at::Tensor & end, const at::Scalar & weight) const; + at::Tensor & lerp_(const at::Tensor & end, const at::Tensor & weight) const; + at::Tensor & addbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor addbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & random_(int64_t from, c10::optional to, c10::optional generator=c10::nullopt) const; + at::Tensor & random_(int64_t to, c10::optional generator=c10::nullopt) const; + at::Tensor & random_(c10::optional generator=c10::nullopt) const; + at::Tensor & uniform_(double from=0, double to=1, c10::optional generator=c10::nullopt) const; + at::Tensor & cauchy_(double median=0, double sigma=1, c10::optional generator=c10::nullopt) const; + at::Tensor & log_normal_(double mean=1, double std=2, c10::optional generator=c10::nullopt) const; + at::Tensor & exponential_(double lambd=1, c10::optional generator=c10::nullopt) const; + at::Tensor & geometric_(double p, c10::optional generator=c10::nullopt) const; + at::Tensor diag(int64_t diagonal=0) const; + at::Tensor cross(const at::Tensor & other, c10::optional dim=c10::nullopt) const; + at::Tensor triu(int64_t diagonal=0) const; + at::Tensor tril(int64_t diagonal=0) const; + at::Tensor trace() const; + at::Tensor ne(const at::Scalar & other) const; + at::Tensor ne(const at::Tensor & other) const; + at::Tensor & ne_(const at::Scalar & other) const; + at::Tensor & ne_(const at::Tensor & other) const; + at::Tensor not_equal(const at::Scalar & other) const; + 
at::Tensor not_equal(const at::Tensor & other) const; + at::Tensor & not_equal_(const at::Scalar & other) const; + at::Tensor & not_equal_(const at::Tensor & other) const; + at::Tensor eq(const at::Scalar & other) const; + at::Tensor eq(const at::Tensor & other) const; + at::Tensor ge(const at::Scalar & other) const; + at::Tensor ge(const at::Tensor & other) const; + at::Tensor & ge_(const at::Scalar & other) const; + at::Tensor & ge_(const at::Tensor & other) const; + at::Tensor greater_equal(const at::Scalar & other) const; + at::Tensor greater_equal(const at::Tensor & other) const; + at::Tensor & greater_equal_(const at::Scalar & other) const; + at::Tensor & greater_equal_(const at::Tensor & other) const; + at::Tensor le(const at::Scalar & other) const; + at::Tensor le(const at::Tensor & other) const; + at::Tensor & le_(const at::Scalar & other) const; + at::Tensor & le_(const at::Tensor & other) const; + at::Tensor less_equal(const at::Scalar & other) const; + at::Tensor less_equal(const at::Tensor & other) const; + at::Tensor & less_equal_(const at::Scalar & other) const; + at::Tensor & less_equal_(const at::Tensor & other) const; + at::Tensor gt(const at::Scalar & other) const; + at::Tensor gt(const at::Tensor & other) const; + at::Tensor & gt_(const at::Scalar & other) const; + at::Tensor & gt_(const at::Tensor & other) const; + at::Tensor greater(const at::Scalar & other) const; + at::Tensor greater(const at::Tensor & other) const; + at::Tensor & greater_(const at::Scalar & other) const; + at::Tensor & greater_(const at::Tensor & other) const; + at::Tensor lt(const at::Scalar & other) const; + at::Tensor lt(const at::Tensor & other) const; + at::Tensor & lt_(const at::Scalar & other) const; + at::Tensor & lt_(const at::Tensor & other) const; + at::Tensor less(const at::Scalar & other) const; + at::Tensor less(const at::Tensor & other) const; + at::Tensor & less_(const at::Scalar & other) const; + at::Tensor & less_(const at::Tensor & other) const; + at::Tensor take(const at::Tensor & index) const; + at::Tensor take_along_dim(const at::Tensor & indices, c10::optional dim=c10::nullopt) const; + at::Tensor index_select(int64_t dim, const at::Tensor & index) const; + at::Tensor index_select(at::Dimname dim, const at::Tensor & index) const; + at::Tensor masked_select(const at::Tensor & mask) const; + at::Tensor nonzero() const; + ::std::vector nonzero_numpy() const; + at::Tensor argwhere() const; + at::Tensor gather(int64_t dim, const at::Tensor & index, bool sparse_grad=false) const; + at::Tensor gather(at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) const; + at::Tensor addcmul(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + at::Tensor & addcmul_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + at::Tensor addcdiv(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + at::Tensor & addcdiv_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + ::std::tuple triangular_solve(const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) const; + ::std::tuple symeig(bool eigenvectors=false, bool upper=true) const; + ::std::tuple svd(bool some=true, bool compute_uv=true) const; + at::Tensor swapaxes(int64_t axis0, int64_t axis1) const; + at::Tensor & swapaxes_(int64_t axis0, int64_t axis1) const; + at::Tensor swapdims(int64_t dim0, int64_t dim1) const; + at::Tensor & swapdims_(int64_t 
dim0, int64_t dim1) const; + at::Tensor cholesky(bool upper=false) const; + at::Tensor cholesky_solve(const at::Tensor & input2, bool upper=false) const; + at::Tensor cholesky_inverse(bool upper=false) const; + ::std::tuple qr(bool some=true) const; + ::std::tuple geqrf() const; + at::Tensor orgqr(const at::Tensor & input2) const; + at::Tensor ormqr(const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) const; + at::Tensor lu_solve(const at::Tensor & LU_data, const at::Tensor & LU_pivots) const; + at::Tensor multinomial(int64_t num_samples, bool replacement=false, c10::optional generator=c10::nullopt) const; + at::Tensor & lgamma_() const; + at::Tensor lgamma() const; + at::Tensor digamma() const; + at::Tensor polygamma(int64_t n) const; + at::Tensor & polygamma_(int64_t n) const; + at::Tensor erfinv() const; + at::Tensor & erfinv_() const; + at::Tensor i0() const; + at::Tensor & i0_() const; + at::Tensor sign() const; + at::Tensor & sign_() const; + at::Tensor signbit() const; + at::Tensor dist(const at::Tensor & other, const at::Scalar & p=2) const; + at::Tensor & atan2_(const at::Tensor & other) const; + at::Tensor atan2(const at::Tensor & other) const; + at::Tensor arctan2(const at::Tensor & other) const; + at::Tensor & arctan2_(const at::Tensor & other) const; + at::Tensor lerp(const at::Tensor & end, const at::Scalar & weight) const; + at::Tensor lerp(const at::Tensor & end, const at::Tensor & weight) const; + at::Tensor histc(int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) const; + ::std::tuple histogram(const at::Tensor & bins, const c10::optional & weight={}, bool density=false) const; + ::std::tuple histogram(int64_t bins=100, c10::optional> range=c10::nullopt, const c10::optional & weight={}, bool density=false) const; + at::Tensor fmod(const at::Scalar & other) const; + at::Tensor & fmod_(const at::Scalar & other) const; + at::Tensor fmod(const at::Tensor & other) const; + at::Tensor & fmod_(const at::Tensor & other) const; + at::Tensor hypot(const at::Tensor & other) const; + at::Tensor & hypot_(const at::Tensor & other) const; + at::Tensor igamma(const at::Tensor & other) const; + at::Tensor & igamma_(const at::Tensor & other) const; + at::Tensor igammac(const at::Tensor & other) const; + at::Tensor & igammac_(const at::Tensor & other) const; + at::Tensor nextafter(const at::Tensor & other) const; + at::Tensor & nextafter_(const at::Tensor & other) const; + at::Tensor remainder(const at::Scalar & other) const; + at::Tensor & remainder_(const at::Scalar & other) const; + at::Tensor remainder(const at::Tensor & other) const; + at::Tensor & remainder_(const at::Tensor & other) const; + at::Tensor min() const; + at::Tensor fmin(const at::Tensor & other) const; + at::Tensor max() const; + at::Tensor fmax(const at::Tensor & other) const; + at::Tensor maximum(const at::Tensor & other) const; + at::Tensor max(const at::Tensor & other) const; + at::Tensor minimum(const at::Tensor & other) const; + at::Tensor min(const at::Tensor & other) const; + at::Tensor quantile(const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const; + at::Tensor quantile(double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const; + at::Tensor nanquantile(const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const; + at::Tensor nanquantile(double q, c10::optional dim=c10::nullopt, 
bool keepdim=false, c10::string_view interpolation="linear") const;
+  ::std::tuple<at::Tensor,at::Tensor> sort(int64_t dim=-1, bool descending=false) const;
+  ::std::tuple<at::Tensor,at::Tensor> sort(c10::optional<bool> stable, int64_t dim=-1, bool descending=false) const;
+  ::std::tuple<at::Tensor,at::Tensor> sort(at::Dimname dim, bool descending=false) const;
+  ::std::tuple<at::Tensor,at::Tensor> sort(c10::optional<bool> stable, at::Dimname dim, bool descending=false) const;
+  at::Tensor msort() const;
+  at::Tensor argsort(int64_t dim=-1, bool descending=false) const;
+  at::Tensor argsort(bool stable, int64_t dim=-1, bool descending=false) const;
+  at::Tensor argsort(at::Dimname dim, bool descending=false) const;
+  ::std::tuple<at::Tensor,at::Tensor> topk(int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) const;
+  at::Tensor all() const;
+  at::Tensor any() const;
+  at::Tensor renorm(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const;
+  at::Tensor & renorm_(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const;
+  at::Tensor unfold(int64_t dimension, int64_t size, int64_t step) const;
+  bool equal(const at::Tensor & other) const;
+  at::Tensor pow(const at::Tensor & exponent) const;
+  at::Tensor pow(const at::Scalar & exponent) const;
+  at::Tensor & pow_(const at::Scalar & exponent) const;
+  at::Tensor & pow_(const at::Tensor & exponent) const;
+  at::Tensor float_power(const at::Tensor & exponent) const;
+  at::Tensor float_power(const at::Scalar & exponent) const;
+  at::Tensor & float_power_(const at::Scalar & exponent) const;
+  at::Tensor & float_power_(const at::Tensor & exponent) const;
+  at::Tensor & normal_(double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt) const;
+  at::Tensor alias() const;
+  at::Tensor isfinite() const;
+  at::Tensor isinf() const;
+  void record_stream(at::Stream s) const;
+  at::Tensor isposinf() const;
+  at::Tensor isneginf() const;
+  at::Tensor det() const;
+  ::std::tuple<at::Tensor,at::Tensor> slogdet() const;
+  at::Tensor logdet() const;
+  at::Tensor inverse() const;
+  at::Tensor inner(const at::Tensor & other) const;
+  at::Tensor outer(const at::Tensor & vec2) const;
+  at::Tensor ger(const at::Tensor & vec2) const;
+  at::Tensor to_padded_tensor(double padding, at::OptionalIntArrayRef output_size=c10::nullopt) const;
+  at::Tensor _nested_tensor_layer_norm(const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) const;
+
+  // Special C++ only overloads for std()-like functions (See gh-40287)
+  // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
+  // So, for example std(0) would select the std(unbiased=False) overload
+
+  Tensor var(int dim) const {
+    return var(IntArrayRef{dim});
+  }
+
+  Tensor std(int dim) const {
+    return std(IntArrayRef{dim});
+  }
+
+  // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the
+  // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet.
+  // Until then, we keep these methods to maintain BC for C++ usage like
+  // `x.to(y.dtype)`.
+  // TODO: remove the following two after at::kDouble and its friends are TypeMeta's.
+  inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
+    return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
+  }
+  inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
+    return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
+  }
+
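
As the comment above explains, without the `var(int)`/`std(int)` shims a literal dimension such as `t.std(0)` would bind to the `std(bool unbiased)` overload, because the built-in int -> bool conversion outranks the user-defined int -> IntArrayRef one. A small sketch of the resulting behavior (shapes chosen only for illustration):

    torch::Tensor t = torch::rand({4, 3});
    auto by_dim   = t.std(0);          // int overload: reduces over dim 0, result shape {3}
    auto unbiased = t.std(false);      // bool overload: scalar std over all elements, biased
    auto same     = t.to(t.dtype());   // TypeMeta overload kept for backward compatibility
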
+  template <typename F, typename... Args>
+  decltype(auto) m(F func, Args&&... params) const {
+    return func(*this, std::forward<Args>(params)...);
+  }
+
+  /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended
+  /// to be used from functions that need to access the `Variable`'s equivalent `Tensor`
+  /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`).
+  ///
+  /// One notable difference with the legacy `.data()` function is that changes to the
+  /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset)
+  /// will not update the original `Variable`, due to the fact that this function
+  /// shallow-copies the `Variable`'s underlying TensorImpl.
+  at::Tensor tensor_data() const {
+    return TensorBase::tensor_data();
+  }
+
+  /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data`
+  /// in Python, which creates a new `Variable` that shares the same storage and
+  /// tensor metadata with the original `Variable`, but with a completely new
+  /// autograd history.
+  ///
+  /// NOTE: If we change the tensor metadata (e.g. sizes / strides /
+  /// storage / storage_offset) of a variable created from `var.variable_data()`, those
+  /// changes will not update the original variable `var`. In `.variable_data()`, we set
+  /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal,
+  /// in order to prevent users from changing metadata of `var.variable_data()`
+  /// and expecting the original variable `var` to also be updated.
+  at::Tensor variable_data() const {
+    return TensorBase::variable_data();
+  }
+
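
A sketch of the semantics described in the notes above, assuming the usual libtorch frontend: the copy shares storage but has a fresh autograd history, and metadata changes on it are rejected rather than silently diverging from the original.

    torch::Tensor v = torch::ones({2, 2}, torch::requires_grad());
    torch::Tensor d = v.variable_data();  // same storage as v, no autograd history
    // d.requires_grad() is false; writes through d are visible in v
    // (and bypass autograd, like Python's `.data`), but resizing d would
    // throw because allow_tensor_metadata_change_ is false.
    d.add_(1);
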
+  // Hooks
+  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  template <typename T>
+  using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, Tensor>>::value, unsigned>;
+  template <typename T>
+  using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;
+
+  /// Registers a backward hook.
+  ///
+  /// The hook will be called every time a gradient with respect to the Tensor is computed.
+  /// The hook should have one of the following signatures:
+  /// ```
+  /// hook(Tensor grad) -> Tensor
+  /// ```
+  /// ```
+  /// hook(Tensor grad) -> void
+  /// ```
+  /// The hook should not modify its argument, but it can optionally return a new gradient
+  /// which will be used in place of `grad`.
+  ///
+  /// This function returns the index of the hook in the list, which can be used to remove the hook.
+  ///
+  /// Example:
+  /// @code
+  /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
+  /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
+  /// v.backward(torch::tensor({1., 2., 3.}));
+  /// // This prints:
+  /// // ```
+  /// //  2
+  /// //  4
+  /// //  6
+  /// // [ CPUFloatType{3} ]
+  /// // ```
+  /// std::cout << v.grad() << std::endl;
+  /// v.remove_hook(h);  // removes the hook
+  /// @endcode
+  template <typename T>
+  hook_return_void_t<T> register_hook(T&& hook) const;
+  template <typename T>
+  hook_return_var_t<T> register_hook(T&& hook) const;
+
+  // Variable methods
+  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  Tensor data() const {
+    return TensorBase::data();
+  }
+
+  void _backward(TensorList inputs, const c10::optional<Tensor>& gradient, c10::optional<bool> keep_graph, bool create_graph) const;
+
+  const Tensor& requires_grad_(bool _requires_grad=true) const {
+    TensorBase::requires_grad_(_requires_grad);
+    return *this;
+  }
+};
+
+namespace detail {
+// Helper creator for the Tensor class which doesn't require the user to pass
+// in an intrusive_ptr; instead, it just converts the arguments passed to the
+// requested intrusive_ptr type.
+template <typename T, typename... Args>
+Tensor make_tensor(Args&&... args) {
+  return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
+}
+
+} // namespace detail
+
+} // namespace at
+
+
+namespace at {
+
+// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
+inline void Tensor::__dispatch__backward(at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) const {
+    return at::_ops::_backward::call(const_cast<Tensor&>(*this), inputs, gradient, retain_graph, create_graph);
+}
+
+// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
+inline void Tensor::__dispatch_set_data(const at::Tensor & new_data) const {
+    return at::_ops::set_data::call(const_cast<Tensor&>(*this), new_data);
+}
+
+// aten::data(Tensor self) -> Tensor
+inline at::Tensor Tensor::__dispatch_data() const {
+    return at::_ops::data::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_leaf(Tensor self) -> bool
+inline bool Tensor::__dispatch_is_leaf() const {
+    return at::_ops::is_leaf::call(const_cast<Tensor&>(*this));
+}
+
+// aten::output_nr(Tensor self) -> int
+inline int64_t Tensor::__dispatch_output_nr() const {
+    return at::_ops::output_nr::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_version(Tensor self) -> int
+inline int64_t Tensor::__dispatch__version() const {
+    return at::_ops::_version::call(const_cast<Tensor&>(*this));
+}
+
+// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
+inline at::Tensor & Tensor::__dispatch_requires_grad_(bool requires_grad) const {
+    return at::_ops::requires_grad_::call(const_cast<Tensor&>(*this), requires_grad);
+}
+
+// aten::retain_grad(Tensor(a!) self) -> ()
+inline void Tensor::__dispatch_retain_grad() const {
+    return at::_ops::retain_grad::call(const_cast<Tensor&>(*this));
+}
+
+// aten::retains_grad(Tensor self) -> bool
+inline bool Tensor::__dispatch_retains_grad() const {
+    return at::_ops::retains_grad::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
+inline at::Tensor Tensor::_fw_primal(int64_t level) const {
+    return at::_ops::_fw_primal::call(const_cast<Tensor&>(*this), level);
+}
+
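
From here on, every member is a thin forwarding stub: the `// aten::...` comment gives the operator's native-function schema, and the body routes through a generated `at::_ops` entry. A hedged illustration of that correspondence (`at::_ops` is an internal generated namespace; ordinary code should call the public member instead):

    torch::Tensor t = torch::randn({3});
    torch::Tensor a = t.abs();                 // public member...
    torch::Tensor b = at::_ops::abs::call(t);  // ...forwards to this generated entry
    // Both reach the same kernel through the dispatcher.
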
+// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
+inline at::Tensor & Tensor::rename_(c10::optional<at::DimnameList> names) const {
+    return at::_ops::rename_::call(const_cast<Tensor&>(*this), names);
+}
+
+// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
+inline at::Tensor Tensor::rename(c10::optional<at::DimnameList> names) const {
+    return at::_ops::rename::call(const_cast<Tensor&>(*this), names);
+}
+
+// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
+inline at::Tensor Tensor::align_to(at::DimnameList names) const {
+    return at::_ops::align_to::call(const_cast<Tensor&>(*this), names);
+}
+
+// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
+inline at::Tensor Tensor::align_to(at::DimnameList order, int64_t ellipsis_idx) const {
+    return at::_ops::align_to_ellipsis_idx::call(const_cast<Tensor&>(*this), order, ellipsis_idx);
+}
+
+// aten::align_as(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::align_as(const at::Tensor & other) const {
+    return at::_ops::align_as::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a)
+inline at::Tensor Tensor::refine_names(at::DimnameList names) const {
+    return at::_ops::refine_names::call(const_cast<Tensor&>(*this), names);
+}
+
+// aten::abs(Tensor self) -> Tensor
+inline at::Tensor Tensor::abs() const {
+    return at::_ops::abs::call(const_cast<Tensor&>(*this));
+}
+
+// aten::abs_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::abs_() const {
+    return at::_ops::abs_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::absolute(Tensor self) -> Tensor
+inline at::Tensor Tensor::absolute() const {
+    return at::_ops::absolute::call(const_cast<Tensor&>(*this));
+}
+
+// aten::absolute_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::absolute_() const {
+    return at::_ops::absolute_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::angle(Tensor self) -> Tensor
+inline at::Tensor Tensor::angle() const {
+    return at::_ops::angle::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sgn(Tensor self) -> Tensor
+inline at::Tensor Tensor::sgn() const {
+    return at::_ops::sgn::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sgn_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::sgn_() const {
+    return at::_ops::sgn_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor Tensor::chalf(c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::chalf::call(const_cast<Tensor&>(*this), memory_format);
+}
+
+// aten::_conj(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::_conj() const {
+    return at::_ops::_conj::call(const_cast<Tensor&>(*this));
+}
+
+// aten::conj(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::__dispatch_conj() const {
+    return at::_ops::conj::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_conj_physical(Tensor self) -> Tensor
+inline at::Tensor Tensor::_conj_physical() const {
+    return at::_ops::_conj_physical::call(const_cast<Tensor&>(*this));
+}
+
+// aten::conj_physical(Tensor self) -> Tensor
+inline at::Tensor Tensor::conj_physical() const {
+    return at::_ops::conj_physical::call(const_cast<Tensor&>(*this));
+}
+
+// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::conj_physical_() const { + return at::_ops::conj_physical_::call(const_cast(*this)); +} + +// aten::resolve_conj(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::resolve_conj() const { + return at::_ops::resolve_conj::call(const_cast(*this)); +} + +// aten::resolve_neg(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::resolve_neg() const { + return at::_ops::resolve_neg::call(const_cast(*this)); +} + +// aten::_neg_view(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::_neg_view() const { + return at::_ops::_neg_view::call(const_cast(*this)); +} + +// aten::acos(Tensor self) -> Tensor +inline at::Tensor Tensor::acos() const { + return at::_ops::acos::call(const_cast(*this)); +} + +// aten::acos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::acos_() const { + return at::_ops::acos_::call(const_cast(*this)); +} + +// aten::arccos(Tensor self) -> Tensor +inline at::Tensor Tensor::arccos() const { + return at::_ops::arccos::call(const_cast(*this)); +} + +// aten::arccos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arccos_() const { + return at::_ops::arccos_::call(const_cast(*this)); +} + +// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::add(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::add_Tensor::call(const_cast(*this), other, alpha); +} + +// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::add_(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::add__Tensor::call(const_cast(*this), other, alpha); +} + +// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::add(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::add_Scalar::call(const_cast(*this), other, alpha); +} + +// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::add_(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::add__Scalar::call(const_cast(*this), other, alpha); +} + +// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::addmv(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addmv::call(const_cast(*this), mat, vec, beta, alpha); +} + +// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::addmv_(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addmv_::call(const_cast(*this), mat, vec, beta, alpha); +} + +// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::addr(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addr::call(const_cast(*this), vec1, vec2, beta, alpha); +} + +// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
+inline at::Tensor & Tensor::addr_(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addr_::call(const_cast(*this), vec1, vec2, beta, alpha); +} + +// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::all(int64_t dim, bool keepdim) const { + return at::_ops::all_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::all(at::Dimname dim, bool keepdim) const { + return at::_ops::all_dimname::call(const_cast(*this), dim, keepdim); +} + +// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool +inline bool Tensor::allclose(const at::Tensor & other, double rtol, double atol, bool equal_nan) const { + return at::_ops::allclose::call(const_cast(*this), other, rtol, atol, equal_nan); +} + +// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::any(int64_t dim, bool keepdim) const { + return at::_ops::any_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::any(at::Dimname dim, bool keepdim) const { + return at::_ops::any_dimname::call(const_cast(*this), dim, keepdim); +} + +// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::argmax(c10::optional dim, bool keepdim) const { + return at::_ops::argmax::call(const_cast(*this), dim, keepdim); +} + +// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::argmin(c10::optional dim, bool keepdim) const { + return at::_ops::argmin::call(const_cast(*this), dim, keepdim); +} + +// aten::acosh(Tensor self) -> Tensor +inline at::Tensor Tensor::acosh() const { + return at::_ops::acosh::call(const_cast(*this)); +} + +// aten::acosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::acosh_() const { + return at::_ops::acosh_::call(const_cast(*this)); +} + +// aten::arccosh(Tensor self) -> Tensor +inline at::Tensor Tensor::arccosh() const { + return at::_ops::arccosh::call(const_cast(*this)); +} + +// aten::arccosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arccosh_() const { + return at::_ops::arccosh_::call(const_cast(*this)); +} + +// aten::asinh(Tensor self) -> Tensor +inline at::Tensor Tensor::asinh() const { + return at::_ops::asinh::call(const_cast(*this)); +} + +// aten::asinh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::asinh_() const { + return at::_ops::asinh_::call(const_cast(*this)); +} + +// aten::arcsinh(Tensor self) -> Tensor +inline at::Tensor Tensor::arcsinh() const { + return at::_ops::arcsinh::call(const_cast(*this)); +} + +// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arcsinh_() const { + return at::_ops::arcsinh_::call(const_cast(*this)); +} + +// aten::atanh(Tensor self) -> Tensor +inline at::Tensor Tensor::atanh() const { + return at::_ops::atanh::call(const_cast(*this)); +} + +// aten::atanh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::atanh_() const { + return at::_ops::atanh_::call(const_cast(*this)); +} + +// aten::arctanh(Tensor self) -> Tensor +inline at::Tensor Tensor::arctanh() const { + return at::_ops::arctanh::call(const_cast(*this)); +} + +// aten::arctanh_(Tensor(a!) self) -> Tensor(a!) 
+inline at::Tensor & Tensor::arctanh_() const { + return at::_ops::arctanh_::call(const_cast(*this)); +} + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor Tensor::as_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided::call(const_cast(*this), c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); +} + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor Tensor::as_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided::call(const_cast(*this), size, stride, storage_offset); +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +inline const at::Tensor & Tensor::as_strided_(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided_::call(const_cast(*this), c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +inline const at::Tensor & Tensor::as_strided__symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided_::call(const_cast(*this), size, stride, storage_offset); +} + +// aten::asin(Tensor self) -> Tensor +inline at::Tensor Tensor::asin() const { + return at::_ops::asin::call(const_cast(*this)); +} + +// aten::asin_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::asin_() const { + return at::_ops::asin_::call(const_cast(*this)); +} + +// aten::arcsin(Tensor self) -> Tensor +inline at::Tensor Tensor::arcsin() const { + return at::_ops::arcsin::call(const_cast(*this)); +} + +// aten::arcsin_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arcsin_() const { + return at::_ops::arcsin_::call(const_cast(*this)); +} + +// aten::atan(Tensor self) -> Tensor +inline at::Tensor Tensor::atan() const { + return at::_ops::atan::call(const_cast(*this)); +} + +// aten::atan_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::atan_() const { + return at::_ops::atan_::call(const_cast(*this)); +} + +// aten::arctan(Tensor self) -> Tensor +inline at::Tensor Tensor::arctan() const { + return at::_ops::arctan::call(const_cast(*this)); +} + +// aten::arctan_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arctan_() const { + return at::_ops::arctan_::call(const_cast(*this)); +} + +// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::baddbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::baddbmm::call(const_cast(*this), batch1, batch2, beta, alpha); +} + +// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
+inline at::Tensor & Tensor::baddbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::baddbmm_::call(const_cast(*this), batch1, batch2, beta, alpha); +} + +// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor +inline at::Tensor Tensor::bernoulli(c10::optional generator) const { + return at::_ops::bernoulli::call(const_cast(*this), generator); +} + +// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::bernoulli_(const at::Tensor & p, c10::optional generator) const { + return at::_ops::bernoulli__Tensor::call(const_cast(*this), p, generator); +} + +// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::bernoulli_(double p, c10::optional generator) const { + return at::_ops::bernoulli__float::call(const_cast(*this), p, generator); +} + +// aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor +inline at::Tensor Tensor::bernoulli(double p, c10::optional generator) const { + return at::_ops::bernoulli_p::call(const_cast(*this), p, generator); +} + +// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor +inline at::Tensor Tensor::bincount(const c10::optional & weights, int64_t minlength) const { + return at::_ops::bincount::call(const_cast(*this), weights, minlength); +} + +// aten::bitwise_not(Tensor self) -> Tensor +inline at::Tensor Tensor::bitwise_not() const { + return at::_ops::bitwise_not::call(const_cast(*this)); +} + +// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_not_() const { + return at::_ops::bitwise_not_::call(const_cast(*this)); +} + +// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::copysign(const at::Tensor & other) const { + return at::_ops::copysign_Tensor::call(const_cast(*this), other); +} + +// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::copysign_(const at::Tensor & other) const { + return at::_ops::copysign__Tensor::call(const_cast(*this), other); +} + +// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::copysign(const at::Scalar & other) const { + return at::_ops::copysign_Scalar::call(const_cast(*this), other); +} + +// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::copysign_(const at::Scalar & other) const { + return at::_ops::copysign__Scalar::call(const_cast(*this), other); +} + +// aten::logical_not(Tensor self) -> Tensor +inline at::Tensor Tensor::logical_not() const { + return at::_ops::logical_not::call(const_cast(*this)); +} + +// aten::logical_not_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::logical_not_() const { + return at::_ops::logical_not_::call(const_cast(*this)); +} + +// aten::logical_xor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logical_xor(const at::Tensor & other) const { + return at::_ops::logical_xor::call(const_cast(*this), other); +} + +// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+inline at::Tensor & Tensor::logical_xor_(const at::Tensor & other) const { + return at::_ops::logical_xor_::call(const_cast(*this), other); +} + +// aten::logical_and(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logical_and(const at::Tensor & other) const { + return at::_ops::logical_and::call(const_cast(*this), other); +} + +// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::logical_and_(const at::Tensor & other) const { + return at::_ops::logical_and_::call(const_cast(*this), other); +} + +// aten::logical_or(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logical_or(const at::Tensor & other) const { + return at::_ops::logical_or::call(const_cast(*this), other); +} + +// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::logical_or_(const at::Tensor & other) const { + return at::_ops::logical_or_::call(const_cast(*this), other); +} + +// aten::bmm(Tensor self, Tensor mat2) -> Tensor +inline at::Tensor Tensor::bmm(const at::Tensor & mat2) const { + return at::_ops::bmm::call(const_cast(*this), mat2); +} + +// aten::broadcast_to(Tensor(a) self, int[] size) -> Tensor(a) +inline at::Tensor Tensor::broadcast_to(at::IntArrayRef size) const { + return at::_ops::broadcast_to::call(const_cast(*this), size); +} + +// aten::ceil(Tensor self) -> Tensor +inline at::Tensor Tensor::ceil() const { + return at::_ops::ceil::call(const_cast(*this)); +} + +// aten::ceil_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::ceil_() const { + return at::_ops::ceil_::call(const_cast(*this)); +} + +// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] +inline ::std::vector Tensor::unsafe_chunk(int64_t chunks, int64_t dim) const { + return at::_ops::unsafe_chunk::call(const_cast(*this), chunks, dim); +} + +// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::chunk(int64_t chunks, int64_t dim) const { + return at::_ops::chunk::call(const_cast(*this), chunks, dim); +} + +// aten::tensor_split.sections(Tensor(a -> *) self, int sections, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split(int64_t sections, int64_t dim) const { + return at::_ops::tensor_split_sections::call(const_cast(*this), sections, dim); +} + +// aten::tensor_split.indices(Tensor(a -> *) self, int[] indices, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split(at::IntArrayRef indices, int64_t dim) const { + return at::_ops::tensor_split_indices::call(const_cast(*this), indices, dim); +} + +// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split(const at::Tensor & tensor_indices_or_sections, int64_t dim) const { + return at::_ops::tensor_split_tensor_indices_or_sections::call(const_cast(*this), tensor_indices_or_sections, dim); +} + +// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor +inline at::Tensor Tensor::clamp(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp::call(const_cast(*this), min, max); +} + +// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor +inline at::Tensor Tensor::clamp(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp_Tensor::call(const_cast(*this), min, max); +} + +// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) 
+inline at::Tensor & Tensor::clamp_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp_::call(const_cast(*this), min, max); +} + +// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp__Tensor::call(const_cast(*this), min, max); +} + +// aten::clamp_max(Tensor self, Scalar max) -> Tensor +inline at::Tensor Tensor::clamp_max(const at::Scalar & max) const { + return at::_ops::clamp_max::call(const_cast(*this), max); +} + +// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor +inline at::Tensor Tensor::clamp_max(const at::Tensor & max) const { + return at::_ops::clamp_max_Tensor::call(const_cast(*this), max); +} + +// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_max_(const at::Scalar & max) const { + return at::_ops::clamp_max_::call(const_cast(*this), max); +} + +// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_max_(const at::Tensor & max) const { + return at::_ops::clamp_max__Tensor::call(const_cast(*this), max); +} + +// aten::clamp_min(Tensor self, Scalar min) -> Tensor +inline at::Tensor Tensor::clamp_min(const at::Scalar & min) const { + return at::_ops::clamp_min::call(const_cast(*this), min); +} + +// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor +inline at::Tensor Tensor::clamp_min(const at::Tensor & min) const { + return at::_ops::clamp_min_Tensor::call(const_cast(*this), min); +} + +// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_min_(const at::Scalar & min) const { + return at::_ops::clamp_min_::call(const_cast(*this), min); +} + +// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_min_(const at::Tensor & min) const { + return at::_ops::clamp_min__Tensor::call(const_cast(*this), min); +} + +// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor +inline at::Tensor Tensor::clip(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip::call(const_cast(*this), min, max); +} + +// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor +inline at::Tensor Tensor::clip(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip_Tensor::call(const_cast(*this), min, max); +} + +// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) +inline at::Tensor & Tensor::clip_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip_::call(const_cast(*this), min, max); +} + +// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) +inline at::Tensor & Tensor::clip_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip__Tensor::call(const_cast(*this), min, max); +} + +// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) +inline at::Tensor Tensor::__dispatch_contiguous(at::MemoryFormat memory_format) const { + return at::_ops::contiguous::call(const_cast(*this), memory_format); +} + +// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) 
+inline at::Tensor & Tensor::copy_(const at::Tensor & src, bool non_blocking) const { + return at::_ops::copy_::call(const_cast(*this), src, non_blocking); +} + +// aten::cos(Tensor self) -> Tensor +inline at::Tensor Tensor::cos() const { + return at::_ops::cos::call(const_cast(*this)); +} + +// aten::cos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::cos_() const { + return at::_ops::cos_::call(const_cast(*this)); +} + +// aten::cosh(Tensor self) -> Tensor +inline at::Tensor Tensor::cosh() const { + return at::_ops::cosh::call(const_cast(*this)); +} + +// aten::cosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::cosh_() const { + return at::_ops::cosh_::call(const_cast(*this)); +} + +// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor +inline at::Tensor Tensor::count_nonzero(at::IntArrayRef dim) const { + return at::_ops::count_nonzero_dim_IntList::call(const_cast(*this), dim); +} + +// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor +inline at::Tensor Tensor::count_nonzero(c10::optional dim) const { + return at::_ops::count_nonzero::call(const_cast(*this), dim); +} + +// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor +inline at::Tensor Tensor::cov(int64_t correction, const c10::optional & fweights, const c10::optional & aweights) const { + return at::_ops::cov::call(const_cast(*this), correction, fweights, aweights); +} + +// aten::corrcoef(Tensor self) -> Tensor +inline at::Tensor Tensor::corrcoef() const { + return at::_ops::corrcoef::call(const_cast(*this)); +} + +// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummax(int64_t dim) const { + return at::_ops::cummax::call(const_cast(*this), dim); +} + +// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummax(at::Dimname dim) const { + return at::_ops::cummax_dimname::call(const_cast(*this), dim); +} + +// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummin(int64_t dim) const { + return at::_ops::cummin::call(const_cast(*this), dim); +} + +// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummin(at::Dimname dim) const { + return at::_ops::cummin_dimname::call(const_cast(*this), dim); +} + +// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::cumprod(int64_t dim, c10::optional dtype) const { + return at::_ops::cumprod::call(const_cast(*this), dim, dtype); +} + +// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) +inline at::Tensor & Tensor::cumprod_(int64_t dim, c10::optional dtype) const { + return at::_ops::cumprod_::call(const_cast(*this), dim, dtype); +} + +// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::cumprod(at::Dimname dim, c10::optional dtype) const { + return at::_ops::cumprod_dimname::call(const_cast(*this), dim, dtype); +} + +// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) +inline at::Tensor & Tensor::cumprod_(at::Dimname dim, c10::optional dtype) const { + return at::_ops::cumprod__dimname::call(const_cast(*this), dim, dtype); +} + +// aten::cumsum(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor
+inline at::Tensor Tensor::cumsum(int64_t dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::cumsum::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)
+inline at::Tensor & Tensor::cumsum_(int64_t dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::cumsum_::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::cumsum(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::cumsum_dimname::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)
+inline at::Tensor & Tensor::cumsum_(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::cumsum__dimname::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
+inline at::Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) const {
+    return at::_ops::diag_embed::call(const_cast<Tensor&>(*this), offset, dim1, dim2);
+}
+
+// aten::diagflat(Tensor self, int offset=0) -> Tensor
+inline at::Tensor Tensor::diagflat(int64_t offset) const {
+    return at::_ops::diagflat::call(const_cast<Tensor&>(*this), offset);
+}
+
+// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
+inline at::Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const {
+    return at::_ops::diagonal::call(const_cast<Tensor&>(*this), offset, dim1, dim2);
+}
+
+// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
+inline at::Tensor Tensor::diagonal(at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) const {
+    return at::_ops::diagonal_Dimname::call(const_cast<Tensor&>(*this), outdim, dim1, dim2, offset);
+}
+
+// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
+inline at::Tensor & Tensor::fill_diagonal_(const at::Scalar & fill_value, bool wrap) const {
+    return at::_ops::fill_diagonal_::call(const_cast<Tensor&>(*this), fill_value, wrap);
+}
+
+// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
+inline at::Tensor Tensor::diff(int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) const {
+    return at::_ops::diff::call(const_cast<Tensor&>(*this), n, dim, prepend, append);
+}
+
+// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::div(const at::Tensor & other) const {
+    return at::_ops::div_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::div_(const at::Tensor & other) const {
+    return at::_ops::div__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
+inline at::Tensor Tensor::div(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::div_Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
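
The `rounding_mode` variants select among three division flavors. A quick sketch of the semantics (values worked out by hand; `divide` is an alias for `div`):

    torch::Tensor a = torch::tensor({ 7.0, -7.0});
    torch::Tensor b = torch::tensor({ 2.0,  2.0});
    auto q_true  = a.div(b);                             // {3.5, -3.5}  true division
    auto q_trunc = a.div(b, /*rounding_mode=*/"trunc");  // {3., -3.}   rounds toward zero
    auto q_floor = a.div(b, /*rounding_mode=*/"floor");  // {3., -4.}   rounds toward -inf
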
+// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
+inline at::Tensor & Tensor::div_(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::div__Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
+// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::div(const at::Scalar & other) const {
+    return at::_ops::div_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::div_(const at::Scalar & other) const {
+    return at::_ops::div__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
+inline at::Tensor Tensor::div(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::div_Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
+// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
+inline at::Tensor & Tensor::div_(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::div__Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
+// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::divide(const at::Tensor & other) const {
+    return at::_ops::divide_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::divide_(const at::Tensor & other) const {
+    return at::_ops::divide__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::divide(const at::Scalar & other) const {
+    return at::_ops::divide_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::divide_(const at::Scalar & other) const {
+    return at::_ops::divide__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor
+inline at::Tensor Tensor::divide(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::divide_Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
+// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)
+inline at::Tensor & Tensor::divide_(const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::divide__Tensor_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
+// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor
+inline at::Tensor Tensor::divide(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::divide_Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
+// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)
+inline at::Tensor & Tensor::divide_(const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) const {
+    return at::_ops::divide__Scalar_mode::call(const_cast<Tensor&>(*this), other, rounding_mode);
+}
+
+// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::true_divide(const at::Tensor & other) const {
+    return at::_ops::true_divide_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::true_divide_(const at::Tensor & other) const { + return at::_ops::true_divide__Tensor::call(const_cast(*this), other); +} + +// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::true_divide(const at::Scalar & other) const { + return at::_ops::true_divide_Scalar::call(const_cast(*this), other); +} + +// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::true_divide_(const at::Scalar & other) const { + return at::_ops::true_divide__Scalar::call(const_cast(*this), other); +} + +// aten::dot(Tensor self, Tensor tensor) -> Tensor +inline at::Tensor Tensor::dot(const at::Tensor & tensor) const { + return at::_ops::dot::call(const_cast(*this), tensor); +} + +// aten::vdot(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::vdot(const at::Tensor & other) const { + return at::_ops::vdot::call(const_cast(*this), other); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty(at::IntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_empty::call(const_cast(*this), c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty::call(const_cast(*this), c10::fromIntArrayRef(size), dtype, layout, device, pin_memory); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_symint(c10::SymIntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_empty::call(const_cast(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty::call(const_cast(*this), size, dtype, layout, device, pin_memory); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) const { + return at::_ops::new_empty_strided::call(const_cast(*this), c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty_strided::call(const_cast(*this), c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), dtype, layout, device, pin_memory); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) const { + return at::_ops::new_empty_strided::call(const_cast(*this), size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty_strided::call(const_cast(*this), size, stride, dtype, layout, device, pin_memory); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) const { + return at::_ops::new_full::call(const_cast(*this), c10::fromIntArrayRef(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_full::call(const_cast(*this), c10::fromIntArrayRef(size), fill_value, dtype, layout, device, pin_memory); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) const { + return at::_ops::new_full::call(const_cast(*this), size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_full::call(const_cast(*this), size, fill_value, dtype, layout, device, pin_memory); +} + +// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_zeros(at::IntArrayRef size, at::TensorOptions options) const {
+    return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+
+// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
+    return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size), dtype, layout, device, pin_memory);
+}
+
+// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options) const {
+    return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+
+// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_zeros_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
+    return at::_ops::new_zeros::call(const_cast<Tensor&>(*this), size, dtype, layout, device, pin_memory);
+}
+
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_ones(at::IntArrayRef size, at::TensorOptions options) const {
+    return at::_ops::new_ones::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
+    return at::_ops::new_ones::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size), dtype, layout, device, pin_memory);
+}
+
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_ones_symint(c10::SymIntArrayRef size, at::TensorOptions options) const {
+    return at::_ops::new_ones::call(const_cast<Tensor&>(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+
+// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor Tensor::new_ones_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const {
+    return at::_ops::new_ones::call(const_cast<Tensor&>(*this), size, dtype, layout, device, pin_memory);
+}
+
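
The `new_*` factory members create fresh tensors that inherit dtype, device, and the other `TensorOptions` from `*this` unless explicitly overridden, which is what the paired `TensorOptions` / unpacked-optional overloads above encode. A short sketch of the intended usage:

    torch::Tensor base = torch::empty({2, 2}, torch::dtype(torch::kFloat).device(torch::kCPU));
    torch::Tensor z = base.new_zeros({3});                      // kFloat on CPU, inherited from base
    torch::Tensor f = base.new_full({2, 3}, /*fill_value=*/7);  // filled with 7, options inherited
    torch::Tensor o = base.new_ones({4}, torch::kDouble);       // dtype overridden to double
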
+// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+inline const at::Tensor & Tensor::resize_(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::resize_::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size), memory_format);
+}
+
+// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+inline const at::Tensor & Tensor::resize__symint(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::resize_::call(const_cast<Tensor&>(*this), size, memory_format);
+}
+
+// aten::erf(Tensor self) -> Tensor
+inline at::Tensor Tensor::erf() const {
+    return at::_ops::erf::call(const_cast<Tensor&>(*this));
+}
+
+// aten::erf_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::erf_() const {
+    return at::_ops::erf_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::erfc(Tensor self) -> Tensor
+inline at::Tensor Tensor::erfc() const {
+    return at::_ops::erfc::call(const_cast<Tensor&>(*this));
+}
+
+// aten::erfc_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::erfc_() const {
+    return at::_ops::erfc_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::exp(Tensor self) -> Tensor
+inline at::Tensor Tensor::exp() const {
+    return at::_ops::exp::call(const_cast<Tensor&>(*this));
+}
+
+// aten::exp_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::exp_() const {
+    return at::_ops::exp_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::exp2(Tensor self) -> Tensor
+inline at::Tensor Tensor::exp2() const {
+    return at::_ops::exp2::call(const_cast<Tensor&>(*this));
+}
+
+// aten::exp2_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::exp2_() const {
+    return at::_ops::exp2_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::expm1(Tensor self) -> Tensor
+inline at::Tensor Tensor::expm1() const {
+    return at::_ops::expm1::call(const_cast<Tensor&>(*this));
+}
+
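The trailing-underscore convention visible here (`exp` vs `exp_`) holds throughout this header: the plain form allocates a new result, the `_` form mutates the receiver and returns it. Another usage sketch under the same assumptions as above:

    #include <ATen/ATen.h>

    void demo_inplace() {
        at::Tensor t = at::ones({3});
        at::Tensor u = t.exp();   // out-of-place: t still holds 1.0
        t.exp_();                 // in-place: t now holds e^1, no new allocation
    }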
+// aten::expm1_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::expm1_() const {
+    return at::_ops::expm1_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
+inline at::Tensor Tensor::expand(at::IntArrayRef size, bool implicit) const {
+    return at::_ops::expand::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size), implicit);
+}
+
+// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
+inline at::Tensor Tensor::expand_symint(c10::SymIntArrayRef size, bool implicit) const {
+    return at::_ops::expand::call(const_cast<Tensor&>(*this), size, implicit);
+}
+
+// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)
+inline at::Tensor Tensor::expand_as(const at::Tensor & other) const {
+    return at::_ops::expand_as::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)
+inline at::Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const {
+    return at::_ops::flatten_using_ints::call(const_cast<Tensor&>(*this), start_dim, end_dim);
+}
+
+// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)
+inline at::Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim, at::Dimname out_dim) const {
+    return at::_ops::flatten_named_out_dim::call(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
+}
+
+// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)
+inline at::Tensor Tensor::flatten(at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) const {
+    return at::_ops::flatten_using_names::call(const_cast<Tensor&>(*this), start_dim, end_dim, out_dim);
+}
+
+// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)
+inline at::Tensor Tensor::flatten(at::DimnameList dims, at::Dimname out_dim) const {
+    return at::_ops::flatten_DimnameList::call(const_cast<Tensor&>(*this), dims, out_dim);
+}
+
+// aten::unflatten.int(Tensor(a) self, int dim, int[] sizes) -> Tensor(a)
+inline at::Tensor Tensor::unflatten(int64_t dim, at::IntArrayRef sizes) const {
+    return at::_ops::unflatten_int::call(const_cast<Tensor&>(*this), dim, sizes);
+}
+
+// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a)
+inline at::Tensor Tensor::unflatten(at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) const {
+    return at::_ops::unflatten_Dimname::call(const_cast<Tensor&>(*this), dim, sizes, names);
+}
+
+// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)
+inline at::Tensor & Tensor::fill_(const at::Scalar & value) const {
+    return at::_ops::fill__Scalar::call(const_cast<Tensor&>(*this), value);
+}
+
+// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)
+inline at::Tensor & Tensor::fill_(const at::Tensor & value) const {
+    return at::_ops::fill__Tensor::call(const_cast<Tensor&>(*this), value);
+}
+
+// aten::floor(Tensor self) -> Tensor
+inline at::Tensor Tensor::floor() const {
+    return at::_ops::floor::call(const_cast<Tensor&>(*this));
+}
+
+// aten::floor_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::floor_() const {
+    return at::_ops::floor_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::floor_divide(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::floor_divide(const at::Tensor & other) const {
+    return at::_ops::floor_divide::call(const_cast<Tensor&>(*this), other);
+}
+
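`expand` produces a broadcasted view without copying, while `flatten`/`unflatten` convert between dimension layouts, copying only when a view is impossible. A hedged sketch:

    #include <ATen/ATen.h>

    void demo_expand_flatten() {
        at::Tensor row = at::rand({1, 4});
        at::Tensor big = row.expand({3, 4});      // view: stride 0 on dim 0, no copy
        at::Tensor flat = big.flatten(0, 1);      // shape {12}; copies, since big is non-contiguous
        at::Tensor back = flat.unflatten(0, {3, 4});
    }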
+// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::floor_divide_(const at::Tensor & other) const {
+    return at::_ops::floor_divide__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::floor_divide(const at::Scalar & other) const {
+    return at::_ops::floor_divide_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::floor_divide_(const at::Scalar & other) const {
+    return at::_ops::floor_divide__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::frac(Tensor self) -> Tensor
+inline at::Tensor Tensor::frac() const {
+    return at::_ops::frac::call(const_cast<Tensor&>(*this));
+}
+
+// aten::frac_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::frac_() const {
+    return at::_ops::frac_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::gcd(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::gcd(const at::Tensor & other) const {
+    return at::_ops::gcd::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::gcd_(const at::Tensor & other) const {
+    return at::_ops::gcd_::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::lcm(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::lcm(const at::Tensor & other) const {
+    return at::_ops::lcm::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::lcm_(const at::Tensor & other) const {
+    return at::_ops::lcm_::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
+inline at::Tensor Tensor::index(const c10::List<c10::optional<at::Tensor>> & indices) const {
+    return at::_ops::index_Tensor::call(const_cast<Tensor&>(*this), indices);
+}
+
+// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)
+inline at::Tensor & Tensor::index_copy_(int64_t dim, const at::Tensor & index, const at::Tensor & source) const {
+    return at::_ops::index_copy_::call(const_cast<Tensor&>(*this), dim, index, source);
+}
+
+// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor
+inline at::Tensor Tensor::index_copy(int64_t dim, const at::Tensor & index, const at::Tensor & source) const {
+    return at::_ops::index_copy::call(const_cast<Tensor&>(*this), dim, index, source);
+}
+
+// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)
+inline at::Tensor & Tensor::index_copy_(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const {
+    return at::_ops::index_copy__dimname::call(const_cast<Tensor&>(*this), dim, index, source);
+}
+
+// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
+inline at::Tensor Tensor::index_copy(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const {
+    return at::_ops::index_copy_dimname::call(const_cast<Tensor&>(*this), dim, index, source);
+}
+
+// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)
+inline at::Tensor & Tensor::index_put_(const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) const {
+    return at::_ops::index_put_::call(const_cast<Tensor&>(*this), indices, values, accumulate);
+}
+
+// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
+inline at::Tensor Tensor::index_put(const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) const {
+    return at::_ops::index_put::call(const_cast<Tensor&>(*this), indices, values, accumulate);
+}
+
+// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor
+inline at::Tensor Tensor::isclose(const at::Tensor & other, double rtol, double atol, bool equal_nan) const {
+    return at::_ops::isclose::call(const_cast<Tensor&>(*this), other, rtol, atol, equal_nan);
+}
+
+// aten::isnan(Tensor self) -> Tensor
+inline at::Tensor Tensor::isnan() const {
+    return at::_ops::isnan::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_distributed(Tensor self) -> bool
+inline bool Tensor::is_distributed() const {
+    return at::_ops::is_distributed::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_floating_point(Tensor self) -> bool
+inline bool Tensor::__dispatch_is_floating_point() const {
+    return at::_ops::is_floating_point::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_complex(Tensor self) -> bool
+inline bool Tensor::__dispatch_is_complex() const {
+    return at::_ops::is_complex::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_conj(Tensor self) -> bool
+inline bool Tensor::__dispatch_is_conj() const {
+    return at::_ops::is_conj::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_is_zerotensor(Tensor self) -> bool
+inline bool Tensor::__dispatch__is_zerotensor() const {
+    return at::_ops::_is_zerotensor::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_neg(Tensor self) -> bool
+inline bool Tensor::__dispatch_is_neg() const {
+    return at::_ops::is_neg::call(const_cast<Tensor&>(*this));
+}
+
+// aten::isreal(Tensor self) -> Tensor
+inline at::Tensor Tensor::isreal() const {
+    return at::_ops::isreal::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_nonzero(Tensor self) -> bool
+inline bool Tensor::is_nonzero() const {
+    return at::_ops::is_nonzero::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_same_size(Tensor self, Tensor other) -> bool
+inline bool Tensor::is_same_size(const at::Tensor & other) const {
+    return at::_ops::is_same_size::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::is_signed(Tensor self) -> bool
+inline bool Tensor::__dispatch_is_signed() const {
+    return at::_ops::is_signed::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_inference(Tensor self) -> bool
+inline bool Tensor::__dispatch_is_inference() const {
+    return at::_ops::is_inference::call(const_cast<Tensor&>(*this));
+}
+
+// aten::kron(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::kron(const at::Tensor & other) const {
+    return at::_ops::kron::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const {
+    return at::_ops::kthvalue::call(const_cast<Tensor&>(*this), k, dim, keepdim);
+}
+
+// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::kthvalue(int64_t k, at::Dimname dim, bool keepdim) const {
+    return at::_ops::kthvalue_dimname::call(const_cast<Tensor&>(*this), k, dim, keepdim);
+}
+
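`index` and `index_put_` take a `c10::List<c10::optional<at::Tensor>>`: one optional index tensor per leading dimension, where a disengaged optional means "all of this dimension". A sketch under the same assumptions:

    #include <ATen/ATen.h>

    void demo_index_put() {
        at::Tensor x = at::zeros({4, 4});
        at::Tensor rows = at::arange(0, 4, 2);      // {0, 2}, int64
        c10::List<c10::optional<at::Tensor>> indices;
        indices.push_back(c10::optional<at::Tensor>(rows));
        x.index_put_(indices, at::ones({2, 4}));    // x[{0,2}, :] = 1
    }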
+// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor
+inline at::Tensor Tensor::nan_to_num(c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) const {
+    return at::_ops::nan_to_num::call(const_cast<Tensor&>(*this), nan, posinf, neginf);
+}
+
+// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)
+inline at::Tensor & Tensor::nan_to_num_(c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) const {
+    return at::_ops::nan_to_num_::call(const_cast<Tensor&>(*this), nan, posinf, neginf);
+}
+
+// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::ldexp(const at::Tensor & other) const {
+    return at::_ops::ldexp_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::ldexp_(const at::Tensor & other) const {
+    return at::_ops::ldexp_::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::log(Tensor self) -> Tensor
+inline at::Tensor Tensor::log() const {
+    return at::_ops::log::call(const_cast<Tensor&>(*this));
+}
+
+// aten::log_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::log_() const {
+    return at::_ops::log_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::log10(Tensor self) -> Tensor
+inline at::Tensor Tensor::log10() const {
+    return at::_ops::log10::call(const_cast<Tensor&>(*this));
+}
+
+// aten::log10_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::log10_() const {
+    return at::_ops::log10_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::log1p(Tensor self) -> Tensor
+inline at::Tensor Tensor::log1p() const {
+    return at::_ops::log1p::call(const_cast<Tensor&>(*this));
+}
+
+// aten::log1p_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::log1p_() const {
+    return at::_ops::log1p_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::log2(Tensor self) -> Tensor
+inline at::Tensor Tensor::log2() const {
+    return at::_ops::log2::call(const_cast<Tensor&>(*this));
+}
+
+// aten::log2_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::log2_() const {
+    return at::_ops::log2_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::logaddexp(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::logaddexp(const at::Tensor & other) const {
+    return at::_ops::logaddexp::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::logaddexp2(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::logaddexp2(const at::Tensor & other) const {
+    return at::_ops::logaddexp2::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::xlogy(const at::Tensor & other) const {
+    return at::_ops::xlogy_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::xlogy(const at::Scalar & other) const {
+    return at::_ops::xlogy_Scalar_Other::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::xlogy_(const at::Tensor & other) const {
+    return at::_ops::xlogy__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::xlogy_(const at::Scalar & other) const {
+    return at::_ops::xlogy__Scalar_Other::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::log_softmax(int64_t dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::log_softmax_int::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::log_softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::log_softmax_Dimname::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::logcumsumexp(Tensor self, int dim) -> Tensor
+inline at::Tensor Tensor::logcumsumexp(int64_t dim) const {
+    return at::_ops::logcumsumexp::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor
+inline at::Tensor Tensor::logcumsumexp(at::Dimname dim) const {
+    return at::_ops::logcumsumexp_dimname::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::logsumexp(at::IntArrayRef dim, bool keepdim) const {
+    return at::_ops::logsumexp::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::logsumexp(at::DimnameList dim, bool keepdim) const {
+    return at::_ops::logsumexp_names::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::matmul(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::matmul(const at::Tensor & other) const {
+    return at::_ops::matmul::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::matrix_power(Tensor self, int n) -> Tensor
+inline at::Tensor Tensor::matrix_power(int64_t n) const {
+    return at::_ops::matrix_power::call(const_cast<Tensor&>(*this), n);
+}
+
+// aten::matrix_exp(Tensor self) -> Tensor
+inline at::Tensor Tensor::matrix_exp() const {
+    return at::_ops::matrix_exp::call(const_cast<Tensor&>(*this));
+}
+
+// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::aminmax(c10::optional<int64_t> dim, bool keepdim) const {
+    return at::_ops::aminmax::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::max(int64_t dim, bool keepdim) const {
+    return at::_ops::max_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::max(at::Dimname dim, bool keepdim) const {
+    return at::_ops::max_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::amax(at::IntArrayRef dim, bool keepdim) const {
+    return at::_ops::amax::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::mean(c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::mean::call(const_cast<Tensor&>(*this), dtype);
+}
+
+// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::mean(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::mean_dim::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
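`logsumexp` computes log(sum(exp(x))) with the maximum subtracted internally, so it is the numerically safe way to obtain a softmax normalizer; `x - x.logsumexp(dim, true)` matches `x.log_softmax(dim)`. A short sketch:

    #include <ATen/ATen.h>

    void demo_logsumexp() {
        at::Tensor logits = at::randn({8, 10});
        at::Tensor lse = logits.logsumexp(/*dim=*/{1}, /*keepdim=*/true);
        at::Tensor log_probs = logits - lse;      // equivalent to logits.log_softmax(1)
    }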
+// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::mean(at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::mean_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
+// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::nanmean(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::nanmean::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
+// aten::median(Tensor self) -> Tensor
+inline at::Tensor Tensor::median() const {
+    return at::_ops::median::call(const_cast<Tensor&>(*this));
+}
+
+// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::median(int64_t dim, bool keepdim) const {
+    return at::_ops::median_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::median(at::Dimname dim, bool keepdim) const {
+    return at::_ops::median_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::nanmedian(Tensor self) -> Tensor
+inline at::Tensor Tensor::nanmedian() const {
+    return at::_ops::nanmedian::call(const_cast<Tensor&>(*this));
+}
+
+// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::nanmedian(int64_t dim, bool keepdim) const {
+    return at::_ops::nanmedian_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::nanmedian(at::Dimname dim, bool keepdim) const {
+    return at::_ops::nanmedian_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::min(int64_t dim, bool keepdim) const {
+    return at::_ops::min_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::min(at::Dimname dim, bool keepdim) const {
+    return at::_ops::min_names_dim::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::amin(at::IntArrayRef dim, bool keepdim) const {
+    return at::_ops::amin::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::mm(Tensor self, Tensor mat2) -> Tensor
+inline at::Tensor Tensor::mm(const at::Tensor & mat2) const {
+    return at::_ops::mm::call(const_cast<Tensor&>(*this), mat2);
+}
+
+// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::mode(int64_t dim, bool keepdim) const {
+    return at::_ops::mode::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::mode(at::Dimname dim, bool keepdim) const {
+    return at::_ops::mode_dimname::call(const_cast<Tensor&>(*this), dim, keepdim);
+}
+
+// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::mul(const at::Tensor & other) const {
+    return at::_ops::mul_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::mul_(const at::Tensor & other) const {
+    return at::_ops::mul__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::mul(const at::Scalar & other) const {
+    return at::_ops::mul_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::mul_(const at::Scalar & other) const {
+    return at::_ops::mul__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::multiply(const at::Tensor & other) const {
+    return at::_ops::multiply_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::multiply_(const at::Tensor & other) const {
+    return at::_ops::multiply__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::multiply(const at::Scalar & other) const {
+    return at::_ops::multiply_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::multiply_(const at::Scalar & other) const {
+    return at::_ops::multiply__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mv(Tensor self, Tensor vec) -> Tensor
+inline at::Tensor Tensor::mv(const at::Tensor & vec) const {
+    return at::_ops::mv::call(const_cast<Tensor&>(*this), vec);
+}
+
+// aten::mvlgamma(Tensor self, int p) -> Tensor
+inline at::Tensor Tensor::mvlgamma(int64_t p) const {
+    return at::_ops::mvlgamma::call(const_cast<Tensor&>(*this), p);
+}
+
+// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
+inline at::Tensor & Tensor::mvlgamma_(int64_t p) const {
+    return at::_ops::mvlgamma_::call(const_cast<Tensor&>(*this), p);
+}
+
+// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+inline at::Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const {
+    return at::_ops::narrow_copy::call(const_cast<Tensor&>(*this), dim, start, length);
+}
+
+// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor
+inline at::Tensor Tensor::narrow_copy_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const {
+    return at::_ops::narrow_copy::call(const_cast<Tensor&>(*this), dim, start, length);
+}
+
+// aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a)
+inline at::Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const {
+    return at::_ops::narrow::call(const_cast<Tensor&>(*this), dim, start, length);
+}
+
+// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a)
+inline at::Tensor Tensor::narrow(int64_t dim, const at::Tensor & start, int64_t length) const {
+    return at::_ops::narrow_Tensor::call(const_cast<Tensor&>(*this), dim, start, length);
+}
+
+// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)
+inline at::Tensor Tensor::permute(at::IntArrayRef dims) const {
+    return at::_ops::permute::call(const_cast<Tensor&>(*this), dims);
+}
+
+// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
+inline at::Tensor Tensor::movedim(at::IntArrayRef source, at::IntArrayRef destination) const {
+    return at::_ops::movedim_intlist::call(const_cast<Tensor&>(*this), source, destination);
+}
+
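`permute` reorders every dimension at once, while `movedim` relocates one (or several) dimensions and leaves the rest in order; both return views. A brief sketch:

    #include <ATen/ATen.h>

    void demo_dim_moves() {
        at::Tensor nchw = at::rand({2, 3, 4, 5});
        at::Tensor nhwc = nchw.permute({0, 2, 3, 1});                     // view, shape {2,4,5,3}
        at::Tensor same = nchw.movedim(/*source=*/1, /*destination=*/3);  // same layout change
    }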
+// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)
+inline at::Tensor Tensor::movedim(int64_t source, int64_t destination) const {
+    return at::_ops::movedim_int::call(const_cast<Tensor&>(*this), source, destination);
+}
+
+// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)
+inline at::Tensor Tensor::moveaxis(at::IntArrayRef source, at::IntArrayRef destination) const {
+    return at::_ops::moveaxis_intlist::call(const_cast<Tensor&>(*this), source, destination);
+}
+
+// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)
+inline at::Tensor Tensor::moveaxis(int64_t source, int64_t destination) const {
+    return at::_ops::moveaxis_int::call(const_cast<Tensor&>(*this), source, destination);
+}
+
+// aten::numpy_T(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::numpy_T() const {
+    return at::_ops::numpy_T::call(const_cast<Tensor&>(*this));
+}
+
+// aten::matrix_H(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::matrix_H() const {
+    return at::_ops::matrix_H::call(const_cast<Tensor&>(*this));
+}
+
+// aten::mT(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::mT() const {
+    return at::_ops::mT::call(const_cast<Tensor&>(*this));
+}
+
+// aten::mH(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::mH() const {
+    return at::_ops::mH::call(const_cast<Tensor&>(*this));
+}
+
+// aten::adjoint(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::adjoint() const {
+    return at::_ops::adjoint::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_pinned(Tensor self, Device? device=None) -> bool
+inline bool Tensor::is_pinned(c10::optional<at::Device> device) const {
+    return at::_ops::is_pinned::call(const_cast<Tensor&>(*this), device);
+}
+
+// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)
+inline at::Tensor Tensor::pin_memory(c10::optional<at::Device> device) const {
+    return at::_ops::pin_memory::call(const_cast<Tensor&>(*this), device);
+}
+
+// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor
+inline at::Tensor Tensor::pinverse(double rcond) const {
+    return at::_ops::pinverse::call(const_cast<Tensor&>(*this), rcond);
+}
+
+// aten::rad2deg(Tensor self) -> Tensor
+inline at::Tensor Tensor::rad2deg() const {
+    return at::_ops::rad2deg::call(const_cast<Tensor&>(*this));
+}
+
+// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::rad2deg_() const {
+    return at::_ops::rad2deg_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::deg2rad(Tensor self) -> Tensor
+inline at::Tensor Tensor::deg2rad() const {
+    return at::_ops::deg2rad::call(const_cast<Tensor&>(*this));
+}
+
+// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::deg2rad_() const {
+    return at::_ops::deg2rad_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::ravel(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::ravel() const {
+    return at::_ops::ravel::call(const_cast<Tensor&>(*this));
+}
+
+// aten::reciprocal(Tensor self) -> Tensor
+inline at::Tensor Tensor::reciprocal() const {
+    return at::_ops::reciprocal::call(const_cast<Tensor&>(*this));
+}
+
+// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::reciprocal_() const {
+    return at::_ops::reciprocal_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::neg(Tensor self) -> Tensor
+inline at::Tensor Tensor::neg() const {
+    return at::_ops::neg::call(const_cast<Tensor&>(*this));
+}
+
+// aten::neg_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::neg_() const {
+    return at::_ops::neg_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::negative(Tensor self) -> Tensor
+inline at::Tensor Tensor::negative() const {
+    return at::_ops::negative::call(const_cast<Tensor&>(*this));
+}
+
+// aten::negative_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::negative_() const {
+    return at::_ops::negative_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
+inline at::Tensor Tensor::repeat(at::IntArrayRef repeats) const {
+    return at::_ops::repeat::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(repeats));
+}
+
+// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor
+inline at::Tensor Tensor::repeat_symint(c10::SymIntArrayRef repeats) const {
+    return at::_ops::repeat::call(const_cast<Tensor&>(*this), repeats);
+}
+
+// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, int? output_size=None) -> Tensor
+inline at::Tensor Tensor::repeat_interleave(const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) const {
+    return at::_ops::repeat_interleave_self_Tensor::call(const_cast<Tensor&>(*this), repeats, dim, output_size);
+}
+
+// aten::repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> Tensor
+inline at::Tensor Tensor::repeat_interleave(int64_t repeats, c10::optional<int64_t> dim, c10::optional<int64_t> output_size) const {
+    return at::_ops::repeat_interleave_self_int::call(const_cast<Tensor&>(*this), repeats, dim, output_size);
+}
+
+// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+inline at::Tensor Tensor::reshape(at::IntArrayRef shape) const {
+    return at::_ops::reshape::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(shape));
+}
+
+// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)
+inline at::Tensor Tensor::reshape_symint(c10::SymIntArrayRef shape) const {
+    return at::_ops::reshape::call(const_cast<Tensor&>(*this), shape);
+}
+
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+inline at::Tensor Tensor::_reshape_alias(at::IntArrayRef size, at::IntArrayRef stride) const {
+    return at::_ops::_reshape_alias::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride));
+}
+
+// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
+inline at::Tensor Tensor::_reshape_alias_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const {
+    return at::_ops::_reshape_alias::call(const_cast<Tensor&>(*this), size, stride);
+}
+
+// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)
+inline at::Tensor Tensor::reshape_as(const at::Tensor & other) const {
+    return at::_ops::reshape_as::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::round(Tensor self) -> Tensor
+inline at::Tensor Tensor::round() const {
+    return at::_ops::round::call(const_cast<Tensor&>(*this));
+}
+
+// aten::round_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::round_() const {
+    return at::_ops::round_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::round.decimals(Tensor self, *, int decimals) -> Tensor
+inline at::Tensor Tensor::round(int64_t decimals) const {
+    return at::_ops::round_decimals::call(const_cast<Tensor&>(*this), decimals);
+}
+
+// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)
+inline at::Tensor & Tensor::round_(int64_t decimals) const {
+    return at::_ops::round__decimals::call(const_cast<Tensor&>(*this), decimals);
+}
+
+// aten::relu(Tensor self) -> Tensor
+inline at::Tensor Tensor::relu() const {
+    return at::_ops::relu::call(const_cast<Tensor&>(*this));
+}
+
+// aten::relu_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::relu_() const {
+    return at::_ops::relu_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::prelu(Tensor self, Tensor weight) -> Tensor
+inline at::Tensor Tensor::prelu(const at::Tensor & weight) const {
+    return at::_ops::prelu::call(const_cast<Tensor&>(*this), weight);
+}
+
+// aten::prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::prelu_backward(const at::Tensor & grad_output, const at::Tensor & weight) const {
+    return at::_ops::prelu_backward::call(grad_output, const_cast<Tensor&>(*this), weight);
+}
+
+// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor
+inline at::Tensor Tensor::hardshrink(const at::Scalar & lambd) const {
+    return at::_ops::hardshrink::call(const_cast<Tensor&>(*this), lambd);
+}
+
+// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor
+inline at::Tensor Tensor::hardshrink_backward(const at::Tensor & grad_out, const at::Scalar & lambd) const {
+    return at::_ops::hardshrink_backward::call(grad_out, const_cast<Tensor&>(*this), lambd);
+}
+
+// aten::rsqrt(Tensor self) -> Tensor
+inline at::Tensor Tensor::rsqrt() const {
+    return at::_ops::rsqrt::call(const_cast<Tensor&>(*this));
+}
+
+// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::rsqrt_() const {
+    return at::_ops::rsqrt_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
+inline at::Tensor Tensor::select(at::Dimname dim, int64_t index) const {
+    return at::_ops::select_Dimname::call(const_cast<Tensor&>(*this), dim, index);
+}
+
+// aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a)
+inline at::Tensor Tensor::select(int64_t dim, int64_t index) const {
+    return at::_ops::select_int::call(const_cast<Tensor&>(*this), dim, index);
+}
+
+// aten::sigmoid(Tensor self) -> Tensor
+inline at::Tensor Tensor::sigmoid() const {
+    return at::_ops::sigmoid::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::sigmoid_() const {
+    return at::_ops::sigmoid_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::logit(Tensor self, float? eps=None) -> Tensor
+inline at::Tensor Tensor::logit(c10::optional<double> eps) const {
+    return at::_ops::logit::call(const_cast<Tensor&>(*this), eps);
+}
+
+// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)
+inline at::Tensor & Tensor::logit_(c10::optional<double> eps) const {
+    return at::_ops::logit_::call(const_cast<Tensor&>(*this), eps);
+}
+
+// aten::sin(Tensor self) -> Tensor
+inline at::Tensor Tensor::sin() const {
+    return at::_ops::sin::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sin_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::sin_() const {
+    return at::_ops::sin_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sinc(Tensor self) -> Tensor
+inline at::Tensor Tensor::sinc() const {
+    return at::_ops::sinc::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sinc_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::sinc_() const {
+    return at::_ops::sinc_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sinh(Tensor self) -> Tensor
+inline at::Tensor Tensor::sinh() const {
+    return at::_ops::sinh::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sinh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::sinh_() const {
+    return at::_ops::sinh_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::detach(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::detach() const {
+    return at::_ops::detach::call(const_cast<Tensor&>(*this));
+}
+
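`logit` is the inverse of `sigmoid`; its optional `eps` clamps inputs into [eps, 1-eps] so boundary values do not produce infinities. A hedged sketch:

    #include <ATen/ATen.h>

    void demo_logit() {
        at::Tensor p = at::rand({5});            // values in [0, 1)
        at::Tensor z = p.logit(/*eps=*/1e-6);    // finite even when p == 0
        at::Tensor back = z.sigmoid();           // approximately p again
    }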
+// aten::detach_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::detach_() const {
+    return at::_ops::detach_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::size.Dimname(Tensor self, Dimname dim) -> int
+inline int64_t Tensor::size(at::Dimname dim) const {
+    return at::_ops::size_Dimname::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+inline at::Tensor Tensor::slice(int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) const {
+    return at::_ops::slice_Tensor::call(const_cast<Tensor&>(*this), dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+}
+
+// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)
+inline at::Tensor Tensor::slice_symint(int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) const {
+    return at::_ops::slice_Tensor::call(const_cast<Tensor&>(*this), dim, start, end, step);
+}
+
+// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+inline at::Tensor Tensor::slice_scatter(const at::Tensor & src, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) const {
+    return at::_ops::slice_scatter::call(const_cast<Tensor&>(*this), src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step);
+}
+
+// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor
+inline at::Tensor Tensor::slice_scatter_symint(const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) const {
+    return at::_ops::slice_scatter::call(const_cast<Tensor&>(*this), src, dim, start, end, step);
+}
+
+// aten::select_scatter(Tensor self, Tensor src, int dim, int index) -> Tensor
+inline at::Tensor Tensor::select_scatter(const at::Tensor & src, int64_t dim, int64_t index) const {
+    return at::_ops::select_scatter::call(const_cast<Tensor&>(*this), src, dim, index);
+}
+
+// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor
+inline at::Tensor Tensor::diagonal_scatter(const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) const {
+    return at::_ops::diagonal_scatter::call(const_cast<Tensor&>(*this), src, offset, dim1, dim2);
+}
+
+// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+inline at::Tensor Tensor::as_strided_scatter(const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset) const {
+    return at::_ops::as_strided_scatter::call(const_cast<Tensor&>(*this), src, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt);
+}
+
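The `*_scatter` bindings are the out-of-place duals of view-plus-assign: `dst.slice_scatter(src, dim, start, end)` returns a copy of `dst` whose selected slice is replaced by `src`, leaving `dst` untouched, which is what functionalized or traced code needs. A sketch:

    #include <ATen/ATen.h>

    void demo_slice_scatter() {
        at::Tensor dst = at::zeros({4, 6});
        at::Tensor src = at::ones({4, 2});
        // Copy of dst with columns 2..3 replaced by src; dst itself is unchanged.
        at::Tensor out = dst.slice_scatter(src, /*dim=*/1, /*start=*/2, /*end=*/4);
    }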
+// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor
+inline at::Tensor Tensor::as_strided_scatter_symint(const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) const {
+    return at::_ops::as_strided_scatter::call(const_cast<Tensor&>(*this), src, size, stride, storage_offset);
+}
+
+// aten::smm(Tensor self, Tensor mat2) -> Tensor
+inline at::Tensor Tensor::smm(const at::Tensor & mat2) const {
+    return at::_ops::smm::call(const_cast<Tensor&>(*this), mat2);
+}
+
+// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::softmax(int64_t dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::softmax_int::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::softmax_Dimname::call(const_cast<Tensor&>(*this), dim, dtype);
+}
+
+// aten::unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]
+inline ::std::vector<at::Tensor> Tensor::unsafe_split(int64_t split_size, int64_t dim) const {
+    return at::_ops::unsafe_split_Tensor::call(const_cast<Tensor&>(*this), split_size, dim);
+}
+
+// aten::split.Tensor(Tensor(a -> *) self, int split_size, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::split(int64_t split_size, int64_t dim) const {
+    return at::_ops::split_Tensor::call(const_cast<Tensor&>(*this), split_size, dim);
+}
+
+// aten::split.sizes(Tensor(a -> *) self, int[] split_size, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::split(at::IntArrayRef split_size, int64_t dim) const {
+    return at::_ops::split_sizes::call(const_cast<Tensor&>(*this), split_size, dim);
+}
+
+// aten::unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[]
+inline ::std::vector<at::Tensor> Tensor::unsafe_split_with_sizes(at::IntArrayRef split_sizes, int64_t dim) const {
+    return at::_ops::unsafe_split_with_sizes::call(const_cast<Tensor&>(*this), split_sizes, dim);
+}
+
+// aten::split_with_sizes(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::split_with_sizes(at::IntArrayRef split_sizes, int64_t dim) const {
+    return at::_ops::split_with_sizes::call(const_cast<Tensor&>(*this), split_sizes, dim);
+}
+
+// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::hsplit(int64_t sections) const {
+    return at::_ops::hsplit_int::call(const_cast<Tensor&>(*this), sections);
+}
+
+// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::hsplit(at::IntArrayRef indices) const {
+    return at::_ops::hsplit_array::call(const_cast<Tensor&>(*this), indices);
+}
+
+// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::vsplit(int64_t sections) const {
+    return at::_ops::vsplit_int::call(const_cast<Tensor&>(*this), sections);
+}
+
+// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::vsplit(at::IntArrayRef indices) const {
+    return at::_ops::vsplit_array::call(const_cast<Tensor&>(*this), indices);
+}
+
+// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::dsplit(int64_t sections) const {
+    return at::_ops::dsplit_int::call(const_cast<Tensor&>(*this), sections);
+}
+
+// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::dsplit(at::IntArrayRef indices) const {
+    return at::_ops::dsplit_array::call(const_cast<Tensor&>(*this), indices);
+}
+
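The split family returns `std::vector<at::Tensor>` of views into the original storage; `split_with_sizes` takes explicit chunk lengths that must sum to the size of the split dimension. A sketch:

    #include <ATen/ATen.h>
    #include <vector>

    void demo_split() {
        at::Tensor x = at::arange(10);
        std::vector<at::Tensor> parts = x.split_with_sizes({3, 3, 4});
        // parts[0] = [0,1,2], parts[1] = [3,4,5], parts[2] = [6..9]; all views, no copies
    }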
+// aten::squeeze(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::squeeze() const {
+    return at::_ops::squeeze::call(const_cast<Tensor&>(*this));
+}
+
+// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)
+inline at::Tensor Tensor::squeeze(int64_t dim) const {
+    return at::_ops::squeeze_dim::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)
+inline at::Tensor Tensor::squeeze(at::Dimname dim) const {
+    return at::_ops::squeeze_dimname::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::squeeze_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::squeeze_() const {
+    return at::_ops::squeeze_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)
+inline at::Tensor & Tensor::squeeze_(int64_t dim) const {
+    return at::_ops::squeeze__dim::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)
+inline at::Tensor & Tensor::squeeze_(at::Dimname dim) const {
+    return at::_ops::squeeze__dimname::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::sspaddmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const {
+    return at::_ops::sspaddmm::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
+}
+
+// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+inline at::Tensor Tensor::stft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) const {
+    return at::_ops::stft::call(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
+}
+
+// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor
+inline at::Tensor Tensor::stft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) const {
+    return at::_ops::stft_center::call(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
+}
+
+// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor
+inline at::Tensor Tensor::istft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) const {
+    return at::_ops::istft::call(const_cast<Tensor&>(*this), n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
+}
+
+// aten::stride.Dimname(Tensor self, Dimname dim) -> int
+inline int64_t Tensor::stride(at::Dimname dim) const {
+    return at::_ops::stride_Dimname::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::sum(c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::sum::call(const_cast<Tensor&>(*this), dtype);
+}
+
+// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::sum(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::sum_dim_IntList::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
+// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::sum(at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::sum_dim_DimnameList::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
+// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::nansum(at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::nansum::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
+// aten::sum_to_size(Tensor self, int[] size) -> Tensor
+inline at::Tensor Tensor::sum_to_size(at::IntArrayRef size) const {
+    return at::_ops::sum_to_size::call(const_cast<Tensor&>(*this), size);
+}
+
+// aten::sqrt(Tensor self) -> Tensor
+inline at::Tensor Tensor::sqrt() const {
+    return at::_ops::sqrt::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sqrt_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::sqrt_() const {
+    return at::_ops::sqrt_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::square(Tensor self) -> Tensor
+inline at::Tensor Tensor::square() const {
+    return at::_ops::square::call(const_cast<Tensor&>(*this));
+}
+
+// aten::square_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::square_() const {
+    return at::_ops::square_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::std(Tensor self, bool unbiased=True) -> Tensor
+inline at::Tensor Tensor::std(bool unbiased) const {
+    return at::_ops::std::call(const_cast<Tensor&>(*this), unbiased);
+}
+
+// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::std(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) const {
+    return at::_ops::std_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
+}
+
+// aten::std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::std(at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) const {
+    return at::_ops::std_correction::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
+}
+
+// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::std(at::DimnameList dim, bool unbiased, bool keepdim) const {
+    return at::_ops::std_names_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
+}
+
+// aten::std.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::std(at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) const {
+    return at::_ops::std_correction_names::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
+}
+
+// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::prod(c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::prod::call(const_cast<Tensor&>(*this), dtype);
+}
+
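The `.correction` overloads generalize the older `unbiased` flag: correction=0 gives the population estimate, correction=1 the Bessel-corrected sample estimate. Note the explicit `c10::optional<int64_t>` in the sketch below; a bare integer literal would overload-resolve to the `bool unbiased` variant instead. A hedged sketch:

    #include <ATen/ATen.h>

    void demo_std_correction() {
        at::Tensor x = at::randn({1000});
        at::Tensor pop  = x.std(c10::nullopt, c10::optional<int64_t>(0), /*keepdim=*/false);
        at::Tensor samp = x.std(c10::nullopt, c10::optional<int64_t>(1), /*keepdim=*/false);
    }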
+// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::prod(int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::prod_dim_int::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
+// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::prod(at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::prod_dim_Dimname::call(const_cast<Tensor&>(*this), dim, keepdim, dtype);
+}
+
+// aten::t(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::t() const {
+    return at::_ops::t::call(const_cast<Tensor&>(*this));
+}
+
+// aten::t_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::t_() const {
+    return at::_ops::t_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::tan(Tensor self) -> Tensor
+inline at::Tensor Tensor::tan() const {
+    return at::_ops::tan::call(const_cast<Tensor&>(*this));
+}
+
+// aten::tan_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::tan_() const {
+    return at::_ops::tan_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::tanh(Tensor self) -> Tensor
+inline at::Tensor Tensor::tanh() const {
+    return at::_ops::tanh::call(const_cast<Tensor&>(*this));
+}
+
+// aten::tanh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::tanh_() const {
+    return at::_ops::tanh_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::tile(Tensor self, int[] dims) -> Tensor
+inline at::Tensor Tensor::tile(at::IntArrayRef dims) const {
+    return at::_ops::tile::call(const_cast<Tensor&>(*this), dims);
+}
+
+// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
+inline at::Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const {
+    return at::_ops::transpose_int::call(const_cast<Tensor&>(*this), dim0, dim1);
+}
+
+// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
+inline at::Tensor Tensor::transpose(at::Dimname dim0, at::Dimname dim1) const {
+    return at::_ops::transpose_Dimname::call(const_cast<Tensor&>(*this), dim0, dim1);
+}
+
+// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+inline at::Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) const {
+    return at::_ops::transpose_::call(const_cast<Tensor&>(*this), dim0, dim1);
+}
+
+// aten::flip(Tensor self, int[] dims) -> Tensor
+inline at::Tensor Tensor::flip(at::IntArrayRef dims) const {
+    return at::_ops::flip::call(const_cast<Tensor&>(*this), dims);
+}
+
+// aten::fliplr(Tensor self) -> Tensor
+inline at::Tensor Tensor::fliplr() const {
+    return at::_ops::fliplr::call(const_cast<Tensor&>(*this));
+}
+
+// aten::flipud(Tensor self) -> Tensor
+inline at::Tensor Tensor::flipud() const {
+    return at::_ops::flipud::call(const_cast<Tensor&>(*this));
+}
+
+// aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
+inline at::Tensor Tensor::roll(at::IntArrayRef shifts, at::IntArrayRef dims) const {
+    return at::_ops::roll::call(const_cast<Tensor&>(*this), shifts, dims);
+}
+
+// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
+inline at::Tensor Tensor::rot90(int64_t k, at::IntArrayRef dims) const {
+    return at::_ops::rot90::call(const_cast<Tensor&>(*this), k, dims);
+}
+
+// aten::_nested_tensor_size(Tensor self) -> Tensor
+inline at::Tensor Tensor::_nested_tensor_size() const {
+    return at::_ops::_nested_tensor_size::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_nested_tensor_strides(Tensor self) -> Tensor
+inline at::Tensor Tensor::_nested_tensor_strides() const {
+    return at::_ops::_nested_tensor_strides::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_nested_tensor_offsets(Tensor self) -> int[]
+inline ::std::vector<int64_t> Tensor::_nested_tensor_offsets() const {
+    return at::_ops::_nested_tensor_offsets::call(const_cast<Tensor&>(*this));
+}
+
+// aten::trunc(Tensor self) -> Tensor
+inline at::Tensor Tensor::trunc() const {
+    return at::_ops::trunc::call(const_cast<Tensor&>(*this));
+}
+
+// aten::trunc_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::trunc_() const {
+    return at::_ops::trunc_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::fix(Tensor self) -> Tensor
+inline at::Tensor Tensor::fix() const {
+    return at::_ops::fix::call(const_cast<Tensor&>(*this));
+}
+
+// aten::fix_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::fix_() const {
+    return at::_ops::fix_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::type_as(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::type_as(const at::Tensor & other) const {
+    return at::_ops::type_as::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)
+inline at::Tensor Tensor::unsqueeze(int64_t dim) const {
+    return at::_ops::unsqueeze::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!)
+inline at::Tensor & Tensor::unsqueeze_(int64_t dim) const {
+    return at::_ops::unsqueeze_::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::var(Tensor self, bool unbiased=True) -> Tensor
+inline at::Tensor Tensor::var(bool unbiased) const {
+    return at::_ops::var::call(const_cast<Tensor&>(*this), unbiased);
+}
+
+// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::var(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) const {
+    return at::_ops::var_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
+}
+
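Unlike NumPy's negative-stride views, `flip`/`fliplr`/`flipud` materialize copies, and `rot90` composes a flip with a transpose in the given plane. A brief sketch:

    #include <ATen/ATen.h>

    void demo_flip() {
        at::Tensor img = at::arange(6).reshape({2, 3});
        at::Tensor ud  = img.flipud();                        // rows reversed, copied
        at::Tensor r90 = img.rot90(/*k=*/1, /*dims=*/{0, 1}); // one 90-degree rotation
    }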
+// aten::var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::var(at::OptionalIntArrayRef dim, c10::optional<int64_t> correction, bool keepdim) const {
+    return at::_ops::var_correction::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
+}
+
+// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::var(at::DimnameList dim, bool unbiased, bool keepdim) const {
+    return at::_ops::var_names_dim::call(const_cast<Tensor&>(*this), dim, unbiased, keepdim);
+}
+
+// aten::var.correction_names(Tensor self, Dimname[1] dim, *, int? correction, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::var(at::DimnameList dim, c10::optional<int64_t> correction, bool keepdim) const {
+    return at::_ops::var_correction_names::call(const_cast<Tensor&>(*this), dim, correction, keepdim);
+}
+
+// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)
+inline at::Tensor Tensor::view_as(const at::Tensor & other) const {
+    return at::_ops::view_as::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::where(const at::Tensor & condition, const at::Tensor & other) const {
+    return at::_ops::where_self::call(condition, const_cast<Tensor&>(*this), other);
+}
+
+// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor
+inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::ScalarType dtype) const {
+    return at::_ops::norm_ScalarOpt_dtype::call(const_cast<Tensor&>(*this), p, dtype);
+}
+
+// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor
+inline at::Tensor Tensor::norm(const at::Scalar & p) const {
+    return at::_ops::norm_Scalar::call(const_cast<Tensor&>(*this), p);
+}
+
+// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) const {
+    return at::_ops::norm_ScalarOpt_dim_dtype::call(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
+}
+
+// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) const {
+    return at::_ops::norm_ScalarOpt_dim::call(const_cast<Tensor&>(*this), p, dim, keepdim);
+}
+
+// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor
+inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) const {
+    return at::_ops::norm_names_ScalarOpt_dim_dtype::call(const_cast<Tensor&>(*this), p, dim, keepdim, dtype);
+}
+
+// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor
+inline at::Tensor Tensor::norm(const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) const {
+    return at::_ops::norm_names_ScalarOpt_dim::call(const_cast<Tensor&>(*this), p, dim, keepdim);
+}
+
+// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::frexp() const {
+    return at::_ops::frexp_Tensor::call(const_cast<Tensor&>(*this));
+}
+
+// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor Tensor::clone(c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::clone::call(const_cast<Tensor&>(*this), memory_format);
+}
+
+// aten::positive(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::positive() const {
+    return at::_ops::positive::call(const_cast<Tensor&>(*this));
+}
+
+// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!)
+inline const at::Tensor & Tensor::resize_as_(const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::resize_as_::call(const_cast<Tensor&>(*this), the_template, memory_format);
+}
+
+// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!)
+inline const at::Tensor & Tensor::resize_as_sparse_(const at::Tensor & the_template) const {
+    return at::_ops::resize_as_sparse_::call(const_cast<Tensor&>(*this), the_template);
+}
+
+// aten::zero_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::zero_() const {
+    return at::_ops::zero_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::sub(const at::Tensor & other, const at::Scalar & alpha) const {
+    return at::_ops::sub_Tensor::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+inline at::Tensor & Tensor::sub_(const at::Tensor & other, const at::Scalar & alpha) const {
+    return at::_ops::sub__Tensor::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::sub(const at::Scalar & other, const at::Scalar & alpha) const {
+    return at::_ops::sub_Scalar::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+inline at::Tensor & Tensor::sub_(const at::Scalar & other, const at::Scalar & alpha) const {
+    return at::_ops::sub__Scalar::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::subtract(const at::Tensor & other, const at::Scalar & alpha) const {
+    return at::_ops::subtract_Tensor::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
+inline at::Tensor & Tensor::subtract_(const at::Tensor & other, const at::Scalar & alpha) const {
+    return at::_ops::subtract__Tensor::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::subtract(const at::Scalar & other, const at::Scalar & alpha) const {
+    return at::_ops::subtract_Scalar::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)
+inline at::Tensor & Tensor::subtract_(const at::Scalar & other, const at::Scalar & alpha) const {
+    return at::_ops::subtract__Scalar::call(const_cast<Tensor&>(*this), other, alpha);
+}
+
+// aten::heaviside(Tensor self, Tensor values) -> Tensor
+inline at::Tensor Tensor::heaviside(const at::Tensor & values) const {
+    return at::_ops::heaviside::call(const_cast<Tensor&>(*this), values);
+}
+
+// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!)
+inline at::Tensor & Tensor::heaviside_(const at::Tensor & values) const {
+    return at::_ops::heaviside_::call(const_cast<Tensor&>(*this), values);
+}
+
+// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::addmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const {
+    return at::_ops::addmm::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
+}
+
+// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
+inline at::Tensor & Tensor::addmm_(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const {
+    return at::_ops::addmm_::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha);
+}
+
+// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor
+inline at::Tensor Tensor::_addmm_activation(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) const {
+    return at::_ops::_addmm_activation::call(const_cast<Tensor&>(*this), mat1, mat2, beta, alpha, use_gelu);
+}
+
+// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
+inline const at::Tensor & Tensor::sparse_resize_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const {
+    return at::_ops::sparse_resize_::call(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
+}
+
+// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!)
+inline const at::Tensor & Tensor::sparse_resize_and_clear_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const {
+    return at::_ops::sparse_resize_and_clear_::call(const_cast<Tensor&>(*this), size, sparse_dim, dense_dim);
+}
+
+// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor
+inline at::Tensor Tensor::sparse_mask(const at::Tensor & mask) const {
+    return at::_ops::sparse_mask::call(const_cast<Tensor&>(*this), mask);
+}
+
+// aten::to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::to_dense(c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::to_dense::call(const_cast<Tensor&>(*this), dtype);
+}
+
+// aten::_to_dense(Tensor self, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::_to_dense(c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::_to_dense::call(const_cast<Tensor&>(*this), dtype);
+}
+
+// aten::sparse_dim(Tensor self) -> int
+inline int64_t Tensor::sparse_dim() const {
+    return at::_ops::sparse_dim::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_dimI(Tensor self) -> int
+inline int64_t Tensor::_dimI() const {
+    return at::_ops::_dimI::call(const_cast<Tensor&>(*this));
+}
+
+// aten::dense_dim(Tensor self) -> int
+inline int64_t Tensor::dense_dim() const {
+    return at::_ops::dense_dim::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_dimV(Tensor self) -> int
+inline int64_t Tensor::_dimV() const {
+    return at::_ops::_dimV::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_nnz(Tensor self) -> int
+inline int64_t Tensor::_nnz() const {
+    return at::_ops::_nnz::call(const_cast<Tensor&>(*this));
+}
+
+// aten::coalesce(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::coalesce() const {
+    return at::_ops::coalesce::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_coalesced(Tensor self) -> bool
+inline bool Tensor::is_coalesced() const {
+    return at::_ops::is_coalesced::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_indices(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::_indices() const {
+    return at::_ops::_indices::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_values(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::_values() const {
+    return at::_ops::_values::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)
+inline at::Tensor & Tensor::_coalesced_(bool coalesced) const {
+    return at::_ops::_coalesced_::call(const_cast<Tensor&>(*this), coalesced);
+}
+
+// aten::indices(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::indices() const {
+    return at::_ops::indices::call(const_cast<Tensor&>(*this));
+}
+
+// aten::values(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::values() const {
+    return at::_ops::values::call(const_cast<Tensor&>(*this));
+}
+
+// aten::crow_indices(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::crow_indices() const {
+    return at::_ops::crow_indices::call(const_cast<Tensor&>(*this));
+}
+
+// aten::col_indices(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::col_indices() const {
+    return at::_ops::col_indices::call(const_cast<Tensor&>(*this));
+}
+
+// aten::ccol_indices(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::ccol_indices() const {
+    return at::_ops::ccol_indices::call(const_cast<Tensor&>(*this));
+}
+
+// aten::row_indices(Tensor(a) self) -> Tensor(a)
+inline at::Tensor Tensor::row_indices() const {
+    return at::_ops::row_indices::call(const_cast<Tensor&>(*this));
+}
+
+// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::unbind(int64_t dim) const {
+    return at::_ops::unbind_int::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]
+inline ::std::vector<at::Tensor> Tensor::unbind(at::Dimname dim) const {
+    return at::_ops::unbind_Dimname::call(const_cast<Tensor&>(*this), dim);
+}
+
+// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor
+inline at::Tensor Tensor::to_sparse(int64_t sparse_dim) const {
+    return at::_ops::to_sparse_sparse_dim::call(const_cast<Tensor&>(*this), sparse_dim);
+}
+
+// aten::to_sparse(Tensor self) -> Tensor
+inline at::Tensor Tensor::to_sparse() const {
+    return at::_ops::to_sparse::call(const_cast<Tensor&>(*this));
+}
+
+// aten::to_sparse_csr(Tensor self) -> Tensor
+inline at::Tensor Tensor::to_sparse_csr() const {
+    return at::_ops::to_sparse_csr::call(const_cast<Tensor&>(*this));
+}
+
+// aten::to_sparse_csc(Tensor self) -> Tensor
+inline at::Tensor Tensor::to_sparse_csc() const {
+    return at::_ops::to_sparse_csc::call(const_cast<Tensor&>(*this));
+}
+
+// aten::to_sparse_bsr(Tensor self, int[2] blocksize) -> Tensor
+inline at::Tensor Tensor::to_sparse_bsr(at::IntArrayRef blocksize) const {
+    return at::_ops::to_sparse_bsr::call(const_cast<Tensor&>(*this), blocksize);
+}
+
+// aten::to_sparse_bsc(Tensor self, int[2] blocksize) -> Tensor
+inline at::Tensor Tensor::to_sparse_bsc(at::IntArrayRef blocksize) const {
+    return at::_ops::to_sparse_bsc::call(const_cast<Tensor&>(*this), blocksize);
+}
+
+// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor
+inline at::Tensor Tensor::to_mkldnn(c10::optional<at::ScalarType> dtype) const {
+    return at::_ops::to_mkldnn::call(const_cast<Tensor&>(*this), dtype);
+}
+
+// aten::dequantize.self(Tensor self) -> Tensor
+inline at::Tensor Tensor::dequantize() const {
+    return at::_ops::dequantize_self::call(const_cast<Tensor&>(*this));
+}
+
+// aten::q_scale(Tensor self) -> float
+inline double Tensor::q_scale() const {
+    return at::_ops::q_scale::call(const_cast<Tensor&>(*this));
+}
+
+// aten::q_zero_point(Tensor self) -> int
+inline int64_t Tensor::q_zero_point() const {
+    return at::_ops::q_zero_point::call(const_cast<Tensor&>(*this));
+}
+
+// aten::q_per_channel_scales(Tensor self) -> Tensor
+inline at::Tensor Tensor::q_per_channel_scales() const {
+    return at::_ops::q_per_channel_scales::call(const_cast<Tensor&>(*this));
+}
+
+// aten::q_per_channel_zero_points(Tensor self) -> Tensor
+inline at::Tensor Tensor::q_per_channel_zero_points() const {
+    return at::_ops::q_per_channel_zero_points::call(const_cast<Tensor&>(*this));
+}
+
+// aten::q_per_channel_axis(Tensor self) -> int
+inline int64_t Tensor::q_per_channel_axis() const {
+    return at::_ops::q_per_channel_axis::call(const_cast<Tensor&>(*this));
+}
+
+// aten::int_repr(Tensor self) -> Tensor
+inline at::Tensor Tensor::int_repr() const {
+    return at::_ops::int_repr::call(const_cast<Tensor&>(*this));
+}
+
+// aten::qscheme(Tensor self) -> QScheme
+inline at::QScheme Tensor::qscheme() const {
+    return at::_ops::qscheme::call(const_cast<Tensor&>(*this));
+}
+
+// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)
+inline at::Tensor Tensor::_autocast_to_reduced_precision(bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) const {
+    return at::_ops::_autocast_to_reduced_precision::call(const_cast<Tensor&>(*this), cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
+}
+
+// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)
+inline at::Tensor Tensor::_autocast_to_full_precision(bool cuda_enabled, bool cpu_enabled) const {
+    return at::_ops::_autocast_to_full_precision::call(const_cast<Tensor&>(*this), cuda_enabled, cpu_enabled);
+}
+
+// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+inline at::Tensor Tensor::to(at::TensorOptions options, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::to_dtype_layout::call(const_cast<Tensor&>(*this), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, copy, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+
+// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+inline at::Tensor Tensor::to(c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::to_dtype_layout::call(const_cast<Tensor&>(*this), dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
+}
+
+// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+inline at::Tensor Tensor::to(at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::to_device::call(const_cast<Tensor&>(*this), device, dtype, non_blocking, copy, memory_format);
+}
+
+// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+inline at::Tensor Tensor::to(at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::to_dtype::call(const_cast<Tensor&>(*this), dtype, non_blocking, copy, memory_format);
+}
+
+// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)
+inline at::Tensor Tensor::to(const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) const {
+    return at::_ops::to_other::call(const_cast<Tensor&>(*this), other, non_blocking, copy, memory_format);
+}
+
+// aten::item(Tensor self) -> Scalar
+inline at::Scalar Tensor::item() const {
+    return at::_ops::item::call(const_cast<Tensor&>(*this));
+}
+
+// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)
+inline at::Tensor & Tensor::set_(at::Storage source) const {
+    return at::_ops::set__source_Storage::call(const_cast<Tensor&>(*this), source);
+}
+
+// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+inline at::Tensor & Tensor::set_(at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) const {
+    return at::_ops::set__source_Storage_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride));
+}
+
+// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+inline at::Tensor & Tensor::set__symint(at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const {
+    return at::_ops::set__source_Storage_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, size, stride);
+}
+
+// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+inline at::Tensor & Tensor::set_(const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) const {
+    return at::_ops::set__source_Tensor_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, c10::fromIntArrayRef(size), c10::fromIntArrayRef(stride));
+}
+
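+// [Editorial note] A minimal usage sketch of the to(...) overloads above
+// (illustration only, not part of the generated header):
+//   at::Tensor t = at::rand({2, 3});
+//   at::Tensor d = t.to(at::kDouble);           // to.dtype: change element type
+//   at::Tensor c = t.to(at::kCPU, at::kFloat);  // to.device: move and cast in one call
+//   at::Tensor u = t.to(d);                     // to.other: match d's dtype/device
+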
+// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)
+inline at::Tensor & Tensor::set__symint(const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const {
+    return at::_ops::set__source_Tensor_storage_offset::call(const_cast<Tensor&>(*this), source, storage_offset, size, stride);
+}
+
+// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)
+inline at::Tensor & Tensor::set_(const at::Tensor & source) const {
+    return at::_ops::set__source_Tensor::call(const_cast<Tensor&>(*this), source);
+}
+
+// aten::set_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::set_() const {
+    return at::_ops::set_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::is_set_to(Tensor self, Tensor tensor) -> bool
+inline bool Tensor::is_set_to(const at::Tensor & tensor) const {
+    return at::_ops::is_set_to::call(const_cast<Tensor&>(*this), tensor);
+}
+
+// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)
+inline at::Tensor & Tensor::masked_fill_(const at::Tensor & mask, const at::Scalar & value) const {
+    return at::_ops::masked_fill__Scalar::call(const_cast<Tensor&>(*this), mask, value);
+}
+
+// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor
+inline at::Tensor Tensor::masked_fill(const at::Tensor & mask, const at::Scalar & value) const {
+    return at::_ops::masked_fill_Scalar::call(const_cast<Tensor&>(*this), mask, value);
+}
+
+// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)
+inline at::Tensor & Tensor::masked_fill_(const at::Tensor & mask, const at::Tensor & value) const {
+    return at::_ops::masked_fill__Tensor::call(const_cast<Tensor&>(*this), mask, value);
+}
+
+// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor
+inline at::Tensor Tensor::masked_fill(const at::Tensor & mask, const at::Tensor & value) const {
+    return at::_ops::masked_fill_Tensor::call(const_cast<Tensor&>(*this), mask, value);
+}
+
+// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)
+inline at::Tensor & Tensor::masked_scatter_(const at::Tensor & mask, const at::Tensor & source) const {
+    return at::_ops::masked_scatter_::call(const_cast<Tensor&>(*this), mask, source);
+}
+
+// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor
+inline at::Tensor Tensor::masked_scatter(const at::Tensor & mask, const at::Tensor & source) const {
+    return at::_ops::masked_scatter::call(const_cast<Tensor&>(*this), mask, source);
+}
+
+// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
+inline at::Tensor Tensor::view(at::IntArrayRef size) const {
+    return at::_ops::view::call(const_cast<Tensor&>(*this), c10::fromIntArrayRef(size));
+}
+
+// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)
+inline at::Tensor Tensor::view_symint(c10::SymIntArrayRef size) const {
+    return at::_ops::view::call(const_cast<Tensor&>(*this), size);
+}
+
+// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)
+inline at::Tensor Tensor::view(at::ScalarType dtype) const {
+    return at::_ops::view_dtype::call(const_cast<Tensor&>(*this), dtype);
+}
+
+// aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)
+inline at::Tensor & Tensor::put_(const at::Tensor & index, const at::Tensor & source, bool accumulate) const {
+    return at::_ops::put_::call(const_cast<Tensor&>(*this), index, source, accumulate);
+}
+
+// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor
+inline at::Tensor Tensor::put(const at::Tensor & index, const at::Tensor & source, bool accumulate) const {
+    return at::_ops::put::call(const_cast<Tensor&>(*this), index, source, accumulate);
+}
+
+// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)
+inline at::Tensor & Tensor::index_add_(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const {
+    return at::_ops::index_add_::call(const_cast<Tensor&>(*this), dim, index, source, alpha);
+}
+
+// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::index_add(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const {
+    return at::_ops::index_add::call(const_cast<Tensor&>(*this), dim, index, source, alpha);
+}
+
+// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::index_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const {
+    return at::_ops::index_add_dimname::call(const_cast<Tensor&>(*this), dim, index, source, alpha);
+}
+
+// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)
+inline at::Tensor & Tensor::index_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) const {
+    return at::_ops::index_reduce_::call(const_cast<Tensor&>(*this), dim, index, source, reduce, include_self);
+}
+
+// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor
+inline at::Tensor Tensor::index_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) const {
+    return at::_ops::index_reduce::call(const_cast<Tensor&>(*this), dim, index, source, reduce, include_self);
+}
+
+// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
+inline at::Tensor & Tensor::index_fill_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
+    return at::_ops::index_fill__int_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+inline at::Tensor Tensor::index_fill(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
+    return at::_ops::index_fill_int_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
+inline at::Tensor & Tensor::index_fill_(int64_t dim, const at::Tensor & index, const at::Tensor & value) const {
+    return at::_ops::index_fill__int_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
+inline at::Tensor Tensor::index_fill(int64_t dim, const at::Tensor & index, const at::Tensor & value) const {
+    return at::_ops::index_fill_int_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
+inline at::Tensor & Tensor::index_fill_(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const {
+    return at::_ops::index_fill__Dimname_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
+inline at::Tensor & Tensor::index_fill_(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const {
+    return at::_ops::index_fill__Dimname_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
+inline at::Tensor Tensor::index_fill(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const {
+    return at::_ops::index_fill_Dimname_Scalar::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
+inline at::Tensor Tensor::index_fill(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const {
+    return at::_ops::index_fill_Dimname_Tensor::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
+    return at::_ops::scatter_src::call(const_cast<Tensor&>(*this), dim, index, src);
+}
+
+// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
+inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
+    return at::_ops::scatter__src::call(const_cast<Tensor&>(*this), dim, index, src);
+}
+
+// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
+    return at::_ops::scatter_value::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
+inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const {
+    return at::_ops::scatter__value::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor
+inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const {
+    return at::_ops::scatter_reduce::call(const_cast<Tensor&>(*this), dim, index, src, reduce);
+}
+
+// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)
+inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const {
+    return at::_ops::scatter__reduce::call(const_cast<Tensor&>(*this), dim, index, src, reduce);
+}
+
+// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor
+inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const {
+    return at::_ops::scatter_value_reduce::call(const_cast<Tensor&>(*this), dim, index, value, reduce);
+}
+
+// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)
+inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const {
+    return at::_ops::scatter__value_reduce::call(const_cast<Tensor&>(*this), dim, index, value, reduce);
+}
+
+// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
+inline at::Tensor Tensor::scatter(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const {
+    return at::_ops::scatter_dimname_src::call(const_cast<Tensor&>(*this), dim, index, src);
+}
+
+// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
+inline at::Tensor Tensor::scatter(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const {
+    return at::_ops::scatter_dimname_value::call(const_cast<Tensor&>(*this), dim, index, value);
+}
+
+// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor
+inline at::Tensor Tensor::scatter_add(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
+    return at::_ops::scatter_add::call(const_cast<Tensor&>(*this), dim, index, src);
+}
+
+// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)
+inline at::Tensor & Tensor::scatter_add_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const {
+    return at::_ops::scatter_add_::call(const_cast<Tensor&>(*this), dim, index, src);
+}
+
+// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor
+inline at::Tensor Tensor::scatter_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const {
+    return at::_ops::scatter_add_dimname::call(const_cast<Tensor&>(*this), dim, index, src);
+}
+
+// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor
+inline at::Tensor Tensor::scatter_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) const {
+    return at::_ops::scatter_reduce_two::call(const_cast<Tensor&>(*this), dim, index, src, reduce, include_self);
+}
+
+// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)
+inline at::Tensor & Tensor::scatter_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) const {
+    return at::_ops::scatter_reduce__two::call(const_cast<Tensor&>(*this), dim, index, src, reduce, include_self);
+}
+
+// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::eq_(const at::Scalar & other) const {
+    return at::_ops::eq__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::eq_(const at::Tensor & other) const {
+    return at::_ops::eq__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::bitwise_and(const at::Scalar & other) const {
+    return at::_ops::bitwise_and_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::bitwise_and(const at::Tensor & other) const {
+    return at::_ops::bitwise_and_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
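+// [Editorial note] Sketch of the scatter_add_ wrapper above (illustration only,
+// not part of the generated header):
+//   at::Tensor sums = at::zeros({3});
+//   at::Tensor index = at::tensor({0, 1, 0}, at::kLong);
+//   at::Tensor src = at::ones({3});
+//   sums.scatter_add_(0, index, src);  // sums[index[i]] += src[i]  ->  {2, 1, 0}
+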
+// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_and_(const at::Scalar & other) const {
+    return at::_ops::bitwise_and__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_and_(const at::Tensor & other) const {
+    return at::_ops::bitwise_and__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::__and__(const at::Scalar & other) const {
+    return at::_ops::__and___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::__and__(const at::Tensor & other) const {
+    return at::_ops::__and___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::__iand__(const at::Scalar & other) const {
+    return at::_ops::__iand___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::__iand__(const at::Tensor & other) const {
+    return at::_ops::__iand___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::bitwise_or(const at::Scalar & other) const {
+    return at::_ops::bitwise_or_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::bitwise_or(const at::Tensor & other) const {
+    return at::_ops::bitwise_or_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_or_(const at::Scalar & other) const {
+    return at::_ops::bitwise_or__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_or_(const at::Tensor & other) const {
+    return at::_ops::bitwise_or__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::__or__(const at::Scalar & other) const {
+    return at::_ops::__or___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::__or__(const at::Tensor & other) const {
+    return at::_ops::__or___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::__ior__(const at::Scalar & other) const {
+    return at::_ops::__ior___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::__ior__(const at::Tensor & other) const {
+    return at::_ops::__ior___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::bitwise_xor(const at::Scalar & other) const {
+    return at::_ops::bitwise_xor_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::bitwise_xor(const at::Tensor & other) const {
+    return at::_ops::bitwise_xor_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_xor_(const at::Scalar & other) const {
+    return at::_ops::bitwise_xor__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_xor_(const at::Tensor & other) const {
+    return at::_ops::bitwise_xor__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::__xor__(const at::Scalar & other) const {
+    return at::_ops::__xor___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::__xor__(const at::Tensor & other) const {
+    return at::_ops::__xor___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::__ixor__(const at::Scalar & other) const {
+    return at::_ops::__ixor___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::__ixor__(const at::Tensor & other) const {
+    return at::_ops::__ixor___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::__lshift__(const at::Scalar & other) const {
+    return at::_ops::__lshift___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::__lshift__(const at::Tensor & other) const {
+    return at::_ops::__lshift___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::__ilshift__(const at::Scalar & other) const {
+    return at::_ops::__ilshift___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::__ilshift__(const at::Tensor & other) const {
+    return at::_ops::__ilshift___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::bitwise_left_shift(const at::Tensor & other) const {
+    return at::_ops::bitwise_left_shift_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_left_shift_(const at::Tensor & other) const {
+    return at::_ops::bitwise_left_shift__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::bitwise_left_shift(const at::Scalar & other) const {
+    return at::_ops::bitwise_left_shift_Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_left_shift_(const at::Scalar & other) const {
+    return at::_ops::bitwise_left_shift__Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::__rshift__(const at::Scalar & other) const {
+    return at::_ops::__rshift___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::__rshift__(const at::Tensor & other) const {
+    return at::_ops::__rshift___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::__irshift__(const at::Scalar & other) const {
+    return at::_ops::__irshift___Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::__irshift__(const at::Tensor & other) const {
+    return at::_ops::__irshift___Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::bitwise_right_shift(const at::Tensor & other) const {
+    return at::_ops::bitwise_right_shift_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_right_shift_(const at::Tensor & other) const {
+    return at::_ops::bitwise_right_shift__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::bitwise_right_shift(const at::Scalar & other) const {
+    return at::_ops::bitwise_right_shift_Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::bitwise_right_shift_(const at::Scalar & other) const {
+    return at::_ops::bitwise_right_shift__Tensor_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
+inline at::Tensor & Tensor::tril_(int64_t diagonal) const {
+    return at::_ops::tril_::call(const_cast<Tensor&>(*this), diagonal);
+}
+
+// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)
+inline at::Tensor & Tensor::triu_(int64_t diagonal) const {
+    return at::_ops::triu_::call(const_cast<Tensor&>(*this), diagonal);
+}
+
+// aten::digamma_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::digamma_() const {
+    return at::_ops::digamma_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)
+inline at::Tensor & Tensor::lerp_(const at::Tensor & end, const at::Scalar & weight) const {
+    return at::_ops::lerp__Scalar::call(const_cast<Tensor&>(*this), end, weight);
+}
+
+// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)
+inline at::Tensor & Tensor::lerp_(const at::Tensor & end, const at::Tensor & weight) const {
+    return at::_ops::lerp__Tensor::call(const_cast<Tensor&>(*this), end, weight);
+}
+
+// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)
+inline at::Tensor & Tensor::addbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const {
+    return at::_ops::addbmm_::call(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
+}
+
+// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+inline at::Tensor Tensor::addbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const {
+    return at::_ops::addbmm::call(const_cast<Tensor&>(*this), batch1, batch2, beta, alpha);
+}
+
+// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::random_(int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) const {
+    return at::_ops::random__from::call(const_cast<Tensor&>(*this), from, to, generator);
+}
+
+// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::random_(int64_t to, c10::optional<at::Generator> generator) const {
+    return at::_ops::random__to::call(const_cast<Tensor&>(*this), to, generator);
+}
+
+// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::random_(c10::optional<at::Generator> generator) const {
+    return at::_ops::random_::call(const_cast<Tensor&>(*this), generator);
+}
+
+// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::uniform_(double from, double to, c10::optional<at::Generator> generator) const {
+    return at::_ops::uniform_::call(const_cast<Tensor&>(*this), from, to, generator);
+}
+
+// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::cauchy_(double median, double sigma, c10::optional<at::Generator> generator) const {
+    return at::_ops::cauchy_::call(const_cast<Tensor&>(*this), median, sigma, generator);
+}
+
+// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::log_normal_(double mean, double std, c10::optional<at::Generator> generator) const {
+    return at::_ops::log_normal_::call(const_cast<Tensor&>(*this), mean, std, generator);
+}
+
+// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::exponential_(double lambd, c10::optional<at::Generator> generator) const {
+    return at::_ops::exponential_::call(const_cast<Tensor&>(*this), lambd, generator);
+}
+
+// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)
+inline at::Tensor & Tensor::geometric_(double p, c10::optional<at::Generator> generator) const {
+    return at::_ops::geometric_::call(const_cast<Tensor&>(*this), p, generator);
+}
+
+// aten::diag(Tensor self, int diagonal=0) -> Tensor
+inline at::Tensor Tensor::diag(int64_t diagonal) const {
+    return at::_ops::diag::call(const_cast<Tensor&>(*this), diagonal);
+}
+
+// aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor
+inline at::Tensor Tensor::cross(const at::Tensor & other, c10::optional<int64_t> dim) const {
+    return at::_ops::cross::call(const_cast<Tensor&>(*this), other, dim);
+}
+
+// aten::triu(Tensor self, int diagonal=0) -> Tensor
+inline at::Tensor Tensor::triu(int64_t diagonal) const {
+    return at::_ops::triu::call(const_cast<Tensor&>(*this), diagonal);
+}
+
+// aten::tril(Tensor self, int diagonal=0) -> Tensor
+inline at::Tensor Tensor::tril(int64_t diagonal) const {
+    return at::_ops::tril::call(const_cast<Tensor&>(*this), diagonal);
+}
+
+// aten::trace(Tensor self) -> Tensor
+inline at::Tensor Tensor::trace() const {
+    return at::_ops::trace::call(const_cast<Tensor&>(*this));
+}
+
+// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::ne(const at::Scalar & other) const {
+    return at::_ops::ne_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::ne(const at::Tensor & other) const {
+    return at::_ops::ne_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::ne_(const at::Scalar & other) const {
+    return at::_ops::ne__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
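+// [Editorial note] Sketch of the in-place RNG fillers above (illustration only,
+// not part of the generated header):
+//   at::Tensor w = at::empty({4, 4});
+//   w.uniform_(-0.1, 0.1);    // fill from U(-0.1, 0.1)
+//   w.exponential_(2.0);      // overwrite with Exponential(lambda=2) draws
+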
+// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::ne_(const at::Tensor & other) const {
+    return at::_ops::ne__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::not_equal(const at::Scalar & other) const {
+    return at::_ops::not_equal_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::not_equal(const at::Tensor & other) const {
+    return at::_ops::not_equal_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::not_equal_(const at::Scalar & other) const {
+    return at::_ops::not_equal__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::not_equal_(const at::Tensor & other) const {
+    return at::_ops::not_equal__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::eq(const at::Scalar & other) const {
+    return at::_ops::eq_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::eq(const at::Tensor & other) const {
+    return at::_ops::eq_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::ge(const at::Scalar & other) const {
+    return at::_ops::ge_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::ge(const at::Tensor & other) const {
+    return at::_ops::ge_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::ge_(const at::Scalar & other) const {
+    return at::_ops::ge__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::ge_(const at::Tensor & other) const {
+    return at::_ops::ge__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::greater_equal(const at::Scalar & other) const {
+    return at::_ops::greater_equal_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::greater_equal(const at::Tensor & other) const {
+    return at::_ops::greater_equal_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::greater_equal_(const at::Scalar & other) const {
+    return at::_ops::greater_equal__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::greater_equal_(const at::Tensor & other) const {
+    return at::_ops::greater_equal__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::le.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::le(const at::Scalar & other) const {
+    return at::_ops::le_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::le.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::le(const at::Tensor & other) const {
+    return at::_ops::le_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::le_(const at::Scalar & other) const {
+    return at::_ops::le__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::le_(const at::Tensor & other) const {
+    return at::_ops::le__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::less_equal(const at::Scalar & other) const {
+    return at::_ops::less_equal_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::less_equal(const at::Tensor & other) const {
+    return at::_ops::less_equal_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::less_equal_(const at::Scalar & other) const {
+    return at::_ops::less_equal__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::less_equal_(const at::Tensor & other) const {
+    return at::_ops::less_equal__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::gt(const at::Scalar & other) const {
+    return at::_ops::gt_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::gt(const at::Tensor & other) const {
+    return at::_ops::gt_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::gt_(const at::Scalar & other) const {
+    return at::_ops::gt__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::gt_(const at::Tensor & other) const {
+    return at::_ops::gt__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::greater(const at::Scalar & other) const {
+    return at::_ops::greater_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::greater(const at::Tensor & other) const {
+    return at::_ops::greater_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::greater_(const at::Scalar & other) const {
+    return at::_ops::greater__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::greater_(const at::Tensor & other) const {
+    return at::_ops::greater__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::lt(const at::Scalar & other) const {
+    return at::_ops::lt_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::lt(const at::Tensor & other) const {
+    return at::_ops::lt_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::lt_(const at::Scalar & other) const {
+    return at::_ops::lt__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::lt_(const at::Tensor & other) const {
+    return at::_ops::lt__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::less(const at::Scalar & other) const {
+    return at::_ops::less_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::less(const at::Tensor & other) const {
+    return at::_ops::less_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::less_(const at::Scalar & other) const {
+    return at::_ops::less__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::less_(const at::Tensor & other) const {
+    return at::_ops::less__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::take(Tensor self, Tensor index) -> Tensor
+inline at::Tensor Tensor::take(const at::Tensor & index) const {
+    return at::_ops::take::call(const_cast<Tensor&>(*this), index);
+}
+
+// aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor
+inline at::Tensor Tensor::take_along_dim(const at::Tensor & indices, c10::optional<int64_t> dim) const {
+    return at::_ops::take_along_dim::call(const_cast<Tensor&>(*this), indices, dim);
+}
+
+// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor
+inline at::Tensor Tensor::index_select(int64_t dim, const at::Tensor & index) const {
+    return at::_ops::index_select::call(const_cast<Tensor&>(*this), dim, index);
+}
+
+// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor
+inline at::Tensor Tensor::index_select(at::Dimname dim, const at::Tensor & index) const {
+    return at::_ops::index_select_dimname::call(const_cast<Tensor&>(*this), dim, index);
+}
+
+// aten::masked_select(Tensor self, Tensor mask) -> Tensor
+inline at::Tensor Tensor::masked_select(const at::Tensor & mask) const {
+    return at::_ops::masked_select::call(const_cast<Tensor&>(*this), mask);
+}
+
+// aten::nonzero(Tensor self) -> Tensor
+inline at::Tensor Tensor::nonzero() const {
+    return at::_ops::nonzero::call(const_cast<Tensor&>(*this));
+}
+
+// aten::nonzero_numpy(Tensor self) -> Tensor[]
+inline ::std::vector<at::Tensor> Tensor::nonzero_numpy() const {
+    return at::_ops::nonzero_numpy::call(const_cast<Tensor&>(*this));
+}
+
+// aten::argwhere(Tensor self) -> Tensor
+inline at::Tensor Tensor::argwhere() const {
+    return at::_ops::argwhere::call(const_cast<Tensor&>(*this));
+}
+
+// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor
+inline at::Tensor Tensor::gather(int64_t dim, const at::Tensor & index, bool sparse_grad) const {
+    return at::_ops::gather::call(const_cast<Tensor&>(*this), dim, index, sparse_grad);
+}
+
+// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor
+inline at::Tensor Tensor::gather(at::Dimname dim, const at::Tensor & index, bool sparse_grad) const {
+    return at::_ops::gather_dimname::call(const_cast<Tensor&>(*this), dim, index, sparse_grad);
+}
+
+// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
+inline at::Tensor Tensor::addcmul(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
+    return at::_ops::addcmul::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
+}
+
+// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
+inline at::Tensor & Tensor::addcmul_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
+    return at::_ops::addcmul_::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
+}
+
+// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
+inline at::Tensor Tensor::addcdiv(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
+    return at::_ops::addcdiv::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
+}
+
+// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)
+inline at::Tensor & Tensor::addcdiv_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const {
+    return at::_ops::addcdiv_::call(const_cast<Tensor&>(*this), tensor1, tensor2, value);
+}
+
+// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::triangular_solve(const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const {
+    return at::_ops::triangular_solve::call(const_cast<Tensor&>(*this), A, upper, transpose, unitriangular);
+}
+
+// aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::symeig(bool eigenvectors, bool upper) const {
+    return at::_ops::symeig::call(const_cast<Tensor&>(*this), eigenvectors, upper);
+}
+
+// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> Tensor::svd(bool some, bool compute_uv) const {
+    return at::_ops::svd::call(const_cast<Tensor&>(*this), some, compute_uv);
+}
+
+// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)
+inline at::Tensor Tensor::swapaxes(int64_t axis0, int64_t axis1) const {
+    return at::_ops::swapaxes::call(const_cast<Tensor&>(*this), axis0, axis1);
+}
+
+// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)
+inline at::Tensor & Tensor::swapaxes_(int64_t axis0, int64_t axis1) const {
+    return at::_ops::swapaxes_::call(const_cast<Tensor&>(*this), axis0, axis1);
+}
+
+// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
+inline at::Tensor Tensor::swapdims(int64_t dim0, int64_t dim1) const {
+    return at::_ops::swapdims::call(const_cast<Tensor&>(*this), dim0, dim1);
+}
+
+// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+inline at::Tensor & Tensor::swapdims_(int64_t dim0, int64_t dim1) const {
+    return at::_ops::swapdims_::call(const_cast<Tensor&>(*this), dim0, dim1);
+}
+
+// aten::cholesky(Tensor self, bool upper=False) -> Tensor
+inline at::Tensor Tensor::cholesky(bool upper) const {
+    return at::_ops::cholesky::call(const_cast<Tensor&>(*this), upper);
+}
+
+// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
+inline at::Tensor Tensor::cholesky_solve(const at::Tensor & input2, bool upper) const {
+    return at::_ops::cholesky_solve::call(const_cast<Tensor&>(*this), input2, upper);
+}
+
+// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor
+inline at::Tensor Tensor::cholesky_inverse(bool upper) const {
+    return at::_ops::cholesky_inverse::call(const_cast<Tensor&>(*this), upper);
+}
+
+// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::qr(bool some) const {
+    return at::_ops::qr::call(const_cast<Tensor&>(*this), some);
+}
+
+// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::geqrf() const {
+    return at::_ops::geqrf::call(const_cast<Tensor&>(*this));
+}
+
+// aten::orgqr(Tensor self, Tensor input2) -> Tensor
+inline at::Tensor Tensor::orgqr(const at::Tensor & input2) const {
+    return at::_ops::orgqr::call(const_cast<Tensor&>(*this), input2);
+}
+
+// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor
+inline at::Tensor Tensor::ormqr(const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const {
+    return at::_ops::ormqr::call(const_cast<Tensor&>(*this), input2, input3, left, transpose);
+}
+
+// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
+inline at::Tensor Tensor::lu_solve(const at::Tensor & LU_data, const at::Tensor & LU_pivots) const {
+    return at::_ops::lu_solve::call(const_cast<Tensor&>(*this), LU_data, LU_pivots);
+}
+
+// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor
+inline at::Tensor Tensor::multinomial(int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) const {
+    return at::_ops::multinomial::call(const_cast<Tensor&>(*this), num_samples, replacement, generator);
+}
+
+// aten::lgamma_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::lgamma_() const {
+    return at::_ops::lgamma_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::lgamma(Tensor self) -> Tensor
+inline at::Tensor Tensor::lgamma() const {
+    return at::_ops::lgamma::call(const_cast<Tensor&>(*this));
+}
+
+// aten::digamma(Tensor self) -> Tensor
+inline at::Tensor Tensor::digamma() const {
+    return at::_ops::digamma::call(const_cast<Tensor&>(*this));
+}
+
+// aten::polygamma(int n, Tensor self) -> Tensor
+inline at::Tensor Tensor::polygamma(int64_t n) const {
+    return at::_ops::polygamma::call(n, const_cast<Tensor&>(*this));
+}
+
+// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!)
+inline at::Tensor & Tensor::polygamma_(int64_t n) const {
+    return at::_ops::polygamma_::call(const_cast<Tensor&>(*this), n);
+}
+
+// aten::erfinv(Tensor self) -> Tensor
+inline at::Tensor Tensor::erfinv() const {
+    return at::_ops::erfinv::call(const_cast<Tensor&>(*this));
+}
+
+// aten::erfinv_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::erfinv_() const {
+    return at::_ops::erfinv_::call(const_cast<Tensor&>(*this));
+}
+
+// aten::i0(Tensor self) -> Tensor
+inline at::Tensor Tensor::i0() const {
+    return at::_ops::i0::call(const_cast<Tensor&>(*this));
+}
+
+// aten::i0_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & Tensor::i0_() const { + return at::_ops::i0_::call(const_cast(*this)); +} + +// aten::sign(Tensor self) -> Tensor +inline at::Tensor Tensor::sign() const { + return at::_ops::sign::call(const_cast(*this)); +} + +// aten::sign_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::sign_() const { + return at::_ops::sign_::call(const_cast(*this)); +} + +// aten::signbit(Tensor self) -> Tensor +inline at::Tensor Tensor::signbit() const { + return at::_ops::signbit::call(const_cast(*this)); +} + +// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor +inline at::Tensor Tensor::dist(const at::Tensor & other, const at::Scalar & p) const { + return at::_ops::dist::call(const_cast(*this), other, p); +} + +// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::atan2_(const at::Tensor & other) const { + return at::_ops::atan2_::call(const_cast(*this), other); +} + +// aten::atan2(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::atan2(const at::Tensor & other) const { + return at::_ops::atan2::call(const_cast(*this), other); +} + +// aten::arctan2(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::arctan2(const at::Tensor & other) const { + return at::_ops::arctan2::call(const_cast(*this), other); +} + +// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::arctan2_(const at::Tensor & other) const { + return at::_ops::arctan2_::call(const_cast(*this), other); +} + +// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor +inline at::Tensor Tensor::lerp(const at::Tensor & end, const at::Scalar & weight) const { + return at::_ops::lerp_Scalar::call(const_cast(*this), end, weight); +} + +// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor +inline at::Tensor Tensor::lerp(const at::Tensor & end, const at::Tensor & weight) const { + return at::_ops::lerp_Tensor::call(const_cast(*this), end, weight); +} + +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +inline at::Tensor Tensor::histc(int64_t bins, const at::Scalar & min, const at::Scalar & max) const { + return at::_ops::histc::call(const_cast(*this), bins, min, max); +} + +// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +inline ::std::tuple Tensor::histogram(const at::Tensor & bins, const c10::optional & weight, bool density) const { + return at::_ops::histogram_bins_tensor::call(const_cast(*this), bins, weight, density); +} + +// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +inline ::std::tuple Tensor::histogram(int64_t bins, c10::optional> range, const c10::optional & weight, bool density) const { + return at::_ops::histogram_bin_ct::call(const_cast(*this), bins, range, weight, density); +} + +// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::fmod(const at::Scalar & other) const { + return at::_ops::fmod_Scalar::call(const_cast(*this), other); +} + +// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
+inline at::Tensor & Tensor::fmod_(const at::Scalar & other) const { + return at::_ops::fmod__Scalar::call(const_cast(*this), other); +} + +// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::fmod(const at::Tensor & other) const { + return at::_ops::fmod_Tensor::call(const_cast(*this), other); +} + +// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::fmod_(const at::Tensor & other) const { + return at::_ops::fmod__Tensor::call(const_cast(*this), other); +} + +// aten::hypot(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::hypot(const at::Tensor & other) const { + return at::_ops::hypot::call(const_cast(*this), other); +} + +// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::hypot_(const at::Tensor & other) const { + return at::_ops::hypot_::call(const_cast(*this), other); +} + +// aten::igamma(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::igamma(const at::Tensor & other) const { + return at::_ops::igamma::call(const_cast(*this), other); +} + +// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::igamma_(const at::Tensor & other) const { + return at::_ops::igamma_::call(const_cast(*this), other); +} + +// aten::igammac(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::igammac(const at::Tensor & other) const { + return at::_ops::igammac::call(const_cast(*this), other); +} + +// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::igammac_(const at::Tensor & other) const { + return at::_ops::igammac_::call(const_cast(*this), other); +} + +// aten::nextafter(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::nextafter(const at::Tensor & other) const { + return at::_ops::nextafter::call(const_cast(*this), other); +} + +// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::nextafter_(const at::Tensor & other) const { + return at::_ops::nextafter_::call(const_cast(*this), other); +} + +// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::remainder(const at::Scalar & other) const { + return at::_ops::remainder_Scalar::call(const_cast(*this), other); +} + +// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::remainder_(const at::Scalar & other) const { + return at::_ops::remainder__Scalar::call(const_cast(*this), other); +} + +// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::remainder(const at::Tensor & other) const { + return at::_ops::remainder_Tensor::call(const_cast(*this), other); +} + +// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+inline at::Tensor & Tensor::remainder_(const at::Tensor & other) const { + return at::_ops::remainder__Tensor::call(const_cast(*this), other); +} + +// aten::min(Tensor self) -> Tensor +inline at::Tensor Tensor::min() const { + return at::_ops::min::call(const_cast(*this)); +} + +// aten::fmin(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::fmin(const at::Tensor & other) const { + return at::_ops::fmin::call(const_cast(*this), other); +} + +// aten::max(Tensor self) -> Tensor +inline at::Tensor Tensor::max() const { + return at::_ops::max::call(const_cast(*this)); +} + +// aten::fmax(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::fmax(const at::Tensor & other) const { + return at::_ops::fmax::call(const_cast(*this), other); +} + +// aten::maximum(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::maximum(const at::Tensor & other) const { + return at::_ops::maximum::call(const_cast(*this), other); +} + +// aten::max.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::max(const at::Tensor & other) const { + return at::_ops::max_other::call(const_cast(*this), other); +} + +// aten::minimum(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::minimum(const at::Tensor & other) const { + return at::_ops::minimum::call(const_cast(*this), other); +} + +// aten::min.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::min(const at::Tensor & other) const { + return at::_ops::min_other::call(const_cast(*this), other); +} + +// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::quantile(const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::quantile::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::quantile(double q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::quantile_scalar::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::nanquantile(const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::nanquantile::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::nanquantile(double q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::nanquantile_scalar::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(int64_t dim, bool descending) const { + return at::_ops::sort::call(const_cast(*this), dim, descending); +} + +// aten::sort.stable(Tensor self, *, bool? 
stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(c10::optional stable, int64_t dim, bool descending) const { + return at::_ops::sort_stable::call(const_cast(*this), stable, dim, descending); +} + +// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(at::Dimname dim, bool descending) const { + return at::_ops::sort_dimname::call(const_cast(*this), dim, descending); +} + +// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(c10::optional stable, at::Dimname dim, bool descending) const { + return at::_ops::sort_dimname_stable::call(const_cast(*this), stable, dim, descending); +} + +// aten::msort(Tensor self) -> Tensor +inline at::Tensor Tensor::msort() const { + return at::_ops::msort::call(const_cast(*this)); +} + +// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor +inline at::Tensor Tensor::argsort(int64_t dim, bool descending) const { + return at::_ops::argsort::call(const_cast(*this), dim, descending); +} + +// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor +inline at::Tensor Tensor::argsort(bool stable, int64_t dim, bool descending) const { + return at::_ops::argsort_stable::call(const_cast(*this), stable, dim, descending); +} + +// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor +inline at::Tensor Tensor::argsort(at::Dimname dim, bool descending) const { + return at::_ops::argsort_dimname::call(const_cast(*this), dim, descending); +} + +// aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const { + return at::_ops::topk::call(const_cast(*this), k, dim, largest, sorted); +} + +// aten::all(Tensor self) -> Tensor +inline at::Tensor Tensor::all() const { + return at::_ops::all::call(const_cast(*this)); +} + +// aten::any(Tensor self) -> Tensor +inline at::Tensor Tensor::any() const { + return at::_ops::any::call(const_cast(*this)); +} + +// aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor +inline at::Tensor Tensor::renorm(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const { + return at::_ops::renorm::call(const_cast(*this), p, dim, maxnorm); +} + +// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) 
+inline at::Tensor & Tensor::renorm_(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const { + return at::_ops::renorm_::call(const_cast(*this), p, dim, maxnorm); +} + +// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) +inline at::Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const { + return at::_ops::unfold::call(const_cast(*this), dimension, size, step); +} + +// aten::equal(Tensor self, Tensor other) -> bool +inline bool Tensor::equal(const at::Tensor & other) const { + return at::_ops::equal::call(const_cast(*this), other); +} + +// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +inline at::Tensor Tensor::pow(const at::Tensor & exponent) const { + return at::_ops::pow_Tensor_Tensor::call(const_cast(*this), exponent); +} + +// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +inline at::Tensor Tensor::pow(const at::Scalar & exponent) const { + return at::_ops::pow_Tensor_Scalar::call(const_cast(*this), exponent); +} + +// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +inline at::Tensor & Tensor::pow_(const at::Scalar & exponent) const { + return at::_ops::pow__Scalar::call(const_cast(*this), exponent); +} + +// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) +inline at::Tensor & Tensor::pow_(const at::Tensor & exponent) const { + return at::_ops::pow__Tensor::call(const_cast(*this), exponent); +} + +// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +inline at::Tensor Tensor::float_power(const at::Tensor & exponent) const { + return at::_ops::float_power_Tensor_Tensor::call(const_cast(*this), exponent); +} + +// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +inline at::Tensor Tensor::float_power(const at::Scalar & exponent) const { + return at::_ops::float_power_Tensor_Scalar::call(const_cast(*this), exponent); +} + +// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +inline at::Tensor & Tensor::float_power_(const at::Scalar & exponent) const { + return at::_ops::float_power__Scalar::call(const_cast(*this), exponent); +} + +// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) +inline at::Tensor & Tensor::float_power_(const at::Tensor & exponent) const { + return at::_ops::float_power__Tensor::call(const_cast(*this), exponent); +} + +// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::normal_(double mean, double std, c10::optional generator) const { + return at::_ops::normal_::call(const_cast(*this), mean, std, generator); +} + +// aten::alias(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::alias() const { + return at::_ops::alias::call(const_cast(*this)); +} + +// aten::isfinite(Tensor self) -> Tensor +inline at::Tensor Tensor::isfinite() const { + return at::_ops::isfinite::call(const_cast(*this)); +} + +// aten::isinf(Tensor self) -> Tensor +inline at::Tensor Tensor::isinf() const { + return at::_ops::isinf::call(const_cast(*this)); +} + +// aten::record_stream(Tensor(a!) 
+inline void Tensor::record_stream(at::Stream s) const {
+    return at::_ops::record_stream::call(const_cast<Tensor&>(*this), s);
+}
+
+// aten::isposinf(Tensor self) -> Tensor
+inline at::Tensor Tensor::isposinf() const {
+    return at::_ops::isposinf::call(const_cast<Tensor&>(*this));
+}
+
+// aten::isneginf(Tensor self) -> Tensor
+inline at::Tensor Tensor::isneginf() const {
+    return at::_ops::isneginf::call(const_cast<Tensor&>(*this));
+}
+
+// aten::det(Tensor self) -> Tensor
+inline at::Tensor Tensor::det() const {
+    return at::_ops::det::call(const_cast<Tensor&>(*this));
+}
+
+// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)
+inline ::std::tuple<at::Tensor,at::Tensor> Tensor::slogdet() const {
+    return at::_ops::slogdet::call(const_cast<Tensor&>(*this));
+}
+
+// aten::logdet(Tensor self) -> Tensor
+inline at::Tensor Tensor::logdet() const {
+    return at::_ops::logdet::call(const_cast<Tensor&>(*this));
+}
+
+// aten::inverse(Tensor self) -> Tensor
+inline at::Tensor Tensor::inverse() const {
+    return at::_ops::inverse::call(const_cast<Tensor&>(*this));
+}
+
+// aten::inner(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::inner(const at::Tensor & other) const {
+    return at::_ops::inner::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::outer(Tensor self, Tensor vec2) -> Tensor
+inline at::Tensor Tensor::outer(const at::Tensor & vec2) const {
+    return at::_ops::outer::call(const_cast<Tensor&>(*this), vec2);
+}
+
+// aten::ger(Tensor self, Tensor vec2) -> Tensor
+inline at::Tensor Tensor::ger(const at::Tensor & vec2) const {
+    return at::_ops::ger::call(const_cast<Tensor&>(*this), vec2);
+}
+
+// aten::to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor
+inline at::Tensor Tensor::to_padded_tensor(double padding, at::OptionalIntArrayRef output_size) const {
+    return at::_ops::to_padded_tensor::call(const_cast<Tensor&>(*this), padding, output_size);
+}
+
+// aten::_nested_tensor_layer_norm(Tensor self, Tensor? weight, Tensor? bias, float eps) -> Tensor
+inline at::Tensor Tensor::_nested_tensor_layer_norm(const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) const {
+    return at::_ops::_nested_tensor_layer_norm::call(const_cast<Tensor&>(*this), weight, bias, eps);
+}
+} // namespace at
+
+
+namespace c10 {
+template <>
+struct MaybeOwnedTraits<at::Tensor> {
+  using owned_type = at::Tensor;
+  using borrow_type = at::Tensor;
+
+  static borrow_type createBorrow(const owned_type& from) {
+    // NOTE: this can be implemented without the special
+    // unsafe_borrow_t Tensor constructor as
+    //
+    // return borrow_type(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(from.unsafeGetTensorImpl()));
+    //
+    // but that hurts inlining due to the nullptr check in the
+    // Tensor(c10::intrusive_ptr<...>) constructor. We already know
+    // that from.impl_ isn't null because from is a valid Tensor, so
+    // we needn't do the check again. (using __builtin_assume can
+    // avoid this, but wouldn't be portable to MSVC.)
+    return borrow_type(borrow_type::unsafe_borrow_t{}, from);
+  }
+
+  static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
+    lhs.unsafeReleaseTensorImpl();
+    // See above note: this can be implemented with public API
+    // similarly to createBorrow(), but that would hurt inlining.
+    lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs);
+  }
+
+  static void destroyBorrow(borrow_type& toDestroy) {
+    toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0.
+  }
+
+  static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
+    return borrow;
+  }
+
+  static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
+    return &borrow;
+  }
+
+  static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
+    return true;
+  }
+};
+
+template <>
+struct ExclusivelyOwnedTraits<at::Tensor> {
+  using repr_type = at::Tensor;
+  using pointer_type = at::Tensor*;
+  using const_pointer_type = const at::Tensor*;
+
+  static repr_type nullRepr() {
+    return at::Tensor();
+  }
+
+  template <class... Args>
+  static repr_type createInPlace(Args&&... args) {
+    return at::Tensor(std::forward<Args>(args)...);
+  }
+
+  static repr_type moveToRepr(at::Tensor&& x) {
+    return std::move(x);
+  }
+
+  static void destroyOwned(at::Tensor& x) {
+    return ExclusivelyOwnedTraits<at::TensorBase>::destroyOwned(x);
+  }
+
+  static at::Tensor take(at::Tensor& x) {
+    return std::move(x);
+  }
+
+  static pointer_type getImpl(repr_type& x) {
+    return &x;
+  }
+
+  static const_pointer_type getImpl(const repr_type& x) {
+    return &x;
+  }
+};
+} // namespace c10
+
+namespace at {
+
+inline c10::MaybeOwned<Tensor> borrow_from_optional_tensor(
+    const c10::optional<Tensor>& opt) {
+  return opt.has_value()
+    ? c10::MaybeOwned<Tensor>::borrowed(*opt)
+    : c10::MaybeOwned<Tensor>::owned(c10::in_place);
+}
+
+inline c10::MaybeOwned<Tensor> Tensor::expect_contiguous(MemoryFormat memory_format) const & {
+  if (is_contiguous(memory_format)) {
+    return c10::MaybeOwned<Tensor>::borrowed(*this);
+  } else {
+    return c10::MaybeOwned<Tensor>::owned(__dispatch_contiguous(memory_format));
+  }
+}
+} // namespace at
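(Not part of the diff.) The traits above are what make c10::MaybeOwned<Tensor> cheap: borrowing skips the refcount bump entirely. A minimal sketch of how they are typically consumed in a kernel, assuming a standard libtorch setup; add_bias is a hypothetical function name:

    #include <ATen/ATen.h>

    at::Tensor add_bias(const at::Tensor& x, const c10::optional<at::Tensor>& bias) {
      // Borrows when `bias` is present (no refcount bump); otherwise owns an
      // empty Tensor, so `*b` is always safe to dereference.
      c10::MaybeOwned<at::Tensor> b = at::borrow_from_optional_tensor(bias);
      // expect_contiguous() borrows `x` when it is already contiguous and only
      // materializes (and owns) a contiguous copy when it is not.
      c10::MaybeOwned<at::Tensor> xc = x.expect_contiguous();
      return b->defined() ? *xc + *b : *xc;
    }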
diff --git a/voice_bridge/torch/include/ATen/core/TorchDispatchUtils.h b/voice_bridge/torch/include/ATen/core/TorchDispatchUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed7b4181095d5dc2703058a743ed9e7fc9c8ac56
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/TorchDispatchUtils.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace impl {
+
+bool tensor_has_dispatch(const at::Tensor& t);
+bool tensorlist_has_dispatch(at::ITensorListRef li);
+bool tensorlist_has_dispatch(const c10::List<c10::optional<at::Tensor>>& li);
+using c10::impl::dispatch_mode_enabled;
+
+}}
diff --git a/voice_bridge/torch/include/ATen/core/TransformationHelper.h b/voice_bridge/torch/include/ATen/core/TransformationHelper.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca6f6308813919cb058ce42fbf4173409648c22e
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/TransformationHelper.h
@@ -0,0 +1,173 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+
+// Using DistAccumType in accumulate types for distributions.
+// Note: Ideally we'd be using ATen/AccumulateType.h but looks
+// like there is some inconsistency in how accumulate types
+// are mapped currently, e.g. for the cpu side, float is mapped
+// to double.
+template <typename T>
+struct DistAccumType { };
+
+#if defined(__CUDACC__) || defined(__HIPCC__)
+template <> struct DistAccumType<half> { using type = float; };
+#endif
+template <> struct DistAccumType<BFloat16> { using type = float; };
+template <> struct DistAccumType<Half> { using type = float; };
+template <> struct DistAccumType<float> { using type = float; };
+template <> struct DistAccumType<double> { using type = double; };
+
+template <typename T>
+using dist_acctype = typename DistAccumType<T>::type;
+
+namespace transformation {
+
+/**
+ * A transformation function for `torch.Tensor.random_()`, when both `from` and `to` are specified.
+ * `range` is `to - from`
+ * `base` is `from`
+ */
+template <typename T, typename V>
+C10_HOST_DEVICE inline T uniform_int_from_to(V val, uint64_t range, int64_t base) {
+  return static_cast<T>(static_cast<int64_t>((val % range) + base));
+}
+
+/**
+ * A transformation function for `torch.Tensor.random_()`, when `from=min_value(int64_t)` and to=None
+ */
+template <typename T, typename V>
+C10_HOST_DEVICE inline T uniform_int_full_range(V val) {
+  return static_cast<T>(static_cast<int64_t>(val));
+}
+
+/**
+ * A transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`.
+ * In order to prevent compiler warnings reported in GitHub issue 46391, T can't be float or double
+ * in this overloaded version
+ */
+template <typename T, typename V>
+C10_HOST_DEVICE inline typename std::enable_if<!(std::is_floating_point<T>::value), T>::type uniform_int(V val) {
+  if (std::is_same<T, bool>::value) {
+    return static_cast<bool>(val & 1);
+  } else if (std::is_same<T, int64_t>::value) {
+    return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
+  } else if (std::is_same<T, at::Half>::value || std::is_same<T, at::BFloat16>::value) {
+    return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
+  } else if (std::is_integral<T>::value) {
+    return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1));
+  } else {
+    assert(false);
+    return 0;
+  }
+}
+
+/**
+ * An overloaded transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`,
+ * added to fix compiler warnings reported in GitHub issue 46391. T is either float or double in this version.
+ */
+template <typename T, typename V>
+C10_HOST_DEVICE inline typename std::enable_if<std::is_floating_point<T>::value, T>::type uniform_int(V val) {
+  return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1));
+}
+
+template <typename T, typename V>
+C10_HOST_DEVICE inline dist_acctype<T> uniform_real(V val, T from, T to) {
+  constexpr auto MASK = static_cast<V>((static_cast<uint64_t>(1) << std::numeric_limits<T>::digits) - 1);
+  constexpr auto DIVISOR = static_cast<dist_acctype<T>>(1) / (static_cast<uint64_t>(1) << std::numeric_limits<T>::digits);
+  dist_acctype<T> x = (val & MASK) * DIVISOR;
+  return (x * (to - from) + from);
+}
+
+/**
+ * Transforms normally distributed `val` with mean 0.0 and standard deviation 1.0 to
+ * normally distributed with `mean` and standard deviation `std`.
+ */
+template <typename T>
+C10_HOST_DEVICE inline T normal(T val, T mean, T std) {
+  return val * std + mean;
+}
+
+/**
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
+ * Cauchy distribution with location parameter `median` and scale parameter `sigma`.
+ */
+template <typename T>
+C10_HOST_DEVICE inline T cauchy(T val, T median, T sigma) {
+  // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
+  // __tanf overflows and returns `inf/-inf` when (val > 1 - eps) or (val < 0 + eps),
+  // thus we clip those values.
+  constexpr T eps = std::numeric_limits<T>::epsilon();
+  constexpr T one_minus_eps = 1 - eps;
+  constexpr T zero_plus_eps = 0 + eps;
+  val = (val > one_minus_eps ? one_minus_eps : val);
+  val = (val < zero_plus_eps ? zero_plus_eps : val);
+  return median + sigma * at::tan(c10::pi<T> * (val - static_cast<T>(0.5)));
+}
+
+template <>
+C10_HOST_DEVICE inline double cauchy(double val, double median, double sigma) {
+  // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function
+  return median + sigma * at::tan(c10::pi<double> * (val - static_cast<double>(0.5)));
+}
+
+/**
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
+ * exponentially distributed with `lambda` parameter of the distribution.
+ */
+template <typename T>
+C10_HOST_DEVICE __ubsan_ignore_float_divide_by_zero__ inline T exponential(T val, T lambda) {
+  // https://en.wikipedia.org/wiki/Exponential_distribution#Generating_exponential_variates
+  // Different implementations for CUDA and CPU to preserve original logic
+  // TODO: must be investigated and unified!!!
+  // https://github.com/pytorch/pytorch/issues/38662
+#if defined(__CUDACC__) || defined(__HIPCC__)
+  // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706
+  // curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0.
+  // we need log to be not 0, and not underflow when converted to half
+  // fast __logf approximation can underflow, so set log to -epsilon/2 for 1 or close to 1 args
+  auto log = val >= static_cast<T>(1.) - std::numeric_limits<T>::epsilon() / 2
+      ? -std::numeric_limits<T>::epsilon() / 2
+      : at::log(val);
+  return static_cast<T>(-1.0) / lambda * log;
+#else
+  return static_cast<T>(-1.0) / lambda * at::log(static_cast<T>(1.0) - val);
+#endif
+}
+
+/**
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
+ * geometrically distributed with success probability `p`.
+ */
+template <typename T>
+C10_HOST_DEVICE inline T geometric(T val, T p) {
+  // https://en.wikipedia.org/wiki/Geometric_distribution#Related_distributions
+  return static_cast<T>(::ceil(at::log(val) / at::log(static_cast<T>(1.0) - p)));
+}
+
+/**
+ * Transforms normally distributed `val` to log-normally distributed.
+ */
+template <typename T>
+C10_HOST_DEVICE inline T log_normal(T val) {
+  // https://en.wikipedia.org/wiki/Log-normal_distribution#Mode,_median,_quantiles
+  return at::exp(val);
+}
+
+/**
+ * Transforms uniformly distributed `val` between 0.0 and 1.0 to
+ * Bernoulli distributed with success probability `p`.
+ */
+template <typename T>
+C10_HOST_DEVICE inline T bernoulli(T val, T p) {
+  return val < p;
+}
+
+}} // namespace at::transformation
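(Not part of the diff.) uniform_real() above turns raw random bits into a value in [from, to) by keeping std::numeric_limits<T>::digits low bits and scaling by 2^-digits, which yields a uniform value in [0, 1). A standalone sketch of the same idea, with hypothetical names:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    double uniform_real_sketch(uint64_t val, double from, double to) {
      constexpr int kDigits = std::numeric_limits<double>::digits;  // 53 mantissa bits
      constexpr uint64_t kMask = (uint64_t(1) << kDigits) - 1;      // keep the low 53 bits
      constexpr double kDivisor = 1.0 / (uint64_t(1) << kDigits);   // scale by 2^-53
      double x = (val & kMask) * kDivisor;  // uniform in [0, 1)
      return x * (to - from) + from;        // shift/scale into [from, to)
    }

    int main() {
      double r = uniform_real_sketch(0x123456789abcdef0ULL, -1.0, 1.0);
      assert(r >= -1.0 && r < 1.0);
      std::printf("%f\n", r);
    }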
diff --git a/voice_bridge/torch/include/ATen/core/UndefinedTensorImpl.h b/voice_bridge/torch/include/ATen/core/UndefinedTensorImpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..885f6e195f05d37ab4253315242167f8e546dcc1
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/UndefinedTensorImpl.h
@@ -0,0 +1 @@
+#include <c10/core/UndefinedTensorImpl.h>
diff --git a/voice_bridge/torch/include/ATen/core/UnsafeFromTH.h b/voice_bridge/torch/include/ATen/core/UnsafeFromTH.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ad5c45d3ab6ca499bc10b71c68bc04ededfeb87
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/UnsafeFromTH.h
@@ -0,0 +1,21 @@
+#pragma once
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+inline Tensor unsafeTensorFromTH(void * th_pointer, bool retain) {
+  auto tensor_impl = c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(static_cast<TensorImpl*>(th_pointer));
+  if (retain && tensor_impl.get() != UndefinedTensorImpl::singleton()) {
+    c10::raw::intrusive_ptr::incref(tensor_impl.get());
+  }
+  return Tensor(std::move(tensor_impl));
+}
+
+inline Storage unsafeStorageFromTH(void * th_pointer, bool retain) {
+  if (retain && th_pointer) {
+    c10::raw::intrusive_ptr::incref(static_cast<StorageImpl*>(th_pointer));
+  }
+  return Storage(c10::intrusive_ptr<StorageImpl>::reclaim(static_cast<StorageImpl*>(th_pointer)));
+}
+
+}
diff --git a/voice_bridge/torch/include/ATen/core/VariableHooksInterface.h b/voice_bridge/torch/include/ATen/core/VariableHooksInterface.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b0067b94996a3a83331c1bf2944307c568ccff2
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/VariableHooksInterface.h
@@ -0,0 +1,73 @@
+#pragma once
+
+#include
+#include
+
+// A little explanation about why this file exists at all. We have
+// a few methods on Tensor class which require reified access to
+// AutogradMeta. In open source, this isn't a big deal: we just access
+// torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and
+// we can put the definitions inline. This is because everything gets balled
+// into a single dynamic library in the end.
+//
+// However, inside our Facebook internal version of our build system, we
+// have a split between aten and torch/csrc. So we cannot simply just
+// cross this boundary. "Now wait," you might say, "Why don't we just
+// merge the libraries inside Facebook?" Well, the problem is that there
+// are some downstream applications which are at binary size limit, and
+// incorporating all of the extra code from libtorch would push them
+// over (admarket/adreview/service:adreviewservice, see also
+// https://github.com/pytorch/pytorch/pull/29299) So if you want to do that,
+// we have to fix all of the services like this.
+//
+// I didn't want to block eliminating Tensor-Variable on this work, so I
+// had to introduce another dynamic dispatch to get to the variable
+// implementations (which live in torch/csrc/autograd/variable.cpp, FYI).
+//
+// I also considered using our existing dynamic dispatch mechanism, c10
+// dispatcher, to do this. However, (1) some of the functions on Tensor
+// have weird signatures that are not supported by autograd, and (2)
+// see this bug https://github.com/pytorch/pytorch/issues/30102
+
+namespace torch { namespace autograd {
+
+struct Node;
+
+}} // namespace torch::autograd
+
+namespace at {
+namespace impl {
+
+struct TORCH_API VariableHooksInterface {
+  virtual ~VariableHooksInterface() = default;
+  virtual TensorBase tensor_data(const TensorBase&) const = 0;
+  virtual TensorBase variable_data(const TensorBase&) const = 0;
+  virtual const std::shared_ptr<torch::autograd::Node>& grad_fn(const TensorBase&) const = 0;
+  virtual unsigned _register_hook(
+      const TensorBase&,
+      std::function<TensorBase(const TensorBase&)> hook) const = 0;
+  virtual void remove_hook(const TensorBase&, unsigned pos) const = 0;
+  virtual bool is_view(const TensorBase&) const = 0;
+  virtual const TensorBase& base(const TensorBase&) const = 0;
+  virtual const std::string& name(const TensorBase&) const = 0;
+  virtual bool is_leaf(const TensorBase&) const = 0;
+  virtual int64_t output_nr(const TensorBase&) const = 0;
+  virtual void set_data(const TensorBase&, const TensorBase&) const = 0;
+  virtual TensorBase data(const TensorBase&) const = 0;
+  virtual int64_t _version(const TensorBase&) const = 0;
+  virtual void retain_grad(const TensorBase&) const = 0;
+  virtual bool retains_grad(const TensorBase&) const = 0;
+  virtual void _backward(const Tensor&, TensorList, const c10::optional<Tensor>&, c10::optional<bool>, bool) const = 0;
+  virtual void requires_grad_(const TensorBase&, bool) const = 0;
+};
+
+TORCH_API void SetVariableHooks(VariableHooksInterface* hooks);
+TORCH_API VariableHooksInterface* GetVariableHooks();
+
+struct TORCH_API VariableHooksRegisterer {
+  explicit VariableHooksRegisterer(VariableHooksInterface* hooks) {
+    SetVariableHooks(hooks);
+  }
+};
+
+}} // namespace at::impl
diff --git a/voice_bridge/torch/include/ATen/core/Variadic.h b/voice_bridge/torch/include/ATen/core/Variadic.h
new file mode 100644
index 0000000000000000000000000000000000000000..61b6a35a0b1cb1ffd3bcfbe2b0d9d7ea6ac15ded
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/Variadic.h
@@ -0,0 +1,95 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace at {
+
+// This class allows you to write variadic functions which
+// call a (possibly overloaded) function on each argument,
+// in order. This is most commonly used in autogenerated code,
+// where it is convenient to have a function that can uniformly
+// take arguments of different types. If your arguments
+// are homogeneous consider using a std::initializer_list instead.
+//
+// For examples of this in use, see torch/csrc/utils/variadic.h
+template <typename F>
+struct IterArgs {
+  template <typename... Args>
+  inline F& apply() {
+    return self();
+  }
+
+  // NB: Use perfect forwarding here, otherwise we'll make value
+  // copies of all arguments!
+  template <typename T, typename... Args>
+  inline F& apply(T&& arg, Args&&... args) {
+    self()(std::forward<T>(arg));
+    if (self().short_circuit()) {
+      return self();
+    } else {
+      return apply(std::forward<Args>(args)...);
+    }
+  }
+
+  // Here are some handy overloads which provide sensible
+  // defaults for container-like structures that one might
+  // be interested in recursing into. You can enable them
+  // by adding:
+  //
+  //    using IterArgs<YourStructName>::operator()
+  //
+  // to your struct. These are not enabled by default because
+  // you may be able to process these structures more efficiently
+  // than handling them one-by-one.
+
+  template <typename T>
+  void operator()(c10::IListRef<T> args) {
+    for (const auto& arg : args) {
+      self()(arg);
+      if (self().short_circuit())
+        return;
+    }
+  }
+
+  template <typename T>
+  void operator()(at::ArrayRef<T> args) {
+    for (const auto& arg : args) {
+      self()(arg);
+      if (self().short_circuit())
+        return;
+    }
+  }
+
+  template <typename T>
+  void operator()(const torch::List<T>& args) {
+    for (const auto& arg : args) {
+      self()(arg);
+      if (self().short_circuit())
+        return;
+    }
+  }
+
+  // NB: we need to specify std::vector manually as C++ won't
+  // do an implicit conversion to make a template deduction go through.
+  template <typename T>
+  void operator()(const std::vector<T>& args) {
+    self()(at::ArrayRef<T>{args});
+  }
+
+  constexpr bool short_circuit() const {
+    return false;
+  }
+
+ private:
+  inline F& self() {
+    return *static_cast<F*>(this);
+  }
+};
+
+} // namespace at
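(Not part of the diff.) IterArgs is a CRTP helper: apply() perfectly forwards each argument to the derived struct's operator(), stopping early if short_circuit() returns true. A minimal consumer sketch in the style of torch/csrc/utils/variadic.h; CountDefinedTensors and count_defined_tensors are hypothetical names:

    #include <cstddef>
    #include <utility>
    #include <ATen/ATen.h>
    #include <ATen/core/Variadic.h>

    struct CountDefinedTensors : at::IterArgs<CountDefinedTensors> {
      std::size_t count = 0;
      // Called once per Tensor argument, in order.
      void operator()(const at::Tensor& t) {
        if (t.defined()) ++count;
      }
      // Fallback for non-Tensor arguments: ignore them.
      template <typename T>
      void operator()(const T&) {}
    };

    template <typename... Args>
    std::size_t count_defined_tensors(Args&&... args) {
      // apply() returns a reference to the derived struct, so we can read the tally.
      return CountDefinedTensors().apply(std::forward<Args>(args)...).count;
    }
    // e.g. count_defined_tensors(at::ones({2}), 42, at::Tensor()) == 1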
diff --git a/voice_bridge/torch/include/ATen/core/Vitals.h b/voice_bridge/torch/include/ATen/core/Vitals.h
new file mode 100644
index 0000000000000000000000000000000000000000..48913c54185f3ea8ed757487000d9820b8038f92
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/Vitals.h
@@ -0,0 +1,94 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace at {
+namespace vitals {
+
+TORCH_API bool torchVitalEnabled();
+
+struct TORCH_API TorchVitalAttr {
+  // always initialized to empty
+  std::string value = "";
+  template <typename T>
+  TorchVitalAttr& operator<<(const T& t) {
+    if (torchVitalEnabled()) {
+      std::stringstream ss;
+      ss << t;
+      value += ss.str();
+    }
+    return *this;
+  }
+
+  template <typename T>
+  void write(const T& t, bool force) {
+    if (force || torchVitalEnabled()) {
+      std::stringstream ss;
+      ss << t;
+      value = ss.str();
+    }
+  }
+};
+
+struct TORCH_API TorchVital {
+  std::string name;
+  std::unordered_map<std::string, TorchVitalAttr> attrs;
+
+  explicit TorchVital(std::string n) : name(std::move(n)) {}
+  TorchVital() = delete;
+
+  TorchVitalAttr& create(const std::string& attr);
+  TorchVitalAttr& create(const std::string& attr, bool force);
+  friend std::ostream& operator<<(std::ostream& os, const TorchVital& dt);
+
+  ~TorchVital();
+};
+
+std::ostream& operator<<(std::ostream& os, TorchVital const& tv);
+
+// A way to access vitals by string names instead of by global reference.
+// This enables access to vitals from the Python API.
+class TORCH_API APIVitals {
+ public:
+  bool vitals_enabled;
+
+  // Set any vital sign that was added to the map.
+  bool setVital(
+      const std::string& vital_name,
+      const std::string& attr_name,
+      const std::string& value,
+      bool force = false);
+  std::string readVitals();
+
+  APIVitals();
+
+  // Ensure this stays a singleton
+  APIVitals(APIVitals const& other) = delete;
+  APIVitals(APIVitals&& other) = delete;
+  APIVitals& operator=(const APIVitals&) = delete;
+  APIVitals& operator=(APIVitals&&) = delete;
+
+ private:
+  std::unordered_map<std::string, TorchVital> name_map_;
+};
+
+extern TORCH_API APIVitals VitalsAPI;
+
+} // namespace vitals
+} // namespace at
+
+#define TORCH_VITAL_DECLARE(name) \
+  TORCH_API at::vitals::TorchVital TorchVital_##name;
+
+#define TORCH_VITAL_DEFINE(name) \
+  TORCH_API at::vitals::TorchVital TorchVital_##name(#name);
+
+#define TORCH_VITAL_BASE(name) TorchVital_##name
+
+#define TORCH_VITAL(name, attr) TORCH_VITAL_BASE(name).create(#attr)
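(Not part of the diff.) A minimal usage sketch for the macros above: TORCH_VITAL_DEFINE instantiates the global TorchVital_##name object, and TORCH_VITAL(name, attr) streams into one of its named attributes, which is a no-op unless torchVitalEnabled() reports that vitals are switched on. record_build_info is a hypothetical function name:

    #include <ATen/core/Vitals.h>

    // Defines the global at::vitals::TorchVital TorchVital_Example("Example").
    TORCH_VITAL_DEFINE(Example);

    void record_build_info(int version) {
      // operator<< appends only when vitals are enabled, so this is cheap when disabled.
      TORCH_VITAL(Example, version) << version;
      TORCH_VITAL_BASE(Example).create("note") << "bridge initialized";
    }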
diff --git a/voice_bridge/torch/include/ATen/core/alias_info.h b/voice_bridge/torch/include/ATen/core/alias_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd844dc392ac69063c121a0b76b9fb7eae55d367
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/alias_info.h
@@ -0,0 +1,119 @@
+#pragma once
+#include <unordered_set>
+#include <vector>
+#include <ATen/core/symbol.h>
+#include <c10/util/Exception.h>
+
+namespace c10 {
+/**
+ * class AliasInfo
+ *
+ * Data structure to hold aliasing information for an `Argument`. They can be
+ * nested to represent aliasing information on contained types.
+ *
+ * There is a `beforeSet` which describes the aliasing information before the
+ * operator executes, and an `afterSet` that describes aliasing info
+ * after execution.
+ */
+class AliasInfo {
+ public:
+  // Symbol for the set that can alias anything
+  static Symbol wildcardSet() {
+    static const Symbol wc = Symbol::fromQualString("alias::*");
+    return wc;
+  }
+
+  void setIsWrite(bool isWrite) {
+    isWrite_ = isWrite;
+  }
+
+  bool isWrite() const {
+    return isWrite_;
+  }
+
+  void addBeforeSet(Symbol aliasSet) {
+    beforeSets_.insert(aliasSet);
+  }
+
+  void addAfterSet(Symbol aliasSet) {
+    afterSets_.insert(aliasSet);
+  }
+
+  const std::unordered_set<Symbol>& beforeSets() const {
+    return beforeSets_;
+  }
+
+  const std::unordered_set<Symbol>& afterSets() const {
+    return afterSets_;
+  }
+
+  Symbol beforeSet() const {
+    AT_ASSERT(beforeSets_.size() == 1);
+    return *beforeSets_.begin();
+  }
+
+  bool isWildcardBefore() const {
+    return beforeSets_.count(wildcardSet()) != 0;
+  }
+
+  bool isWildcardAfter() const {
+    return afterSets_.count(wildcardSet()) != 0;
+  }
+
+  // the alias info for the contained types of the type
+  // e.g. if this is an annotation on List[T], `sets` refers to
+  // the alias sets that the list may be in
+  // while containedTypes()[0] refers to the sets that members of the list
+  // may be in
+  void addContainedType(AliasInfo aliasInfo) {
+    containedTypes_.push_back(std::move(aliasInfo));
+  }
+  const std::vector<AliasInfo>& containedTypes() const {
+    return containedTypes_;
+  }
+
+ private:
+  std::unordered_set<Symbol> beforeSets_;
+  std::unordered_set<Symbol> afterSets_;
+  std::vector<AliasInfo> containedTypes_;
+  bool isWrite_ = false;
+};
+
+inline bool operator==(const AliasInfo& lhs, const AliasInfo& rhs) {
+  return lhs.isWrite() == rhs.isWrite()
+      && lhs.beforeSets() == rhs.beforeSets()
+      && lhs.afterSets() == rhs.afterSets()
+      && lhs.containedTypes() == rhs.containedTypes();
+}
+
+// this does match the way things are represented in the schema
+inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) {
+  out << "(";
+  bool first = true;
+  for (const auto& set : aliasInfo.beforeSets()) {
+    if (first) {
+      first = false;
+    } else {
+      out << "|";
+    }
+    out << set.toUnqualString();
+  }
+  if (aliasInfo.isWrite()) {
+    out << "!";
+  }
+  if (aliasInfo.beforeSets() != aliasInfo.afterSets()) {
+    out << " -> ";
+    first = true;
+    for (const auto& set : aliasInfo.afterSets()) {
+      if (first) {
+        first = false;
+      } else {
+        out << "|";
+      }
+      out << set.toUnqualString();
+    }
+  }
+  out << ")";
+  return out;
+}
+} // namespace c10
diff --git a/voice_bridge/torch/include/ATen/core/aten_interned_strings.h b/voice_bridge/torch/include/ATen/core/aten_interned_strings.h
new file mode 100644
index 0000000000000000000000000000000000000000..349b344f001a0e57590860fbdc5376137563ec81
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/aten_interned_strings.h
@@ -0,0 +1,2071 @@
+#pragma once
+
+// @generated by torchgen/gen.py from aten_interned_strings.h
+
+#if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
+#error This change adds a dependency on native_functions.yaml, \
+  meaning the file will need to be re-compiled every time an operator \
+  is changed or added. Consider if including <ATen/core/symbol.h> for \
+  the c10::Symbol class would be sufficient, or if your change would be \
+  better placed in another file.
+#endif
+
+// ATen symbols correspond exactly to operators defined in ATen. Every
+// symbol here corresponds exactly to an ATen operation defined in
+// native_functions.yaml; attributes are in one-to-one correspondence
+// with their ATen name.
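(Not part of the diff.) FORALL_ATEN_BASE_SYMBOLS below is an X-macro: callers pass a two-argument macro and it is expanded once per (namespace, name) pair. A trivial sketch that counts the entries at compile time, assuming the guard macros in the #error block above are not defined; COUNT_ONE and kNumAtenBaseSymbols are hypothetical names:

    #include <ATen/core/aten_interned_strings.h>

    // Each _(aten, name) entry expands to "+1", so the whole list
    // becomes the constant expression 0 +1 +1 ... +1.
    #define COUNT_ONE(ns, s) +1
    constexpr int kNumAtenBaseSymbols = 0 FORALL_ATEN_BASE_SYMBOLS(COUNT_ONE);
    #undef COUNT_ONE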
+ +#define FORALL_ATEN_BASE_SYMBOLS(_) \ +_(aten, __and__) \ +_(aten, __iand__) \ +_(aten, __ilshift__) \ +_(aten, __ior__) \ +_(aten, __irshift__) \ +_(aten, __ixor__) \ +_(aten, __lshift__) \ +_(aten, __or__) \ +_(aten, __rshift__) \ +_(aten, __xor__) \ +_(aten, _adaptive_avg_pool2d) \ +_(aten, _adaptive_avg_pool2d_backward) \ +_(aten, _adaptive_avg_pool3d) \ +_(aten, _adaptive_avg_pool3d_backward) \ +_(aten, _add_batch_dim) \ +_(aten, _add_relu) \ +_(aten, _add_relu_) \ +_(aten, _addmm_activation) \ +_(aten, _aminmax) \ +_(aten, _amp_foreach_non_finite_check_and_unscale) \ +_(aten, _amp_foreach_non_finite_check_and_unscale_) \ +_(aten, _amp_update_scale) \ +_(aten, _amp_update_scale_) \ +_(aten, _assert_async) \ +_(aten, _assert_tensor_metadata) \ +_(aten, _autocast_to_full_precision) \ +_(aten, _autocast_to_reduced_precision) \ +_(aten, _backward) \ +_(aten, _batch_norm_impl_index) \ +_(aten, _batch_norm_impl_index_backward) \ +_(aten, _cast_Byte) \ +_(aten, _cast_Char) \ +_(aten, _cast_Double) \ +_(aten, _cast_Float) \ +_(aten, _cast_Half) \ +_(aten, _cast_Int) \ +_(aten, _cast_Long) \ +_(aten, _cast_Short) \ +_(aten, _cdist_backward) \ +_(aten, _cdist_forward) \ +_(aten, _cholesky_solve_helper) \ +_(aten, _choose_qparams_per_tensor) \ +_(aten, _coalesce) \ +_(aten, _coalesced) \ +_(aten, _coalesced_) \ +_(aten, _compute_linear_combination) \ +_(aten, _conj) \ +_(aten, _conj_copy) \ +_(aten, _conj_physical) \ +_(aten, _conv_depthwise2d) \ +_(aten, _convert_indices_from_coo_to_csr) \ +_(aten, _convert_indices_from_csr_to_coo) \ +_(aten, _convolution) \ +_(aten, _convolution_double_backward) \ +_(aten, _convolution_mode) \ +_(aten, _copy_from) \ +_(aten, _copy_from_and_resize) \ +_(aten, _ctc_loss) \ +_(aten, _ctc_loss_backward) \ +_(aten, _cudnn_ctc_loss) \ +_(aten, _cudnn_init_dropout_state) \ +_(aten, _cudnn_rnn) \ +_(aten, _cudnn_rnn_backward) \ +_(aten, _cudnn_rnn_flatten_weight) \ +_(aten, _cufft_clear_plan_cache) \ +_(aten, _cufft_get_plan_cache_max_size) \ +_(aten, _cufft_get_plan_cache_size) \ +_(aten, _cufft_set_plan_cache_max_size) \ +_(aten, _cummax_helper) \ +_(aten, _cummin_helper) \ +_(aten, _debug_has_internal_overlap) \ +_(aten, _dimI) \ +_(aten, _dimV) \ +_(aten, _dim_arange) \ +_(aten, _dirichlet_grad) \ +_(aten, _efficientzerotensor) \ +_(aten, _embedding_bag) \ +_(aten, _embedding_bag_backward) \ +_(aten, _embedding_bag_dense_backward) \ +_(aten, _embedding_bag_forward_only) \ +_(aten, _embedding_bag_per_sample_weights_backward) \ +_(aten, _embedding_bag_sparse_backward) \ +_(aten, _empty_affine_quantized) \ +_(aten, _empty_per_channel_affine_quantized) \ +_(aten, _euclidean_dist) \ +_(aten, _fake_quantize_learnable_per_channel_affine) \ +_(aten, _fake_quantize_learnable_per_channel_affine_backward) \ +_(aten, _fake_quantize_learnable_per_tensor_affine) \ +_(aten, _fake_quantize_learnable_per_tensor_affine_backward) \ +_(aten, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams) \ +_(aten, _fft_c2c) \ +_(aten, _fft_c2r) \ +_(aten, _fft_r2c) \ +_(aten, _flash_scaled_dot_product_attention) \ +_(aten, _foobar) \ +_(aten, _foreach_abs) \ +_(aten, _foreach_abs_) \ +_(aten, _foreach_acos) \ +_(aten, _foreach_acos_) \ +_(aten, _foreach_add) \ +_(aten, _foreach_add_) \ +_(aten, _foreach_addcdiv) \ +_(aten, _foreach_addcdiv_) \ +_(aten, _foreach_addcmul) \ +_(aten, _foreach_addcmul_) \ +_(aten, _foreach_asin) \ +_(aten, _foreach_asin_) \ +_(aten, _foreach_atan) \ +_(aten, _foreach_atan_) \ +_(aten, _foreach_ceil) \ +_(aten, _foreach_ceil_) \ +_(aten, _foreach_cos) \ 
+_(aten, _foreach_cos_) \ +_(aten, _foreach_cosh) \ +_(aten, _foreach_cosh_) \ +_(aten, _foreach_div) \ +_(aten, _foreach_div_) \ +_(aten, _foreach_erf) \ +_(aten, _foreach_erf_) \ +_(aten, _foreach_erfc) \ +_(aten, _foreach_erfc_) \ +_(aten, _foreach_exp) \ +_(aten, _foreach_exp_) \ +_(aten, _foreach_expm1) \ +_(aten, _foreach_expm1_) \ +_(aten, _foreach_floor) \ +_(aten, _foreach_floor_) \ +_(aten, _foreach_frac) \ +_(aten, _foreach_frac_) \ +_(aten, _foreach_lgamma) \ +_(aten, _foreach_lgamma_) \ +_(aten, _foreach_log) \ +_(aten, _foreach_log10) \ +_(aten, _foreach_log10_) \ +_(aten, _foreach_log1p) \ +_(aten, _foreach_log1p_) \ +_(aten, _foreach_log2) \ +_(aten, _foreach_log2_) \ +_(aten, _foreach_log_) \ +_(aten, _foreach_maximum) \ +_(aten, _foreach_maximum_) \ +_(aten, _foreach_minimum) \ +_(aten, _foreach_minimum_) \ +_(aten, _foreach_mul) \ +_(aten, _foreach_mul_) \ +_(aten, _foreach_neg) \ +_(aten, _foreach_neg_) \ +_(aten, _foreach_norm) \ +_(aten, _foreach_reciprocal) \ +_(aten, _foreach_reciprocal_) \ +_(aten, _foreach_round) \ +_(aten, _foreach_round_) \ +_(aten, _foreach_sigmoid) \ +_(aten, _foreach_sigmoid_) \ +_(aten, _foreach_sin) \ +_(aten, _foreach_sin_) \ +_(aten, _foreach_sinh) \ +_(aten, _foreach_sinh_) \ +_(aten, _foreach_sqrt) \ +_(aten, _foreach_sqrt_) \ +_(aten, _foreach_sub) \ +_(aten, _foreach_sub_) \ +_(aten, _foreach_tan) \ +_(aten, _foreach_tan_) \ +_(aten, _foreach_tanh) \ +_(aten, _foreach_tanh_) \ +_(aten, _foreach_trunc) \ +_(aten, _foreach_trunc_) \ +_(aten, _foreach_zero) \ +_(aten, _foreach_zero_) \ +_(aten, _fused_adam) \ +_(aten, _fused_adam_) \ +_(aten, _fused_dropout) \ +_(aten, _fused_moving_avg_obs_fq_helper) \ +_(aten, _fused_moving_avg_obs_fq_helper_functional) \ +_(aten, _fw_primal) \ +_(aten, _fw_primal_copy) \ +_(aten, _gather_sparse_backward) \ +_(aten, _grid_sampler_2d_cpu_fallback) \ +_(aten, _grid_sampler_2d_cpu_fallback_backward) \ +_(aten, _has_compatible_shallow_copy_type) \ +_(aten, _has_same_storage_numel) \ +_(aten, _histogramdd_bin_edges) \ +_(aten, _histogramdd_from_bin_cts) \ +_(aten, _histogramdd_from_bin_tensors) \ +_(aten, _index_put_impl) \ +_(aten, _index_put_impl_) \ +_(aten, _indices) \ +_(aten, _indices_copy) \ +_(aten, _is_zerotensor) \ +_(aten, _linalg_check_errors) \ +_(aten, _linalg_det) \ +_(aten, _linalg_eigh) \ +_(aten, _linalg_slogdet) \ +_(aten, _linalg_solve_ex) \ +_(aten, _linalg_svd) \ +_(aten, _local_scalar_dense) \ +_(aten, _log_softmax) \ +_(aten, _log_softmax_backward_data) \ +_(aten, _logcumsumexp) \ +_(aten, _lstm_mps) \ +_(aten, _lu_with_info) \ +_(aten, _make_dual) \ +_(aten, _make_dual_copy) \ +_(aten, _make_per_channel_quantized_tensor) \ +_(aten, _make_per_tensor_quantized_tensor) \ +_(aten, _masked_scale) \ +_(aten, _masked_softmax) \ +_(aten, _masked_softmax_backward) \ +_(aten, _mkldnn_reshape) \ +_(aten, _mkldnn_transpose) \ +_(aten, _mkldnn_transpose_) \ +_(aten, _mps_convolution) \ +_(aten, _mps_convolution_transpose) \ +_(aten, _mps_max_pool2d) \ +_(aten, _native_decoder_only_multi_head_attention) \ +_(aten, _native_multi_head_attention) \ +_(aten, _neg_view) \ +_(aten, _neg_view_copy) \ +_(aten, _nested_from_padded) \ +_(aten, _nested_from_padded_and_nested_example) \ +_(aten, _nested_select_backward) \ +_(aten, _nested_sum_backward) \ +_(aten, _nested_tensor_from_mask) \ +_(aten, _nested_tensor_from_mask_left_aligned) \ +_(aten, _nested_tensor_from_tensor_list) \ +_(aten, _nested_tensor_layer_norm) \ +_(aten, _nested_tensor_offsets) \ +_(aten, _nested_tensor_size) \ +_(aten, 
_nested_tensor_softmax_with_shape) \ +_(aten, _nested_tensor_strides) \ +_(aten, _nested_view_from_buffer) \ +_(aten, _nested_view_from_buffer_copy) \ +_(aten, _new_zeros_with_same_feature_meta) \ +_(aten, _nnpack_available) \ +_(aten, _nnpack_spatial_convolution) \ +_(aten, _nnz) \ +_(aten, _pack_padded_sequence) \ +_(aten, _pack_padded_sequence_backward) \ +_(aten, _pad_circular) \ +_(aten, _pad_enum) \ +_(aten, _pad_packed_sequence) \ +_(aten, _pdist_backward) \ +_(aten, _pdist_forward) \ +_(aten, _pin_memory) \ +_(aten, _remove_batch_dim) \ +_(aten, _reshape_alias) \ +_(aten, _reshape_alias_copy) \ +_(aten, _reshape_from_tensor) \ +_(aten, _resize_output) \ +_(aten, _resize_output_) \ +_(aten, _rowwise_prune) \ +_(aten, _sample_dirichlet) \ +_(aten, _saturate_weight_to_fp16) \ +_(aten, _scaled_dot_product_attention) \ +_(aten, _scaled_dot_product_attention_forward) \ +_(aten, _scaled_dot_product_attention_math) \ +_(aten, _segment_reduce_backward) \ +_(aten, _shape_as_tensor) \ +_(aten, _slow_conv2d_backward) \ +_(aten, _slow_conv2d_forward) \ +_(aten, _sobol_engine_draw) \ +_(aten, _sobol_engine_ff) \ +_(aten, _sobol_engine_ff_) \ +_(aten, _sobol_engine_initialize_state) \ +_(aten, _sobol_engine_initialize_state_) \ +_(aten, _sobol_engine_scramble) \ +_(aten, _sobol_engine_scramble_) \ +_(aten, _softmax) \ +_(aten, _softmax_backward_data) \ +_(aten, _sparse_addmm) \ +_(aten, _sparse_broadcast_to) \ +_(aten, _sparse_broadcast_to_copy) \ +_(aten, _sparse_bsc_tensor_unsafe) \ +_(aten, _sparse_bsr_tensor_unsafe) \ +_(aten, _sparse_compressed_tensor_unsafe) \ +_(aten, _sparse_coo_tensor_unsafe) \ +_(aten, _sparse_coo_tensor_with_dims) \ +_(aten, _sparse_coo_tensor_with_dims_and_tensors) \ +_(aten, _sparse_csc_tensor_unsafe) \ +_(aten, _sparse_csr_prod) \ +_(aten, _sparse_csr_sum) \ +_(aten, _sparse_csr_tensor_unsafe) \ +_(aten, _sparse_log_softmax) \ +_(aten, _sparse_log_softmax_backward_data) \ +_(aten, _sparse_mask_helper) \ +_(aten, _sparse_mm) \ +_(aten, _sparse_softmax) \ +_(aten, _sparse_softmax_backward_data) \ +_(aten, _sparse_sparse_matmul) \ +_(aten, _sparse_sum) \ +_(aten, _sparse_sum_backward) \ +_(aten, _spdiags) \ +_(aten, _stack) \ +_(aten, _standard_gamma) \ +_(aten, _standard_gamma_grad) \ +_(aten, _symeig_helper) \ +_(aten, _test_ambiguous_defaults) \ +_(aten, _test_autograd_multiple_dispatch) \ +_(aten, _test_autograd_multiple_dispatch_view) \ +_(aten, _test_autograd_multiple_dispatch_view_copy) \ +_(aten, _test_optional_filled_intlist) \ +_(aten, _test_optional_floatlist) \ +_(aten, _test_optional_intlist) \ +_(aten, _test_serialization_subcmul) \ +_(aten, _test_string_default) \ +_(aten, _test_warn_in_autograd) \ +_(aten, _thnn_differentiable_gru_cell_backward) \ +_(aten, _thnn_differentiable_lstm_cell_backward) \ +_(aten, _thnn_fused_gru_cell) \ +_(aten, _thnn_fused_gru_cell_backward) \ +_(aten, _thnn_fused_lstm_cell) \ +_(aten, _thnn_fused_lstm_cell_backward) \ +_(aten, _thnn_fused_lstm_cell_backward_impl) \ +_(aten, _to_copy) \ +_(aten, _to_cpu) \ +_(aten, _to_dense) \ +_(aten, _torch_cuda_cu_linker_symbol_op) \ +_(aten, _transform_bias_rescale_qkv) \ +_(aten, _transformer_decoder_only_layer_fwd) \ +_(aten, _transformer_encoder_layer_fwd) \ +_(aten, _trilinear) \ +_(aten, _triton_multi_head_attention) \ +_(aten, _triton_scaled_dot_attention) \ +_(aten, _unique) \ +_(aten, _unique2) \ +_(aten, _unpack_dual) \ +_(aten, _unsafe_view) \ +_(aten, _upsample_bicubic2d_aa) \ +_(aten, _upsample_bicubic2d_aa_backward) \ +_(aten, _upsample_bilinear2d_aa) \ +_(aten, 
_upsample_bilinear2d_aa_backward) \ +_(aten, _upsample_nearest_exact1d) \ +_(aten, _upsample_nearest_exact1d_backward) \ +_(aten, _upsample_nearest_exact2d) \ +_(aten, _upsample_nearest_exact2d_backward) \ +_(aten, _upsample_nearest_exact3d) \ +_(aten, _upsample_nearest_exact3d_backward) \ +_(aten, _use_cudnn_ctc_loss) \ +_(aten, _use_cudnn_rnn_flatten_weight) \ +_(aten, _validate_compressed_sparse_indices) \ +_(aten, _validate_sparse_bsc_tensor_args) \ +_(aten, _validate_sparse_bsr_tensor_args) \ +_(aten, _validate_sparse_compressed_tensor_args) \ +_(aten, _validate_sparse_coo_tensor_args) \ +_(aten, _validate_sparse_csc_tensor_args) \ +_(aten, _validate_sparse_csr_tensor_args) \ +_(aten, _values) \ +_(aten, _values_copy) \ +_(aten, _version) \ +_(aten, _weight_norm) \ +_(aten, _weight_norm_differentiable_backward) \ +_(aten, _weight_norm_interface) \ +_(aten, _weight_norm_interface_backward) \ +_(aten, abs) \ +_(aten, abs_) \ +_(aten, absolute) \ +_(aten, absolute_) \ +_(aten, acos) \ +_(aten, acos_) \ +_(aten, acosh) \ +_(aten, acosh_) \ +_(aten, adaptive_avg_pool1d) \ +_(aten, adaptive_avg_pool2d) \ +_(aten, adaptive_avg_pool3d) \ +_(aten, adaptive_avg_pool3d_backward) \ +_(aten, adaptive_max_pool1d) \ +_(aten, adaptive_max_pool2d) \ +_(aten, adaptive_max_pool2d_backward) \ +_(aten, adaptive_max_pool3d) \ +_(aten, adaptive_max_pool3d_backward) \ +_(aten, add) \ +_(aten, add_) \ +_(aten, addbmm) \ +_(aten, addbmm_) \ +_(aten, addcdiv) \ +_(aten, addcdiv_) \ +_(aten, addcmul) \ +_(aten, addcmul_) \ +_(aten, addmm) \ +_(aten, addmm_) \ +_(aten, addmv) \ +_(aten, addmv_) \ +_(aten, addr) \ +_(aten, addr_) \ +_(aten, adjoint) \ +_(aten, affine_grid_generator) \ +_(aten, affine_grid_generator_backward) \ +_(aten, alias) \ +_(aten, alias_copy) \ +_(aten, align_as) \ +_(aten, align_tensors) \ +_(aten, align_to) \ +_(aten, all) \ +_(aten, allclose) \ +_(aten, alpha_dropout) \ +_(aten, alpha_dropout_) \ +_(aten, amax) \ +_(aten, amin) \ +_(aten, aminmax) \ +_(aten, angle) \ +_(aten, any) \ +_(aten, arange) \ +_(aten, arccos) \ +_(aten, arccos_) \ +_(aten, arccosh) \ +_(aten, arccosh_) \ +_(aten, arcsin) \ +_(aten, arcsin_) \ +_(aten, arcsinh) \ +_(aten, arcsinh_) \ +_(aten, arctan) \ +_(aten, arctan2) \ +_(aten, arctan2_) \ +_(aten, arctan_) \ +_(aten, arctanh) \ +_(aten, arctanh_) \ +_(aten, argmax) \ +_(aten, argmin) \ +_(aten, argsort) \ +_(aten, argwhere) \ +_(aten, as_strided) \ +_(aten, as_strided_) \ +_(aten, as_strided_copy) \ +_(aten, as_strided_scatter) \ +_(aten, asin) \ +_(aten, asin_) \ +_(aten, asinh) \ +_(aten, asinh_) \ +_(aten, atan) \ +_(aten, atan2) \ +_(aten, atan2_) \ +_(aten, atan_) \ +_(aten, atanh) \ +_(aten, atanh_) \ +_(aten, atleast_1d) \ +_(aten, atleast_2d) \ +_(aten, atleast_3d) \ +_(aten, avg_pool1d) \ +_(aten, avg_pool2d) \ +_(aten, avg_pool2d_backward) \ +_(aten, avg_pool3d) \ +_(aten, avg_pool3d_backward) \ +_(aten, baddbmm) \ +_(aten, baddbmm_) \ +_(aten, bartlett_window) \ +_(aten, batch_norm) \ +_(aten, batch_norm_backward_elemt) \ +_(aten, batch_norm_backward_reduce) \ +_(aten, batch_norm_elemt) \ +_(aten, batch_norm_gather_stats) \ +_(aten, batch_norm_gather_stats_with_counts) \ +_(aten, batch_norm_stats) \ +_(aten, batch_norm_update_stats) \ +_(aten, bernoulli) \ +_(aten, bernoulli_) \ +_(aten, bilinear) \ +_(aten, binary_cross_entropy) \ +_(aten, binary_cross_entropy_backward) \ +_(aten, binary_cross_entropy_with_logits) \ +_(aten, bincount) \ +_(aten, binomial) \ +_(aten, bitwise_and) \ +_(aten, bitwise_and_) \ +_(aten, bitwise_left_shift) \ +_(aten, 
bitwise_left_shift_) \ +_(aten, bitwise_not) \ +_(aten, bitwise_not_) \ +_(aten, bitwise_or) \ +_(aten, bitwise_or_) \ +_(aten, bitwise_right_shift) \ +_(aten, bitwise_right_shift_) \ +_(aten, bitwise_xor) \ +_(aten, bitwise_xor_) \ +_(aten, blackman_window) \ +_(aten, block_diag) \ +_(aten, bmm) \ +_(aten, broadcast_tensors) \ +_(aten, broadcast_to) \ +_(aten, bucketize) \ +_(aten, can_cast) \ +_(aten, cartesian_prod) \ +_(aten, cat) \ +_(aten, cauchy) \ +_(aten, cauchy_) \ +_(aten, ccol_indices) \ +_(aten, ccol_indices_copy) \ +_(aten, cdist) \ +_(aten, ceil) \ +_(aten, ceil_) \ +_(aten, celu) \ +_(aten, celu_) \ +_(aten, chain_matmul) \ +_(aten, chalf) \ +_(aten, channel_shuffle) \ +_(aten, cholesky) \ +_(aten, cholesky_inverse) \ +_(aten, cholesky_solve) \ +_(aten, choose_qparams_optimized) \ +_(aten, chunk) \ +_(aten, clamp) \ +_(aten, clamp_) \ +_(aten, clamp_max) \ +_(aten, clamp_max_) \ +_(aten, clamp_min) \ +_(aten, clamp_min_) \ +_(aten, clip) \ +_(aten, clip_) \ +_(aten, clone) \ +_(aten, coalesce) \ +_(aten, col2im) \ +_(aten, col_indices) \ +_(aten, col_indices_copy) \ +_(aten, column_stack) \ +_(aten, combinations) \ +_(aten, complex) \ +_(aten, concat) \ +_(aten, concatenate) \ +_(aten, conj) \ +_(aten, conj_physical) \ +_(aten, conj_physical_) \ +_(aten, constant_pad_nd) \ +_(aten, contiguous) \ +_(aten, conv1d) \ +_(aten, conv2d) \ +_(aten, conv3d) \ +_(aten, conv_depthwise3d) \ +_(aten, conv_tbc) \ +_(aten, conv_tbc_backward) \ +_(aten, conv_transpose1d) \ +_(aten, conv_transpose2d) \ +_(aten, conv_transpose3d) \ +_(aten, convolution) \ +_(aten, convolution_backward) \ +_(aten, convolution_backward_overrideable) \ +_(aten, convolution_overrideable) \ +_(aten, copy) \ +_(aten, copy_) \ +_(aten, copy_sparse_to_sparse) \ +_(aten, copy_sparse_to_sparse_) \ +_(aten, copysign) \ +_(aten, copysign_) \ +_(aten, corrcoef) \ +_(aten, cos) \ +_(aten, cos_) \ +_(aten, cosh) \ +_(aten, cosh_) \ +_(aten, cosine_embedding_loss) \ +_(aten, cosine_similarity) \ +_(aten, count_nonzero) \ +_(aten, cov) \ +_(aten, cross) \ +_(aten, cross_entropy_loss) \ +_(aten, crow_indices) \ +_(aten, crow_indices_copy) \ +_(aten, ctc_loss) \ +_(aten, cudnn_affine_grid_generator) \ +_(aten, cudnn_affine_grid_generator_backward) \ +_(aten, cudnn_batch_norm) \ +_(aten, cudnn_batch_norm_backward) \ +_(aten, cudnn_convolution) \ +_(aten, cudnn_convolution_add_relu) \ +_(aten, cudnn_convolution_relu) \ +_(aten, cudnn_convolution_transpose) \ +_(aten, cudnn_grid_sampler) \ +_(aten, cudnn_grid_sampler_backward) \ +_(aten, cudnn_is_acceptable) \ +_(aten, cummax) \ +_(aten, cummaxmin_backward) \ +_(aten, cummin) \ +_(aten, cumprod) \ +_(aten, cumprod_) \ +_(aten, cumprod_backward) \ +_(aten, cumsum) \ +_(aten, cumsum_) \ +_(aten, cumulative_trapezoid) \ +_(aten, data) \ +_(aten, deg2rad) \ +_(aten, deg2rad_) \ +_(aten, dense_dim) \ +_(aten, dequantize) \ +_(aten, det) \ +_(aten, detach) \ +_(aten, detach_) \ +_(aten, detach_copy) \ +_(aten, diag) \ +_(aten, diag_backward) \ +_(aten, diag_embed) \ +_(aten, diagflat) \ +_(aten, diagonal) \ +_(aten, diagonal_backward) \ +_(aten, diagonal_copy) \ +_(aten, diagonal_scatter) \ +_(aten, diff) \ +_(aten, digamma) \ +_(aten, digamma_) \ +_(aten, dist) \ +_(aten, div) \ +_(aten, div_) \ +_(aten, divide) \ +_(aten, divide_) \ +_(aten, dot) \ +_(aten, dropout) \ +_(aten, dropout_) \ +_(aten, dsplit) \ +_(aten, dstack) \ +_(aten, einsum) \ +_(aten, elu) \ +_(aten, elu_) \ +_(aten, elu_backward) \ +_(aten, embedding) \ +_(aten, embedding_backward) \ +_(aten, embedding_bag) \ 
+_(aten, embedding_dense_backward) \ +_(aten, embedding_renorm) \ +_(aten, embedding_renorm_) \ +_(aten, embedding_sparse_backward) \ +_(aten, empty) \ +_(aten, empty_like) \ +_(aten, empty_quantized) \ +_(aten, empty_strided) \ +_(aten, eq) \ +_(aten, eq_) \ +_(aten, equal) \ +_(aten, erf) \ +_(aten, erf_) \ +_(aten, erfc) \ +_(aten, erfc_) \ +_(aten, erfinv) \ +_(aten, erfinv_) \ +_(aten, exp) \ +_(aten, exp2) \ +_(aten, exp2_) \ +_(aten, exp_) \ +_(aten, expand) \ +_(aten, expand_as) \ +_(aten, expand_copy) \ +_(aten, expm1) \ +_(aten, expm1_) \ +_(aten, exponential) \ +_(aten, exponential_) \ +_(aten, eye) \ +_(aten, fake_quantize_per_channel_affine) \ +_(aten, fake_quantize_per_channel_affine_cachemask) \ +_(aten, fake_quantize_per_channel_affine_cachemask_backward) \ +_(aten, fake_quantize_per_tensor_affine) \ +_(aten, fake_quantize_per_tensor_affine_cachemask) \ +_(aten, fake_quantize_per_tensor_affine_cachemask_backward) \ +_(aten, fbgemm_linear_fp16_weight) \ +_(aten, fbgemm_linear_fp16_weight_fp32_activation) \ +_(aten, fbgemm_linear_int8_weight) \ +_(aten, fbgemm_linear_int8_weight_fp32_activation) \ +_(aten, fbgemm_linear_quantize_weight) \ +_(aten, fbgemm_pack_gemm_matrix_fp16) \ +_(aten, fbgemm_pack_quantized_matrix) \ +_(aten, feature_alpha_dropout) \ +_(aten, feature_alpha_dropout_) \ +_(aten, feature_dropout) \ +_(aten, feature_dropout_) \ +_(aten, fft_fft) \ +_(aten, fft_fft2) \ +_(aten, fft_fftfreq) \ +_(aten, fft_fftn) \ +_(aten, fft_fftshift) \ +_(aten, fft_hfft) \ +_(aten, fft_hfft2) \ +_(aten, fft_hfftn) \ +_(aten, fft_ifft) \ +_(aten, fft_ifft2) \ +_(aten, fft_ifftn) \ +_(aten, fft_ifftshift) \ +_(aten, fft_ihfft) \ +_(aten, fft_ihfft2) \ +_(aten, fft_ihfftn) \ +_(aten, fft_irfft) \ +_(aten, fft_irfft2) \ +_(aten, fft_irfftn) \ +_(aten, fft_rfft) \ +_(aten, fft_rfft2) \ +_(aten, fft_rfftfreq) \ +_(aten, fft_rfftn) \ +_(aten, fill) \ +_(aten, fill_) \ +_(aten, fill_diagonal) \ +_(aten, fill_diagonal_) \ +_(aten, fix) \ +_(aten, fix_) \ +_(aten, flatten) \ +_(aten, flatten_dense_tensors) \ +_(aten, flip) \ +_(aten, fliplr) \ +_(aten, flipud) \ +_(aten, float_power) \ +_(aten, float_power_) \ +_(aten, floor) \ +_(aten, floor_) \ +_(aten, floor_divide) \ +_(aten, floor_divide_) \ +_(aten, fmax) \ +_(aten, fmin) \ +_(aten, fmod) \ +_(aten, fmod_) \ +_(aten, frac) \ +_(aten, frac_) \ +_(aten, fractional_max_pool2d) \ +_(aten, fractional_max_pool2d_backward) \ +_(aten, fractional_max_pool3d) \ +_(aten, fractional_max_pool3d_backward) \ +_(aten, frexp) \ +_(aten, frobenius_norm) \ +_(aten, from_file) \ +_(aten, full) \ +_(aten, full_like) \ +_(aten, fused_moving_avg_obs_fake_quant) \ +_(aten, gather) \ +_(aten, gather_backward) \ +_(aten, gcd) \ +_(aten, gcd_) \ +_(aten, ge) \ +_(aten, ge_) \ +_(aten, gelu) \ +_(aten, gelu_) \ +_(aten, gelu_backward) \ +_(aten, geometric) \ +_(aten, geometric_) \ +_(aten, geqrf) \ +_(aten, ger) \ +_(aten, glu) \ +_(aten, glu_backward) \ +_(aten, glu_backward_jvp) \ +_(aten, glu_jvp) \ +_(aten, gradient) \ +_(aten, greater) \ +_(aten, greater_) \ +_(aten, greater_equal) \ +_(aten, greater_equal_) \ +_(aten, grid_sampler) \ +_(aten, grid_sampler_2d) \ +_(aten, grid_sampler_2d_backward) \ +_(aten, grid_sampler_3d) \ +_(aten, grid_sampler_3d_backward) \ +_(aten, group_norm) \ +_(aten, gru) \ +_(aten, gru_cell) \ +_(aten, gt) \ +_(aten, gt_) \ +_(aten, hamming_window) \ +_(aten, hann_window) \ +_(aten, hardshrink) \ +_(aten, hardshrink_backward) \ +_(aten, hardsigmoid) \ +_(aten, hardsigmoid_) \ +_(aten, hardsigmoid_backward) \ +_(aten, 
hardswish) \ +_(aten, hardswish_) \ +_(aten, hardswish_backward) \ +_(aten, hardtanh) \ +_(aten, hardtanh_) \ +_(aten, hardtanh_backward) \ +_(aten, heaviside) \ +_(aten, heaviside_) \ +_(aten, hinge_embedding_loss) \ +_(aten, histc) \ +_(aten, histogram) \ +_(aten, histogramdd) \ +_(aten, hsplit) \ +_(aten, hspmm) \ +_(aten, hstack) \ +_(aten, huber_loss) \ +_(aten, huber_loss_backward) \ +_(aten, hypot) \ +_(aten, hypot_) \ +_(aten, i0) \ +_(aten, i0_) \ +_(aten, igamma) \ +_(aten, igamma_) \ +_(aten, igammac) \ +_(aten, igammac_) \ +_(aten, im2col) \ +_(aten, imag) \ +_(aten, index) \ +_(aten, index_add) \ +_(aten, index_add_) \ +_(aten, index_copy) \ +_(aten, index_copy_) \ +_(aten, index_fill) \ +_(aten, index_fill_) \ +_(aten, index_put) \ +_(aten, index_put_) \ +_(aten, index_reduce) \ +_(aten, index_reduce_) \ +_(aten, index_select) \ +_(aten, index_select_backward) \ +_(aten, indices) \ +_(aten, indices_copy) \ +_(aten, infinitely_differentiable_gelu_backward) \ +_(aten, inner) \ +_(aten, instance_norm) \ +_(aten, int_repr) \ +_(aten, inverse) \ +_(aten, is_coalesced) \ +_(aten, is_complex) \ +_(aten, is_conj) \ +_(aten, is_distributed) \ +_(aten, is_floating_point) \ +_(aten, is_inference) \ +_(aten, is_leaf) \ +_(aten, is_neg) \ +_(aten, is_nonzero) \ +_(aten, is_pinned) \ +_(aten, is_same_size) \ +_(aten, is_set_to) \ +_(aten, is_signed) \ +_(aten, is_vulkan_available) \ +_(aten, isclose) \ +_(aten, isfinite) \ +_(aten, isin) \ +_(aten, isinf) \ +_(aten, isnan) \ +_(aten, isneginf) \ +_(aten, isposinf) \ +_(aten, isreal) \ +_(aten, istft) \ +_(aten, item) \ +_(aten, kaiser_window) \ +_(aten, kl_div) \ +_(aten, kron) \ +_(aten, kthvalue) \ +_(aten, l1_loss) \ +_(aten, layer_norm) \ +_(aten, lcm) \ +_(aten, lcm_) \ +_(aten, ldexp) \ +_(aten, ldexp_) \ +_(aten, le) \ +_(aten, le_) \ +_(aten, leaky_relu) \ +_(aten, leaky_relu_) \ +_(aten, leaky_relu_backward) \ +_(aten, lerp) \ +_(aten, lerp_) \ +_(aten, less) \ +_(aten, less_) \ +_(aten, less_equal) \ +_(aten, less_equal_) \ +_(aten, lgamma) \ +_(aten, lgamma_) \ +_(aten, lift) \ +_(aten, lift_fresh) \ +_(aten, lift_fresh_copy) \ +_(aten, linalg_cholesky) \ +_(aten, linalg_cholesky_ex) \ +_(aten, linalg_cond) \ +_(aten, linalg_cross) \ +_(aten, linalg_det) \ +_(aten, linalg_diagonal) \ +_(aten, linalg_eig) \ +_(aten, linalg_eigh) \ +_(aten, linalg_eigvals) \ +_(aten, linalg_eigvalsh) \ +_(aten, linalg_householder_product) \ +_(aten, linalg_inv) \ +_(aten, linalg_inv_ex) \ +_(aten, linalg_ldl_factor) \ +_(aten, linalg_ldl_factor_ex) \ +_(aten, linalg_ldl_solve) \ +_(aten, linalg_lstsq) \ +_(aten, linalg_lu) \ +_(aten, linalg_lu_factor) \ +_(aten, linalg_lu_factor_ex) \ +_(aten, linalg_lu_solve) \ +_(aten, linalg_matmul) \ +_(aten, linalg_matrix_exp) \ +_(aten, linalg_matrix_norm) \ +_(aten, linalg_matrix_power) \ +_(aten, linalg_matrix_rank) \ +_(aten, linalg_multi_dot) \ +_(aten, linalg_norm) \ +_(aten, linalg_pinv) \ +_(aten, linalg_qr) \ +_(aten, linalg_slogdet) \ +_(aten, linalg_solve) \ +_(aten, linalg_solve_ex) \ +_(aten, linalg_solve_triangular) \ +_(aten, linalg_svd) \ +_(aten, linalg_svdvals) \ +_(aten, linalg_tensorinv) \ +_(aten, linalg_tensorsolve) \ +_(aten, linalg_vander) \ +_(aten, linalg_vecdot) \ +_(aten, linalg_vector_norm) \ +_(aten, linear) \ +_(aten, linear_backward) \ +_(aten, linspace) \ +_(aten, log) \ +_(aten, log10) \ +_(aten, log10_) \ +_(aten, log1p) \ +_(aten, log1p_) \ +_(aten, log2) \ +_(aten, log2_) \ +_(aten, log_) \ +_(aten, log_normal) \ +_(aten, log_normal_) \ +_(aten, log_sigmoid) \ +_(aten, 
log_sigmoid_backward) \ +_(aten, log_sigmoid_forward) \ +_(aten, log_softmax) \ +_(aten, logaddexp) \ +_(aten, logaddexp2) \ +_(aten, logcumsumexp) \ +_(aten, logdet) \ +_(aten, logical_and) \ +_(aten, logical_and_) \ +_(aten, logical_not) \ +_(aten, logical_not_) \ +_(aten, logical_or) \ +_(aten, logical_or_) \ +_(aten, logical_xor) \ +_(aten, logical_xor_) \ +_(aten, logit) \ +_(aten, logit_) \ +_(aten, logit_backward) \ +_(aten, logspace) \ +_(aten, logsumexp) \ +_(aten, lshift) \ +_(aten, lstm) \ +_(aten, lstm_cell) \ +_(aten, lstm_mps_backward) \ +_(aten, lt) \ +_(aten, lt_) \ +_(aten, lu_solve) \ +_(aten, lu_unpack) \ +_(aten, mH) \ +_(aten, mT) \ +_(aten, margin_ranking_loss) \ +_(aten, masked_fill) \ +_(aten, masked_fill_) \ +_(aten, masked_scatter) \ +_(aten, masked_scatter_) \ +_(aten, masked_select) \ +_(aten, masked_select_backward) \ +_(aten, matmul) \ +_(aten, matmul_backward) \ +_(aten, matrix_H) \ +_(aten, matrix_exp) \ +_(aten, matrix_exp_backward) \ +_(aten, matrix_power) \ +_(aten, max) \ +_(aten, max_pool1d) \ +_(aten, max_pool1d_with_indices) \ +_(aten, max_pool2d) \ +_(aten, max_pool2d_with_indices) \ +_(aten, max_pool2d_with_indices_backward) \ +_(aten, max_pool3d) \ +_(aten, max_pool3d_with_indices) \ +_(aten, max_pool3d_with_indices_backward) \ +_(aten, max_unpool2d) \ +_(aten, max_unpool3d) \ +_(aten, maximum) \ +_(aten, mean) \ +_(aten, median) \ +_(aten, meshgrid) \ +_(aten, min) \ +_(aten, minimum) \ +_(aten, miopen_batch_norm) \ +_(aten, miopen_batch_norm_backward) \ +_(aten, miopen_convolution) \ +_(aten, miopen_convolution_add_relu) \ +_(aten, miopen_convolution_relu) \ +_(aten, miopen_convolution_transpose) \ +_(aten, miopen_depthwise_convolution) \ +_(aten, miopen_rnn) \ +_(aten, miopen_rnn_backward) \ +_(aten, mish) \ +_(aten, mish_) \ +_(aten, mish_backward) \ +_(aten, mkldnn_adaptive_avg_pool2d) \ +_(aten, mkldnn_adaptive_avg_pool2d_backward) \ +_(aten, mkldnn_convolution) \ +_(aten, mkldnn_linear) \ +_(aten, mkldnn_linear_backward) \ +_(aten, mkldnn_linear_backward_input) \ +_(aten, mkldnn_linear_backward_weights) \ +_(aten, mkldnn_max_pool2d) \ +_(aten, mkldnn_max_pool2d_backward) \ +_(aten, mkldnn_max_pool3d) \ +_(aten, mkldnn_max_pool3d_backward) \ +_(aten, mkldnn_reorder_conv2d_weight) \ +_(aten, mkldnn_reorder_conv3d_weight) \ +_(aten, mm) \ +_(aten, mode) \ +_(aten, moveaxis) \ +_(aten, movedim) \ +_(aten, mps_convolution_backward) \ +_(aten, mps_convolution_transpose_backward) \ +_(aten, mps_max_pool2d_backward) \ +_(aten, mse_loss) \ +_(aten, mse_loss_backward) \ +_(aten, msort) \ +_(aten, mul) \ +_(aten, mul_) \ +_(aten, multi_margin_loss) \ +_(aten, multi_margin_loss_backward) \ +_(aten, multilabel_margin_loss) \ +_(aten, multilabel_margin_loss_backward) \ +_(aten, multilabel_margin_loss_forward) \ +_(aten, multinomial) \ +_(aten, multiply) \ +_(aten, multiply_) \ +_(aten, mv) \ +_(aten, mvlgamma) \ +_(aten, mvlgamma_) \ +_(aten, nan_to_num) \ +_(aten, nan_to_num_) \ +_(aten, nanmean) \ +_(aten, nanmedian) \ +_(aten, nanquantile) \ +_(aten, nansum) \ +_(aten, narrow) \ +_(aten, narrow_copy) \ +_(aten, native_batch_norm) \ +_(aten, native_batch_norm_backward) \ +_(aten, native_channel_shuffle) \ +_(aten, native_dropout) \ +_(aten, native_dropout_backward) \ +_(aten, native_group_norm) \ +_(aten, native_group_norm_backward) \ +_(aten, native_layer_norm) \ +_(aten, native_layer_norm_backward) \ +_(aten, native_norm) \ +_(aten, ne) \ +_(aten, ne_) \ +_(aten, neg) \ +_(aten, neg_) \ +_(aten, negative) \ +_(aten, negative_) \ +_(aten, 
nested_to_padded_tensor) \ +_(aten, new_empty) \ +_(aten, new_empty_strided) \ +_(aten, new_full) \ +_(aten, new_ones) \ +_(aten, new_zeros) \ +_(aten, nextafter) \ +_(aten, nextafter_) \ +_(aten, nll_loss) \ +_(aten, nll_loss2d) \ +_(aten, nll_loss2d_backward) \ +_(aten, nll_loss2d_forward) \ +_(aten, nll_loss_backward) \ +_(aten, nll_loss_forward) \ +_(aten, nll_loss_nd) \ +_(aten, nonzero) \ +_(aten, nonzero_numpy) \ +_(aten, norm) \ +_(aten, norm_except_dim) \ +_(aten, normal) \ +_(aten, normal_) \ +_(aten, normal_functional) \ +_(aten, not_equal) \ +_(aten, not_equal_) \ +_(aten, nuclear_norm) \ +_(aten, numpy_T) \ +_(aten, one_hot) \ +_(aten, ones) \ +_(aten, ones_like) \ +_(aten, orgqr) \ +_(aten, ormqr) \ +_(aten, outer) \ +_(aten, output_nr) \ +_(aten, pad) \ +_(aten, pad_sequence) \ +_(aten, pairwise_distance) \ +_(aten, pdist) \ +_(aten, permute) \ +_(aten, permute_copy) \ +_(aten, pin_memory) \ +_(aten, pinverse) \ +_(aten, pixel_shuffle) \ +_(aten, pixel_unshuffle) \ +_(aten, poisson) \ +_(aten, poisson_nll_loss) \ +_(aten, polar) \ +_(aten, polygamma) \ +_(aten, polygamma_) \ +_(aten, positive) \ +_(aten, pow) \ +_(aten, pow_) \ +_(aten, prelu) \ +_(aten, prelu_backward) \ +_(aten, prod) \ +_(aten, promote_types) \ +_(aten, put) \ +_(aten, put_) \ +_(aten, q_per_channel_axis) \ +_(aten, q_per_channel_scales) \ +_(aten, q_per_channel_zero_points) \ +_(aten, q_scale) \ +_(aten, q_zero_point) \ +_(aten, qr) \ +_(aten, qscheme) \ +_(aten, quantile) \ +_(aten, quantize_per_channel) \ +_(aten, quantize_per_tensor) \ +_(aten, quantize_per_tensor_dynamic) \ +_(aten, quantized_batch_norm) \ +_(aten, quantized_gru_cell) \ +_(aten, quantized_lstm_cell) \ +_(aten, quantized_max_pool1d) \ +_(aten, quantized_max_pool2d) \ +_(aten, quantized_rnn_relu_cell) \ +_(aten, quantized_rnn_tanh_cell) \ +_(aten, rad2deg) \ +_(aten, rad2deg_) \ +_(aten, rand) \ +_(aten, rand_like) \ +_(aten, randint) \ +_(aten, randint_like) \ +_(aten, randn) \ +_(aten, randn_like) \ +_(aten, random) \ +_(aten, random_) \ +_(aten, randperm) \ +_(aten, range) \ +_(aten, ravel) \ +_(aten, real) \ +_(aten, reciprocal) \ +_(aten, reciprocal_) \ +_(aten, record_stream) \ +_(aten, refine_names) \ +_(aten, reflection_pad1d) \ +_(aten, reflection_pad1d_backward) \ +_(aten, reflection_pad2d) \ +_(aten, reflection_pad2d_backward) \ +_(aten, reflection_pad3d) \ +_(aten, reflection_pad3d_backward) \ +_(aten, relu) \ +_(aten, relu6) \ +_(aten, relu6_) \ +_(aten, relu_) \ +_(aten, remainder) \ +_(aten, remainder_) \ +_(aten, rename) \ +_(aten, rename_) \ +_(aten, renorm) \ +_(aten, renorm_) \ +_(aten, repeat) \ +_(aten, repeat_interleave) \ +_(aten, replication_pad1d) \ +_(aten, replication_pad1d_backward) \ +_(aten, replication_pad2d) \ +_(aten, replication_pad2d_backward) \ +_(aten, replication_pad3d) \ +_(aten, replication_pad3d_backward) \ +_(aten, requires_grad) \ +_(aten, requires_grad_) \ +_(aten, reshape) \ +_(aten, reshape_as) \ +_(aten, resize) \ +_(aten, resize_) \ +_(aten, resize_as) \ +_(aten, resize_as_) \ +_(aten, resize_as_sparse) \ +_(aten, resize_as_sparse_) \ +_(aten, resolve_conj) \ +_(aten, resolve_neg) \ +_(aten, result_type) \ +_(aten, retain_grad) \ +_(aten, retains_grad) \ +_(aten, rnn_relu) \ +_(aten, rnn_relu_cell) \ +_(aten, rnn_tanh) \ +_(aten, rnn_tanh_cell) \ +_(aten, roll) \ +_(aten, rot90) \ +_(aten, round) \ +_(aten, round_) \ +_(aten, row_indices) \ +_(aten, row_indices_copy) \ +_(aten, row_stack) \ +_(aten, rrelu) \ +_(aten, rrelu_) \ +_(aten, rrelu_with_noise) \ +_(aten, rrelu_with_noise_) \ 
+_(aten, rrelu_with_noise_backward) \ +_(aten, rshift) \ +_(aten, rsqrt) \ +_(aten, rsqrt_) \ +_(aten, rsub) \ +_(aten, scalar_tensor) \ +_(aten, scatter) \ +_(aten, scatter_) \ +_(aten, scatter_add) \ +_(aten, scatter_add_) \ +_(aten, scatter_reduce) \ +_(aten, scatter_reduce_) \ +_(aten, searchsorted) \ +_(aten, segment_reduce) \ +_(aten, select) \ +_(aten, select_backward) \ +_(aten, select_copy) \ +_(aten, select_scatter) \ +_(aten, selu) \ +_(aten, selu_) \ +_(aten, set) \ +_(aten, set_) \ +_(aten, set_data) \ +_(aten, sgn) \ +_(aten, sgn_) \ +_(aten, sigmoid) \ +_(aten, sigmoid_) \ +_(aten, sigmoid_backward) \ +_(aten, sign) \ +_(aten, sign_) \ +_(aten, signbit) \ +_(aten, silu) \ +_(aten, silu_) \ +_(aten, silu_backward) \ +_(aten, sin) \ +_(aten, sin_) \ +_(aten, sinc) \ +_(aten, sinc_) \ +_(aten, sinh) \ +_(aten, sinh_) \ +_(aten, size) \ +_(aten, slice) \ +_(aten, slice_backward) \ +_(aten, slice_copy) \ +_(aten, slice_scatter) \ +_(aten, slogdet) \ +_(aten, slow_conv3d) \ +_(aten, slow_conv3d_forward) \ +_(aten, slow_conv_dilated2d) \ +_(aten, slow_conv_dilated3d) \ +_(aten, slow_conv_transpose2d) \ +_(aten, slow_conv_transpose3d) \ +_(aten, smm) \ +_(aten, smooth_l1_loss) \ +_(aten, smooth_l1_loss_backward) \ +_(aten, soft_margin_loss) \ +_(aten, soft_margin_loss_backward) \ +_(aten, softmax) \ +_(aten, softplus) \ +_(aten, softplus_backward) \ +_(aten, softshrink) \ +_(aten, softshrink_backward) \ +_(aten, sort) \ +_(aten, sparse_bsc_tensor) \ +_(aten, sparse_bsr_tensor) \ +_(aten, sparse_compressed_tensor) \ +_(aten, sparse_coo_tensor) \ +_(aten, sparse_csc_tensor) \ +_(aten, sparse_csr_tensor) \ +_(aten, sparse_dim) \ +_(aten, sparse_mask) \ +_(aten, sparse_resize) \ +_(aten, sparse_resize_) \ +_(aten, sparse_resize_and_clear) \ +_(aten, sparse_resize_and_clear_) \ +_(aten, sparse_sampled_addmm) \ +_(aten, special_airy_ai) \ +_(aten, special_bessel_j0) \ +_(aten, special_bessel_j1) \ +_(aten, special_bessel_y0) \ +_(aten, special_bessel_y1) \ +_(aten, special_chebyshev_polynomial_t) \ +_(aten, special_chebyshev_polynomial_u) \ +_(aten, special_chebyshev_polynomial_v) \ +_(aten, special_chebyshev_polynomial_w) \ +_(aten, special_digamma) \ +_(aten, special_entr) \ +_(aten, special_erf) \ +_(aten, special_erfc) \ +_(aten, special_erfcx) \ +_(aten, special_erfinv) \ +_(aten, special_exp2) \ +_(aten, special_expit) \ +_(aten, special_expm1) \ +_(aten, special_gammainc) \ +_(aten, special_gammaincc) \ +_(aten, special_gammaln) \ +_(aten, special_hermite_polynomial_h) \ +_(aten, special_hermite_polynomial_he) \ +_(aten, special_i0) \ +_(aten, special_i0e) \ +_(aten, special_i1) \ +_(aten, special_i1e) \ +_(aten, special_laguerre_polynomial_l) \ +_(aten, special_legendre_polynomial_p) \ +_(aten, special_log1p) \ +_(aten, special_log_ndtr) \ +_(aten, special_log_softmax) \ +_(aten, special_logit) \ +_(aten, special_logsumexp) \ +_(aten, special_modified_bessel_i0) \ +_(aten, special_modified_bessel_i1) \ +_(aten, special_modified_bessel_k0) \ +_(aten, special_modified_bessel_k1) \ +_(aten, special_multigammaln) \ +_(aten, special_ndtr) \ +_(aten, special_ndtri) \ +_(aten, special_polygamma) \ +_(aten, special_psi) \ +_(aten, special_round) \ +_(aten, special_scaled_modified_bessel_k0) \ +_(aten, special_scaled_modified_bessel_k1) \ +_(aten, special_shifted_chebyshev_polynomial_t) \ +_(aten, special_shifted_chebyshev_polynomial_u) \ +_(aten, special_shifted_chebyshev_polynomial_v) \ +_(aten, special_shifted_chebyshev_polynomial_w) \ +_(aten, special_sinc) \ +_(aten, special_softmax) 
\ +_(aten, special_spherical_bessel_j0) \ +_(aten, special_xlog1py) \ +_(aten, special_xlogy) \ +_(aten, special_zeta) \ +_(aten, split) \ +_(aten, split_copy) \ +_(aten, split_with_sizes) \ +_(aten, split_with_sizes_copy) \ +_(aten, sqrt) \ +_(aten, sqrt_) \ +_(aten, square) \ +_(aten, square_) \ +_(aten, squeeze) \ +_(aten, squeeze_) \ +_(aten, squeeze_copy) \ +_(aten, sspaddmm) \ +_(aten, stack) \ +_(aten, std) \ +_(aten, std_mean) \ +_(aten, stft) \ +_(aten, stride) \ +_(aten, sub) \ +_(aten, sub_) \ +_(aten, subtract) \ +_(aten, subtract_) \ +_(aten, sum) \ +_(aten, sum_to_size) \ +_(aten, svd) \ +_(aten, swapaxes) \ +_(aten, swapaxes_) \ +_(aten, swapdims) \ +_(aten, swapdims_) \ +_(aten, symeig) \ +_(aten, t) \ +_(aten, t_) \ +_(aten, t_copy) \ +_(aten, take) \ +_(aten, take_along_dim) \ +_(aten, tan) \ +_(aten, tan_) \ +_(aten, tanh) \ +_(aten, tanh_) \ +_(aten, tanh_backward) \ +_(aten, tensor_split) \ +_(aten, tensordot) \ +_(aten, thnn_conv2d) \ +_(aten, threshold) \ +_(aten, threshold_) \ +_(aten, threshold_backward) \ +_(aten, tile) \ +_(aten, to) \ +_(aten, to_dense) \ +_(aten, to_dense_backward) \ +_(aten, to_mkldnn) \ +_(aten, to_mkldnn_backward) \ +_(aten, to_padded_tensor) \ +_(aten, to_sparse) \ +_(aten, to_sparse_bsc) \ +_(aten, to_sparse_bsr) \ +_(aten, to_sparse_csc) \ +_(aten, to_sparse_csr) \ +_(aten, topk) \ +_(aten, trace) \ +_(aten, trace_backward) \ +_(aten, transpose) \ +_(aten, transpose_) \ +_(aten, transpose_copy) \ +_(aten, trapezoid) \ +_(aten, trapz) \ +_(aten, triangular_solve) \ +_(aten, tril) \ +_(aten, tril_) \ +_(aten, tril_indices) \ +_(aten, triplet_margin_loss) \ +_(aten, triu) \ +_(aten, triu_) \ +_(aten, triu_indices) \ +_(aten, true_divide) \ +_(aten, true_divide_) \ +_(aten, trunc) \ +_(aten, trunc_) \ +_(aten, type_as) \ +_(aten, unbind) \ +_(aten, unbind_copy) \ +_(aten, unflatten) \ +_(aten, unflatten_dense_tensors) \ +_(aten, unfold) \ +_(aten, unfold_backward) \ +_(aten, unfold_copy) \ +_(aten, uniform) \ +_(aten, uniform_) \ +_(aten, unique_consecutive) \ +_(aten, unique_dim) \ +_(aten, unique_dim_consecutive) \ +_(aten, unsafe_chunk) \ +_(aten, unsafe_split) \ +_(aten, unsafe_split_with_sizes) \ +_(aten, unsqueeze) \ +_(aten, unsqueeze_) \ +_(aten, unsqueeze_copy) \ +_(aten, upsample_bicubic2d) \ +_(aten, upsample_bicubic2d_backward) \ +_(aten, upsample_bilinear2d) \ +_(aten, upsample_bilinear2d_backward) \ +_(aten, upsample_linear1d) \ +_(aten, upsample_linear1d_backward) \ +_(aten, upsample_nearest1d) \ +_(aten, upsample_nearest1d_backward) \ +_(aten, upsample_nearest2d) \ +_(aten, upsample_nearest2d_backward) \ +_(aten, upsample_nearest3d) \ +_(aten, upsample_nearest3d_backward) \ +_(aten, upsample_trilinear3d) \ +_(aten, upsample_trilinear3d_backward) \ +_(aten, value_selecting_reduction_backward) \ +_(aten, values) \ +_(aten, values_copy) \ +_(aten, vander) \ +_(aten, var) \ +_(aten, var_mean) \ +_(aten, vdot) \ +_(aten, view) \ +_(aten, view_as) \ +_(aten, view_as_complex) \ +_(aten, view_as_complex_copy) \ +_(aten, view_as_real) \ +_(aten, view_as_real_copy) \ +_(aten, view_copy) \ +_(aten, vsplit) \ +_(aten, vstack) \ +_(aten, where) \ +_(aten, xlogy) \ +_(aten, xlogy_) \ +_(aten, zero) \ +_(aten, zero_) \ +_(aten, zeros) \ +_(aten, zeros_like) + +#define FORALL_ATTR_BASE_SYMBOLS(_) \ +_(attr, A) \ +_(attr, B) \ +_(attr, C) \ +_(attr, H) \ +_(attr, HxW) \ +_(attr, K) \ +_(attr, L) \ +_(attr, LD) \ +_(attr, LU) \ +_(attr, LU_data) \ +_(attr, LU_pivots) \ +_(attr, M) \ +_(attr, N) \ +_(attr, P) \ +_(attr, Q) \ +_(attr, R) \ 
+_(attr, S) \ +_(attr, U) \ +_(attr, UPLO) \ +_(attr, V) \ +_(attr, Vh) \ +_(attr, W) \ +_(attr, X) \ +_(attr, a) \ +_(attr, abs) \ +_(attr, accumulate) \ +_(attr, addends) \ +_(attr, adjoint) \ +_(attr, align_corners) \ +_(attr, allow_tf32) \ +_(attr, alpha) \ +_(attr, amsgrad) \ +_(attr, anchor) \ +_(attr, angle) \ +_(attr, api_name) \ +_(attr, append) \ +_(attr, approximate) \ +_(attr, arg1) \ +_(attr, arg2) \ +_(attr, arg3) \ +_(attr, assume_unique) \ +_(attr, atol) \ +_(attr, attn_mask) \ +_(attr, average_attn_weights) \ +_(attr, averaging_const) \ +_(attr, aweights) \ +_(attr, axis) \ +_(attr, axis0) \ +_(attr, axis1) \ +_(attr, b) \ +_(attr, b_hh) \ +_(attr, b_ih) \ +_(attr, bag_size) \ +_(attr, base) \ +_(attr, batch1) \ +_(attr, batch2) \ +_(attr, batch_dim) \ +_(attr, batch_first) \ +_(attr, batch_size) \ +_(attr, batch_sizes) \ +_(attr, benchmark) \ +_(attr, beta) \ +_(attr, beta1) \ +_(attr, beta2) \ +_(attr, bias) \ +_(attr, bias_defined) \ +_(attr, bias_g) \ +_(attr, bias_sizes) \ +_(attr, bidirectional) \ +_(attr, bin_edges) \ +_(attr, bins) \ +_(attr, bit_width) \ +_(attr, blank) \ +_(attr, blocksize) \ +_(attr, boundaries) \ +_(attr, buffer) \ +_(attr, ccol_indices) \ +_(attr, cdim) \ +_(attr, cdist) \ +_(attr, ceil_mode) \ +_(attr, cell_state_fwd) \ +_(attr, center) \ +_(attr, ch_axis) \ +_(attr, check_errors) \ +_(attr, chunks) \ +_(attr, coalesced) \ +_(attr, coefficients) \ +_(attr, col) \ +_(attr, col_indices) \ +_(attr, col_offsets) \ +_(attr, col_offsets_hh) \ +_(attr, col_offsets_ih) \ +_(attr, compressed_idx) \ +_(attr, compressed_indices) \ +_(attr, compressed_indices_dtype) \ +_(attr, compute_mode) \ +_(attr, compute_uv) \ +_(attr, compute_v) \ +_(attr, condition) \ +_(attr, copy) \ +_(attr, correction) \ +_(attr, count) \ +_(attr, count_include_pad) \ +_(attr, counts) \ +_(attr, cpu_dtype) \ +_(attr, cpu_enabled) \ +_(attr, cpu_nested_shape_example) \ +_(attr, create_graph) \ +_(attr, crow_indices) \ +_(attr, cuda_dtype) \ +_(attr, cuda_enabled) \ +_(attr, cudnn_enable) \ +_(attr, cudnn_enabled) \ +_(attr, cum_seq_k) \ +_(attr, cum_seq_q) \ +_(attr, cx) \ +_(attr, cy) \ +_(attr, d) \ +_(attr, data) \ +_(attr, decimals) \ +_(attr, delta) \ +_(attr, dense) \ +_(attr, dense_dim) \ +_(attr, density) \ +_(attr, descending) \ +_(attr, destination) \ +_(attr, deterministic) \ +_(attr, device) \ +_(attr, device_index) \ +_(attr, dgrad_glu) \ +_(attr, diagonal) \ +_(attr, diagonals) \ +_(attr, dilation) \ +_(attr, dim) \ +_(attr, dim0) \ +_(attr, dim1) \ +_(attr, dim2) \ +_(attr, dimension) \ +_(attr, dims) \ +_(attr, dims_other) \ +_(attr, dims_self) \ +_(attr, divisor_override) \ +_(attr, downscale_factor) \ +_(attr, driver) \ +_(attr, dropout) \ +_(attr, dropout_p) \ +_(attr, dropout_seed) \ +_(attr, dropout_state) \ +_(attr, dst) \ +_(attr, dtype) \ +_(attr, dual) \ +_(attr, dummy) \ +_(attr, dx) \ +_(attr, e) \ +_(attr, edge_order) \ +_(attr, eigenvalues) \ +_(attr, eigenvectors) \ +_(attr, eigvals) \ +_(attr, eigvecs) \ +_(attr, element) \ +_(attr, elements) \ +_(attr, ellipsis_idx) \ +_(attr, embed_dim) \ +_(attr, end) \ +_(attr, end_dim) \ +_(attr, eps) \ +_(attr, epsilon) \ +_(attr, equal_nan) \ +_(attr, equation) \ +_(attr, exp_avg_sqs) \ +_(attr, exp_avgs) \ +_(attr, expand1) \ +_(attr, expand2) \ +_(attr, expand3) \ +_(attr, exponent) \ +_(attr, exponential_average_factor) \ +_(attr, fake_quant_enabled) \ +_(attr, fake_quant_on) \ +_(attr, ffn_bias_1) \ +_(attr, ffn_bias_2) \ +_(attr, ffn_weight_1) \ +_(attr, ffn_weight_2) \ +_(attr, filename) \ +_(attr, 
fill_value) \ +_(attr, flat) \ +_(attr, forward) \ +_(attr, found_inf) \ +_(attr, from) \ +_(attr, full) \ +_(attr, full_matrices) \ +_(attr, fuse_transform_0213) \ +_(attr, fweights) \ +_(attr, g) \ +_(attr, gO) \ +_(attr, generator) \ +_(attr, ggI) \ +_(attr, ggW) \ +_(attr, ggb) \ +_(attr, glu) \ +_(attr, grad) \ +_(attr, grad_bias) \ +_(attr, grad_cy) \ +_(attr, grad_factor) \ +_(attr, grad_glu) \ +_(attr, grad_hy) \ +_(attr, grad_in) \ +_(attr, grad_input) \ +_(attr, grad_out) \ +_(attr, grad_output) \ +_(attr, grad_scale) \ +_(attr, grad_w) \ +_(attr, grad_weight) \ +_(attr, grad_x) \ +_(attr, grad_y) \ +_(attr, gradient) \ +_(attr, grads) \ +_(attr, grid) \ +_(attr, group) \ +_(attr, groups) \ +_(attr, growth_interval) \ +_(attr, growth_tracker) \ +_(attr, half_to_float) \ +_(attr, has_bias) \ +_(attr, has_biases) \ +_(attr, hermitian) \ +_(attr, hidden_bias) \ +_(attr, hidden_gates) \ +_(attr, hidden_size) \ +_(attr, high) \ +_(attr, hist) \ +_(attr, hop_length) \ +_(attr, hx) \ +_(attr, i1) \ +_(attr, i2) \ +_(attr, i3) \ +_(attr, ignore_index) \ +_(attr, imag) \ +_(attr, impl_index) \ +_(attr, implicit) \ +_(attr, include_last_offset) \ +_(attr, include_self) \ +_(attr, incr_key) \ +_(attr, incr_value) \ +_(attr, increasing) \ +_(attr, ind) \ +_(attr, index) \ +_(attr, indexing) \ +_(attr, indices) \ +_(attr, info) \ +_(attr, initial) \ +_(attr, input) \ +_(attr, input1) \ +_(attr, input2) \ +_(attr, input3) \ +_(attr, input_bias) \ +_(attr, input_dtype) \ +_(attr, input_g) \ +_(attr, input_gates) \ +_(attr, input_lengths) \ +_(attr, input_scale) \ +_(attr, input_size) \ +_(attr, input_sizes) \ +_(attr, inputs) \ +_(attr, interpolation) \ +_(attr, interpolation_mode) \ +_(attr, inv_scale) \ +_(attr, inverse) \ +_(attr, invert) \ +_(attr, invstd) \ +_(attr, is_causal) \ +_(attr, is_crow) \ +_(attr, is_matrix) \ +_(attr, is_result) \ +_(attr, is_target) \ +_(attr, k) \ +_(attr, keepdim) \ +_(attr, kernel_size) \ +_(attr, key) \ +_(attr, label_smoothing) \ +_(attr, lambd) \ +_(attr, largest) \ +_(attr, last_dim_size) \ +_(attr, layout) \ +_(attr, left) \ +_(attr, length) \ +_(attr, lengths) \ +_(attr, level) \ +_(attr, like) \ +_(attr, list) \ +_(attr, log_alpha) \ +_(attr, log_input) \ +_(attr, log_probs) \ +_(attr, log_target) \ +_(attr, logabsdet) \ +_(attr, low) \ +_(attr, lower) \ +_(attr, lr) \ +_(attr, ltm) \ +_(attr, m) \ +_(attr, mantissa) \ +_(attr, margin) \ +_(attr, mask) \ +_(attr, mask_check) \ +_(attr, mask_indices) \ +_(attr, mask_type) \ +_(attr, mat) \ +_(attr, mat1) \ +_(attr, mat2) \ +_(attr, matrices) \ +_(attr, max) \ +_(attr, max_exp_avg_sqs) \ +_(attr, max_k) \ +_(attr, max_norm) \ +_(attr, max_q) \ +_(attr, max_size) \ +_(attr, max_val) \ +_(attr, max_values) \ +_(attr, maximize) \ +_(attr, maximum_indices) \ +_(attr, maxnorm) \ +_(attr, mean) \ +_(attr, mean_dy) \ +_(attr, mean_dy_xmu) \ +_(attr, median) \ +_(attr, memory_format) \ +_(attr, min) \ +_(attr, min_indices) \ +_(attr, min_val) \ +_(attr, minlength) \ +_(attr, mode) \ +_(attr, momentum) \ +_(attr, n) \ +_(attr, n_bins) \ +_(attr, n_fft) \ +_(attr, names) \ +_(attr, nan) \ +_(attr, need_attn_weights) \ +_(attr, need_weights) \ +_(attr, neg_log_likelihood) \ +_(attr, negative) \ +_(attr, negative_slope) \ +_(attr, neginf) \ +_(attr, nested_size) \ +_(attr, nested_strides) \ +_(attr, new_data) \ +_(attr, nnz) \ +_(attr, noise) \ +_(attr, non_blocking) \ +_(attr, norm) \ +_(attr, norm_bias_1) \ +_(attr, norm_bias_2) \ +_(attr, norm_first) \ +_(attr, norm_type) \ +_(attr, norm_weight_1) \ +_(attr, 
norm_weight_2) \ +_(attr, normalization) \ +_(attr, normalized) \ +_(attr, normalized_shape) \ +_(attr, nt_example) \ +_(attr, num_classes) \ +_(attr, num_generated) \ +_(attr, num_groups) \ +_(attr, num_head) \ +_(attr, num_heads) \ +_(attr, num_layers) \ +_(attr, num_samples) \ +_(attr, num_weights) \ +_(attr, numel) \ +_(attr, observer_on) \ +_(attr, offset) \ +_(attr, offset2bag) \ +_(attr, offsets) \ +_(attr, onesided) \ +_(attr, ord) \ +_(attr, order) \ +_(attr, other) \ +_(attr, out) \ +_(attr, out0) \ +_(attr, out1) \ +_(attr, out2) \ +_(attr, out3) \ +_(attr, out4) \ +_(attr, out_dim) \ +_(attr, out_int32) \ +_(attr, outdim) \ +_(attr, output) \ +_(attr, output_mask) \ +_(attr, output_padding) \ +_(attr, output_scale) \ +_(attr, output_size) \ +_(attr, output_zero_point) \ +_(attr, p) \ +_(attr, packed) \ +_(attr, packed_hh) \ +_(attr, packed_ih) \ +_(attr, packed_weight) \ +_(attr, pad) \ +_(attr, pad_mode) \ +_(attr, padded) \ +_(attr, padding) \ +_(attr, padding_idx) \ +_(attr, padding_mode) \ +_(attr, padding_value) \ +_(attr, params) \ +_(attr, path) \ +_(attr, pdist) \ +_(attr, per_row_fake_quant) \ +_(attr, per_sample_weights) \ +_(attr, periodic) \ +_(attr, pin_memory) \ +_(attr, pivot) \ +_(attr, pivots) \ +_(attr, plain_idx) \ +_(attr, plain_indices) \ +_(attr, pos_weight) \ +_(attr, posinf) \ +_(attr, positive) \ +_(attr, pow) \ +_(attr, prepend) \ +_(attr, primal) \ +_(attr, prob) \ +_(attr, proj_bias) \ +_(attr, proj_size) \ +_(attr, proj_weight) \ +_(attr, q) \ +_(attr, qkv) \ +_(attr, qkv_bias) \ +_(attr, qkv_weight) \ +_(attr, qtensor) \ +_(attr, quant_max) \ +_(attr, quant_min) \ +_(attr, quasi) \ +_(attr, query) \ +_(attr, r) \ +_(attr, random_samples) \ +_(attr, range) \ +_(attr, rank) \ +_(attr, ratio) \ +_(attr, rcond) \ +_(attr, real) \ +_(attr, reduce) \ +_(attr, reduce_range) \ +_(attr, reduction) \ +_(attr, repeats) \ +_(attr, replacement) \ +_(attr, requires_grad) \ +_(attr, reserve) \ +_(attr, reserveSpace) \ +_(attr, reservedSpace) \ +_(attr, residuals) \ +_(attr, result) \ +_(attr, retain_graph) \ +_(attr, return_complex) \ +_(attr, return_counts) \ +_(attr, return_inverse) \ +_(attr, right) \ +_(attr, rounding_mode) \ +_(attr, row) \ +_(attr, row_indices) \ +_(attr, rstd) \ +_(attr, rtol) \ +_(attr, running_max) \ +_(attr, running_mean) \ +_(attr, running_min) \ +_(attr, running_var) \ +_(attr, s) \ +_(attr, save_invstd) \ +_(attr, save_mean) \ +_(attr, save_var) \ +_(attr, save_var_transform) \ +_(attr, saved_g) \ +_(attr, saved_norms) \ +_(attr, saved_v) \ +_(attr, scalar) \ +_(attr, scalar1) \ +_(attr, scalar2) \ +_(attr, scalars) \ +_(attr, scale) \ +_(attr, scale_backoff_factor) \ +_(attr, scale_factors) \ +_(attr, scale_grad_by_freq) \ +_(attr, scale_growth_factor) \ +_(attr, scale_hh) \ +_(attr, scale_ih) \ +_(attr, scales) \ +_(attr, scales_d) \ +_(attr, scales_h) \ +_(attr, scales_w) \ +_(attr, sections) \ +_(attr, self) \ +_(attr, self_is_result) \ +_(attr, self_num_batch_dims) \ +_(attr, self_or_result) \ +_(attr, self_sizes) \ +_(attr, sequences) \ +_(attr, shape) \ +_(attr, shared) \ +_(attr, shifts) \ +_(attr, side) \ +_(attr, sigma) \ +_(attr, sign) \ +_(attr, singular_values) \ +_(attr, size) \ +_(attr, sizes) \ +_(attr, sobolstate) \ +_(attr, solution) \ +_(attr, some) \ +_(attr, sorted) \ +_(attr, sorted_sequence) \ +_(attr, sorter) \ +_(attr, source) \ +_(attr, spacing) \ +_(attr, sparse) \ +_(attr, sparse_dim) \ +_(attr, sparse_grad) \ +_(attr, split_size) \ +_(attr, split_sizes) \ +_(attr, src) \ +_(attr, stable) \ +_(attr, start) 
\ +_(attr, start_dim) \ +_(attr, state_steps) \ +_(attr, std) \ +_(attr, step) \ +_(attr, steps) \ +_(attr, storage_offset) \ +_(attr, stride) \ +_(attr, sumdim) \ +_(attr, swap) \ +_(attr, symmetric_quant) \ +_(attr, t) \ +_(attr, tangent) \ +_(attr, target) \ +_(attr, target_lengths) \ +_(attr, targets) \ +_(attr, tau) \ +_(attr, tensor) \ +_(attr, tensor1) \ +_(attr, tensor2) \ +_(attr, tensor_indices_or_sections) \ +_(attr, tensors) \ +_(attr, test_element) \ +_(attr, test_elements) \ +_(attr, the_template) \ +_(attr, theta) \ +_(attr, threshold) \ +_(attr, to) \ +_(attr, tol) \ +_(attr, total) \ +_(attr, total_length) \ +_(attr, total_weight) \ +_(attr, train) \ +_(attr, training) \ +_(attr, transpose) \ +_(attr, transposed) \ +_(attr, type1) \ +_(attr, type2) \ +_(attr, unbiased) \ +_(attr, unitriangular) \ +_(attr, unpack_data) \ +_(attr, unpack_pivots) \ +_(attr, unroll_dim) \ +_(attr, unsafe) \ +_(attr, upper) \ +_(attr, upscale_factor) \ +_(attr, use_gelu) \ +_(attr, use_input_stats) \ +_(attr, v) \ +_(attr, value) \ +_(attr, values) \ +_(attr, var) \ +_(attr, vec) \ +_(attr, vec1) \ +_(attr, vec2) \ +_(attr, w_hh) \ +_(attr, w_ih) \ +_(attr, weight) \ +_(attr, weight_arr) \ +_(attr, weight_buf) \ +_(attr, weight_decay) \ +_(attr, weight_g) \ +_(attr, weight_scale) \ +_(attr, weight_stride0) \ +_(attr, weight_zero_point) \ +_(attr, weights) \ +_(attr, win_length) \ +_(attr, window) \ +_(attr, window_length) \ +_(attr, with_replacement) \ +_(attr, workspace) \ +_(attr, wrap) \ +_(attr, x) \ +_(attr, x1) \ +_(attr, x2) \ +_(attr, y) \ +_(attr, z) \ +_(attr, z_state) \ +_(attr, zero_infinity) \ +_(attr, zero_point) \ +_(attr, zero_point_hh) \ +_(attr, zero_point_ih) \ +_(attr, zero_points)
diff --git a/voice_bridge/torch/include/ATen/core/blob.h b/voice_bridge/torch/include/ATen/core/blob.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc7a181a0b88d87aa757f4aaad3057daf3e38d31
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/blob.h
@@ -0,0 +1,209 @@
+#pragma once
+
+#include <cstddef>
+#include <sstream>
+#include <type_traits>
+#include <typeinfo>
+#include <vector>
+
+#include <c10/util/intrusive_ptr.h>
+#include <c10/util/typeid.h>
+#include <c10/macros/Macros.h>
+
+namespace caffe2 {
+
+class Tensor;
+
+/**
+ * @brief Blob is a general container that hosts a typed pointer.
+ *
+ * A Blob hosts a pointer as well as its type, and takes charge of deleting it
+ * properly when the blob is deallocated or re-allocated with a new type. A blob
+ * could contain anything, although the most common case is to contain a Tensor.
+ */
+class TORCH_API Blob final : public c10::intrusive_ptr_target {
+ public:
+  /**
+   * Initializes an empty Blob.
+   */
+  Blob() noexcept : meta_(), pointer_(nullptr), has_ownership_(false) {}
+  ~Blob() {
+    Reset();
+  }
+
+  Blob(Blob&& other) noexcept : Blob() {
+    swap(other);
+  }
+
+  Blob& operator=(Blob&& other) noexcept {
+    Blob(std::move(other)).swap(*this);
+    return *this;
+  }
+
+  /**
+   * Checks if the content stored in the blob is of type T.
+   */
+  template <class T>
+  bool IsType() const noexcept {
+    return meta_.Match<T>();
+  }
+
+  /**
+   * Returns the meta info of the blob.
+   */
+  const TypeMeta meta() const noexcept {
+    return meta_;
+  }
+
+  /**
+   * Returns a printable typename of the blob.
+   */
+  c10::string_view TypeName() const noexcept {
+    return meta_.name();
+  }
+
+  /**
+   * @brief Gets the const reference of the stored object. The code checks if
+   * the stored object is of the desired type.
+   */
+  // TODO(jerryzh): add a Get(DeviceType) function?
+  template <class T>
+  const T& Get() const {
+    TORCH_INTERNAL_ASSERT(
+        IsType<T>(),
+        "wrong type for the Blob instance. Blob contains ",
+        meta_.name(),
+        " while caller expects ",
+        TypeMeta::TypeName<T>());
+    // TODO: after we add Get<Tensor>(DeviceType)
+    // and changed all the callsites, we can add
+    // a static assert here to enforce T != Tensor
+    // NOLINTNEXTLINE(clang-analyzer-core.uninitialized.UndefReturn)
+    return *static_cast<const T*>(pointer_);
+  }
+
+  const void* GetRaw() const noexcept {
+    return pointer_;
+  }
+  void* GetRaw() noexcept {
+    return pointer_;
+  }
+
+  /**
+   * @brief Gets a mutable pointer to the stored object.
+   *
+   * If the current object is not of the right type, a new object is created
+   * and the old object is freed. Note that type T should have a default
+   * constructor. Otherwise, create the object yourself first, and use
+   * Reset().
+   */
+  template <class T>
+  T* GetMutable() {
+    static_assert(
+        std::is_default_constructible<T>::value,
+        "GetMutable can't be called with non-default-constructible types. "
+        "Try using specialized methods");
+    if (IsType<T>()) {
+      return static_cast<T*>(pointer_);
+    } else {
+      // TODO Re-enable logging
+      // VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<T>();
+      return Reset<T>(new T());
+    }
+  }
+
+  template <class T>
+  T* GetMutableOrNull() {
+    if (IsType<T>()) {
+      return static_cast<T*>(pointer_);
+    } else {
+      return nullptr;
+    }
+  }
+
+  /**
+   * Sets the underlying object to the allocated one. The Blob then takes over
+   * the ownership of the passed in pointer. If there is already an object in
+   * the Blob, the old object is freed.
+   *
+   * This is used when the underlying class T does not have a default ctor, or
+   * complex initialization needs to be done outside the blob.
+   */
+  template <class T>
+  T* Reset(T* allocated) {
+    free_();
+    meta_ = TypeMeta::Make<T>();
+    pointer_ = static_cast<void*>(allocated);
+    has_ownership_ = true;
+    return allocated;
+  }
+
+  /**
+   * Sets the underlying object to the allocated one, but does not take over
+   * the ownership of the passed in pointer. If there is already an object in
+   * the Blob, the old object is freed.
+   *
+   * Unlike Reset, this does not take over the ownership of the pointer and the
+   * caller is responsible for making sure that the lifetime of the allocated
+   * blob outlasts the lifetime of any access to this blob, until another Reset
+   * call is made or the blob is destructed.
+   */
+  template <class T>
+  typename std::remove_const<T>::type* ShareExternal(
+      typename std::remove_const<T>::type* allocated) {
+    return static_cast<T*>(ShareExternal(
+        static_cast<void*>(allocated),
+        TypeMeta::Make<typename std::remove_const<T>::type>()));
+  }
+
+  void* ShareExternal(void* allocated, const TypeMeta meta) {
+    free_();
+    meta_ = meta;
+    pointer_ = allocated;
+    has_ownership_ = false;
+    return allocated;
+  }
+
+  /**
+   * Resets the Blob to an empty one.
+   */
+  void Reset() {
+    free_();
+    pointer_ = nullptr;
+    meta_ = TypeMeta();
+    has_ownership_ = false;
+  }
+
+  /**
+   * @brief Swaps the underlying storage of two blobs.
+   */
+  void swap(Blob& rhs) {
+    using std::swap;
+    swap(meta_, rhs.meta_);
+    swap(pointer_, rhs.pointer_);
+    swap(has_ownership_, rhs.has_ownership_);
+  }
+
+ private:
+  void free_() {
+    if (has_ownership_ && pointer_ != nullptr) {
+      (*meta_.deleteFn())(pointer_);
+    }
+  }
+
+  TypeMeta meta_;
+  void* pointer_;
+  bool has_ownership_;
+
+  C10_DISABLE_COPY_AND_ASSIGN(Blob);
+};
+
+inline void swap(Blob& lhs, Blob& rhs) {
+  lhs.swap(rhs);
+}
+
+inline std::ostream& operator<<(std::ostream& out, const Blob& v) {
+  return out << "Blob[" << v.TypeName() << "]";
+}
+
+} // namespace caffe2
diff --git a/voice_bridge/torch/include/ATen/core/boxing/BoxedKernel.h b/voice_bridge/torch/include/ATen/core/boxing/BoxedKernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..829031f423eb2783837c74d0c1641108fa165ef5
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/boxing/BoxedKernel.h
@@ -0,0 +1,176 @@
+#pragma once
+
+#include <ATen/core/boxing/OperatorKernel.h>
+#include <c10/core/DispatchKeySet.h>
+#include <c10/util/intrusive_ptr.h>
+
+namespace c10 {
+
+struct IValue;
+using Stack = std::vector<IValue>;
+
+class OperatorHandle;
+class KernelFunction;
+
+// This kernel implements the behavior of falling through to the next available
+// registered dispatch key. The implementation of this function is FAST; it is
+// no overhead to fallthrough to the next key. See cpp file for some more
+// implementation notes; notably, this does NOT actually go through the
+// boxing/unboxing codepath.
+TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+
+// Note [Ambiguity in AutogradOther kernel]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// This error-reporting kernel is registered to the AutogradOther entry in the
+// dispatch table when there is both a CompositeImplicitAutograd kernel and a
+// backend kernel for ANY backend that maps to AutogradOther. To see why
+// this is necessary in the AutogradOther case, it's helpful to first see
+// why everything works out fine for a backend that has a reserved Autograd
+// entry (see rule 2.2 in [Note] DispatchTable computation):
+//
+//    CPU     AutogradCPU
+//    reg?    registers with...
+//    -------------------------------------------------
+//    y       Autograd registration takes precedence
+//            over CompositeImplicitAutograd.
+//            This is good, because the CPU specific backend
+//            implementation is more specialized and typically better;
+//            if we used the composite, we would bypass it.
+//            (NB: the Autograd key is guaranteed to exist because
+//            the autograd codegen requires it!)
+//
+//    n       CompositeImplicitAutograd takes precedence.
+//            This is also good, because the Autograd
+//            registration (if it exists) would try to redispatch
+//            to the (non-existent) CPU implementation; by
+//            using the composite, we ensure the operator
+//            actually works.
+//
+// As you can see, when we have a specific Autograd key (AutogradCPU), we can
+// decide whether or not to use the CompositeImplicitAutograd kernel or the
+// Autograd kernel based on whether or not the backend kernel exists.
+//
+// However, for AutogradOther (which is the catchall autograd kernel for
+// everything that doesn't have a specific Autograd key), we can't do this
+// trick because there isn't any unique backend to peek at to disambiguate;
+// backends that do have an implementation would prefer the Autograd behavior,
+// while unimplemented backends would prefer CompositeImplicitAutograd. Rather
+// than arbitrarily pick one or the other, we just register a kernel that raises
+// an error and let the user decide how to proceed.
+TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+
+// Note [named_not_supported_kernel]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// This kernel implements reporting an error message saying that named tensor is
+// not supported. This kernel doesn't rely on the Stack, and so it is special
+// cased in the dispatcher to be triggered before we attempt boxing (so we can
+// give a good error message in cases when boxing is not supported). When
+// boxing is universally supported this can be removed.
+[[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+
+/**
+ * BoxedKernel is similar to a std::function storing a boxed kernel.
+ */
+class TORCH_API BoxedKernel final {
+public:
+  // This is how boxed kernels are actually stored
+  //
+  // Note [Plumbing Keys Through The Dispatcher]
+  // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS)
+  // upon every dispatch call in order to compute which kernel to dispatch to.
+  //
+  // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores
+  // to have a first argument of type DispatchKeySet.
+  //
+  // What are the invariants of the DispatchKeySet when it gets passed to a kernel?
+  // - All keys to the left of the current dispatch key have been masked out.
+  //   (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer)
+  // - All other keys that dispatcher normally would have computed through TLS + global state + op arguments
+  //   are still in the set.
+  //
+  // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches:
+  // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will
+  // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher
+  // upon redispatching.
+  //
+  // This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature
+  // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples.
+  //
+  // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h.
+  // See Note [Plumbing Keys Through The Dispatcher 2] for details.
+  using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*);
+  // This is the public API for how boxed kernels are defined
+  using BoxedKernelFunction = void(const OperatorHandle&, Stack*);
+  using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*);
+
+  BoxedKernel();
+
+  // Fast path for dispatch to allow not touching the boxed kernel in
+  // the common case where unboxed is available.
+  bool isValid() const;
+  bool isFallthrough() const;
+
+  /**
+   * Call the function with boxed arguments.
+   */
+  void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
+
+  /**
+   * Create a KernelFunction from a boxed function.
+   *
+   * Example:
+   *
+   * > void boxed_func(OperatorKernel*, Stack* stack) {...}
+   * > BoxedFunction func = BoxedKernel::makeFromFunction<&boxed_func>();
+   */
+  template<BoxedKernelFunction* func>
+  static BoxedKernel makeFromFunction();
+
+  /**
+   * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
+   * See Note [Plumbing Keys Through The Dispatcher] for details.
+   */
+  template<BoxedKernelFunction_withDispatchKeys* func>
+  static BoxedKernel makeFromFunction();
+
+  /**
+   * Create a KernelFunction from a boxed functor.
+   *
+   * Example:
+   *
+   * > class MyFunctor final : public c10::OperatorKernel {
+   * > public:
+   * >   void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
+   * > };
+   * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique<MyFunctor>());
+   */
+  template<class KernelFunctor>
+  static BoxedKernel makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
+
+
+  static BoxedKernel makeFallthrough();
+  static BoxedKernel makeAmbiguousAutogradOther();
+  static BoxedKernel makeNamedNotSupported();
+
+private:
+
+  friend class KernelFunction;
+
+  template<BoxedKernelFunction* func>
+  static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
+
+  template<BoxedKernelFunction_withDispatchKeys* func>
+  static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack);
+
+  explicit BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func);
+
+  OperatorKernel* getFunctor() const;
+  InternalBoxedKernelFunction* getFnPtr() const;
+
+  c10::intrusive_ptr<OperatorKernel> functor_;
+  InternalBoxedKernelFunction* boxed_kernel_func_;
+};
+
+} // namespace c10
+
+#include <ATen/core/boxing/BoxedKernel_impl.h>
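To make the boxed calling convention above concrete, here is a short, hypothetical sketch; the kernel names are illustrative, and only the signatures and factory calls come from the header:

// A boxed kernel: arguments arrive as IValues on a Stack rather than as
// typed parameters, so one function can serve many operator schemas.
void log_and_fail(const c10::OperatorHandle& op, c10::Stack* stack) {
  TORCH_CHECK(false, "no kernel registered for ", op.schema().name());
}

// The DispatchKeySet-aware variant from Note [Plumbing Keys Through The
// Dispatcher]: `ks` already has the keys left of the current one masked
// out, so it could be handed straight back for a redispatch.
void log_and_fail_with_keys(const c10::OperatorHandle& op,
                            c10::DispatchKeySet ks, c10::Stack* stack) {
  TORCH_CHECK(false, "no kernel registered for ", op.schema().name());
}

// The function pointer is a *template* argument, so the boxing wrapper can
// be generated (and inlined) at compile time:
auto k1 = c10::BoxedKernel::makeFromFunction<&log_and_fail>();
auto k2 = c10::BoxedKernel::makeFromFunction<&log_and_fail_with_keys>();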
diff --git a/voice_bridge/torch/include/ATen/core/boxing/BoxedKernel_impl.h b/voice_bridge/torch/include/ATen/core/boxing/BoxedKernel_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..421b85cca3ec5a38e2cb9e27b1ea7f28bd3eb71b
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/boxing/BoxedKernel_impl.h
@@ -0,0 +1,99 @@
+#pragma once
+
+namespace c10 {
+
+inline BoxedKernel::BoxedKernel()
+    : functor_()
+    , boxed_kernel_func_(nullptr)
+{}
+
+inline BoxedKernel::BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func)
+    : functor_(std::move(functor))
+    , boxed_kernel_func_(boxed_kernel_func)
+{}
+
+template<BoxedKernel::BoxedKernelFunction* func>
+inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack) {
+  // Note that we're dropping the DispatchKeySet argument.
+  // See Note [Plumbing Keys Through The Dispatcher 2] for details.
+  func(opHandle, stack);
+}
+
+template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
+inline void BoxedKernel::make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet ks, Stack* stack) {
+  // See Note [Plumbing Keys Through The Dispatcher 2] for details.
+  func(opHandle, ks, stack);
+}
+
+inline bool BoxedKernel::isValid() const {
+  return boxed_kernel_func_ != nullptr;
+}
+
+inline bool BoxedKernel::isFallthrough() const {
+  return boxed_kernel_func_ == &fallthrough_kernel;
+}
+
+inline void BoxedKernel::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const {
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+      boxed_kernel_func_ != nullptr,
+      "Tried to call BoxedKernel::callBoxed() on an uninitialized BoxedKernel."
+  );
+  (*boxed_kernel_func_)(functor_.get(), opHandle, dispatchKeySet, stack);
+}
+
+template<BoxedKernel::BoxedKernelFunction* func>
+inline BoxedKernel BoxedKernel::makeFromFunction() {
+  return BoxedKernel(
+      nullptr,  // no functor_ object
+      &make_boxed_function<func>
+  );
+}
+
+template<BoxedKernel::BoxedKernelFunction_withDispatchKeys* func>
+inline BoxedKernel BoxedKernel::makeFromFunction() {
+  return BoxedKernel(
+      nullptr,  // no functor_ object
+      &make_boxed_function<func>
+  );
+}
+
+inline BoxedKernel BoxedKernel::makeFallthrough() {
+  return BoxedKernel(
+      nullptr,  // no functor_ object
+      &fallthrough_kernel
+  );
+}
+
+inline BoxedKernel BoxedKernel::makeAmbiguousAutogradOther() {
+  return BoxedKernel(
+      nullptr,  // no functor_ object
+      &ambiguous_autogradother_kernel
+  );
+}
+
+inline BoxedKernel BoxedKernel::makeNamedNotSupported() {
+  return BoxedKernel(
+      nullptr,  // no functor_ object
+      &named_not_supported_kernel
+  );
+}
+
+template<class KernelFunctor>
+inline BoxedKernel BoxedKernel::makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor) {
+  static_assert(std::is_base_of<OperatorKernel, KernelFunctor>::value, "Tried to call BoxedKernel::makeFromFunctor, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it.");
+  return BoxedKernel(
+      std::move(kernelFunctor),
+      [](OperatorKernel* kernel, const OperatorHandle& op, DispatchKeySet ks, Stack* stack) {
+        (*static_cast<KernelFunctor*>(kernel))(op, ks, stack);
+      }
+  );
+}
+
+inline OperatorKernel* BoxedKernel::getFunctor() const {
+  return functor_.get();
+}
+inline BoxedKernel::InternalBoxedKernelFunction* BoxedKernel::getFnPtr() const {
+  return boxed_kernel_func_;
+}
+
+} // namespace c10
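The makeFromFunctor path defined above is the stateful counterpart to the plain function-pointer factories. A hypothetical sketch (CountingKernel is illustrative, not part of the diff):

// A boxed kernel with per-kernel state: the BoxedKernel keeps the functor
// alive via intrusive_ptr, so `calls_` persists across invocations.
class CountingKernel final : public c10::OperatorKernel {
 public:
  void operator()(const c10::OperatorHandle& op,
                  c10::DispatchKeySet ks, c10::Stack* stack) {
    ++calls_;
    // ... read or rewrite the IValues on `stack` here ...
  }

 private:
  int64_t calls_ = 0;
};

auto counting =
    c10::BoxedKernel::makeFromFunctor(std::make_unique<CountingKernel>());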
diff --git a/voice_bridge/torch/include/ATen/core/boxing/KernelFunction.h b/voice_bridge/torch/include/ATen/core/boxing/KernelFunction.h
new file mode 100644
index 0000000000000000000000000000000000000000..f1bfc9ec6f27359672b6f8f23988eef80aedadc4
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/boxing/KernelFunction.h
@@ -0,0 +1,251 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace c10 {
+
+using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace.
+
+class OperatorHandle;
+struct OperatorKernel;
+class KernelFunction;
+
+template <typename T>
+using has_symint =
+  guts::disjunction<
+    std::is_same<c10::SymInt, std::decay_t<T>>,
+    std::is_same<c10::SymIntArrayRef, std::decay_t<T>>,
+    std::is_same<at::OptionalSymIntArrayRef, std::decay_t<T>>,
+    std::is_same<c10::optional<c10::SymInt>, std::decay_t<T>>
+  >;
+
+template <typename T>
+struct remove_symint {
+  using type = T;
+};
+
+template <>
+struct remove_symint<c10::SymInt> {
+  using type = int64_t;
+};
+
+template <>
+struct remove_symint<at::OptionalSymIntArrayRef> {
+  using type = OptionalIntArrayRef;
+};
+
+template <>
+struct remove_symint<c10::SymIntArrayRef> {
+  using type = c10::IntArrayRef;
+};
+
+template <>
+struct remove_symint<c10::optional<c10::SymInt>> {
+  using type = c10::optional<int64_t>;
+};
+
+
+template <bool symint, typename T>
+struct maybe_keep_symint final {};
+
+template <typename T>
+struct maybe_keep_symint<true, T> { using type = T; };
+
+template <typename T>
+struct maybe_keep_symint<false, T> { using type = typename remove_symint<T>::type; };
+
+template <typename T>
+using fn_has_symint = typename guts::typelist::true_for_any_type<
+  has_symint,
+  typename guts::infer_function_traits<T>::type::parameter_types
+>;
+
+/**
+ * KernelFunction is similar to std::function but stores a kernel function.
+ * You can create a KernelFunction from a boxed or unboxed function/functor/lambda
+ * and call it in a boxed or unboxed way. If the way it was created doesn't
+ * match the way it was called, it will do boxing or unboxing as necessary.
+ */
+class TORCH_API KernelFunction final {
+public:
+  using InternalBoxedKernelFunction = BoxedKernel::InternalBoxedKernelFunction;
+  using BoxedKernelFunction = BoxedKernel::BoxedKernelFunction;
+  using BoxedKernelFunction_withDispatchKeys = BoxedKernel::BoxedKernelFunction_withDispatchKeys;
+
+  KernelFunction();
+
+  // Fast path for dispatch to allow not touching the boxed kernel in
+  // the common case where unboxed is available.
+  bool isValidUnboxed() const;
+  bool isValidSymUnboxed() const;
+  bool isValid() const;
+  bool isFallthrough() const;
+
+  /**
+   * Call the function in a boxed way.
+   * If the kernel function was created with an unboxed function,
+   * this will call an unboxing wrapper which then calls into that
+   * unboxed function.
+   *
+   * Example:
+   *
+   * > void boxed_func(OperatorKernel*, Stack* stack) {...}
+   * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
+   * > Tensor result = func.callBoxed(stack);
+   *
+   * Or, with an unboxed implementation:
+   *
+   * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
+   * >      [] (Tensor a, bool b) -> Tensor {...});
+   * > Tensor result = func.callBoxed(stack);
+   */
+  void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const;
+
+  /**
+   * Call the function in an unboxed way.
+   * If the kernel function was created with a boxed function,
+   * this will box all inputs and then call into that boxed function.
+   *
+   * Note that this doesn't work for all types yet.
+   *
+   * Example:
+   *
+   * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
+   * >      [] (Tensor a, bool b) -> Tensor {...});
+   * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
+   *
+   * Or, with a boxed implementation:
+   *
+   * > void boxed_func(OperatorKernel*, Stack* stack) {...}
+   * > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
+   * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
+   */
+  template<class Return, class... Args>
+  Return call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const;
+
+  /**
+   * Create a KernelFunction from a BoxedKernel.
+   */
+  static KernelFunction makeFromBoxedKernel(BoxedKernel boxed_fn);
+
+  /**
+   * Create a KernelFunction from a boxed function.
+   *
+   * Example:
+   *
+   * > void boxed_func(OperatorKernel*, Stack* stack) {...}
+   * > KernelFunction func = KernelFunction::makeFromBoxedFunction<&boxed_func>();
+   */
+  template<BoxedKernelFunction* func>
+  static KernelFunction makeFromBoxedFunction();
+
+  /**
+   * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none)
+   * See Note [Plumbing Keys Through The Dispatcher] for details.
+   */
+  template<BoxedKernelFunction_withDispatchKeys* func>
+  static KernelFunction makeFromBoxedFunction();
+
+  /**
+   * Create a KernelFunction from an unboxed functor.
+   *
+   * Example:
+   *
+   * > class MyFunctor final : public c10::OperatorKernel {
+   * > public:
+   * >   Tensor operator()(Tensor a, Tensor b) {...}
+   * > };
+   * > KernelFunction func = KernelFunction::makeFromUnboxedFunctor(std::make_unique<MyFunctor>());
+   */
+  template<bool AllowLegacyTypes = false, class KernelFunctor>
+  static KernelFunction makeFromUnboxedFunctor(std::unique_ptr<OperatorKernel> kernelFunctor);
+
+  /**
+   * Create a KernelFunction from a boxed functor.
+   *
+   * Example:
+   *
+   * > class MyFunctor final : public c10::OperatorKernel {
+   * > public:
+   * >   void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...}
+   * > };
+   * > KernelFunction func = KernelFunction::makeFromBoxedFunctor(std::make_unique<MyFunctor>());
+   */
+  template<class KernelFunctor>
+  static KernelFunction makeFromBoxedFunctor(std::unique_ptr<KernelFunctor> kernelFunctor);
+
+  /**
+   * Create a KernelFunction from an unboxed function.
+   * This is usually better than KernelFunction::makeFromUnboxedRuntimeFunction
+   * because knowing the function pointer as a template argument (i.e. at
+   * compile time) allows the compiler to inline the function into its
+   * unboxing wrapper and yields better performance when calling the function.
+   *
+   * Example:
+   *
+   * > Tensor unboxed_func(Tensor a, Tensor b) {...}
+   * > KernelFunction func = KernelFunction::makeFromUnboxedFunction(TORCH_FN(unboxed_func));
+   */
+  template<class FuncPtr, bool AllowLegacyTypes = false>
+  static KernelFunction makeFromUnboxedFunction(FuncPtr);
+
+  /**
+   * Create a KernelFunction from an unboxed function.
+   * KernelFunction::makeFromUnboxedFunction is usually a better choice than
+   * this if you know the function pointer at compile time, see doc comment
+   * there for an explanation.
+   *
+   * Example:
+   *
+   * > Tensor unboxed_func(Tensor a, Tensor b) {...}
+   * > KernelFunction func = KernelFunction::makeFromUnboxedRuntimeFunction(&unboxed_func);
+   */
+  template<bool AllowLegacyTypes = false, class FuncType>
+  static KernelFunction makeFromUnboxedRuntimeFunction(FuncType* func);
+
+  static KernelFunction makeFallthrough();
+  static KernelFunction makeAmbiguousAutogradOther();
+  static KernelFunction makeNamedNotSupported();
+
+  /**
+   * Create a KernelFunction from an unboxed lambda.
+   *
+   * Example:
+   *
+   * > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
+   * >      [] (Tensor a, bool b) -> Tensor {...});
+   */
+  template<bool AllowLegacyTypes = false, class Lambda>
+  static std::enable_if_t<guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
+  template<bool AllowLegacyTypes = false, class Lambda>
+  static std::enable_if_t<!guts::is_stateless_lambda<std::decay_t<Lambda>>::value, KernelFunction> makeFromUnboxedLambda(Lambda&& lambda);
+
+  std::string dumpState() const;
+  // For testing internal invariants only
+  bool _equalsBoxedAndUnboxed(const KernelFunction&) const;
+
+private:
+
+  explicit KernelFunction(
+      std::unique_ptr<OperatorKernel> functor,
+      InternalBoxedKernelFunction* boxed_kernel_func,
+      void* unboxed_kernel_func,
+      void* sym_unboxed_kernel_func);
+  explicit KernelFunction(
+      BoxedKernel boxed_fn,
+      void* unboxed_kernel_func,
+      void* sym_unboxed_kernel_func);
+
+  BoxedKernel boxed_kernel_func_;
+  void* unboxed_kernel_func_;
+  void* sym_unboxed_kernel_func_;
+};
+
+}
+
+#include <ATen/core/boxing/KernelFunction_impl.h>
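Before the inline definitions, a sketch of the boxed/unboxed round trip the class doc above describes. This is a fragment, not a complete program: `opHandle`, `keyset`, and the tensor `t` are assumed to come from a real dispatcher context.

// Wrap a typed lambda; KernelFunction generates the unboxing wrapper.
auto kf = c10::KernelFunction::makeFromUnboxedLambda(
    [](at::Tensor a, bool flip) -> at::Tensor { return flip ? -a : a; });

// Unboxed call: Return and Args are spelled out explicitly, as the header
// requires (they are intentionally not deduced).
at::Tensor r = kf.call<at::Tensor, at::Tensor, bool>(opHandle, keyset, t, true);

// Boxed call: arguments travel on a Stack of IValues; the result is left
// on the stack afterwards.
c10::Stack stack{t, true};
kf.callBoxed(opHandle, keyset, &stack);
at::Tensor r2 = stack.back().toTensor();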
sym_unboxed_kernel_func = nullptr) + : boxed_kernel_func_(std::move(boxed_fn)) + , unboxed_kernel_func_(unboxed_kernel_func) + , sym_unboxed_kernel_func_(sym_unboxed_kernel_func) +{} + +inline bool KernelFunction::isValidUnboxed() const { + return unboxed_kernel_func_ != nullptr; +} + +inline bool KernelFunction::isValidSymUnboxed() const { + return sym_unboxed_kernel_func_ != nullptr; +} + +inline bool KernelFunction::isValid() const { + return boxed_kernel_func_.isValid(); +} + +inline bool KernelFunction::isFallthrough() const { + return boxed_kernel_func_.isFallthrough(); +} + +inline void KernelFunction::callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const { + boxed_kernel_func_.callBoxed(opHandle, dispatchKeySet, stack); +} + +template +inline Return callUnboxedKernelFunction(void* unboxed_kernel_func, OperatorKernel* functor, DispatchKeySet dispatchKeySet, Args&&... args) { + using ActualSignature = Return (OperatorKernel*, DispatchKeySet, Args...); + ActualSignature* func = reinterpret_cast(unboxed_kernel_func); + return (*func)(functor, dispatchKeySet, std::forward(args)...); +} + +// This template requires you to explicitly specify the argument you want to +// forward; it doesn't work if you try to deduce it +// NB: keep this in sync with cloneWithRealTypes in function_schema.cpp + +template +inline typename remove_symint::type unpackSymInt(T x) { return x; } + +template <> +inline typename remove_symint::type unpackSymInt(c10::SymInt x) { + return x.expect_int(); +} + +template <> +inline typename remove_symint::type unpackSymInt(c10::SymIntArrayRef x) { + return c10::asIntArrayRefSlow(x); +} + +template <> +inline typename remove_symint>::type unpackSymInt(c10::optional x) { + return x.has_value() ? c10::make_optional(x->expect_int()) : c10::nullopt; +} + +template <> +inline typename remove_symint::type unpackSymInt(at::OptionalSymIntArrayRef x) { + return x.has_value() ? c10::make_optional(c10::asIntArrayRefSlow(*x)) : c10::nullopt; +} + +template +C10_ALWAYS_INLINE Return KernelFunction::call(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Args... args) const { + // note: Args above is intentionally not Args&&. We don't want perfect + // forwarding, which would require Args to be deduced, but instead we + // want callers to explicitly specify the Args. + + // This should get inlined by compiler + if (guts::disjunction...>::value) { + if (sym_unboxed_kernel_func_ != nullptr) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction( + sym_unboxed_kernel_func_, functor, dispatchKeySet, std::forward(args)...); + } + + if (unboxed_kernel_func_ != nullptr) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction::type...>( + unboxed_kernel_func_, functor, dispatchKeySet, unpackSymInt(args)...); + } + } else { + if (C10_LIKELY(unboxed_kernel_func_ != nullptr)) { + auto *functor = boxed_kernel_func_.getFunctor(); + return callUnboxedKernelFunction( + unboxed_kernel_func_, functor, dispatchKeySet, std::forward(args)...); + } + } + + return impl::BoxedKernelWrapper::call( + boxed_kernel_func_, + opHandle, + dispatchKeySet, + std::forward(args)... 
+ ); +} + +inline KernelFunction KernelFunction::makeFromBoxedKernel(BoxedKernel boxed_fn) { + return KernelFunction(std::move(boxed_fn), nullptr); // no unboxed function pointer +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunction() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunction()); +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunction() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunction()); +} + +inline KernelFunction KernelFunction::makeFallthrough() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFallthrough()); +} + +inline KernelFunction KernelFunction::makeAmbiguousAutogradOther() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeAmbiguousAutogradOther()); +} + +inline KernelFunction KernelFunction::makeNamedNotSupported() { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeNamedNotSupported()); +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedFunctor(std::unique_ptr kernelFunctor) { +#ifndef NDEBUG + // This assertion is costly for build time so it's debug-gated. + static_assert(guts::is_functor::value, "Tried to call KernelFunction::makeFromUnboxedFunctor but the argument is not a functor."); +#endif + static_assert(std::is_base_of::value, "Tried to call KernelFunction::makeFromUnboxedFunctor, but the functor doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + + auto* unboxed_fn = &impl::wrap_kernel_functor_unboxed::call; + void* void_unboxed_fn = reinterpret_cast(unboxed_fn); + bool is_symint = fn_has_symint::value; + return KernelFunction( + std::move(kernelFunctor), + &impl::make_boxed_from_unboxed_functor::call, + is_symint ? nullptr : void_unboxed_fn, + is_symint ? void_unboxed_fn : nullptr + ); +} + +template +inline KernelFunction KernelFunction::makeFromBoxedFunctor(std::unique_ptr kernelFunctor) { + return KernelFunction::makeFromBoxedKernel( + BoxedKernel::makeFromFunctor(std::move(kernelFunctor))); +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedFunction(FuncPtr func_ptr) { + static_assert(is_compile_time_function_pointer::value, "Tried to call KernelFunction::makeFromUnboxedFunction with an invalid parameter. It must be a function pointer created with TORCH_FN."); + static_assert(!std::is_same::value, "Tried to call KernelFunction::makeFromUnboxedFunction with a boxed function pointer. Please use KernelFunction::makeFromBoxedFunction instead."); + static_assert(FuncPtr::func_ptr() != nullptr, "Kernel function cannot be nullptr"); + +#if !defined(C10_MOBILE) + (void)func_ptr; // Suppress unused variable warning + return makeFromUnboxedFunctor::type>( + guts::make_unique_base::type>() + ); +#else + // On mobile, we rather want to optimize for binary size than for performance, + // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction + // instead. + return makeFromUnboxedRuntimeFunction(func_ptr.func_ptr()); +#endif +} + +template +inline KernelFunction KernelFunction::makeFromUnboxedRuntimeFunction(FuncType* func) { + static_assert(guts::is_function_type::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a non-function type."); + static_assert(!std::is_same::value, "Tried to call KernelFunction::makeFromUnboxedRuntimeFunction with a boxed function pointer. 
Please use KernelFunction::makeFromBoxedFunction instead."); + TORCH_INTERNAL_ASSERT(func != nullptr, "Kernel function cannot be nullptr"); + + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(func) + ); +} + +template +inline std::enable_if_t>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) { + static_assert(guts::is_functor>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type."); + +#if !defined(C10_MOBILE) + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(std::forward(lambda)) + ); +#else + // On mobile, we rather want to optimize for binary size than for performance, + // so let's not inline the kernel into the wrapper but use makeFromUnboxedRuntimeFunction + // instead. + using FuncType = typename guts::infer_function_traits_t>::func_type; + return makeFromUnboxedRuntimeFunction(lambda); +#endif +} + +template +inline std::enable_if_t>::value, KernelFunction> KernelFunction::makeFromUnboxedLambda(Lambda&& lambda) { + static_assert(guts::is_functor>::value, "Tried to call KernelFunction::makeFromUnboxedLambda with a non-lambda type."); + + return makeFromUnboxedFunctor>>( + guts::make_unique_base>>(std::forward(lambda)) + ); +} + +} diff --git a/voice_bridge/torch/include/ATen/core/boxing/OperatorKernel.h b/voice_bridge/torch/include/ATen/core/boxing/OperatorKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..ac4f06a91c474575a9a94834de2b995b5a63fc0d --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/boxing/OperatorKernel.h @@ -0,0 +1,27 @@ +#pragma once +#include + +namespace c10 { + +/** + * Inherit from OperatorKernel to implement a c10 kernel. + * + * Example: + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * + * The kernel class is allowed to have members but these are equivalent + * to global variables. The kernel implementation is responsible for + * preventing race conditions on them. + * + * See below for how to register this kernel with PyTorch. + */ +struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target { + virtual ~OperatorKernel() = default; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h b/voice_bridge/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..c8d7687cde3f74348ad1f73deacc2af1d03c8da9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace c10 { +namespace impl { + namespace detail { + template class WrapFunctionIntoFunctor_ {}; + template + class WrapFunctionIntoFunctor_> final : public c10::OperatorKernel { + public: + C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) { + return (*FuncPtr::func_ptr())(std::forward(args)...); + } + }; + } + + // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor. + // Since it is a compile time function pointer, many compilers can inline it + // into the wrapper and you don't get any performance overhead for wrapping. 
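To see why the compile-time variant can be inlined, here is a minimal standalone sketch of the same technique (the names `Wrap` and `add` are illustrative, not part of these headers): the function pointer is a non-type template parameter, so the callee is a constant expression at every call site. The `WrapFunctionIntoFunctor` trait that follows applies exactly this idea.

```cpp
#include <iostream>
#include <utility>

// Carrying the function pointer as a template parameter (C++17 `auto`
// non-type parameter) means the compiler sees the callee through the
// wrapper and can inline it; a runtime pointer would force an
// indirect call instead.
template <auto FuncPtr>
struct Wrap {
  template <class... Args>
  decltype(auto) operator()(Args&&... args) const {
    return (*FuncPtr)(std::forward<Args>(args)...);  // FuncPtr is a constant expression
  }
};

int add(int a, int b) { return a + b; }

int main() {
  Wrap<&add> functor;                  // stateless wrapper, no stored pointer
  std::cout << functor(2, 3) << "\n";  // prints 5
}
```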
+ template + struct WrapFunctionIntoFunctor final { + static_assert(c10::is_compile_time_function_pointer::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN."); + using type = detail::WrapFunctionIntoFunctor_< + FuncPtr, + typename guts::function_traits::return_type, + typename guts::function_traits::parameter_types + >; + }; +} + +} diff --git a/voice_bridge/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h b/voice_bridge/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..9cd647597d42d461431164ec76a16ccccc75063e --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +namespace c10 { + +namespace impl { + namespace detail { + template class WrapFunctionIntoRuntimeFunctor_ {}; + template + class WrapFunctionIntoRuntimeFunctor_> final : public c10::OperatorKernel { + public: + template + explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func) + : kernel_func_(std::forward(kernel_func)) {} + + decltype(auto) operator()(Parameters... args) { + return kernel_func_(std::forward(args)...); + } + + private: + FuncType kernel_func_; + }; + } + + // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that + // inherits from c10::OperatorKernel, so it can be used as a c10 kernel. + // This can, for example, be used for lambdas, functors or even function pointers. + // In the case of function pointers, since it is a runtime function pointer, + // there is an overhead for calling it whenever the kernel is invoked. + template + using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_< + FuncType, + typename guts::infer_function_traits_t::return_type, + typename guts::infer_function_traits_t::parameter_types + >; +} + +} diff --git a/voice_bridge/torch/include/ATen/core/boxing/impl/boxing.h b/voice_bridge/torch/include/ATen/core/boxing/impl/boxing.h new file mode 100644 index 0000000000000000000000000000000000000000..ccac9ebe8f61b0ba9c2671d5d1c8f1443c24c5cd --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/boxing/impl/boxing.h @@ -0,0 +1,389 @@ +#pragma once + +// This file contains boxing (not unboxing) logic, +// i.e. how to make a vector from a set of concrete arguments. + +#include +#include +#include + +#include + +#include + +namespace c10 { +namespace impl { + +// +// utils +// + +// is_mutable_tensor_ref +template struct is_mutable_tensor_ref : std::false_type {}; +template <> struct is_mutable_tensor_ref : std::true_type {}; + +// is_tuple_of_mutable_tensor_refs +// +template +struct is_tuple_of_mutable_tensor_refs : std::false_type {}; + +template +struct is_tuple_of_mutable_tensor_refs::value, void>> +: guts::typelist::all> +{}; + +// has_ivalue_to tests the presence/absence of instance method IValue::to() +// +template +struct has_ivalue_to : std::false_type {}; + +template +struct has_ivalue_to().to())>> +: std::true_type +{}; + +// +// boxing predicates +// + +// A boxable arg type is one that IValue has a constructor for. 
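As a rough standalone model of the `can_box` predicate defined next (`Boxed` here is a hypothetical stand-in for `IValue`, not the real class): a type counts as boxable exactly when the box type is constructible from its decayed value.

```cpp
#include <cstdint>
#include <string>
#include <type_traits>

// Hypothetical box type; the real IValue lives in ATen/core/ivalue.h
// and has many more constructors. The predicate mirrors the
// std::is_constructible check used by can_box above.
struct Boxed {
  Boxed(int64_t) {}
  Boxed(double) {}
  Boxed(bool) {}
  Boxed(std::string) {}
};

template <class T>
using can_box_sketch = std::is_constructible<Boxed, std::decay_t<T>>;

struct NotBoxable {};  // no conversion to Boxed exists

static_assert(can_box_sketch<int64_t>::value, "exact constructor match");
static_assert(can_box_sketch<const std::string&>::value, "decays to std::string");
static_assert(!can_box_sketch<NotBoxable>::value, "no viable constructor");

int main() {}
```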
+template +using can_box = + guts::disjunction< + std::is_constructible>, + // TensorOptions are not directly constructible into IValue, + // but torch::jit::push knows how to handle them + std::is_same> + >; + +template +using can_box_all = guts::conjunction...>; + +// an unboxable result is one that can be extracted from an IValue +template +using can_unbox = + guts::conjunction< + guts::disjunction< + has_ivalue_to, + // void returns are ok + std::is_same + >, + guts::negation> + >; + +// +// boxArgs - utility for pushing unboxed args onto IValue stack +// +template +torch::jit::Stack boxArgs(Args... args) { + // TODO Reuse stack vector instead of allocating? + torch::jit::Stack stack; + stack.reserve(sizeof...(Args)); + torch::jit::push(stack, std::forward(args)...); + return stack; +} + +template +static inline constexpr size_t boxed_size_one() { + static_assert(!std::is_same, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference"); + return 1; +} + +// torch::jit::push pushes 4 values for a TensorOptions; this needs to +// be kept in sync. +template <> +inline constexpr size_t boxed_size_one() { + return 4; +} + +// NOTE: this could probably be simplified with C++17 fold expressions. +template +struct BoxedSize : std::integral_constant {}; +template +struct BoxedSize : std::integral_constant() + BoxedSize::value> {}; + +template +static inline constexpr size_t boxed_size() { + return BoxedSize::value; +} + +using IValueAlignedStorage = std::aligned_storage_t; + +template +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) { + new (&dest[lastIdx]) IValue(arg); + lastIdx++; +} + +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) { + new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype())); + new (&dest[lastIdx++]) IValue(options.layout()); + new (&dest[lastIdx++]) IValue(options.device()); + new (&dest[lastIdx++]) IValue(options.pinned_memory()); +} + +inline void boxArgsToStack(IValueAlignedStorage*, int&) {} + +template +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) { + boxToStack(dest, arg, lastIdx); + boxArgsToStack(dest, lastIdx, args...); +} + +// +// PopResult is a helper class whose specializations handle popping single and +// multiple return values, respectively. +// +template +struct PopResult final { + static Result call(Stack& stack) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return one value on the stack, ", + "but instead pushed ", stack.size(), " values." + ); + return std::move(stack[0]).to(); + } +}; + +template +struct PopResult> final { + using Result = std::tuple; + + static Result call(Stack& stack) { + // for tuple return types, boxed kernel has pushed multiple values onto the stack + constexpr int RetCount = sizeof...(Types); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == RetCount, + "Boxed kernel was expected to return ", RetCount, " values on the stack, ", + "but instead pushed ", stack.size(), " values." + ); + return pop_to_tuple_impl(stack, std::make_index_sequence()); + } +private: + // note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise. 
+ // I'm sure there's an incantation that slips it past the parser but eh + template + static Result pop_to_tuple_impl(Stack& stack, std::index_sequence) { + return std::make_tuple((std::move(stack[indices]).to())...); + } +}; + +// +// BoxedKernelWrapper +// +// For a given function type FT, BoxedKernelWrapper implements +// a `call` method that +// - takes a boxed kernel and unboxed arguments as specified by FT, +// - calls `boxArgs` to box the arguments +// - calls the boxed kernel +// - unboxes and returns the result +// +// The partial specializations below handle various cases: in +// particular, not all types appearing in op signatures are supported, +// and ops returning references have nonstandard wrapper implementations. +// + +// 1. The base specialization of BoxedKernelWrapper should never be instantiated. +// A "no call method defined on BoxedKernelWrapper" compile error means that +// an op signature has failed to trigger any of the partial specializations +// that follow this one. +// +template +struct BoxedKernelWrapper { + // The reason we're not just doing straight up static_assert(false, ...) here: + // Basically, the way to make sure a static_assert only fires if a template + // is actually instantiated (rather than every time the file is parsed) is to use + // template parameters in the expression, e.g. FuncType here. However, since + // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same + // effect. + static_assert(sizeof(FuncType) != sizeof(FuncType), + "Function signature contains one or more unsupported parameter and/or return types. " + "Look for a nearby error like " + "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" " + "- (your function type) is the unsupported signature."); +}; + +// +// 2. Supported signatures, other than those involving non-const Tensor refs - +// i.e., "functional" ops. +// + +template +struct BoxedKernelWrapper< + Result(Args...), + std::enable_if_t< + can_box_all::value && can_unbox::value && !is_tuple_of_mutable_tensor_refs::value, + void + > +> { + static Result call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + Args... args + ) { + torch::jit::Stack stack = boxArgs(std::forward(args)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + + return guts::if_constexpr::value>( + [&] (auto delay_check) { + // op has pushed one or more values onto the stack. + return delay_check(PopResult::call(stack)); + }, + [&] { + // op returns void, boxed kernel has pushed nothing onto stack. + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 0, + "Boxed kernel was expected to return no values on the stack, ", + "but instead returned ", stack.size(), " values." + ); + } + ); + } +}; + +// +// 3. in-place ops take a single non-const Tensor reference +// as their first argument, and return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// Because of this, the generated BoxedKernelWrapper specializations simply +// return the in-place argument. +// + +template +struct BoxedKernelWrapper< + at::Tensor&(at::Tensor&, OtherArgs...), + std::enable_if_t::value, void> +> { + static at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + at::Tensor& outArg, OtherArgs... 
otherArgs + ) { + torch::jit::Stack stack = boxArgs(outArg, std::forward(otherArgs)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + return outArg; + } +}; + +// +// 3.5. In-process migration to make in-place ops take and return +// const references instead. +template +struct BoxedKernelWrapper< + const at::Tensor&(const at::Tensor&, OtherArgs...), + std::enable_if_t::value, void> +> { + static const at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + const at::Tensor& outArg, OtherArgs... otherArgs + ) { + torch::jit::Stack stack = boxArgs(outArg, otherArgs...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + return outArg; + } +}; + +// +// 4. out of place ops that take a single non-const Tensor reference as their +// final argument, and also return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return out arguments. +// +template +struct BoxedKernelWrapper< + at::Tensor&(FirstArg, RestArgs...), + std::enable_if_t< + can_box_all::value + // this skips over in-place kernels with a non-const Tensor + // arg at the front, so those can unambiguously trigger the preceding specialization. + && !is_mutable_tensor_ref::value, + void + > +> { + static at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + FirstArg firstArg, RestArgs... restArgs + ) { + torch::jit::Stack stack = boxArgs(std::forward(firstArg), std::forward(restArgs)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + // reusing restArgs after it has been forwarded here is ok because we know + // that the last element is of type `Tensor&`. + return std::get(std::tuple{restArgs...}); + } +}; + +// +// 5. out of place ops that take multiple non-const Tensor references as their +// final arguments, and return them in a std::tuple. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return the out arguments. +// +template +struct BoxedKernelWrapper< + Result(Args...), + std::enable_if_t< + can_box_all::value && is_tuple_of_mutable_tensor_refs::value, + void + > +> { + static Result call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + Args... args + ) { + using ArgTuple = std::tuple; + constexpr int RetCount = std::tuple_size(); + + torch::jit::Stack stack = boxArgs(std::forward(args)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == RetCount, + "Boxed kernel was expected to return ", RetCount, " values on the stack, ", + "but instead returned ", stack.size(), " values." 
+ ); + + // reusing args after it has been forwarded here is ok because we know + // that the last RetCount elements are of type `Tensor&`. + auto result = guts::tuple_take(ArgTuple{std::forward(args)...}); + static_assert( + std::is_same::value, + "The parameter list of an op returning a tuple of Tensor references " + "must end with an equal number of Tensor reference parameters." + ); + return result; + } +}; + +} // impl +} // c10 diff --git a/voice_bridge/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h b/voice_bridge/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h new file mode 100644 index 0000000000000000000000000000000000000000..a99f45040788d24bff23a4fb4d1fc9793d3eb13e --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h @@ -0,0 +1,608 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +using Stack = torch::jit::Stack; // TODO Instead of this, move torch::jit::Stack to the c10 namespace. +class OperatorHandle; + +/* + * [Note: Argument forwarding in the dispatcher] + * + * The dispatcher uses a somewhat unusual way to forward arguments through several layers of + * wrapper functions. This can be confusing because an experienced C++ programmer would look at this + * and think "oh this is supposed to be forwarding a universal reference but the && is missing. This is a bug.". + * It is not a bug. The common way in C++ to forward arguments is to use universal references: + * + * > template void func(T&& arg) { func2(std::forward(arg)); } + * + * but that relies on inferring the correct reference type (i.e. value vs & vs &&) from the argument. + * In our case, we cannot rely on the argument as supplied by the caller, because that could infer a + * different reference type than was used in the kernel function. The correct reference type + * is dictated by the kernel signature and must be identical since we cast function pointers + * through void* pointers and mismatches would be UB. So we need a forwarding pattern that determines + * the reference type to use by looking at the explicitly supplied operator signature, not by looking at + * the argument we're calling it with. + * + * What does std::forward do, exactly? + * ------------------------------------ + * std::forward(t) is a way to cast t to the reference type supplied in T. + * Let's assume decay_t == U and T is either U or some reference of U. + * - std::forward(t) will return U&, no matter what kind of reference t is. + * - std::forward(t) will return U&&, no matter what kind of reference t is. + * - std::forward(t) will return U&& (not U!), no matter what kind of reference t is. + * + * For universal references, that means that in the following function + * > template void func(T&& arg) { func2(std::forward(arg)); } + * + * - when called with arg being a rvalue reference or non-reference value, T gets inferred to be + * a non-reference U, and std::forward(t) will return U&&, correctly moving the argument. + * - when called with arg behind a lvalue reference, T gets inferred to be U& because that's the only + * way to match the signature (in C++, a type that is (T&)&& will collapse to T&). + * That means std::forward(t) will return U& and the value will not be moved but passed on as + * a lvalue reference. + * + * How do we use that? 
+ * ------------------------------------
+ * But std::forward can also be used outside of the common "universal forwarding" pattern to change
+ * reference types. So instead of following the common C++ pattern, we notice what
+ * std::forward<T>() actually does, and that is it takes a value and changes its reference to the
+ * type of reference passed in as T. If we don't infer T but explicitly specify it, we can use this
+ * to forward based on an explicitly specified reference type instead of the inferred argument type.
+ *
+ * This is why many of the dispatcher functions look like
+ * > template<class T> void func(T t) { func2(std::forward<T>(t)); }
+ * instead of the common
+ * > template<class T> void func(T&& t) { func2(std::forward<T>(t)); }
+ *
+ * and are expected to be called by explicitly specifying the template parameters in a way that matches
+ * the expected operator signature at each call site.
+ */
+
+namespace impl {
+  // supported_primitive_arg_types defines which primitive types we allow in
+  // kernel functions as arguments or returns.
+  // Additionally, we support lists, dicts and optionals containing these types.
+  using supported_primitive_arg_types = guts::typelist::typelist<
+    int64_t,
+    double,
+    bool,
+    c10::string_view,
+    at::Tensor,
+    at::Scalar,
+    c10::QScheme,
+    c10::ScalarType,
+    c10::Device,
+    c10::Layout,
+    c10::MemoryFormat,
+    at::Dimname
+  >;
+
+  // We have an unboxed functor in hand that takes C++ arguments, and
+  // we're building a boxed functor wrapper for it that takes IValues.
+  // So "outside" is boxed and "inside" is unboxed.
+  //
+  // So a valid input type is one that our boxed functor wrapper can
+  // unbox from an IValue into a C++ value.
+  //
+  // Whereas a valid output type is one that our wrapper can receive
+  // as a C++ value from the unboxed functor, and box into an IValue.
+
+  //
+  // assert_is_valid_input_type
+  // checks that T can be unboxed from an IValue into a C++ value.
+  //
+
+  template<class T, bool AllowDeprecatedTypes, class Enable = void>
+  struct assert_is_valid_input_type {
+    assert_is_valid_input_type() {
+      guts::if_constexpr<guts::typelist::contains<supported_primitive_arg_types, T>::value>([] {
+        /* everything is ok, this is a primitive type */
+      }, /* else */ [] {
+        /* otherwise this must be an instance of a valid custom class, since it can only
+           have been created via IValue(x), which ensures this. */
+      });
+    }
+  };
+
+  template<class T, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<c10::optional<T>, AllowDeprecatedTypes>
+  : assert_is_valid_input_type<T, AllowDeprecatedTypes> {};
+
+  template<bool AllowDeprecatedTypes, class... Args>
+  struct TypeCheckHelper;
+
+  template<bool AllowDeprecatedTypes>
+  struct TypeCheckHelper<AllowDeprecatedTypes> {};
+
+  template<bool AllowDeprecatedTypes, class Head, class... Rest>
+  struct TypeCheckHelper<AllowDeprecatedTypes, Head, Rest...>
+  : TypeCheckHelper<AllowDeprecatedTypes, Rest...> {
+    assert_is_valid_input_type<Head, AllowDeprecatedTypes> check;
+  };
+
+  template<class... Contained, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<std::tuple<Contained...>, AllowDeprecatedTypes>
+  : TypeCheckHelper<AllowDeprecatedTypes, Contained...> {};
+
+  template<class Key, class Value, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<Dict<Key, Value>, AllowDeprecatedTypes>
+  : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
+    static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
+      "You tried to register a kernel with an unsupported input type: Dict<Key, Value> where Key is invalid. We only support int64_t, double, bool, and string.");
+  };
+
+  template<class Key, class Value, bool AllowDeprecatedTypes>
+  struct assert_is_valid_input_type<std::unordered_map<Key, Value>, AllowDeprecatedTypes>
+  : assert_is_valid_input_type<Value, AllowDeprecatedTypes> {
+    static_assert(AllowDeprecatedTypes,
+      "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value>. Please use Dict<Key, Value> instead.");
+    static_assert(guts::typelist::contains<impl::valid_dict_key_types, Key>::value,
+      "You tried to register a kernel with an unsupported input type: std::unordered_map<Key, Value> where Key is invalid.
We only support int64_t, double, bool, and string."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: List. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: ArrayRef. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: OptionalArrayRef. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_input_type, AllowDeprecatedTypes> + : assert_is_valid_input_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported input type: std::array. Please use std::array instead."); + }; + + // The following specialisations of assert_is_valid_input_type are technically not + // necessary since we would hit the base case and show an error message + // there if they didn't exist, but we can show a better error message + // in some common error scenarios. + template + struct assert_is_valid_input_type::value>> { + // There is no reason to support float when we have double. Keep the API lean. + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: float. Please use double instead."); + }; + template + struct assert_is_valid_input_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: const char*. Please use c10::string_view instead."); + }; + template + struct assert_is_valid_input_type, T>::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported input type: vector. Please use List instead."); + }; + template + struct assert_is_valid_input_type::value && !guts::typelist::contains::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead."); + }; + + // + // assert_is_valid_output_type + // + + template + struct assert_is_valid_output_type { + assert_is_valid_output_type() { + guts::if_constexpr::value>([] { + /* everything is ok, this is a primitive type */ + }, /* else */ [] { + /* otherwise T is verified to be a registered custom class in the IValue + constructor, so no benefit in double-checking here */ + }); + } + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type {}; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type {}; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported output type: Dict where Key is invalid. We only support int64_t, double, bool, and string."); + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: Dict. 
Please use Dict or Dict."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(AllowDeprecatedTypes, + "You tried to register a kernel with an unsupported output type: std::unordered_map. Please use Dict instead."); + static_assert(guts::typelist::contains::value, + "You tried to register a kernel with an unsupported output type: std::unordered_map where Key is invalid. We only support int64_t, double, bool, and string."); + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::unordered_map. Please use Dict or Dict."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: List. Please use List, List or Tensor instead."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::vector. Please use List, List or Tensor instead."); + // TODO static_assert(AllowDeprecatedTypes, "You tried to register a kernel with an unsupported output type: std::vector. Please use List instead."); + }; + + template + struct assert_is_valid_output_type, AllowDeprecatedTypes> + : assert_is_valid_output_type { + static_assert(!std::is_same::value, + "You tried to register a kernel with an unsupported output type: std::array. Please use std::array instead."); + }; + + // The following specialisations of assert_is_valid_output_type are technically not + // necessary since we would hit the base case and show an error message + // there if they didn't exist, but we can show a better error message + // in some common error scenarios. + template + struct assert_is_valid_output_type::value>> { + // There is no reason to support float when we have double. Keep the API lean. + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: float. Please use double instead."); + }; + template + struct assert_is_valid_output_type::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: const char*. Please use c10::string_view instead."); + }; + template + struct assert_is_valid_output_type, T>::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported output type: vector. Please use List instead."); + }; + template + struct assert_is_valid_output_type::value && !guts::typelist::contains::value>> { + static_assert(guts::false_t::value, + "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead."); + }; + + // ivalue_to_arg + + template + struct decay_if_not_tensor final { + using type = std::decay_t; + }; + + template<> + struct decay_if_not_tensor final { + using type = at::Tensor&; + }; + + template<> + struct decay_if_not_tensor final { + using type = const at::Tensor&; + }; + + template + struct ivalue_to_arg final { + static decltype(auto) call(IValue& v) { + assert_is_valid_input_type(); + return std::move(v).to(); + } + }; + + // The following two specializations take advantage of specialized + // `toTensor()` overloads on IValue to avoid copying. 
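The ref-qualified overload technique those specializations depend on can be shown in plain C++ (`Holder` and `payload` are invented for illustration; this is not the `IValue` API itself): the same member function name hands out an lvalue reference, a const reference, or a moved-out value depending on the value category of the holder.

```cpp
#include <iostream>
#include <string>
#include <utility>

struct Holder {
  std::string payload;

  std::string& get() & { return payload; }             // lvalue holder -> mutable reference
  const std::string& get() const& { return payload; }  // const holder  -> const reference
  std::string get() && { return std::move(payload); }  // rvalue holder -> move the value out
};

int main() {
  Holder h{"hello"};
  std::string& ref = h.get();                 // no copy; aliases h.payload
  std::string taken = Holder{"world"}.get();  // no copy either; payload is moved
  std::cout << ref << " " << taken << "\n";
}
```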
+ template + struct ivalue_to_arg final { + // We cannot use the default implementation if they asked for a + // `at::Tensor&` because it moves from the IValue, so it can't get + // an lvalue reference. + static at::Tensor& call(IValue& v) { + // Tensor& is valid, don't bother asserting + return v.toTensor(); + } + }; + + template + struct ivalue_to_arg final { + // We should not use the default implementation if they asked for + // a `const at::Tensor&` because it moves from the IValue and they + // didn't ask for that. + static const at::Tensor& call(IValue& v) { + // const Tensor& is valid, don't bother asserting + return v.toTensor(); + } + }; + + template + struct ivalue_to_arg final { + static List call(IValue& v) { + return v.toTensorList(); + } + }; + + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + // If an argument is ArrayRef, convert the IValue to a std::vector and pass that + // to the operator. std::vector is implicitly convertible to ArrayRef. + static std::vector call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + template + struct ivalue_to_arg final { + static std::vector call(IValue& v) { + if (v.isIntList()) { + std::vector r; + auto src = v.toIntList(); + std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); }); + return r; + } else { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + } + }; + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + static OptionalArray call(IValue& v) { + if (v.isIntList()) { + std::vector r; + auto src = v.toIntList(); + std::transform(src.begin(), src.end(), std::back_inserter(r), [](int64_t i) { return c10::SymInt(i); }); + return OptionalArray(r); + } else { + return std::move(v).to>(); + } + } + }; + template + struct ivalue_to_arg>, AllowDeprecatedTypes> final { + // If an argument is optional>, convert the IValue to an optional> and pass that + // to the operator. OptionalArray is basically a optional> but implicitly convertible + // to optional>. + static OptionalArray call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + + template + struct ivalue_to_arg, AllowDeprecatedTypes> final { + // If an argument is OptionalArrayRef, convert the IValue to an + // optional> and pass that to the operator. OptionalArray + // is basically a optional> but implicitly convertible to + // OptionalArrayRef + static OptionalArray call(IValue& v) { + return ivalue_to_arg, AllowDeprecatedTypes>::call(v); + } + }; + + // return_to_ivalue + template + struct return_to_ivalue final {}; + + template + struct return_to_ivalue::value>> final { + static IValue call(T&& v) { + assert_is_valid_output_type(); + return c10::ivalue::from(std::move(v)); + } + static IValue copy(const T& v) { + assert_is_valid_output_type(); + return IValue(v); + } + }; + + // Special case to allow kernels to return `Tensor&`. + // TODO Delete this once kernels don't do that anymore + template + struct return_to_ivalue final { + static IValue call(at::Tensor& v) { + return c10::ivalue::from(v); + } + static IValue copy(at::Tensor& v) { + return IValue(v); + } + }; + + // wrap_kernel_functor_unboxed_ + + template + struct wrap_kernel_functor_unboxed_ final {}; + + // This specialization is for kernels with a first argument that is NOT of type DispatchKeySet + // This includes kernels with 0 arguments. 
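A standalone model of the adapter below (`KeySet`, `DropKeySet`, and `AddKernel` are made-up names, not dispatcher types): every wrapper exposes the dispatcher's uniform calling convention with a key-set parameter, and for kernels that don't declare one the adapter simply discards it before calling through.

```cpp
#include <iostream>

struct KeySet { unsigned bits = 0; };  // stand-in for DispatchKeySet

template <class Functor>
struct DropKeySet {
  // Uniform calling convention: (functor, key set, kernel arguments...)
  template <class... Args>
  static auto call(Functor& f, KeySet /*ks*/, Args... args) {
    return f(args...);  // the key set is intentionally not forwarded
  }
};

struct AddKernel {
  int operator()(int a, int b) const { return a + b; }
};

int main() {
  AddKernel k;
  std::cout << DropKeySet<AddKernel>::call(k, KeySet{}, 2, 3) << "\n";  // prints 5
}
```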
+ template + struct wrap_kernel_functor_unboxed_ final { + static_assert(std::is_same::return_type>::value, + "Return type mismatch"); + static_assert(std::is_same, typename guts::infer_function_traits_t::parameter_types>::value, + "Parameter types mismatch"); + + // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use && + static ReturnType call(OperatorKernel* functor, DispatchKeySet, ParameterTypes... args) { + KernelFunctor* functor_ = static_cast(functor); + // Note [Plumbing Keys Through The Dispatcher 2] + // See Note [Plumbing Keys Through The Dispatcher] for the background. + // This functor explicitly takes in a dispatchKeySet and drops it on the floor- it does not forward it to the registered kernel. + // + // This is due to the calling convention within the dispatcher, which expects all registered kernels to have a first argument of type + // DispatchKeySet. + // This is not the case for pretty much all manually written kernels, however- this functor serves to separate the calling convention + // of the dispatcher from the calling convention of manually written kernels. + return (*functor_)(std::forward(args)...); + } + }; + + // This specialization is for kernels with a first argument of type DispatchKeySet + template + struct wrap_kernel_functor_unboxed_ final { + static_assert(std::is_same::return_type>::value, + "Return type mismatch"); + static_assert(std::is_same, typename guts::infer_function_traits_t::parameter_types>::value, + "Parameter types mismatch"); + + // See [Note: Argument forwarding in the dispatcher] for why ParameterTypes doesn't use && + static ReturnType call(OperatorKernel* functor, DispatchKeySet dispatchKeySet, ParameterTypes... args) { + KernelFunctor* functor_ = static_cast(functor); + // We're explicitly taking in a dispatchKeySet and forwarding it to the registered kernel. + // See Note [Plumbing Keys Through The Dispatcher 2] for details. + return (*functor_)(dispatchKeySet, std::forward(args)...); + } + }; + + template + using wrap_kernel_functor_unboxed = wrap_kernel_functor_unboxed_::func_type>; + + // call_functor_with_args_from_stack + + template + std::decay_t::return_type> + call_functor_with_args_from_stack_(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack, std::index_sequence, guts::typelist::typelist*) { + (void)(stack); // when sizeof...(ivalue_arg_indices) == 0, this argument would be unused and we have to silence the compiler warning. + + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. + // See Note [Plumbing Keys Through The Dispatcher] for the background. + return wrap_kernel_functor_unboxed::call(functor, dispatchKeySet, + ivalue_to_arg::type, AllowDeprecatedTypes>::call( + torch::jit::peek(*stack, ivalue_arg_indices, sizeof...(ivalue_arg_indices)) + )...); + } + + template + std::decay_t::return_type> + call_functor_with_args_from_stack(OperatorKernel* functor, DispatchKeySet dispatchKeySet, Stack* stack) { + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. 
+ // See Note [Plumbing Keys Through The Dispatcher] for the background. + using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func::parameter_types; + constexpr size_t num_ivalue_args = guts::typelist::size::value; + return call_functor_with_args_from_stack_(functor, dispatchKeySet, stack, std::make_index_sequence(), static_cast(nullptr)); + } + + // push_outputs + + template + struct push_outputs final { + // Contrary to [Note: Argument forwarding in the dispatcher], we use OutputType&& here + // to avoid one extra call to the move constructor in this case. This is still not a + // universal reference though because OutputType is an explicitly specified class + // template parameter. + static void call(OutputType&& output, Stack* stack) { + torch::jit::push(*stack, return_to_ivalue::call(std::forward(output))); + } + static void copy(const OutputType& output, Stack* stack) { + torch::jit::push(*stack, return_to_ivalue::copy(output)); + } + }; + template + struct push_outputs, AllowDeprecatedTypes> final { + static void call(std::tuple&& output, Stack* stack) { + call_(std::move(output), stack, std::make_index_sequence()); + } + static void copy(const std::tuple& output, Stack* stack) { + copy_(output, stack, std::make_index_sequence()); + } + + private: + template + static void call_(std::tuple&& output, Stack* stack, std::index_sequence) { + torch::jit::push(*stack, return_to_ivalue::call(std::forward(std::get(output)))...); + } + template + static void copy_(const std::tuple& output, Stack* stack, std::index_sequence) { + torch::jit::push(*stack, return_to_ivalue::copy(std::get(output))...); + } + }; + template + struct push_outputs final { + static void call(int /*dummy*/, Stack* /*stack*/) { + } + static void copy(int /*dummy*/, Stack* /*stack*/) { + } + }; + + // make_boxed_from_unboxed_functor + + template + struct make_boxed_from_unboxed_functor final { + static_assert(std::is_base_of::value, + "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + + static void call(OperatorKernel* functor, const OperatorHandle&, DispatchKeySet dispatchKeySet, Stack* stack) { + using ReturnType = typename guts::infer_function_traits_t::return_type; + // We're explicitly filtering out DispatchKeySet from the argument list. + // Some kernels take a DispatchKeySet as their first argument in order to plumb keys through the dispatcher. + // We don't want to expose the DispatchKeySet type to jit, so we don't include this argument on the stack. + // See Note [Plumbing Keys Through The Dispatcher] for the background. + using ArgTypes = typename c10::remove_DispatchKeySet_arg_from_func::parameter_types; + constexpr bool has_outputs = !std::is_same::value; + constexpr size_t num_inputs = guts::typelist::size::value; +#ifdef __cpp_if_constexpr + if constexpr (has_outputs) { +#else + guts::if_constexpr([&] (auto delay_check) { +#endif + // Decay ReturnType to ReturnType_ so that if a reference gets returned, we actually store it by value + // and don't get a dangling reference. This is only required because some kernels still return `Tensor&`. 
+#ifdef __cpp_if_constexpr + using ReturnType_ = std::decay_t; + ReturnType_ output = call_functor_with_args_from_stack(functor, dispatchKeySet, stack); +#else + using ReturnType_ = std::decay_t>; + ReturnType_ output = call_functor_with_args_from_stack(functor, dispatchKeySet, delay_check(stack)); +#endif + torch::jit::drop(*stack, num_inputs); + push_outputs::call(std::move(output), stack); +#ifdef __cpp_if_constexpr + } else { +#else + }, /* else */ [&] { +#endif + call_functor_with_args_from_stack(functor, dispatchKeySet, stack); + torch::jit::drop(*stack, num_inputs); +#ifdef __cpp_if_constexpr + } +#else + }); +#endif + } + }; +} // namespace impl + +} // namespace c10 + +namespace torch { + using OperatorKernel = c10::OperatorKernel; +} diff --git a/voice_bridge/torch/include/ATen/core/boxing/impl/test_helpers.h b/voice_bridge/torch/include/ATen/core/boxing/impl/test_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..93b11dc853f00f2ac06ebfd361b6ee02986cfd1f --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/boxing/impl/test_helpers.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +template +inline std::vector makeStack(Inputs&&... inputs) { + return {std::forward(inputs)...}; +} + +inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) { + auto* allocator = c10::GetCPUAllocator(); + int64_t nelements = 1; + auto dtype = caffe2::TypeMeta::Make(); + int64_t size_bytes = nelements * dtype.itemsize(); + auto storage_impl = c10::make_intrusive( + c10::StorageImpl::use_byte_size_t(), + size_bytes, + allocator->allocate(size_bytes), + allocator, + /*resizable=*/true); + at::Tensor t = at::detail::make_tensor(storage_impl, ks, dtype); + // TODO: We add this to simulate the ideal case where we only have Autograd backend keys + // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl + // constructor by default. + if (!requires_grad) { + t.unsafeGetTensorImpl()->remove_autograd_key(); + } + return t; +} + +inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) { + return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad); +} + +template +inline std::vector callOp(const c10::OperatorHandle& op, Args... args) { + auto stack = makeStack(std::forward(args)...); + op.callBoxed(&stack); + return stack; +} + +template +inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) { + return op.typed().call(std::forward(args)...); +} + +template +inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) { + return op.typed().callWithDispatchKey(dispatchKey, std::forward(args)...); +} + +template +inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... 
args) { + return op.typed().redispatch(ks, std::forward(args)...); +} + +inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_ANY_THROW( + callOp(*op, dummyTensor(dispatch_key), 5); + ); +} + +inline void expectDoesntFindOperator(const char* op_name) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_FALSE(op.has_value()); +} + +template +inline void expectThrows(Functor&& functor, const char* expectMessageContains) { + try { + std::forward(functor)(); + } catch (const Exception& e) { + EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains)); + return; + } + ADD_FAILURE() << "Expected to throw exception containing \"" + << expectMessageContains << "\" but didn't throw"; +} + +template +void expectListEquals(c10::ArrayRef expected, std::array actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::ArrayRef actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::List actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual.get(i)); + } +} + +template +void expectListEquals(c10::ArrayRef expected, std::vector actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +// NB: This is not really sound, but all of the type sets constructed here +// are singletons so it's fine +static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) { + return legacyExtractDispatchKey(t.key_set()); +} diff --git a/voice_bridge/torch/include/ATen/core/builtin_function.h b/voice_bridge/torch/include/ATen/core/builtin_function.h new file mode 100644 index 0000000000000000000000000000000000000000..6f1e9e75ea3e2952b8bf4bd0637a664fea4b0694 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/builtin_function.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct BuiltinOpFunction : public Function { + BuiltinOpFunction( + c10::QualifiedName qualname, + c10::FunctionSchema schema, + std::function callable, + std::string doc_string = "") + : name_(std::move(qualname)), + callable_(std::move(callable)), + schema_(std::move(schema)), + doc_string_(std::move(doc_string)) { + TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1); + } + + c10::string_view doc_string() const override { + return doc_string_; + } + + void run(Stack& stack) override { + callable_(stack); + } + + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher /* not used */) override { + run(stack); + auto res = c10::make_intrusive(stack.front().type()); + res->markCompleted(std::move(stack.front())); + return res; + } + + const c10::QualifiedName& qualname() const override { + return name_; + } + + // if this isn't yet defined, run its method_creator function + void ensure_defined() override { + // nop + } + + const c10::FunctionSchema& getSchema() const override { + return schema_; + } + + size_t num_inputs() const override { + return schema_.arguments().size(); + } + + Function& 
setSchema(c10::FunctionSchema schema) override { + schema_ = std::move(schema); + return *this; + } + + bool call(Stack& stack, c10::optional, c10::function_ref) override { + run(stack); + return false; + } + + bool call(Stack& stack, c10::function_ref) override { + run(stack); + return false; + } + + ~BuiltinOpFunction() override {} + + private: + c10::QualifiedName name_; + + std::function callable_; + + c10::FunctionSchema schema_; + + std::string doc_string_; +}; + +} // namespace jit +} // namespace torch diff --git a/voice_bridge/torch/include/ATen/core/class_type.h b/voice_bridge/torch/include/ATen/core/class_type.h new file mode 100644 index 0000000000000000000000000000000000000000..67507c89bf1b473f85609994d644a9f9c1de517f --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/class_type.h @@ -0,0 +1,442 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch { +namespace jit { +struct CompilationUnit; +struct Function; +} // namespace jit +} // namespace torch + +namespace c10 { + +struct FunctionSchema; + +// This enumerator represents the 'kind' of an attribute - a buffer, a parameter, or neither. +// This state is mutually exclusive. Buffers and Parameters can only appear on modules. +enum class AttributeKind { + BUFFER, + PARAMETER, + REGULAR_ATTRIBUTE +}; + +// This structure represents all notional booking entities in a class attribute: name, kind (see: AttributeKind), and type (see: TypePtr). +// Note: This structure does not represent the value of the attribute. +struct TORCH_API ClassAttribute { + public: + ClassAttribute(AttributeKind kind, + TypePtr attributeType, + std::string attributeName) : + kind_(kind), + attributeType_(std::move(attributeType)), + attributeName_(std::move(attributeName)) {} + + AttributeKind getKind() const { + return kind_; + } + + const TypePtr& getType() const { + return attributeType_; + } + + const std::string& getName() const { + return attributeName_; + } + + private: + AttributeKind kind_; + TypePtr attributeType_; + std::string attributeName_; +}; + +/** + * User Defined Types + */ + +struct ClassType; +using ClassTypePtr = std::shared_ptr; +using ::torch::jit::CompilationUnit; + +// This represents a class in TorchScript. +struct TORCH_API ClassType : public NamedType { + // This represents an attribute of a class; a name associated with an attribute, and a + // getter and (optional) setter for that attribute. + struct Property { + std::string name; + torch::jit::Function* getter; + torch::jit::Function* setter; + }; + + // Create a class type with name `name` and its methods stored in `cu`. 
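Before the `create` factory that follows, here is a minimal sketch of the slot discipline this class uses (`Attr` and `find_slot` are illustrative stand-ins mirroring `findAttributeSlot` further down): an attribute's slot is its stable index in the attribute vector, so later accesses are constant-time lookups.

```cpp
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

enum class Kind { Buffer, Parameter, Regular };

struct Attr {
  Kind kind;
  std::string name;
};

// Resolve a name to its slot once; afterwards the slot indexes the
// vector directly, which is what emitted instructions rely on.
std::optional<std::size_t> find_slot(const std::vector<Attr>& attrs,
                                     const std::string& name) {
  for (std::size_t slot = 0; slot < attrs.size(); ++slot) {
    if (attrs[slot].name == name) return slot;
  }
  return std::nullopt;
}

int main() {
  std::vector<Attr> attrs{{Kind::Parameter, "weight"}, {Kind::Buffer, "running_mean"}};
  if (auto slot = find_slot(attrs, "running_mean")) {
    std::cout << "running_mean lives at slot " << *slot << "\n";  // slot 1
  }
}
```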
+  static ClassTypePtr create(
+      c10::optional<QualifiedName> qualifiedName,
+      std::weak_ptr<CompilationUnit> cu,
+      bool is_module = false,
+      std::string doc_string = "",
+      std::vector<std::string> unresolved_class_attributes = {});
+
+  bool equals(const Type& rhs) const override {
+    if (this == &rhs) {
+      return true;
+    }
+    if (auto user_rhs = rhs.castRaw<ClassType>()) {
+      const auto& lhs_name = name().value();
+      const auto& rhs_name = user_rhs->name().value();
+
+      return lhs_name == rhs_name &&
+          this->compilation_unit() == user_rhs->compilation_unit();
+    }
+    return false;
+  }
+
+  std::string str() const override {
+    return annotation_str();
+  }
+
+  std::string repr_str() const override {
+    std::stringstream ss;
+    ss << str()
+       << " (of Python compilation unit at: " << compilation_unit().get() << ")";
+    return ss.str();
+  }
+
+  const std::vector<torch::jit::Function*>& methods() const;
+
+  TypePtr findAttribute(const std::string& name) const {
+    size_t pos = 0;
+    for (const auto& attr : attributes_) {
+      if (name == attr.getName()) {
+        break;
+      }
+      ++pos;
+    }
+
+    if (pos >= attributes_.size()) {
+      return nullptr;
+    }
+    return attributes_[pos].getType();
+  }
+
+  const TypePtr& getAttribute(const std::string& name) const {
+    auto slot = findAttributeSlot(name);
+    TORCH_CHECK(
+        slot,
+        repr_str(),
+        " does not have an attribute with name '",
+        name,
+        "'");
+    return attributes_[*slot].getType();
+  }
+
+  size_t numAttributes() const {
+    return attributes_.size();
+  }
+
+  const TypePtr& getAttribute(size_t slot) const {
+    AT_ASSERT(slot < attributes_.size());
+    return attributes_.at(slot).getType();
+  }
+
+  const std::string getAttributeName(size_t slot) const {
+    AT_ASSERT(slot < attributes_.size());
+    return attributes_[slot].getName();
+  }
+
+  void checkNotExist(const std::string& name, const std::string& what) const;
+
+  // Attributes are stored in a specific slot at runtime for efficiency.
+  // When emitting instructions we specify the slot so that attribute access is
+  // a constant lookup.
+  c10::optional<size_t> findAttributeSlot(const std::string& name) const {
+    size_t slot = 0;
+    for (const auto& attr : attributes_) {
+      if (name == attr.getName()) {
+        return slot;
+      }
+      slot++;
+    }
+    return c10::nullopt;
+  }
+  size_t getAttributeSlot(const std::string& name) const {
+    if (auto r = findAttributeSlot(name)) {
+      return *r;
+    }
+    TORCH_CHECK(
+        false,
+        repr_str(),
+        " does not have an attribute with name '",
+        name,
+        "'");
+  }
+
+  bool hasAttribute(const std::string& name) const {
+    return std::find_if(
+               attributes_.cbegin(),
+               attributes_.cend(),
+               [&](const ClassAttribute& attr) { return attr.getName() == name; }) !=
+        attributes_.cend();
+  }
+
+  bool isUnresolvedClassAttribute(const std::string& name) const;
+
+  at::ArrayRef<TypePtr> containedTypes() const override {
+    return attributeTypes_;
+  }
+
+  size_t addAttribute(
+      const std::string& name,
+      TypePtr type,
+      bool is_parameter = false,
+      bool is_buffer = false);
+
+  // [Internal Only] Remove attribute from the ClassType;
+  // the caller is responsible for making sure the modification is safe:
+  // it is unsafe to have existing allocations
+  // of this object around anymore, and any code that works on
+  // the attribute is now invalid. Only newly created code is
+  // valid again.
+  void unsafeRemoveAttribute(const std::string& name);
+
+  // [Internal Only] Change the type of an attribute of the ClassType;
+  // the caller is responsible for making sure the modification is safe:
+  // it is unsafe to maintain uses of the old type of the attribute,
+  // and any code that works on the attribute is now invalid.
+ // Only newly created code is valid again. + void unsafeChangeAttributeType(const std::string& name, TypePtr new_ty); + + // Add attribute \p NAME if it doesn't exist or verify that it has a + // compatible type otherwise. + size_t addOrCheckAttribute( + const std::string& name, + TypePtr ty, + bool is_parameter = false, + bool is_buffer = false) { + auto slot_idx = findAttributeSlot(name); + if (!slot_idx) { + return addAttribute(name, std::move(ty), is_parameter, is_buffer); + } + + TORCH_CHECK( + is_parameter == this->is_parameter(*slot_idx), + "Parameter field mismatch for the field '", + name, + "'"); + const TypePtr& atype = getAttribute(*slot_idx); + TORCH_CHECK( + ty->isSubtypeOf(*atype), + ty->repr_str(), + " is not compatible with the type ", + atype->repr_str(), + " for the field '", + name, + "'"); + return *slot_idx; + } + + // Get the property with the given \p name, if it exists on the class. + c10::optional getProperty(const std::string& name); + // Add a property named \p name with \p getter and \p setter as its getter and setter. + void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter); + // Get a list of all properties. + const std::vector& properties() const { + return properties_; + } + + bool hasConstant(const std::string& name) const { + return std::find_if( + constantNames_.cbegin(), + constantNames_.cend(), + [&](const std::string& constant) { return constant == name; }) != + constantNames_.cend(); + } + + size_t addConstant(const std::string& name, const IValue& value); + + c10::optional findConstantSlot(const std::string& name) const; + + size_t getConstantSlot(const std::string& name) const { + if (auto r = findConstantSlot(name)) { + return *r; + } + TORCH_CHECK( + false, + repr_str(), + " does not have constant field with the name '", + name, + "'"); + } + + const std::string& getConstantName(size_t slot) const; + + const std::string& doc_string() const { + return doc_string_; + } + + IValue getConstant(const std::string& name) const; + + IValue getConstant(size_t slot) const; + + c10::optional findConstant(const std::string& name) const; + + size_t numConstants() const; + + at::ArrayRef constantNames() const { + return constantNames_; + } + + at::ArrayRef constantValues() const; + + // [Internal Only] Remove constant from the ClassType + // caller is responsible to make sure the modification is safe: + // it is unsafe to having existing allocations + // of this object around anymore, and any code that works on + // the attribute is now invalid. Only newly created code is + // valid again. 
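+
+  // Illustration (editorial sketch, not part of the upstream header): attribute
+  // reads resolve a name to a slot once, then index by slot. Assuming `cls` is
+  // a ClassTypePtr with an attribute named "weight":
+  //
+  //   size_t slot = cls->getAttributeSlot("weight"); // name -> slot (linear scan)
+  //   const TypePtr& ty = cls->getAttribute(slot);   // slot -> type (O(1) index)
+  //   bool ok = cls->getAttributeName(slot) == "weight";
+  //
+  // Emitted instructions record the slot, so repeated accesses skip the scan.
+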
+  void unsafeRemoveConstant(const std::string& name);
+
+  TypePtr createWithContained(std::vector contained_types) const override {
+    auto ptr = ClassType::create(name(), compilation_unit_, is_module());
+    AT_ASSERT(numAttributes() == contained_types.size());
+    for(size_t i = 0; i < attributes_.size(); ++i) {
+      AT_ASSERT(attributes_[i].getType()->isSubtypeOf(*contained_types[i]));
+      ptr->addAttribute(attributes_[i].getName(), std::move(contained_types[i]));
+    }
+    // Copy methods over
+    for (const auto& method : methods()) {
+      ptr->addMethod(method);
+    }
+    return ptr;
+  }
+
+  bool is_module() const override {
+    return isModule_;
+  }
+
+  const std::vector& getAttributes() const {
+    return attributes_;
+  }
+
+  bool is_parameter(size_t slot) const {
+    TORCH_INTERNAL_ASSERT(
+        is_module(), "asking for parameterSlots of non-Module");
+    return attributes_.at(slot).getKind() == AttributeKind::PARAMETER;
+  }
+
+  bool is_buffer(size_t slot) const {
+    TORCH_INTERNAL_ASSERT(
+        is_module(), "asking for bufferWrittenSlots of non-Module");
+    return attributes_.at(slot).getKind() == AttributeKind::BUFFER;
+  }
+
+  void addForwardPreHook(torch::jit::Function* pre_hook_ptr);
+  void addForwardHook(torch::jit::Function* hook_ptr);
+  torch::jit::Function* findForwardPreHook(const std::string& name) const;
+  torch::jit::Function* findForwardHook(const std::string& name) const;
+  const std::vector& getForwardHooks() const;
+  const std::vector& getForwardPreHooks() const;
+
+  void checkForwardPreHookSchema(
+      int pre_hook_idx,
+      const FunctionSchema& pre_hook_schema) const;
+  void checkForwardHookSchema(
+      int hook_idx,
+      const FunctionSchema& hook_schema) const;
+
+  void addMethod(torch::jit::Function* method);
+  torch::jit::Function* findMethod(const std::string& name) const;
+  torch::jit::Function& getMethod(const std::string& name) const;
+  torch::jit::Function* findHook(const std::string& name) const;
+  torch::jit::Function& getHook(const std::string& name) const;
+  bool hasMethod(const std::string& name) const;
+
+  torch::jit::Function* findStaticMethod(const std::string& name) const;
+  void addStaticMethod(torch::jit::Function* method);
+
+  // [Internal Only] Remove method from the ClassType
+  // caller is responsible to make sure the modification is safe:
+  // it is unsafe to have existing allocations
+  // of this object around anymore, and any code that works on
+  // the attribute is now invalid. Only newly created code is
+  // valid again.
+  // Note this method is intended for freezing only.
+  void unsafeRemoveMethod(const std::string& name);
+
+  std::shared_ptr compilation_unit();
+
+  std::shared_ptr compilation_unit() const;
+
+  // Generate a refined version of this class.
+  // It has the same name but the slot Types are subtypes of
+  // the original slots. It is only valid to refine a class type in a context
+  // where it is known that there are no assignments to the object's slots
+  // that would invalidate the refinement.
+  // These variants are not registered in the global class table.
+  ClassTypePtr refine(at::ArrayRef refined_slots) const;
+
+  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
+
+  static const TypeKind Kind = TypeKind::ClassType;
+
+ private:
+  ClassType(
+      c10::optional name,
+      std::weak_ptr cu,
+      bool is_module = false,
+      std::string doc_string = "",
+      std::vector unresolved_class_attributes = {});
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    (void)printer; // Suppress unused variable warning
+    const auto& n = name().value();
+    return n.qualifiedName();
+  }
+
+  void addAttribute(ClassAttribute classAttribute);
+  std::string getForwardPreHookErrorMessage(int pre_hook_idx) const;
+  std::string getForwardHookErrorMessage(int hook_idx) const;
+
+  // Mapping of attribute names -> their type.
+  // NOTE: this does not contain methods, which are stored in the module
+  // TODO: once modules support arbitrary ivalue attributes, we don't need this
+  // anymore.
+  // TODO: This is better represented as an OrderedDict, but alas it is not yet
+  // available from c10
+
+  // Mapping of constant names -> their value.
+  std::vector constantNames_;
+  std::vector constantValues_;
+  // Holds method attributes
+  std::weak_ptr compilation_unit_;
+
+  // Holds all attributes, attribute details are found on ClassAttribute
+  std::vector attributes_;
+  // Constructed to mirror attributes_, only around due to the fact that
+  // `containedTypes()` method returns an ArrayRef.
+  // Never fill this without using the appropriate provideNewClassAttribute method
+  std::vector attributeTypes_;
+
+  // List of methods associated with this class.
+  std::vector methods_;
+  std::vector staticmethods_;
+
+  // List of hooks to be run before/after forward.
+  std::vector forward_hooks_;
+  std::vector forward_pre_hooks_;
+
+  // List of properties exposed by this class.
+  std::vector properties_;
+
+  bool isModule_ = false;
+
+  // Doc string of class.
+  std::string doc_string_ = "";
+
+  // For error reporting accesses to class level attributes.
+  std::vector unresolved_class_attributes_;
+};
+
+}
diff --git a/voice_bridge/torch/include/ATen/core/custom_class.h b/voice_bridge/torch/include/ATen/core/custom_class.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff9bda981b2906e55449e93a582266888c2eb258
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/custom_class.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include
+#include
+
+#include
+#include
+#include
+
+namespace c10 {
+
+struct ClassType;
+using ClassTypePtr = std::shared_ptr;
+
+TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex);
+
+template
+const c10::ClassTypePtr& getCustomClassType() {
+  // Classes are never unregistered from getCustomClassTypeMap and the
+  // hash lookup can be a hot path, so just cache.
+  // For the same reason, it's fine if this ends up getting duplicated across
+  // DSO boundaries for whatever reason.
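+  // Illustration (editorial sketch, not upstream code): the function-local
+  // static below is an initialize-once cache. The first call per template
+  // instantiation pays for the type_index hash lookup; later calls are plain
+  // reads. Assuming a registered custom class `Foo` (hypothetical name):
+  //
+  //   const c10::ClassTypePtr& a = getCustomClassType<Foo>(); // slow path, once
+  //   const c10::ClassTypePtr& b = getCustomClassType<Foo>(); // cached read
+  //   TORCH_INTERNAL_ASSERT(a == b);
+  //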
+ static c10::ClassTypePtr cache = getCustomClassTypeImpl( + std::type_index(typeid(T))); + return cache; +} + +} diff --git a/voice_bridge/torch/include/ATen/core/dispatch/CppSignature.h b/voice_bridge/torch/include/ATen/core/dispatch/CppSignature.h new file mode 100644 index 0000000000000000000000000000000000000000..0a152a60d923d1753ba6a2f373afad34d28aba02 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dispatch/CppSignature.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { +namespace impl { + +// A CppSignature object holds RTTI information about a C++ function signature at runtime +// and can compare them or get a debug-printable name. +class TORCH_API CppSignature final { +public: + CppSignature(const CppSignature&) = default; + CppSignature(CppSignature&&) noexcept = default; + CppSignature& operator=(const CppSignature&) = default; + CppSignature& operator=(CppSignature&&) noexcept = default; + + template + static CppSignature make() { + // Normalize functors, lambdas, function pointers, etc. into the plain function type + // The first argument of the schema might be of type DispatchKeySet, in which case we remove it. + // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered + // with different calling conventions. + // See Note [Plumbing Keys Through The Dispatcher] + using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func>::func_type; + + return CppSignature(std::type_index(typeid(decayed_function_type))); + } + + std::string name() const { + return c10::demangle(signature_.name()); + } + + friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) { + if (lhs.signature_ == rhs.signature_) { + return true; + } + // Without RTLD_GLOBAL, the type_index comparison could yield false because + // they point to different instances of the RTTI data, but the types would + // still be the same. Let's check for that case too. + // Note that there still is a case where this might not work, i.e. when + // linking libraries of different compilers together, they might have + // different ways to serialize a type name. That, together with a missing + // RTLD_GLOBAL, would still fail this. + if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) { + return true; + } + + return false; + } + +private: + explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {} + std::type_index signature_; +}; + +inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) { + return !(lhs == rhs ); +} + +} +} diff --git a/voice_bridge/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h b/voice_bridge/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h new file mode 100644 index 0000000000000000000000000000000000000000..27c6e26721a2e7d10b42cf68c1f35434f54d4ab5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h @@ -0,0 +1,242 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +namespace impl { + +// Take a DispatchKeySet for a Tensor and determine what the actual dispatch +// DispatchKey should be, taking into account TLS, and skipping backends which +// fall through. +// +// Unlike Tensor::key_set(), the value of this on a tensor can change depending +// on TLS. 
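+//
+// Illustration (editorial sketch, not upstream text): the combining rule below is
+//
+//   result = ((ks | tls.included_) - tls.excluded_) & key_mask
+//
+// For example, if ks = {CPU, AutogradCPU}, the TLS excluded set contains
+// AutogradCPU (say, under inference mode), and key_mask keeps everything, the
+// computed set is {CPU}, so dispatch skips the autograd wrapper and lands on
+// the CPU kernel. Key names here are illustrative; exact sets vary by build.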
+//
+// NB: If there is no valid dispatch key, this will return Undefined
+static inline DispatchKeySet computeDispatchKeySet(
+    DispatchKeySet ks,
+    // The key mask lets us eliminate (by zero entries) keys which should not
+    // be considered for dispatch. There are two cases when we use this:
+    //
+    // - If an operator's dispatch table contains a fallthrough entry, we
+    //   should bypass it entirely when finding the key
+    // - If a user invokes with redispatch, the mask lets us
+    //   zero out the key the user asked us to stop.
+    //
+    // These excluded backends are NOT tracked in the TLS, but must be applied
+    // AFTER TLS (since the backend may have been introduced for consideration
+    // by the included TLS), which is why you have to pass them in to this
+    // function (as opposed to just applying it to the input 'ks').
+    DispatchKeySet key_mask
+) {
+  c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set();
+  // TODO: It's a bit irritating that we have to do logical ORs here, it would
+  // be nice to only do one. Can always_included be folded into the TLS? Well,
+  // it's a bit troublesome, because fastpath TLS access requires the type of
+  // the TLS in question to be zero-initialized, so you don't actually win
+  // anything in that case.
+  return (((ks | local.included_) - local.excluded_) & key_mask);
+}
+
+}
+
+namespace detail {
+  // A small gadget to extract the DispatchKeySet from types which are known
+  // to have it. Used to extract dispatch keys from unboxed calls.
+  struct MultiDispatchKeySet : at::IterArgs {
+    DispatchKeySet ts;
+    void operator()(const at::Tensor& x) {
+      ts = ts | x.key_set();
+    }
+    void operator()(const c10::optional& x) {
+      if (x.has_value()) {
+        ts = ts | x->key_set();
+      }
+    }
+    void operator()(at::ArrayRef xs) {
+      for (const auto& x : xs) {
+        ts = ts | x.key_set();
+      }
+    }
+    // Tensor?[] translates to this case.
+    void operator()(const c10::List>& xs) {
+      for (c10::optional x : xs) {
+        if (x.has_value()) {
+          ts = ts | x.value().key_set();
+        }
+      }
+    }
+    // Structured Tensor[] translates to this case
+    void operator()(at::ITensorListRef xs) {
+      for (const auto& x : xs) {
+        ts = ts | x.key_set();
+      }
+    }
+    void operator()(at::ArrayRef>) {
+      // Just checking that the handling of Tensor?[] didn't change.
+      TORCH_INTERNAL_ASSERT(false);
+    }
+    void operator()(const at::Generator& gen) {
+      if (gen.defined()) {
+        ts = ts | gen.key_set();
+      }
+    }
+    void operator()(const c10::optional& gen) {
+      if (gen.has_value() && gen->defined()) {
+        ts = ts | gen->key_set();
+      }
+    }
+    template
+    void operator()(const T&) {
+      // do nothing
+    }
+  };
+
+  // NB: take by const reference (Don't do universal forwarding here! You
+  // don't want to move into this function!)
+  template
+  DispatchKeySet multi_dispatch_key_set(const Args&... args) {
+    return MultiDispatchKeySet().apply(args...).ts;
+  }
+}
+
+/**
+ * An instance of DispatchKeyExtractor knows how to get a dispatch key given
+ * a list of arguments for an operator call.
+ *
+ * The instance is specific for a certain operator as:
+ *  - In boxed dispatch, different operators have different ways to extract
+ *    the dispatch key (e.g. different numbers of arguments), and we precompute
+ *    the stack locations we should look at; and
+ *  - In all dispatch, some backends should be excluded from dispatch because
+ *    they have been registered as fallthrough. The set of excluded backends
+ *    varies from operator, as some operators may have overridden the
+ *    fallthrough with custom behavior.
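+ *
+ * Illustration (editorial sketch, not upstream text): for a schema such as
+ * `add(Tensor self, Tensor other, Scalar alpha) -> Tensor`, only the two
+ * Tensor arguments participate. Unboxed extraction unions their key sets,
+ *
+ *   DispatchKeySet ks = self.key_set() | other.key_set();
+ *
+ * then filters the result against TLS and the per-operator fallthrough
+ * masks before the highest-priority key is chosen.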
+ * + * Note - this should maintain identical impl to the py dispatcher key extraction logic + * at pytorch/torch/dispatcher.py + */ +struct TORCH_API DispatchKeyExtractor final { +public: + static DispatchKeyExtractor make(const FunctionSchema& schema) { + return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema)); + } + + static DispatchKeyExtractor makeUninitialized() { + return DispatchKeyExtractor(c10::utils::bitset()); + } + + void registerSchema(const FunctionSchema& schema) { + TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset()); + dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema); + } + void deregisterSchema() { + dispatch_arg_indices_reverse_ = c10::utils::bitset(); + } + + DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const { + DispatchKeySet ks; + dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) { + const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1); + if (C10_LIKELY(ivalue.isTensor())) { + // NB: Take care not to introduce a refcount bump (there's + // no safe toTensorRef method, alas) + ks = ks | ivalue.unsafeToTensorImpl()->key_set(); + } else if (C10_UNLIKELY(ivalue.isTensorList())) { + for (const at::Tensor& tensor : ivalue.toTensorList()) { + ks = ks | tensor.key_set(); + } + } + // Tensor?[] translates to a c10::List so we need to peek inside + else if (C10_UNLIKELY(ivalue.isList())) { + for (const auto& elt : ivalue.toListRef()) { + if (elt.isTensor()) { + ks = ks | elt.toTensor().key_set(); + } + } + } + }); + // Keys that are fallthrough should be skipped + if (requiresBitsetPerBackend_) { + auto backend_idx = ks.getBackendIndex(); + return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]); + } else { + return impl::computeDispatchKeySet(ks, nonFallthroughKeys_); + } + } + + template + DispatchKeySet getDispatchKeySetUnboxed(const Args&... 
args) const { + auto ks = detail::multi_dispatch_key_set(args...); + // Keys that are fallthrough should be skipped + if (requiresBitsetPerBackend_) { + auto backend_idx = ks.getBackendIndex(); + return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]); + } else { + return impl::computeDispatchKeySet(ks, nonFallthroughKeys_); + } + } + + void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough); + + std::string dumpState() const; + void checkInvariants(const FunctionSchema& schema) const; + +private: + static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) { + TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(), + "The function schema has ", schema.arguments().size(), + " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS()); + c10::utils::bitset dispatch_arg_indices_reverse; + for (const auto index : c10::irange(schema.arguments().size())) { + if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) || + schema.arguments()[index].type()->isSubtypeOf( + *ListType::ofTensors()) || + schema.arguments()[index].type()->isSubtypeOf( + *ListType::ofOptionalTensors()) || + schema.arguments()[index].type()->isSubtypeOf( + *OptionalType::ofTensor())) { + dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index); + } + } + return dispatch_arg_indices_reverse; + } + + explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse) + : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse) + , nonFallthroughKeys_(DispatchKeySet::FULL) + , requiresBitsetPerBackend_(false) { + for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) { + nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL; + } + } + + // this is a bitset that has ones for each argument index which has to be + // considered for dispatch. This avoids having to iterate over the stack + // to find all the tensors. The bits are stored in reverse order, i.e. + // dispatch_arg_indices_reverse_[i] == true, then the i-th argument from + // the top of the stack (i.e. the i-th last argument of the function) + // is relevant for dispatch. + // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the + // fallthrough + c10::utils::bitset dispatch_arg_indices_reverse_; + + // Set of functionality keys for which the operator does NOT have fallthrough kernel. + DispatchKeySet nonFallthroughKeys_; + // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND. + // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends. 
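+  //
+  // Illustration (editorial sketch, not upstream text) of the reverse-index
+  // convention above: for a 3-argument schema f(Tensor a, int b, Tensor c),
+  // `c` sits on top of the stack. makeBitsetForDispatchArgs sets
+  //   bit (3 - 1 - 0) = 2 for `a` (Tensor)
+  //   bit (3 - 1 - 2) = 0 for `c` (Tensor)
+  // and leaves bit 1 clear for `b`, so getDispatchKeySetBoxed peeks only at
+  // offsets 0 and 2 from the top of the stack instead of scanning every IValue.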
+ std::array nonFallthroughKeysPerBackend_; + // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path), + // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_ + bool requiresBitsetPerBackend_; +}; + +} diff --git a/voice_bridge/torch/include/ATen/core/dispatch/Dispatcher.h b/voice_bridge/torch/include/ATen/core/dispatch/Dispatcher.h new file mode 100644 index 0000000000000000000000000000000000000000..1ea677b54ef5ac8c50c1bc123bd5c1a0e9550843 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dispatch/Dispatcher.h @@ -0,0 +1,677 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +TORCH_API bool show_dispatch_trace(); + +class TORCH_API OperatorHandle; +template class TypedOperatorHandle; + +/** + * Implement this interface and register your instance with the dispatcher + * to get notified when operators are registered or deregistered with + * the dispatcher. + * + * NB: registration events only occur when a 'def' occurs; we don't trigger + * on 'impl' or 'fallback' calls. + */ +class TORCH_API OpRegistrationListener { +public: + virtual ~OpRegistrationListener(); + + virtual void onOperatorRegistered(const OperatorHandle& op) = 0; + virtual void onOperatorDeregistered(const OperatorHandle& op) = 0; +}; + +namespace detail { +class RegistrationListenerList; +} +class SchemaRegistrationHandleRAII; + +/** + * Top-level dispatch interface for dispatching via the dynamic dispatcher. + * Most end users shouldn't use this directly; if you're trying to register + * ops look in op_registration + */ +class TORCH_API Dispatcher final { +private: + // For direct access to backend fallback information + friend class impl::OperatorEntry; + + struct OperatorDef final { + explicit OperatorDef(OperatorName&& op_name) + : op(std::move(op_name)) {} + + impl::OperatorEntry op; + + // These refer to the number of outstanding RegistrationHandleRAII + // for this operator. def_count reflects only def() registrations + // (in the new world, this should only ever be 1, but old style + // registrations may register the schema multiple times, which + // will increase this count). def_and_impl_count reflects the number + // of combined def() and impl() registrations. When the last def() gets + // unregistered, we must immediately call the Deregistered listeners, but we + // must not actually delete the handle as there are other outstanding RAII + // destructors which will try to destruct and they had better still have a + // working operator handle in this case + size_t def_count = 0; + size_t def_and_impl_count = 0; + }; + friend class OperatorHandle; + template friend class TypedOperatorHandle; + +public: + ~Dispatcher(); + + // Implementation note: this class abstracts over the fact that we have per-operator + // dispatch tables. This could be easily adjusted to have a single global hash + // table. + static Dispatcher& realSingleton(); + + C10_ALWAYS_INLINE static Dispatcher& singleton() { +#if !defined C10_MOBILE + // Implemented inline so that steady-state code needn't incur + // function-call overhead. We can't just inline `realSingleton` + // because the function-local static would get duplicated across + // all DSOs that include & use this header, leading to multiple + // singleton instances. 
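+    // Illustration (editorial sketch, not upstream code): the next line is the
+    // usual "cache a reference to an out-of-line singleton" idiom, e.g.
+    //
+    //   Widget& widget() {
+    //     static Widget& w = realWidgetSingleton(); // defined in exactly one TU
+    //     return w;
+    //   }
+    //
+    // Only the cached reference is duplicated per DSO; the instance is not.
+    // `Widget` and `realWidgetSingleton` are hypothetical names.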
+    static Dispatcher& s = realSingleton();
+    return s;
+#else
+    // For C10_MOBILE, we should never inline a static function that
+    // has a static member, since the generated code calls
+    // __cxa_guard_acquire and __cxa_guard_release which help
+    // implement exactly once semantics for the initialization of the
+    // static Dispatcher& s above (for the non-mobile case). That
+    // additional code when duplicated across all operator stubs
+    // for every backend results in a lot of additional code
+    // being generated by the compiler.
+    return realSingleton();
+#endif
+  }
+
+  // ------------------------------------------------------------------------
+  //
+  // Accessing operators by schema
+  //
+  // ------------------------------------------------------------------------
+
+  /**
+   * Looks for an operator schema with the given name and overload name
+   * and returns it if it is registered WITH A SCHEMA.
+   * Returns nullopt otherwise.
+   */
+  c10::optional findSchema(const OperatorName& operator_name);
+
+  /**
+   * Variant of findSchema that results in less code generated at the call site.
+   * It (1) takes const char* pointer rather than OperatorName (so we skip
+   * generating std::string constructor calls at the call site), and (2)
+   * it raises an exception if the operator is not found (so we skip
+   * generating exception raising code at the call site)
+   *
+   * Irritatingly, we still have to generate the handful of instructions
+   * for dealing with an exception being thrown during static initialization
+   * (e.g. __cxa_guard_abort). If we could annotate this method noexcept we
+   * could avoid this code too, but as the name of the function suggests,
+   * it does throw exceptions.
+   */
+  OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name);
+
+  // Like findSchema, but also returns OperatorHandle even if there is no schema
+  c10::optional findOp(const OperatorName& operator_name);
+
+  // Returns a list of all operator names present in the operatorLookupTable_
+  const std::vector getAllOpNames();
+
+  // ------------------------------------------------------------------------
+  //
+  // Invoking operators
+  //
+  // ------------------------------------------------------------------------
+
+  template
+  Return call(const TypedOperatorHandle& op, Args... args) const;
+
+
+  template
+  static Return callWithDispatchKeySlowPath(const TypedOperatorHandle& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args);
+
+  // Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculation.
+  // This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest priority key in the set.
+  // Note that this version of redispatch treats the inputted DispatchKeySet *as is*, and does NOT mask out the highest priority key.
+  // See Note [Plumbing Keys Through The Dispatcher]
+  template
+  Return redispatch(const TypedOperatorHandle& op, DispatchKeySet currentDispatchKeySet, Args...
args) const; + + // Invoke an operator via the boxed calling convention using an IValue stack + void callBoxed(const OperatorHandle& op, Stack* stack) const; + void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const; + + // TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none) + // See Note [Plumbing Keys Through The Dispatcher] + void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const; + + bool hasBackendFallbackForDispatchKey(DispatchKey dk) { + auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk); + if (dispatch_ix < 0) return false; + return backendFallbackKernels_[dispatch_ix].kernel.isValid(); + } + + + // ------------------------------------------------------------------------ + // + // Performing registrations (NON user public; use op_registration) + // + // ------------------------------------------------------------------------ + + /** + * Register a new operator schema. + * + * If a schema with the same operator name and overload name already exists, + * this function will check that both schemas are exactly identical. + */ + RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector tags = {}); + + /** + * Register a kernel to the dispatch table for an operator. + * If dispatch_key is nullopt, then this registers a fallback kernel. + * + * @return A RAII object that manages the lifetime of the registration. + * Once that object is destructed, the kernel will be deregistered. + */ + // NB: steals the inferred function schema, as we may need to hold on to + // it for a bit until the real schema turns up + RegistrationHandleRAII registerImpl(OperatorName op_name, c10::optional dispatch_key, KernelFunction kernel, c10::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug); + + /** + * Register a new operator by name. + */ + RegistrationHandleRAII registerName(OperatorName op_name); + + /** + * Register a fallback kernel for a backend. + * If an operator is called but there is no concrete kernel for the dispatch + * key of the given operator arguments, it will check if there is such a + * fallback kernel for the given dispatch key and, if yes, call that one. + */ + RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug); + + /** + * Use to register whenever we had a TORCH_LIBRARY declaration in the frontend + * API. These invocations are only permitted once per program, so we raise + * an error if this is called again for the same namespace. + */ + RegistrationHandleRAII registerLibrary(std::string ns, std::string debug); + + // ------------------------------------------------------------------------ + // + // Listeners on registrations + // + // ------------------------------------------------------------------------ + + /** + * Add a listener that gets called whenever a new op is registered or an existing + * op is deregistered. Immediately after registering, this listener gets called + * for all previously registered ops, so it can be used to keep track of ops + * registered with this dispatcher. + */ + RegistrationHandleRAII addRegistrationListener(std::unique_ptr listener); + + void checkInvariants() const; + + // + // ------------------------------------------------------------------------ + // + // Assertions + // + // ------------------------------------------------------------------------ + + /** + * For testing purposes. 
+ * Returns a list of all operators that were created through calls to registerImpl(), + * without any corresponding calls to registerDef(). After static initialization + * is done this is almost certainly a bug, as the created OperatorHandle won't have + * any schema associated with it and users calling the op through the dispatcher + * won't be able to access it + * + * Note that we cannot enforce this invariant "as we go" during static initialization, + * due to undefined static initialization order- we have no guarantees over the order + * in which .def() and .impl() calls are registered in the dispatcher at static + * initialization time. So this function should only be called after static initialization. + */ + std::vector findDanglingImpls() const; + + /** + * Useful for inspecting global Dispatcher registration state. + * Returns the names of all operators with a kernel registered for the specified DispatchKey. + * If no DispatchKey is specified, it returns all registered operators. + */ + std::vector getRegistrationsForDispatchKey(c10::optional k) const; + +private: + Dispatcher(); + + static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey); + static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey); + static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, c10::ArrayRef args); + + OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema); + OperatorHandle findOrRegisterName_(const OperatorName& op_name); + + void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name); + void deregisterImpl_( + const OperatorHandle& op, + const OperatorName& op_name, + c10::optional dispatch_key, + impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle); + void deregisterName_(const OperatorHandle& op, const OperatorName& op_name); + void deregisterFallback_(DispatchKey dispatchKey); + void deregisterLibrary_(const std::string& ns); + void cleanup(const OperatorHandle& op, const OperatorName& op_name); + void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug); + + std::list operators_; +#if !defined(C10_MOBILE) + LeftRight> operatorLookupTable_; +#else + RWSafeLeftRightWrapper> operatorLookupTable_; +#endif + // Map from namespace to debug string (saying, e.g., where the library was defined) + ska::flat_hash_map libraries_; + + std::array backendFallbackKernels_; + + std::unique_ptr listeners_; + std::mutex mutex_; +}; + +/** + * This is a handle to an operator schema registered with the dispatcher. + * This handle can be used to register kernels with the dispatcher or + * to lookup a kernel for a certain set of arguments. 
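+ *
+ * Illustrative usage (editorial sketch, not upstream text): resolve a handle
+ * once, then call it repeatedly, e.g. for `aten::add.Tensor`:
+ *
+ *   auto op = c10::Dispatcher::singleton()
+ *       .findSchemaOrThrow("aten::add", "Tensor")
+ *       .typed<at::Tensor(const at::Tensor&, const at::Tensor&, const at::Scalar&)>();
+ *   at::Tensor out = op.call(a, b, /*alpha=*/1);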
+ */ +class TORCH_API OperatorHandle { +public: + OperatorHandle(OperatorHandle&&) noexcept = default; + OperatorHandle& operator=(OperatorHandle&&) noexcept = default; + OperatorHandle(const OperatorHandle&) = default; + OperatorHandle& operator=(const OperatorHandle&) = default; + ~OperatorHandle(); + + const OperatorName& operator_name() const { + return operatorDef_->op.operator_name(); + } + + bool hasSchema() const { + return operatorDef_->op.hasSchema(); + } + + const FunctionSchema& schema() const { + return operatorDef_->op.schema(); + } + + const std::string& debug() const { + return operatorDef_->op.debug(); + } + + std::string dumpState() const { + return operatorDef_->op.dumpState(); + } + + bool hasKernelForDispatchKey(DispatchKey k) const { + return operatorDef_->op.hasKernelForDispatchKey(k); + } + + bool hasKernelForAnyDispatchKey(DispatchKeySet k) const { + return operatorDef_->op.hasKernelForAnyDispatchKey(k); + } + + bool hasComputedKernelForDispatchKey(DispatchKey k) const { + return operatorDef_->op.hasComputedKernelForDispatchKey(k); + } + + std::string dumpComputedTable() const { + return operatorDef_->op.dumpComputedTable(); + } + + void checkInvariants() const { + return operatorDef_->op.checkInvariants(); + } + + c10::ArrayRef getTags() const { + return operatorDef_->op.getTags(); + } + + bool hasTag(const at::Tag& tag) const { + for(const auto& tag_: getTags()) { + if (tag == tag_) { + return true; + } + } + return false; + } + + template + TypedOperatorHandle typed() const { + // NB: This assert is not 100% sound: you can retrieve a typed() operator + // handle prior to ANY C++ signature being registered on the operator + // and the check will say everything is OK (at which point you can then + // smuggle in a kernel that is typed incorrectly). For everything + // in core library this won't happen, because all the static registrations + // will be done by the time a typed() handle is acquired. +#if !defined C10_MOBILE + operatorDef_->op.assertSignatureIsCorrect(); +#endif + return TypedOperatorHandle(operatorIterator_); + } + + void callBoxed(Stack* stack) const { + c10::Dispatcher::singleton().callBoxed(*this, stack); + } + + void callBoxed(Stack& stack) const { + callBoxed(&stack); + } + + void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const { + c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack); + } + + void redispatchBoxed(DispatchKeySet ks, Stack* stack) const { + c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack); + } + + template + PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const { + return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor); + } + +private: + explicit OperatorHandle(std::list::iterator operatorIterator) + : operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {} + friend class Dispatcher; + template friend class TypedOperatorHandle; + + // Storing a direct pointer to the OperatorDef even though we + // already have the iterator saves an instruction in the critical + // dispatch path. The iterator is effectively a + // pointer-to-std::list-node, and (at least in libstdc++'s + // implementation) the element is at an offset 16 bytes from that, + // because the prev/next pointers come first in the list node + // struct. So, an add instruction would be necessary to convert from the + // iterator to an OperatorDef*. 
+  Dispatcher::OperatorDef* operatorDef_;
+
+  // We need to store this iterator in order to make
+  // Dispatcher::cleanup() fast -- it runs a lot on program
+  // termination (and presumably library unloading).
+  std::list::iterator operatorIterator_;
+};
+
+/**
+ * This is a handle to an operator schema registered with the dispatcher.
+ * It holds the same information as an OperatorHandle, but it is templated
+ * on the operator arguments and allows calling the operator in an
+ * unboxed way.
+ */
+template
+class TypedOperatorHandle final {
+  static_assert(guts::false_t(), "FuncType in OperatorHandle::typed was not a valid function type");
+};
+template
+class TypedOperatorHandle final : public OperatorHandle {
+public:
+  TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default;
+  TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default;
+  TypedOperatorHandle(const TypedOperatorHandle&) = default;
+  TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default;
+
+  // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
+  C10_ALWAYS_INLINE Return call(Args... args) const {
+    return c10::Dispatcher::singleton().call(*this, std::forward(args)...);
+  }
+
+  // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use &&
+  C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const {
+    return c10::Dispatcher::singleton().redispatch(*this, currentDispatchKeySet, std::forward(args)...);
+  }
+
+private:
+  explicit TypedOperatorHandle(std::list::iterator operatorIterator)
+  : OperatorHandle(operatorIterator) {}
+  friend class OperatorHandle;
+};
+
+namespace detail {
+template inline void unused_arg_(const Args&...) {}
+
+// CaptureKernelCall is intended to capture return values from Dispatcher
+// unboxed kernel calls. A record function may request to get outputs from the
+// kernel calls. For boxed kernels, it's straightforward, the returned values
+// are in the stack object. The stack can be passed to record functions. For
+// unboxed kernels, we need to handle different kinds of return values, cache
+// them temporarily, then release the values for the actual function call
+// return.
+template
+struct CaptureKernelCall {
+  template
+  CaptureKernelCall(
+      const F& kernel,
+      const TypedOperatorHandle& op,
+      const DispatchKeySet& dispatchKeySet,
+      Args&&... args)
+      // Calls the kernel and capture the result in output_.
+      : output_{kernel.template call(
+            op,
+            dispatchKeySet,
+            std::forward(args)...)} {}
+  // Wraps the return values in a Stack.
+  Stack getOutputs() {
+    Stack stack;
+    impl::push_outputs::copy(output_, &stack);
+    return stack;
+  }
+  // Since we are returning the output_, we don't expect the output_ to be used
+  // afterward. Copy elision and RVO do not apply to class data members. Using
+  // move semantics to avoid copies when possible.
+  ReturnType release() && {
+    return std::move(output_);
+  }
+
+ private:
+  ReturnType output_;
+};
+
+// Handle the lvalue reference differently since it should not be moved.
+template <>
+inline at::Tensor& CaptureKernelCall::release() && {
+  return output_;
+}
+
+// Handle case where the kernel returns void.
+template <>
+struct CaptureKernelCall {
+  template
+  CaptureKernelCall(
+      const F& kernel,
+      const TypedOperatorHandle& op,
+      const DispatchKeySet& dispatchKeySet,
+      Args&&... args) {
+    // Calling the kernel and no need to capture void.
+ kernel.template call( + op, dispatchKeySet, std::forward(args)...); + } + Stack getOutputs() { + return Stack(); + } + void release() && {} +}; + +} // namespace detail + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) { + // If callbacks need inputs, we box the arguments and pass them to the guard. + // Note: For perf reasons we wouldn't want to prematurely box the arguments. + at::RecordFunction guard(std::move(stepCallbacks)); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved()); + auto dispatchKey = dispatchKeySet.highestPriorityTypeId(); + auto& schema = op.schema(); + auto schema_ref = std::reference_wrapper(schema); + if (guard.needsInputs()) { + constexpr auto num_boxed_args = impl::boxed_size(); + // If we used std::array here, we would + // have to spend time default constructing the IValues in + // boxedArgs. aligned_storage has no such requirement. + // Max to avoid zero-size array.` + std::aligned_storage_t boxedArgs[std::max(num_boxed_args, static_cast(1))]; + // For debugging only; could be removed (but the compiler will do + // that for us and it's nice to have the extra assurance of + // correctness from our debug builds). + int lastArgIdx = 0; + impl::boxArgsToStack(boxedArgs, lastArgIdx, args...); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args); + // I don't *think* we need std::launder here, because IValue has + // no subclasses and no const or reference fields. (We also + // couldn't use it even if we wanted to because we are currently + // stuck on C++14 rather than C++17, but we could do a backport + // similar to folly::launder if needed.) + runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef(reinterpret_cast(boxedArgs), num_boxed_args)); + for (size_t ii = 0; ii < num_boxed_args; ++ii) { + reinterpret_cast(&boxedArgs[ii])->~IValue(); + } + } else { + runRecordFunction(guard, schema_ref, dispatchKey); + } + + if (C10_UNLIKELY(guard.needsOutputs())) { + // Calls the kernel and capture the output temporarily to pass to + // RecordFunction. + detail::CaptureKernelCall captureKernelCall( + kernel, op, dispatchKeySet, std::forward(args)...); + guard.setOutputs(captureKernelCall.getOutputs()); + // Releases the captured output to return to caller. + return std::move(captureKernelCall).release(); + } + + // keeping the guard alive while executing the kernel + return kernel.template call(op, dispatchKeySet, std::forward(args)...); +} + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle& op, Args... 
args) const { + detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5 + auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor() + .template getDispatchKeySetUnboxed(args...); +#ifndef NDEBUG + if (show_dispatch_trace()) { + std::cerr << "[call] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet); +#ifndef PYTORCH_DISABLE_PER_OP_PROFILING + auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION); + if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) { + return callWithDispatchKeySlowPath(op, *step_callbacks, dispatchKeySet, kernel, std::forward(args)...); + } +#endif // PYTORCH_DISABLE_PER_OP_PROFILING + return kernel.template call(op, dispatchKeySet, std::forward(args)...); +} + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +inline Return Dispatcher::redispatch(const TypedOperatorHandle& op, DispatchKeySet currentDispatchKeySet, Args... args) const { + detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5 + // do not use RecordFunction on redispatch +#ifndef NDEBUG + if (show_dispatch_trace()) { + std::cerr << "[redispatch] op=[" << op.operator_name() << "], key=[" << toString(currentDispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet); + return kernel.template call(op, currentDispatchKeySet, std::forward(args)...); +} + +inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. + const auto& entry = op.operatorDef_->op; + auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack); +#ifndef NDEBUG + if (show_dispatch_trace()) { + std::cerr << "[callBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const auto& kernel = entry.lookup(dispatchKeySet); +#ifndef PYTORCH_DISABLE_PER_OP_PROFILING + auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION); + if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) { + at::RecordFunction guard(std::move(*step_callbacks)); + auto dispatchKey = dispatchKeySet.highestPriorityTypeId(); + auto& schema = op.schema(); + auto schema_ref = std::reference_wrapper(schema); + guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, c10::ArrayRef(stack->data(), stack->size())) + : runRecordFunction(guard, schema_ref, dispatchKey); + + // keeping the guard alive while executing the kernel + kernel.callBoxed(op, dispatchKeySet, stack); + + if (C10_UNLIKELY(guard.needsOutputs())) { + guard.setOutputs(*stack); + } + return; + } +#endif // PYTORCH_DISABLE_PER_OP_PROFILING + kernel.callBoxed(op, dispatchKeySet, stack); +} + +// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation +inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. 
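+  // Illustration (editorial sketch, not upstream code): in the boxed
+  // convention arguments travel as IValues on a Stack. A caller holding only
+  // an OperatorHandle might do (x, y are hypothetical tensors):
+  //
+  //   torch::jit::Stack stack;
+  //   torch::jit::push(stack, x, y);
+  //   op.callBoxed(&stack);                    // pops inputs, pushes outputs
+  //   at::Tensor result = stack.back().toTensor();
+  //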
+ const auto& entry = op.operatorDef_->op; + // We still compute this as we're obligated to pass it on to the internal + // kernel, if it is a boxed fallback + auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack); + const auto& kernel = ([&]() { + if (op.hasKernelForDispatchKey(dk)) { + return entry.kernelForDispatchKey(dk); + } else { + auto idx = getDispatchTableIndexForDispatchKey(dk); + TORCH_INTERNAL_ASSERT(idx >= 0); + return backendFallbackKernels_[idx].kernel; + } + })(); + kernel.callBoxed(op, dispatchKeySet, stack); +} + +inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. + const auto& entry = op.operatorDef_->op; +#ifndef NDEBUG + if (show_dispatch_trace()) { + std::cerr << "[redispatchBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const auto& kernel = entry.lookup(dispatchKeySet); + return kernel.callBoxed(op, dispatchKeySet, stack); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/dispatch/ObservedOperators.h b/voice_bridge/torch/include/ATen/core/dispatch/ObservedOperators.h new file mode 100644 index 0000000000000000000000000000000000000000..1741171fbf00412647178b2210071cee36928e54 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dispatch/ObservedOperators.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +struct TORCH_API ObservedOperators { + ObservedOperators() = delete; + + static bool isObserved(const OperatorName& name); + + static std::unordered_set& getUnobservedOperatorList(); +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/dispatch/OperatorEntry.h b/voice_bridge/torch/include/ATen/core/dispatch/OperatorEntry.h new file mode 100644 index 0000000000000000000000000000000000000000..c3bd91197f5e79aae8404dc29ad83dcb04886a3e --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dispatch/OperatorEntry.h @@ -0,0 +1,309 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#ifdef C10_MOBILE +#define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY +#endif + +namespace c10 { + +class Dispatcher; + +namespace impl { + +// This data structure represents a kernel that was registered to us from a +// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata +// about the kernel that isn't necessary for actual dispatching (this is why +// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for +// giving good error messages. +struct AnnotatedKernel final { + AnnotatedKernel(KernelFunction k, std::unique_ptr s, std::string d) + : kernel(std::move(k)) + , inferred_function_schema(std::move(s)) + , debug(std::move(d)) + {} + AnnotatedKernel() {} + KernelFunction kernel; + std::unique_ptr inferred_function_schema; + // A little debug string to help us identify the kernel in question. + // Most importantly it records the TORCH_LIBRARY block that did the + // registration. 
+ std::string debug; +}; + +// This data structure represents operator schema, with metadata specifying +// where the registration of this schema occurred +struct AnnotatedSchema final { + AnnotatedSchema(FunctionSchema s, std::string d) + : schema(std::move(s)) + , debug(std::move(d)) + {} + FunctionSchema schema; + std::string debug; +}; + +// Internal data structure that records information about a specific operator. +// It's not part of the public API; typically, users will interact with +// OperatorHandle instead. +// +// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher +// lock (this is important because some methods in OperatorEntry access +// dispatcher state) +class TORCH_API OperatorEntry final { +public: + explicit OperatorEntry(OperatorName&& operator_name); + + OperatorEntry(const OperatorEntry&) = delete; + OperatorEntry(OperatorEntry&&) noexcept = delete; + OperatorEntry& operator=(const OperatorEntry&) = delete; + OperatorEntry& operator=(OperatorEntry&&) noexcept = delete; + + const FunctionSchema& schema() const { + TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet"); + return schema_->schema; + } + const std::string& debug() const { + TORCH_INTERNAL_ASSERT(schema_.has_value()); + return schema_->debug; + } + bool hasSchema() const { + return schema_.has_value(); + } + + bool isObserved() const { + return is_observed_; + } + + // We may allocate an OperatorEntry for an operator even when we don't + // have a schema. When we receive the schema registration, we post + // facto register a schema. + // + // NB: registerSchema/deregisterSchema are not idempotent; if you + // attempt to register a schema when one is already present or vice + // versa that is an error. (Refcounting for the registrations is + // handled in the OperatorHandle in Dispatcher) + void registerSchema(FunctionSchema&&, std::string&& debug, std::vector tags = {}); + void deregisterSchema(); + + const OperatorName& operator_name() const { + return name_; + } + +#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY + using AnnotatedKernelContainer = std::array; +#else + using AnnotatedKernelContainer = std::list; +#endif + using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator; + + // Why are kernels and fallback asymmetric? It has to do with ownership. + // Kernels and the computed dispatch tables for them are canonically + // owned by OperatorEntry, but backend fallbacks are specified once + // and apply for all operators, so they should be owned by Dispatcher. + // However, the registration of a backend fallback affects the + // state of the computed dispatch table, so when a backend fallback + // is updated, we need to update the operator tables too. Thus, + // registerKernel is the mechanism by which we give kernels to + // operator entry to own (and update dispatch table), but we only + // need a non-owning mechanism to update fallback. 
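+  //
+  // Illustration (editorial sketch, not upstream text) of the flow that feeds
+  // registerKernel: a TORCH_LIBRARY block defs the schema (owned here) and a
+  // TORCH_LIBRARY_IMPL block registers per-backend kernels:
+  //
+  //   TORCH_LIBRARY(myops, m) {
+  //     m.def("myadd(Tensor a, Tensor b) -> Tensor");
+  //   }
+  //   TORCH_LIBRARY_IMPL(myops, CPU, m) {
+  //     m.impl("myadd", &myadd_cpu);  // myadd_cpu: hypothetical CPU kernel
+  //   }
+  //
+  // Backend fallbacks (m.fallback(...)) apply to all operators at once, which
+  // is why the Dispatcher owns them instead.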
+ + // Precondition: Dispatcher::mutex_ is held + // Postcondition: caller is responsible for disposing of the kernel + AnnotatedKernelContainerIterator registerKernel( + const Dispatcher& dispatcher, + c10::optional dispatch_key, + KernelFunction kernel, + c10::optional cpp_signature, + std::unique_ptr inferred_function_schema, + std::string debug + ); + + // Precondition: Dispatcher::mutex_ is held + void deregisterKernel_( + const Dispatcher& dispatcher, + c10::optional dispatch_key, + AnnotatedKernelContainerIterator kernel + ); + + // Precondition: Dispatcher::mutex_ is held + void updateFallback( + const Dispatcher& dispatcher, + DispatchKey dispatch_key + ); + + // Precondition: Dispatcher::mutex_ is held + void updateSchemaAliasAnalysis(AliasAnalysisKind a) { + TORCH_INTERNAL_ASSERT(schema_.has_value()); + schema_->schema.setAliasAnalysis(a); + } + + std::string dumpComputedTable() const; + std::string dumpState() const; + void checkInvariants() const; + + const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; } + + // Asserts that the given FuncType is correct for calling this operator in an unboxed way. + template + inline void assertSignatureIsCorrect() { + assertSignatureIsCorrect(CppSignature::make(), fn_has_symint::value); + } + + void assertSignatureIsCorrect(const CppSignature call_signature, bool has_symint) const; + + [[noreturn]] void reportError(DispatchKey dispatchKey) const; + + const KernelFunction& lookup(DispatchKeySet ks) const { + const auto idx = ks.getDispatchTableIndexForDispatchKeySet(); + if (C10_UNLIKELY(idx == -1)) { + reportError(ks.highestPriorityTypeId()); + } + const auto& kernel = dispatchTable_[idx]; + // A valid kernel *always* has a boxed kernel and *may* have an + // unboxed kernel. However, we typically do unboxed calls in at:: + // APIs, where the kernel 1) will very likely be valid and 2) + // should have an unboxed kernel. Checking the unboxed kernel + // first will allow us to avoid touching the boxed kernel at all + // in the common case. + if (C10_UNLIKELY(!kernel.isValidUnboxed())) { + if (!kernel.isValid()) { + reportError(ks.highestPriorityTypeId()); + } + } + return kernel; + } + + std::string listAllDispatchKeys() const; + + // Returns true if kernel_ has entry for any key in ks. + // + // Invariant: There are no alias keys in the passed-in dispatch key set. + // Note [No Alias Keys in DispatchKeySet] + // Alias keys should be checked using `hasKernelForDispatchKey` + // Alias keys shouldn't go inside of a DispatchKeySet, since they can technically + // have a value > 63 (causing overflow). + bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const; + // Returns true if kernel_ has entry for a particular key. + bool hasKernelForDispatchKey(DispatchKey k) const; + // Retrieves the kernel entry at a particular key. Symmetric with + // hasKernelForDispatchKey. To get the AnnotatedKernel, see + // getKernelForDispatchKey (private) + const KernelFunction& kernelForDispatchKey(DispatchKey k) const; + // Returns true if the "computed table" has an entry for a particular key. 
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const; + // Returns all the operator tags added at the time of registration + const std::vector& getTags() const; + + template + PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const { + return py_cache_.ptr_or(self_interpreter, slow_accessor); + } + +private: + + OperatorName name_; + c10::optional schema_; + #ifndef C10_MOBILE + std::vector tags_; + #endif + std::array dispatchTable_; + DispatchKeyExtractor dispatchKeyExtractor_; + // Pointer to the torch.ops.ns.op.overload object for speed + c10::PyHandleCache py_cache_; + + // kernels_ stores all registered kernels for the corresponding dispatch key + // and catchAllKernels_ stores the catch-all kernels. + // If an operator library gets loaded that overwrites an already existing kernel, + // both kernels will be in that list but only the newer one will be in + // dispatchTable. If any of the kernels go away (say the library gets + // unloaded), we remove the kernel from this list and update the + // dispatchTable if necessary. + // Kernels in the list are ordered by registration time descendingly, + // newer registrations are before older registrations. + // We do not combine dispatchTable and kernels into one hash map because + // kernels is a larger data structure and accessed quite infrequently + // while dispatchTable is accessed often and should be kept small to fit + // into CPU caches. + // Invariants: + // - dispatchTable[dispatch_key] == kernels_[dispatch_key].front() + // - dispatchTable[dispatch_key] does not exist if and only if + // kernels_[dispatch_key] does not exist + // - If kernels_[dispatch_key] exists, then it has elements. + // It is never an empty list. + // + // Why do we do that? + // ----- + // We mostly do this to enable Jupyter notebooks where a cell registering + // a kernel could be executed multiple times and the later execution + // should overwrite the earlier one. Note that this still fails when the + // function schema changed between the executions, but it works as long + // as the function schema didn't change. A better solution would be to + // unload the old extension library from the Jupyter cell when the cell is + // re-executed and then only allow one kernel here, i.e. error if a kernel + // is already registered, but that's a lot of effort to implement and + // currently not high-pri. + ska::flat_hash_map +#else + std::list +#endif + > kernels_; + + const AnnotatedKernel& missingKernel() const; + const AnnotatedKernel& ambiguousAutogradOtherKernel() const; + + // cpp_signature_ stores function signature if any of + // the kernels was created in a way that allowed us to know the function + // signature (i.e. by supplying an unboxed C++ kernel function). + // If this is set, it will be used to check that future kernel + // registrations match and it will be used in unboxed function calls + // to verify their arguments against the known function signature. 
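+
+  // Illustration (editorial sketch, not upstream text) of the overwrite
+  // behavior described for kernels_ above: re-registering a kernel for the
+  // same dispatch key prepends to the per-key list, and dispatchTable_ always
+  // mirrors the front element:
+  //
+  //   kernels_[CPU] == [kernel_v2, kernel_v1]   // newest first
+  //   dispatchTable_[CPU] == kernel_v2
+  //
+  // Deregistering kernel_v2 removes it and restores kernel_v1 in the table.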
+ struct CppSignatureWithDebug { + CppSignature signature; + std::string debug; + c10::optional dispatch_key; + }; + c10::optional cpp_signature_; + c10::optional sym_cpp_signature_; + + // Whether this operator needs to be observed with RecordFunction + const bool is_observed_; + + [[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const; + const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const; + std::pair computeDispatchTableEntryWithDebug( + const c10::Dispatcher& dispatcher, DispatchKey dispatch_key + ) const; + // This function re-establishes the invariant that dispatchTable + // contains the front element from the kernels list for a given runtime dispatch key. + void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key); + // Like above, but also handles alias dispatch keys. + void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key); + // Like above, but for ALL entries in the dispatch table. + void updateDispatchTableFull_(const c10::Dispatcher& dispatcher); + // Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front(). + const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const; +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/dispatch/OperatorOptions.h b/voice_bridge/torch/include/ATen/core/dispatch/OperatorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..5c87f93657ac174b341074359e661c8e187421d3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dispatch/OperatorOptions.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace c10 { + +enum class AliasAnalysisKind : uint8_t { + INTERNAL_SPECIAL_CASE, + CONSERVATIVE, // The most conservative alias analysis type, assumes + // side-effects. This is the default analysis. + FROM_SCHEMA, + PURE_FUNCTION +}; + +#if !defined(_MSC_VER) +constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr. +#endif +inline const char* toString(AliasAnalysisKind aliasAnalysisKind) { + return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE) + ? "CONSERVATIVE" + : (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA) + ? "FROM_SCHEMA" + : (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION) + ? "PURE_FUNCTION" + : (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE) + ? 
"INTERNAL_SPECIAL_CASE" + : "UNKNOWN"; +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h b/voice_bridge/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h new file mode 100644 index 0000000000000000000000000000000000000000..e6ef2128fd495f873465c98b10ebfad6f1e323df --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace c10 { + +class RegistrationHandleRAII final { +public: + explicit RegistrationHandleRAII(std::function onDestruction) + : onDestruction_(std::move(onDestruction)) {} + + ~RegistrationHandleRAII() { + if (onDestruction_) { + onDestruction_(); + } + } + + RegistrationHandleRAII(const RegistrationHandleRAII&) = delete; + RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete; + + RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept + : onDestruction_(std::move(rhs.onDestruction_)) { + rhs.onDestruction_ = nullptr; + } + + RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept { + onDestruction_ = std::move(rhs.onDestruction_); + rhs.onDestruction_ = nullptr; + return *this; + } + +private: + std::function onDestruction_; +}; + +} diff --git a/voice_bridge/torch/include/ATen/core/dynamic_type.h b/voice_bridge/torch/include/ATen/core/dynamic_type.h new file mode 100644 index 0000000000000000000000000000000000000000..1f649c8217cbe29553edac8fc25ce65f782167f3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/dynamic_type.h @@ -0,0 +1,237 @@ +#pragma once + +#include +#include + +#include +#include + +namespace c10 { + +using DynamicTypeBits = std::uint32_t; +#define DYNAMIC_TYPE_BIT(x) (1u << x) + +constexpr DynamicTypeBits kDynamicCovariantTypeBit = DYNAMIC_TYPE_BIT(31); +constexpr DynamicTypeBits kDynamicAnyTypeBit = DYNAMIC_TYPE_BIT(30); + +constexpr DynamicTypeBits kDynamicNoneTypeBit = DYNAMIC_TYPE_BIT(1); +constexpr DynamicTypeBits kDynamicIntTypeBit = DYNAMIC_TYPE_BIT(3); +constexpr DynamicTypeBits kDynamicFloatTypeBit = DYNAMIC_TYPE_BIT(4); +constexpr DynamicTypeBits kDynamicComplexTypeBit = DYNAMIC_TYPE_BIT(5); +constexpr DynamicTypeBits kDynamicListTypeBit = DYNAMIC_TYPE_BIT(7); +constexpr DynamicTypeBits kDynamicTupleTypeBit = DYNAMIC_TYPE_BIT(8); +constexpr DynamicTypeBits kDynamicClassTypeBit = DYNAMIC_TYPE_BIT(10); + +#define FORALL_DYNAMIC_TYPES(_) \ + _(Tensor, DYNAMIC_TYPE_BIT(0), 1) \ + _(None, kDynamicNoneTypeBit, 1) \ + _(Bool, DYNAMIC_TYPE_BIT(2), 1) \ + _(Int, kDynamicIntTypeBit, 1) \ + _(Float, kDynamicFloatTypeBit, 1) \ + _(Complex, kDynamicComplexTypeBit, 1) \ + _(Number, \ + (kDynamicIntTypeBit | kDynamicFloatTypeBit | kDynamicComplexTypeBit), \ + 1) \ + _(String, DYNAMIC_TYPE_BIT(6), 1) \ + _(List, kDynamicListTypeBit, 0) \ + _(Tuple, (kDynamicTupleTypeBit | kDynamicCovariantTypeBit), 0) \ + _(Dict, DYNAMIC_TYPE_BIT(9), 0) \ + _(Class, kDynamicClassTypeBit, 0) \ + _(Optional, \ + (DYNAMIC_TYPE_BIT(11) | kDynamicNoneTypeBit | kDynamicCovariantTypeBit), \ + 0) \ + _(AnyList, (kDynamicListTypeBit | kDynamicAnyTypeBit), 1) \ + _(AnyTuple, \ + (kDynamicTupleTypeBit | kDynamicCovariantTypeBit | kDynamicAnyTypeBit), \ + 1) \ + _(DeviceObj, DYNAMIC_TYPE_BIT(12), 1) \ + _(StreamObj, DYNAMIC_TYPE_BIT(13), 1) \ + _(Capsule, DYNAMIC_TYPE_BIT(14), 1) \ + _(Generator, DYNAMIC_TYPE_BIT(15), 1) \ + _(Storage, DYNAMIC_TYPE_BIT(16), 1) \ + _(Var, DYNAMIC_TYPE_BIT(17), 0) \ + _(AnyClass, (kDynamicClassTypeBit | kDynamicAnyTypeBit), 1) \ + _(QScheme, 
DYNAMIC_TYPE_BIT(18), 1) \ + _(Quantizer, DYNAMIC_TYPE_BIT(19), 1) \ + _(AnyEnum, DYNAMIC_TYPE_BIT(20), 1) \ + _(RRef, DYNAMIC_TYPE_BIT(21), 0) \ + _(Future, DYNAMIC_TYPE_BIT(22), 0) \ + _(Any, 0xffffffff, 1) + +#define FORALL_DYNAMIC_TYPES_FAKE(_) \ + _(ScalarType, kDynamicIntTypeBit, 1) \ + _(Layout, kDynamicIntTypeBit, 1) \ + _(SymInt, kDynamicIntTypeBit, 1) \ + _(MemoryFormat, kDynamicIntTypeBit, 1) + +#define FORWARD_DECL_TYPE(NAME, _, __) struct NAME ## Type; + FORALL_DYNAMIC_TYPES(FORWARD_DECL_TYPE) + FORALL_DYNAMIC_TYPES_FAKE(FORWARD_DECL_TYPE) +#undef FORWARD_DECL_TYPE + +class DynamicType; +using DynamicTypePtr = std::shared_ptr; + +/** + * DynamicType is designed as a low dependency type system for TorchScript. The + * existing JIT types are used for both compilation and runtime, which makes + * sense for server contexts because we often compile and run the model in + * the same process, however this doesn't hold for mobile devices where we + * always compiles a model ahead of time, therefore there will be dependencies + * which are not needed, but built with mobile runtime causing binary size + * bloat, by design. Every basic type like Int, Bool or String will bring their + * vtable, typeinfo, constructor, destructor and even more data from their + * specializations for STL types to the binary causing a long tail bloat. + * + * The core problem is about the complexity to implement and maintain a single + * type system for both analysis and execution purposes. Although they should + * have the exactly same semantics, in practice implement a unified abstraction + * adds conceptual and representational overhead for both sides of the world. + * + * To address the issues, DynamicType implements a minimal subset of JIT types + * and uses a generic algorithm to test all subtyping relations. To achieve + * this, we assign each dynamic type a single integer tag to represent its + * semantics. More specifically, a dynamic type is defined as a set of "control + * bits" and "data bits", where control bits describe the special behavior when + * testing a type and data bits map to identity of each nominal type. We use bit + * operations to perform all the tests. + * + * For example, a "covariant bit" is a control bit used to describe if a type + * is covariant, right now the most used one is tuple type, and in addition to + * the control bit, tuple type's data bit is the 8th bit from the LSB. Control + * bits start from MSB and data bits start from LSB. + * + * If two types are equal, then they are subtype of each other, also if the bits + * from one type tag is subset of the other tag, it automatically becomes a + * subtype of the other. This simplifies the subtyping logic a lot, and over the + * long term it is possible to adopt this scheme on the server side as well. + * Special cases can be added but they generally should not take too much code + * size. + * + * DynamicType may or may not inherit from c10::Type because it's not the core + * requirement of DynamicType to interface with existing JIT types, but we might + * want to inherit from c10::Type to reduce the migration cost. + */ +class DynamicType : public SharedType { + using ClassTypePtr = std::shared_ptr; + + /** + * A implementation detail to support NamedTuple. 
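+   *
+   * For example (an illustrative note, not part of the original header): a
+   * NamedTuple such as Point(x: int, y: int) can be modeled as a Tuple whose
+   * two elements carry the labels "x" and "y" alongside their Int element
+   * types.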
+ */ + struct LabeledDynamicType { + c10::optional label; + DynamicTypePtr ty; + explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {} + + bool equals(const LabeledDynamicType& other) const; + bool isSubtypeOf(const LabeledDynamicType& other) const; + }; + + public: + // TODO Change Ptr to DynamicTypePtr when all migrations are done. + using Ptr = TypePtr; + using ElementType = DynamicType; + ~DynamicType() override; + + struct Arguments { + Arguments() = default; + Arguments(c10::ArrayRef); + Arguments(const std::vector&, c10::ArrayRef); + std::vector elems; + }; + + enum class Tag : DynamicTypeBits { +#define DYNAMIC_TYPE_ITEM(NAME, VAL, _) NAME = VAL, + FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_ITEM) + FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_ITEM) +#undef DYNAMIC_TYPE_ITEM + }; + + bool equals(const Type& rhs) const override; + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + std::string str() const override; + static const TypeKind Kind = TypeKind::DynamicType; + static TORCH_API DynamicTypePtr create(Type& ty); + + explicit DynamicType(Tag, Arguments); + explicit DynamicType(Tag, c10::string_view, Arguments); + + TypePtr containedType(size_t) const override; + size_t containedTypeSize() const override; + Tag tag() const { + return tag_; + } + const c10::optional& name() const { + return name_; + } + const Arguments& arguments() const { + return arguments_; + } + TORCH_API TypeKind dynamicKind() const; + + // Should be used only on the server side to restore static type information. +#ifndef C10_MOBILE + TORCH_API +#endif + TypePtr fallback() const; + + private: + bool symmetric() const override { + return false; + } + friend struct Type; + static std::shared_ptr create(const Type& ty); + DynamicType(const Type& other); + bool equals(const DynamicType& other) const; + + template + bool compareArguments(const DynamicType& other, F&& f) const { + if (arguments_.elems.size() != other.arguments_.elems.size()) { + return false; + } + for (size_t i = 0; i < arguments_.elems.size(); i++) { + if (!f(arguments_.elems[i], other.arguments_.elems[i])) { + return false; + } + } + return true; + } + + Tag tag_; + c10::optional name_; + union { + Arguments arguments_; + ClassTypePtr class_; + }; +}; + +template +struct DynamicTypeTrait { + C10_NOINLINE static auto tagValue() { + TORCH_CHECK(false); + return DynamicType::Tag::Any; + } +}; + +namespace detail { +C10_NOINLINE DynamicTypePtr makeBaseType(DynamicType::Tag tag); +} + +#define DYNAMIC_TYPE_TAG_VALUE(NAME, _, IS_BASE_TYPE) \ + template <> \ + struct TORCH_API DynamicTypeTrait { \ + C10_ERASE static auto tagValue() { \ + return DynamicType::Tag::NAME; \ + } \ + static constexpr bool isBaseType = IS_BASE_TYPE; \ + template \ + static std::enable_if_t getBaseType() { \ + static auto type = detail::makeBaseType(tagValue()); \ + return type; \ + } \ + }; // namespace c10 +FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_TAG_VALUE) +FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_TAG_VALUE) +#undef DYNAMIC_TYPE_TAG_VALUE + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/enum_tag.h b/voice_bridge/torch/include/ATen/core/enum_tag.h new file mode 100644 index 0000000000000000000000000000000000000000..ce35bd844c6e3d006952b8cf14ae56d574680d76 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/enum_tag.h @@ -0,0 +1,16 @@ +#pragma once + +// @generated by torchgen/gen.py from enum_tag.h + +namespace at { + // Enum of valid tags obtained from the entries in tags.yaml + enum class Tag { + data_dependent_output, + 
dynamic_output_shape, + generated, + inplace_view, + nondeterministic_bitwise, + nondeterministic_seeded, + view_copy + }; +} diff --git a/voice_bridge/torch/include/ATen/core/enum_type.h b/voice_bridge/torch/include/ATen/core/enum_type.h new file mode 100644 index 0000000000000000000000000000000000000000..720d5363799fa81626353477b8b20caa6872eea3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/enum_type.h @@ -0,0 +1,100 @@ +#pragma once + +#include + +namespace c10 { + +struct EnumType; +using EnumTypePtr = std::shared_ptr; +using EnumNameValue = std::pair; +struct TORCH_API EnumType : public NamedType { + friend struct Type; + static const TypeKind Kind = TypeKind::EnumType; + + static EnumTypePtr create( + const c10::QualifiedName& qualified_class_name, + TypePtr value, + std::vector enum_names_values, + std::weak_ptr<::torch::jit::CompilationUnit> cu) { + switch (value->kind()) { + case TypeKind::IntType: + case TypeKind::FloatType: + case TypeKind::StringType: + return EnumTypePtr(new EnumType( + qualified_class_name, + std::move(value), + std::move(enum_names_values), + std::move(cu))); + default: + AT_ERROR( + "Cannot create Enum with value type '", + value->str(), + "', only int, float and string are supported"); + } + } + + std::string str() const override { + return "Enum<" + annotation_str() + ">"; + } + + std::string repr_str() const override { + return str(); + } + + const TypePtr& getValueType() const { + return value_type_; + } + + bool equals(const Type& rhs) const override { + if (auto* enum_rhs = rhs.castRaw()) { + return name().value() == enum_rhs->name().value() && + *getValueType() == *(enum_rhs->getValueType()) && + this->compilation_unit() == enum_rhs->compilation_unit(); + } + return false; + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + std::shared_ptr compilation_unit() + const { + auto cu = cu_.lock(); + return cu; + } + + const QualifiedName qualifiedClassName() const { + return name().value(); + } + + at::ArrayRef containedTypes() const override { + return value_type_; + } + + const at::ArrayRef enumNamesValues() const { + return enum_names_values_; + } + + private: + EnumType( + c10::QualifiedName qualified_class_name, + TypePtr value_type, + std::vector enum_names_values, + std::weak_ptr cu) + : NamedType(TypeKind::EnumType, std::move(qualified_class_name)), + value_type_(std::move(value_type)), + enum_names_values_(std::move(enum_names_values)), + cu_(cu) {} + + std::string annotation_str_impl( + TypePrinter printer = nullptr) const override { + (void)printer; // Suppress unused variable warning + const auto& n = name().value(); + return n.qualifiedName(); + } + + TypePtr value_type_; + std::vector enum_names_values_; + std::weak_ptr<::torch::jit::CompilationUnit> cu_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/function.h b/voice_bridge/torch/include/ATen/core/function.h new file mode 100644 index 0000000000000000000000000000000000000000..76e417b8c5cfe9d9fff57819dd6a6383a40c62a2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/function.h @@ -0,0 +1,107 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { +struct FunctionSchema; +}; + +namespace at { +TORCH_API void launch(std::function func); +} + +namespace torch { +namespace jit { + +struct Graph; +struct Code; + +namespace mobile { +struct Code; +} + +using Stack = std::vector; +using Kwargs = std::unordered_map; +struct RecursiveMethodCallError : public std::exception {}; +using 
TaskLauncher = std::function<void(std::function<void()>)>;
+
+TORCH_API void preoptimizeGraph(std::shared_ptr<Graph>& graph, bool disable_autocast=false);
+
+// A Function is a pure Graph with no implicit `self` object bound.
+// It contains schema information and the executor that manages the
+// execution of the function. Method is a wrapper around an
+// underlying Function that also provides a `self` object.
+struct TORCH_API Function {
+  virtual c10::string_view doc_string() const {
+    static constexpr c10::string_view no_doc_string = "";
+    return no_doc_string;
+  }
+
+  virtual bool isGraphFunction() const {
+    return false;
+  }
+
+  virtual void run(Stack& stack) = 0;
+
+  virtual c10::intrusive_ptr<c10::ivalue::Future> runAsync(
+      Stack& /*stack*/,
+      TaskLauncher taskLauncher = at::launch) {
+    (void)taskLauncher; // Suppress unused variable warning
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
+    return {};
+  }
+
+  at::IValue operator()(
+      Stack stack,
+      const Kwargs& kwargs = Kwargs()) {
+    getSchema().checkAndNormalizeInputs(stack, kwargs);
+    run(stack);
+    return stack.front();
+  }
+
+  virtual const c10::QualifiedName& qualname() const = 0;
+
+  const std::string& name() const {
+    return qualname().name();
+  }
+
+  // if this isn't yet defined, run its method_creator function
+  virtual void ensure_defined() = 0;
+
+  virtual const c10::FunctionSchema& getSchema() const = 0;
+
+  virtual size_t num_inputs() const = 0;
+
+  virtual Function& setSchema(c10::FunctionSchema schema) = 0;
+
+  // call() defines how different interpreter implementations interact with
+  // Function objects. Basically, interpreters need to provide a callback to
+  // communicate to Functions what to do when provided a Code object.
+  // Alternatively we could design the signature to return an optional Code
+  // object, but that would require special handling of the null case in the
+  // interpreter, and the fallback behavior is defined not by the interpreter
+  // but by the Functions themselves, so a callback approach is more
+  // reasonable than returning values.
+  // If call() returns true, the callback completed successfully; otherwise
+  // call() returns false.
+
+  // Overload for the server interpreter; a bailout size is needed for the graph executor.
+  virtual bool call(Stack&, c10::optional<size_t>, c10::function_ref<void(const Code&)>) {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
+    return false;
+  }
+
+  // Overload for the mobile interpreter.
+  virtual bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
+    return false;
+  }
+
+  virtual ~Function() {}
+};
+} // namespace jit
+} // namespace torch
diff --git a/voice_bridge/torch/include/ATen/core/function_schema.h b/voice_bridge/torch/include/ATen/core/function_schema.h
new file mode 100644
index 0000000000000000000000000000000000000000..d80eaf6581e08d90a3f1f1252b6ca3f5dc300aef
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/function_schema.h
@@ -0,0 +1,645 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace c10 {
+
+// schema as used in the compiler for resolving function calls and reporting
+// errors. These objects should be constructed from C10 schema once those
+// are available.
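+
+// For orientation (an illustrative example, not part of the original header):
+// a schema string such as
+//
+//   aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+//
+// decomposes into the operator name "aten::add", the overload name "Tensor",
+// three Arguments (the last one kwarg-only, with default value 1), and a
+// single Tensor return.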
+
+struct Argument;
+struct FunctionSchema;
+
+using AliasTypeSet = std::vector<TypePtr>;
+
+bool operator==(const Argument& lhs, const Argument& rhs);
+
+struct Argument {
+  Argument(
+      std::string name = "",
+      TypePtr type = nullptr,
+      c10::optional<int32_t> N = c10::nullopt,
+      c10::optional<IValue> default_value = c10::nullopt,
+      bool kwarg_only = false,
+      c10::optional<AliasInfo> alias_info = c10::nullopt)
+    : Argument(name, type, type, N, default_value, kwarg_only, alias_info) {}
+
+  Argument(
+      std::string name,
+      TypePtr fake_type,
+      TypePtr real_type,
+      c10::optional<int32_t> N = c10::nullopt,
+      c10::optional<IValue> default_value = c10::nullopt,
+      bool kwarg_only = false,
+      c10::optional<AliasInfo> alias_info = c10::nullopt)
+      : name_(std::move(name)),
+        type_(fake_type ? std::move(fake_type) : TensorType::get()),
+        real_type_(real_type ? std::move(real_type) : type_),
+        N_(std::move(N)),
+        default_value_(std::move(default_value)),
+        alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr),
+        kwarg_only_(kwarg_only) {
+    // this is a softly-enforced invariant for out arguments.
+    bool is_alias = alias_info_ != nullptr && alias_info_->isWrite();
+    is_out_ = kwarg_only_ && is_alias;
+  }
+
+  Argument(Argument&& rhs) noexcept = default;
+
+  Argument(const Argument& rhs)
+      : name_(rhs.name_),
+        type_(rhs.type_),
+        real_type_(rhs.real_type_),
+        N_(rhs.N_),
+        default_value_(rhs.default_value_),
+        alias_info_(rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr),
+        kwarg_only_(rhs.kwarg_only_),
+        is_out_(rhs.is_out_) {}
+
+  Argument& operator=(Argument&& rhs) = default;
+
+  Argument& operator=(const Argument& rhs) {
+    if (this != &rhs) {
+      name_ = rhs.name_;
+      type_ = rhs.type_;
+      real_type_ = rhs.real_type_;
+      N_ = rhs.N_;
+      default_value_ = rhs.default_value_;
+      alias_info_ = rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr;
+      kwarg_only_ = rhs.kwarg_only_;
+      is_out_ = rhs.is_out_;
+    }
+    return *this;
+  }
+
+  const std::string& name() const {
+    return name_;
+  }
+  const TypePtr& type() const {
+    return type_;
+  }
+  // if type() is non-null, this is guaranteed to be non-null (if no real
+  // type was provided, this takes on type()'s value)
+  const TypePtr& real_type() const {
+    return real_type_;
+  }
+  c10::optional<int32_t> N() const {
+    return N_;
+  }
+  const c10::optional<IValue>& default_value() const {
+    return default_value_;
+  }
+  bool kwarg_only() const {
+    return kwarg_only_;
+  }
+
+  bool is_out() const {
+    return is_out_;
+  }
+
+  C10_NODISCARD const AliasInfo* alias_info() const {
+    return alias_info_.get();
+  }
+
+  bool is_inferred_type() const {
+    bool is_inferred_type = false;
+    TORCH_INTERNAL_ASSERT(type_);
+    if (auto pt = type_->cast<TensorType>()) {
+      if (pt->isInferredType()) {
+        is_inferred_type = true;
+      }
+    }
+    return is_inferred_type;
+  }
+
+  std::string formatTypeMismatchMsg(const std::string& actual_type) const {
+    std::string inferred_type_hint;
+    if (is_inferred_type()) {
+      inferred_type_hint = c10::str(
+          "Inferred '",
+          name(),
+          "' to be of type 'Tensor' ",
+          "because it was not annotated with an explicit type.\n");
+    }
+    return c10::str(
+        "Expected a value of type '",
+        type()->repr_str(),
+        "' for argument '",
+        name(),
+        "' but instead found type '",
+        actual_type,
+        "'.\n",
+        inferred_type_hint);
+  }
+
+  Argument cloneWithType(TypePtr new_type) const {
+    return Argument(
+        name_,
+        std::move(new_type),
+        N_,
+        default_value_,
+        kwarg_only_,
+        alias_info_ ? c10::optional<AliasInfo>(*alias_info_) : c10::nullopt);
+  }
+
+  // this function checks whether this Argument is backward compatible with
+  // the old one. We consider the following cases to be backward compatible:
+  //   1) the two arguments are equal
+  //   2) this arg's type should be a subtype of the old arg's type
+  //   3) this arg must provide the same default value if the old arg has one
+  bool isBackwardCompatibleWith(
+      const Argument& old,
+      std::ostream* why_not=nullptr) const;
+
+  // this function checks whether this Argument is forward compatible with
+  // the old one. We consider the following cases to be forward compatible:
+  //   1) the two arguments are equal
+  //   2) this arg's type should be a subtype of the old arg's type
+  //   3) this arg must provide the same default value if the old arg has one
+  bool isForwardCompatibleWith(
+      const Argument& old,
+      std::ostream* why_not = nullptr) const;
+
+ private:
+  std::string name_;
+  TypePtr type_;
+  TypePtr real_type_; // this is ScalarType, not int, e.g.
+  // for list types, an optional statically known length for the list
+  // e.g. for int[3]: type = ListType::ofInts(), N = 3
+  // If present, this will allow scalars to be broadcast to this length to
+  // become a list.
+  c10::optional<int32_t> N_;
+
+  c10::optional<IValue> default_value_;
+  // AliasInfo is huge, so let's only allocate memory for it if
+  // necessary (which it isn't during schema parsing on startup, to
+  // give a pertinent example).
+  std::unique_ptr<AliasInfo> alias_info_;
+  // is this only specifiable as a keyword argument?
+  bool kwarg_only_;
+  // marks if the argument is an out variant of the schema
+  bool is_out_;
+};
+
+inline bool operator==(const Argument& lhs, const Argument& rhs) {
+  return lhs.name() == rhs.name()
+      && *lhs.type() == *rhs.type()
+      && lhs.N() == rhs.N()
+      && lhs.default_value() == rhs.default_value()
+      && lhs.kwarg_only() == rhs.kwarg_only()
+      && (lhs.alias_info() == rhs.alias_info()
+          || (lhs.alias_info() != nullptr && rhs.alias_info() != nullptr
+               && *lhs.alias_info() == *rhs.alias_info()));
+}
+
+inline bool operator!=(const Argument& lhs, const Argument& rhs) {
+  return !(lhs == rhs);
+}
+
+enum struct TORCH_API SchemaArgType { input, output };
+
+/**
+ * struct SchemaArgument
+ *
+ * Structure used to represent arguments or returns for a schema.
+ */
+struct TORCH_API SchemaArgument {
+  SchemaArgType type;
+  size_t index;
+  SchemaArgument(SchemaArgType tpe, size_t idx) : type(tpe), index(idx) {}
+  bool operator==(const SchemaArgument& rhs) const {
+    return type == rhs.type && index == rhs.index;
+  }
+};
+
+bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs);
+
+struct TORCH_API FunctionSchema {
+  FunctionSchema(
+      std::string name,
+      std::string overload_name,
+      std::vector<Argument> arguments,
+      std::vector<Argument> returns,
+      bool is_vararg = false,
+      bool is_varret = false)
+      : name_({std::move(name), std::move(overload_name)}),
+        arguments_(std::move(arguments)),
+        returns_(std::move(returns)),
+        is_vararg_(is_vararg),
+        is_varret_(is_varret) {
+    checkSchema();
+  }
+
+  FunctionSchema(
+      Symbol name,
+      std::string overload_name,
+      std::vector<Argument> arguments,
+      std::vector<Argument> returns,
+      bool is_vararg = false,
+      bool is_varret = false)
+      : FunctionSchema(
+            name.toQualString(),
+            std::move(overload_name),
+            std::move(arguments),
+            std::move(returns),
+            is_vararg,
+            is_varret) {
+    checkSchema();
+  }
+
+  // Checks whether this schema is backward compatible with the old one.
+  // The following conditions must be true:
+  // [Function structure] The new schema's name, overload-name, varargs, and
+  //      return arity are the same.
+  // [Output Narrowing] The new schema's output type must be the same class
+  //      or inherit from the old schema's output type.
+  // [Argument count] The new schema must have at least as many arguments as
+  //      the old schema (considering the list of positional and kwargs).
+  // [Arg Compatibility] Every argument in the old schema has a corresponding
+  //      argument in the new schema that:
+  //        * is at the same position.
+  //        * has the same name.
+  //        * is either positional, or kwarg and the old argument was kwarg.
+  //        * has the same type, or the old argument's type inherits from the
+  //          new argument's type.
+  // [Default Values] Every new argument must have a default value.
+  // E.g.
+  //   OK    f_new(a, b, c=1) => f_old(a, b)
+  //   NOK   f_new(a, c=1, *, b) => f_old(a, *, b)
+  //   OK    f_new(a, b, *, c) => f_old(a, *, b, c)
+  //   NOK   f_new(a, *, b, c) -> f_old(a, b, *, c)
+  //   NOK   f_new(a, *, c, b) => f_old(a, *, b, c)
+  //   OK    f_new(a, *, b, c, d=1) => f_old(a, *, b, c)
+  bool isBackwardCompatibleWith(
+      const FunctionSchema& old,
+      std::ostream* why_not = nullptr) const;
+
+  // Checks whether this schema is forward compatible with the old one.
+  // The following conditions must be true:
+  // [Function structure] The new schema's name, overload-name, varargs, and
+  //      return arity are the same.
+  // [Output Narrowing] The new schema's output type must be the same class
+  //      or inherit from the old schema's output type.
+  // [Arg Compatibility] Every argument in the old schema has a corresponding
+  //      argument in the new schema that:
+  //        * is at the same position.
+  //        * has the same name.
+  //        * is either positional, or kwarg and the old argument was kwarg.
+  //        * has the same type, or the old argument's type inherits from the
+  //          new argument's type.
+  // [Default Values] Every new argument must have a default value.
+  //      Each default value type should NOT be a container type.
+  // [Positioning] All default arguments MUST go after either old
+  //      default arguments or the end of positional arguments,
+  //      and right BEFORE all out arguments.
+  bool isForwardCompatibleWith(
+      const FunctionSchema& old,
+      std::ostringstream& why_not) const;
+
+ private:
+  OperatorName name_;
+  std::vector<Argument> arguments_;
+  std::vector<Argument> returns_;
+  // if true then this schema takes an arbitrary number of additional arguments
+  // after the arguments specified in arguments_;
+  // currently this is used primarily to represent 'primitive' operators whose
+  // arguments are not checked by schema
+  bool is_vararg_;
+  bool is_varret_;
+
+  // if no alias information is directly specified, what kind of "default"
+  // alias information should we infer?
+  // NB: due to alias analysis kind merging, this may be nullopt. Eventually
+  // this should always be set, no matter what
+  c10::optional<AliasAnalysisKind> alias_kind_;
+
+  template <typename T>
+  void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const;
+
+  void checkSchema() const {
+    bool seen_default_arg = false;
+    for (const auto& arg : arguments()) {
+      if (arg.default_value()) {
+        seen_default_arg = true;
+      } else {
+        // we have historically serialized broadcasting lists wo/default values,
+        // so to not break BC allow lists here
+        if (arg.type()->kind() == ListType::Kind) {
+          continue;
+        }
+        TORCH_INTERNAL_ASSERT(
+            !seen_default_arg || arg.kwarg_only(),
+            "Non-default positional argument follows default argument. Parameter ",
+            arg.name(),
+            " in ",
+            *this);
+      }
+    }
+  }
+
+ public:
+
+  void dump() const;
+
+  const OperatorName& operator_name() const {
+    return name_;
+  }
+  const std::string& name() const {
+    return name_.name;
+  }
+  const std::string& overload_name() const {
+    return name_.overload_name;
+  }
+  const std::vector<Argument>& arguments() const {
+    return arguments_;
+  }
+  const std::vector<Argument>& returns() const {
+    return returns_;
+  }
+  bool is_vararg() const {
+    return is_vararg_;
+  }
+  bool is_varret() const {
+    return is_varret_;
+  }
+  bool is_aliasing(const c10::SchemaArgument &argument) const {
+    TORCH_INTERNAL_ASSERT(
+        argument.index < getCorrectList(argument.type).size(),
+        "Invalid index for schema.");
+    const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
+    return aliasInfo;
+  }
+  bool is_mutable() const {
+    return std::any_of(
+        arguments_.cbegin(), arguments_.cend(), [](const Argument& arg) {
+          const AliasInfo* aliasInfo = arg.alias_info();
+          return aliasInfo && aliasInfo->isWrite();
+        });
+  }
+  bool is_mutable(const c10::SchemaArgument &argument) const {
+    TORCH_INTERNAL_ASSERT(
+        argument.index < getCorrectList(argument.type).size(),
+        "Invalid index for schema.");
+    const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info();
+    return aliasInfo && aliasInfo->isWrite();
+  }
+  bool is_mutable(c10::string_view name) const {
+    c10::optional<int> index = argumentIndexWithName(name);
+    TORCH_INTERNAL_ASSERT(
+        index != c10::nullopt, "Schema has no argument named ", name);
+
+    return is_mutable({c10::SchemaArgType::input, static_cast<size_t>(*index)});
+  }
+
+  // Returns whether lhs and rhs may alias directly.
+  // This does not account for cases where lhs or rhs are a container that
+  // may contain elements that alias the other argument.
+  // FunctionSchema::may_contain_alias will include that functionality.
+  bool may_alias(const SchemaArgument& lhs, const SchemaArgument& rhs) const;
+
+  // Returns whether lhs and rhs may alias directly, or whether lhs/rhs are a container
+  // that may contain elements that alias the other argument.
+  // bidirectional = false only returns whether lhs may contain an alias of rhs,
+  // while bidirectional = true returns both directions.
+  bool may_contain_alias(const SchemaArgument& lhs, const SchemaArgument& rhs, bool bidirectional = true) const;
+
+  // Returns whether the two AliasTypeSets contain any similarities,
+  // i.e. whether the two type sets can alias.
+  bool canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const;
+
+  // Recursively finds all contained types within the AliasTypeSet.
+  c10::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const;
+
+  // Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp.
+  // Used to map types to a type such that all types that can alias will be mapped to the same type.
+  // For example, calling this method on 'Optional[List[int]]' is the same as calling this method
+  // on 'List[int]'.
+  c10::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;
+
+  // Returns either arguments() or returns() depending on the SchemaArgType
+  // output => returns(), input => arguments()
+  const std::vector<Argument>& getCorrectList(SchemaArgType type) const;
+
+  c10::optional<int> argumentIndexWithName(c10::string_view name) const {
+    for (const auto i : c10::irange(arguments().size())) {
+      if(name == arguments()[i].name())
+        return i;
+    }
+    return c10::nullopt;
+  }
+  FunctionSchema cloneWithName(std::string name, std::string overload_name) const {
+    return FunctionSchema(
+        std::move(name),
+        std::move(overload_name),
+        arguments(),
+        returns(),
+        is_vararg(),
+        is_varret()
+        );
+  }
+  FunctionSchema cloneWithArguments(std::vector<Argument> new_arguments) const {
+    return FunctionSchema(
+        name(),
+        overload_name(),
+        std::move(new_arguments),
+        returns(),
+        is_vararg(),
+        is_varret());
+  }
+  FunctionSchema cloneWithReturns(std::vector<Argument> new_returns) const {
+    return FunctionSchema(
+        name(),
+        overload_name(),
+        arguments(),
+        std::move(new_returns),
+        is_vararg(),
+        is_varret());
+  }
+
+  std::string formatTypeMismatchMsg(
+      const Argument& expected,
+      const std::string& actual_type,
+      c10::optional<size_t> position = c10::nullopt,
+      c10::optional<std::string> value = c10::nullopt) const;
+
+  FunctionSchema cloneWithRemappedTypes(
+      const std::function<TypePtr(TypePtr)> type_map) const;
+
+  FunctionSchema cloneWithRealTypes(bool with_symint=true) const;
+
+  // Check that inputs have the correct types and appends any missing default
+  // values.
+  template <typename T>
+  void checkAndNormalizeInputs(
+      std::vector<IValue>& inputs,
+      const std::unordered_map<std::string, IValue>& kwargs =
+          std::unordered_map<std::string, IValue>{}) const;
+
+  std::string findErrorInKwargs(const std::vector<std::string>& kwargs) const;
+
+  bool hasAnyAliasInfo() const {
+    for (const auto& arg : arguments_) {
+      if (arg.alias_info() != nullptr) {
+        return true;
+      }
+    }
+    for (const auto& ret : returns_) {
+      if (ret.alias_info() != nullptr) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+
+  // TODO remove the mutation here
+  bool isDefaultAliasAnalysisKind() const {
+    return !alias_kind_;
+  }
+  AliasAnalysisKind aliasAnalysis() const {
+    return alias_kind_.value_or(AliasAnalysisKind::CONSERVATIVE);
+  }
+  void setAliasAnalysis(AliasAnalysisKind v) {
+    alias_kind_ = v;
+  }
+
+  c10::optional<c10::string_view> getNamespace() const {
+    return name_.getNamespace();
+  }
+
+  // Returns true if we successfully set the namespace (i.e. there was none
+  // previously set), and false otherwise.
+  bool setNamespaceIfNotSet(const char* ns) {
+    return name_.setNamespaceIfNotSet(ns);
+  }
+
+  // can a function with this schema be substituted for a function of rhs's
+  // schema and have the program typecheck?
+  // as_method - if true, treat this schema as a method and ignore
+  // the first argument, which will be the object in both cases
+  bool isSubtypeOf(const FunctionSchema& rhs, bool as_method, std::ostream* why_not=nullptr) const;
+};
+
+inline bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs) {
+  return lhs.name() == rhs.name()
+      && lhs.overload_name() == rhs.overload_name()
+      && lhs.arguments() == rhs.arguments()
+      && lhs.returns() == rhs.returns()
+      && lhs.is_vararg() == rhs.is_vararg()
+      && lhs.is_varret() == rhs.is_varret();
+}
+
+inline bool operator!=(const FunctionSchema& lhs, const FunctionSchema& rhs) {
+  return !(lhs == rhs);
+}
+
+// print out Argument, which is compatible with the FunctionSchema parser
+// full format: Type(alias)? name=default_value
+inline std::ostream& operator<<(std::ostream& out, const Argument& arg) {
+
+  // for adjusting the ? position.
+ // in schema, we have Tensor?(a!) input, and t(a!)?. + // however, t?(a!) doesn't work with schema parser. + // so we always use Type(alias)? format + // real_type versus fake_type: in order to be compatible with FunctionSchema + // parser, printing an argument with either MemoryFormat or Layout type should + // give us the original schema string, hence printing out real_type. + auto type = arg.real_type(); + bool is_opt = type->kind() == OptionalType::Kind; + auto unopt_type = is_opt ? type->castRaw()->getElementType() : type; + + if (unopt_type->kind() == ListType::Kind) { + // sized lists get size N from arg, not type + auto list = unopt_type->cast(); + out << list->getElementType()->str(); + if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()){ + out << arg.alias_info()->containedTypes()[0]; + } + std::string N = ""; + if (arg.N()) { + N = std::to_string(*arg.N()); + } + out << "[" << N << "]"; + } else { + out << unopt_type->str(); + } + + // print alias info if it has beforeSets. + if (arg.alias_info() && !arg.alias_info()->beforeSets().empty()) { + out << *arg.alias_info(); + } + + if (is_opt) { + out << "?"; + } + + if (!arg.name().empty()) { + out << " " << arg.name(); + } + + if (arg.default_value()) { + out << "="; + if ((type->kind() == c10::TypeKind::StringType || + unopt_type->kind() == c10::TypeKind::StringType) && + arg.default_value().value().isString()) { + printQuotedString(out, arg.default_value().value().toStringRef()); + } else if (type->kind() == TypeKind::ListType && type->castRaw()->getElementType()->kind() == c10::TypeKind::IntType) { + // We want to faithfully replicate JIT schema. + // in native_functions.yaml defaults for int arrays with a single value always look like + // int[2] stride=1 + // instead of + // int[2] stride=[1, 1] + auto default_val = arg.default_value().value().toIntList(); + if (default_val.size() > 1) { + auto all_defaults_the_same = true; + for (const auto i : c10::irange(1, default_val.size())) { + if (default_val[0] != default_val[i]) all_defaults_the_same = false; + } + if (all_defaults_the_same) { + out << default_val[0]; + } else { + out << arg.default_value().value(); + } + } else { + out << arg.default_value().value(); + } + } else { + out << arg.default_value().value(); + } + } + + return out; +} + +inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema); + +inline std::string toString(const FunctionSchema& schema) { + std::ostringstream str; + str << schema; + return str.str(); +} + +} // namespace c10 + +namespace std { +template<> + struct hash { + size_t operator()(const c10::SchemaArgument& arg) const + { + return c10::hash_combine(std::hash()(arg.index), std::hash()(static_cast(arg.type))); + } + }; +} // namespace std + + +#include // IWYU pragma: keep diff --git a/voice_bridge/torch/include/ATen/core/function_schema_inl.h b/voice_bridge/torch/include/ATen/core/function_schema_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..6adc986b4455fa9985ab9d9ef483c9bba3d7bec0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/function_schema_inl.h @@ -0,0 +1,481 @@ +#pragma once +#include + +// note: windows build doesn't find symbols in operator files unless +// this is a header file + +namespace c10 { + +inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { + // eventually this should look almost identical to python arg parser, but + // it is simpler for now to work directly on this schema + + out << schema.name(); + if 
(schema.overload_name() != "") {
+    out << "." << schema.overload_name();
+  }
+  out << "(";
+
+  bool seen_kwarg_only = false;
+  for (const auto i : c10::irange(schema.arguments().size())) {
+    if (i > 0) out << ", ";
+    if (schema.arguments()[i].kwarg_only() && !seen_kwarg_only) {
+      out << "*, ";
+      seen_kwarg_only = true;
+    }
+    out << schema.arguments()[i];
+  }
+
+  if(schema.is_vararg()) {
+    if(schema.arguments().size() > 0)
+      out << ", ";
+    out << "...";
+  }
+
+  out << ") -> ";
+
+  const auto& returns = schema.returns();
+
+  /*
+   * We should skip the parentheses if we return a single item that is not
+   * varret, or if we return nothing but varret.
+   *
+   * Special handling is needed for schemas like
+   *   aten::items.str(Dict(str, t) self) -> (str,t)[]
+   * Even though this schema returns a single item, we need to add the
+   * parentheses. This is necessary so the printed schema can be parsed by the
+   * C++ SchemaParser. Without the extra parentheses, the parser sees the first
+   * parenthesis in '(str,t)' and mistakenly treats the return type as a tuple.
+   * An alternative would be to enhance the Lexer to look ahead multiple tokens
+   * to accurately decide if the return type is a tuple.
+   */
+  bool need_paren = !(
+      (returns.size() == 1 && !schema.is_varret()) ||
+      (returns.size() == 0 && schema.is_varret()));
+
+  if (returns.size() == 1 && !schema.is_varret()) {
+    std::stringstream return_ss;
+    return_ss << returns.at(0);
+    auto return_str = return_ss.str();
+
+    // enclose the single return item in parentheses if the return type
+    // starts with a left parenthesis.
+    //
+    // There are 2 cases:
+    // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'.
+    //    Without the extra parentheses, the C++ schema parser cannot parse it.
+    // 2. something like '-> ((str, str))'. Extra parentheses are needed so the
+    //    return type is a single tuple rather than two strings.
+    // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about
+    // this. test_serialize_and_deserialize (https://github.com/pytorch/pytorch/blob/master/test/test_function_schema.py#L15)
+    // also covers this case.
+    if (return_str.size() > 0 && return_str.front() == '(') {
+      need_paren = true;
+    }
+  }
+
+  if (need_paren) {
+    out << "(";
+  }
+  for (const auto i : c10::irange(returns.size())) {
+    if (i > 0) {
+      out << ", ";
+    }
+    out << returns.at(i);
+  }
+  if (schema.is_varret()) {
+    if (returns.size() != 0) {
+      out << ", ";
+    }
+    out << "...";
+  }
+  if (need_paren) {
+    out << ")";
+  }
+  return out;
+}
+
+inline size_t findFirstOutArg(const std::vector<Argument>& args) {
+  // find the start of out args in the schema
+  for (const auto out_start_idx : c10::irange(args.size())) {
+    if (args.at(out_start_idx).is_out()) {
+      return out_start_idx;
+    }
+  }
+  return args.size();
+}
+
+inline bool Argument::isBackwardCompatibleWith(
+    const Argument& old,
+    std::ostream* why_not) const {
+  const Argument* lhs = this;
+  const Argument* rhs = &old;
+  if (!(lhs->name() == rhs->name()
+        && lhs->N() == rhs->N()
+        && (lhs->alias_info() == rhs->alias_info()
+            || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
+                && *lhs->alias_info() == *rhs->alias_info())))) {
+    return false;
+  }
+  if (lhs->kwarg_only() && !rhs->kwarg_only()) {
+    return false;
+  }
+  if (!rhs->type()->isSubtypeOfExt(*lhs->type(), why_not)) {
+    return false;
+  }
+  if (rhs->default_value().has_value() &&
+      lhs->default_value() != rhs->default_value()) {
+    return false;
+  }
+  return true;
+}
+
+inline bool Argument::isForwardCompatibleWith(
+    const Argument& old,
+    std::ostream* why_not) const {
+  const Argument* lhs = this;
+  const Argument* rhs = &old;
+  if (!(lhs->name() == rhs->name()
+        && lhs->N() == rhs->N()
+        && (lhs->alias_info() == rhs->alias_info()
+            || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr
+                && *lhs->alias_info() == *rhs->alias_info())))) {
+    return false;
+  }
+  if (lhs->kwarg_only() && !rhs->kwarg_only()) {
+    return false;
+  }
+  if (!lhs->type()->isSubtypeOfExt(rhs->type(), why_not)) {
+    return false;
+  }
+  if (rhs->default_value().has_value() &&
+      lhs->default_value() != rhs->default_value()) {
+    return false;
+  }
+  if (lhs->default_value().has_value() && !rhs->default_value().has_value()) {
+    return false;
+  }
+  return true;
+}
+
+inline std::string FunctionSchema::formatTypeMismatchMsg(
+    const Argument& expected,
+    const std::string& actual_type,
+    c10::optional<size_t> position,
+    c10::optional<std::string> value) const {
+  std::string position_str;
+  if (position) {
+    position_str = c10::str("Position: ", *position, "\n");
+  }
+  std::string value_str;
+  if (value) {
+    value_str = c10::str("Value: ", *value, "\n");
+  }
+  return c10::str(
+      name(),
+      "() ",
+      expected.formatTypeMismatchMsg(actual_type),
+      position_str,
+      value_str,
+      "Declaration: ",
+      *this);
+}
+
+inline bool FunctionSchema::isBackwardCompatibleWith(
+    const FunctionSchema& old,
+    std::ostream* why_not) const {
+  if (!(name() == old.name()
+        && overload_name() == old.overload_name()
+        // we are conservative on is_vararg and is_varret,
+        // since they are only used by internal operators
+        && is_vararg() == old.is_vararg()
+        && is_varret() == old.is_varret()
+        && returns().size() == old.returns().size()
+        && arguments().size() >= old.arguments().size())) {
+    return false;
+  }
+  for (const auto i : c10::irange(returns().size())) {
+    // Backwards compatibility requires covariance on argument types
+    // (i.e. more generic), and contravariance on return types (i.e.
+    // more specific).
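+    // For example (an illustrative note, not part of the original header):
+    // widening an argument type (e.g. int -> Scalar) or narrowing a return
+    // type (e.g. Scalar -> int) keeps old call sites type-correct; the
+    // reverse direction in either position breaks them.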
+    if (!old.returns().at(i).isBackwardCompatibleWith(
+          returns().at(i),
+          why_not)) {
+      return false;
+    }
+  }
+
+  // we want to test both out and default args separately
+  size_t old_out_start_idx = findFirstOutArg(old.arguments());
+  size_t new_out_start_idx = findFirstOutArg(arguments());
+
+  // make sure that, among the default args, they are backward compatible
+  for (const auto i : c10::irange(old_out_start_idx)) {
+    if (!arguments().at(i).isBackwardCompatibleWith(
+          old.arguments().at(i), why_not)) {
+      return false;
+    }
+  }
+
+  // Validate that all new arguments provided have a default value
+  for (const auto i : c10::irange(old_out_start_idx, new_out_start_idx)) {
+    if (!arguments().at(i).default_value()) {
+      if (why_not) {
+        *why_not
+            << "Function schema is not backward compatible since the new argument '"
+            << arguments().at(i).name() << "' of type "
+            << arguments().at(i).type()->str()
+            << " did not provide a default value.";
+      }
+      return false;
+    }
+  }
+
+  // now compare the out args
+  for (const auto i : c10::irange(old_out_start_idx, old.arguments().size())) {
+    if (!arguments()
+             .at(i - old_out_start_idx + new_out_start_idx)
+             .isBackwardCompatibleWith(old.arguments().at(i), why_not)) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+inline bool FunctionSchema::isForwardCompatibleWith(
+    const FunctionSchema& old,
+    std::ostringstream& why_not) const {
+  if (!(name() == old.name() &&
+        overload_name() == old.overload_name()
+        // we are conservative on is_vararg and is_varret,
+        // since they are only used by internal operators
+        && is_vararg() == old.is_vararg() && is_varret() == old.is_varret() &&
+        returns().size() == old.returns().size())) {
+    return false;
+  }
+
+  // we want to test both out and default args separately
+  size_t old_out_start_idx = findFirstOutArg(old.arguments());
+  size_t new_out_start_idx = findFirstOutArg(arguments());
+
+  if (old.arguments().size() - old_out_start_idx !=
+      arguments().size() - new_out_start_idx) {
+    if (why_not) {
+      why_not << "Function schema should have the "
+              << "same number of out arguments";
+    }
+    return false;
+  }
+
+  // make sure that, among the default args, they are forward compatible
+  for (size_t i = 0; i < std::min(old_out_start_idx, new_out_start_idx); i++) {
+    if (!arguments().at(i).isForwardCompatibleWith(old.arguments().at(i))) {
+      if (why_not) {
+        why_not
+            << "'" << arguments().at(i).name() << "'"
+            << " is not forward compatible with the older version of the schema";
+      }
+      return false;
+    }
+  }
+
+  // Validate that all new arguments provided have a default value
+  for (size_t i = old_out_start_idx; i < new_out_start_idx; ++i) {
+    if (!arguments().at(i).default_value()) {
+      if (why_not) {
+        why_not
+            << "Function schema is not forward compatible since the new argument '"
+            << arguments().at(i).name() << "' of type "
+            << arguments().at(i).type()->str()
+            << " did not provide a default value.";
+      }
+      return false;
+    }
+
+    auto default_val = arguments().at(i).default_value().value();
+    if (default_val.isList() || default_val.isGenericDict()) {
+      if (why_not) {
+        why_not
+            << "Function schema is not forward compatible since the new argument '"
+            << arguments().at(i).name() << "' of type "
+            << arguments().at(i).type()->str() << " has a container type "
+            << "as its default value.";
+      }
+      return false;
+    }
+  }
+
+  // now compare the out args
+  for (size_t i = old_out_start_idx; i < old.arguments().size(); i++) {
+    if (!arguments()
+             .at(i - old_out_start_idx + new_out_start_idx)
+             .isForwardCompatibleWith(old.arguments().at(i))) {
+      if (why_not) {
+        why_not << "Out argument '"
+                << arguments().at(i).name()
+                << "' is not forward compatible with the older version of the schema";
+      }
+      return false;
+    }
+  }
+
+  return true;
+}
+
+template <typename T>
+inline void FunctionSchema::checkArg(
+    const IValue& value,
+    const Argument& argument,
+    optional<size_t> pos) const {
+  if (value.isTensor() && argument.type() == TensorType::get()) {
+    // Fast-path for the common case
+    return;
+  }
+  if (!value.type<T>()->isSubtypeOf(*argument.type())) {
+    TORCH_CHECK(
+        false,
+        formatTypeMismatchMsg(
+            argument, value.type<T>()->repr_str(), pos));
+  }
+}
+
+inline std::string FunctionSchema::findErrorInKwargs(const std::vector<std::string>& kwargs) const {
+  // First check if any of the kwargs are unknown, i.e. don't match the name of
+  // any argument in the schema.
+  for (const auto& kwarg : kwargs) {
+    if (!std::count_if(
+            arguments().begin(),
+            arguments().end(),
+            [&kwarg](const Argument& argument) {
+              return argument.name() == kwarg;
+            })) {
+      return c10::str(
+          "Unknown keyword argument '",
+          kwarg,
+          "' for operator '",
+          name(),
+          "'. Schema: ",
+          *this);
+    }
+  }
+  // If there are unconsumed kwargs but none of them were unknown, the first
+  // positional argument present in the kwargs is duplicated.
+  for (const auto& argument : arguments()) {
+    if (std::find(kwargs.begin(), kwargs.end(), argument.name()) != kwargs.end()) {
+      AT_ASSERT(!argument.default_value());
+      return c10::str(
+          "Argument '",
+          argument.name(),
+          "' specified both as positional and ",
+          "keyword argument. Schema: ",
+          *this);
+    }
+  }
+  return "";
+}
+
+template <typename T>
+inline void FunctionSchema::checkAndNormalizeInputs(
+    std::vector<IValue>& inputs,
+    const std::unordered_map<std::string, IValue>& kwargs) const {
+  // Do we have more inputs than the schema accepts?
+  TORCH_CHECK(
+      inputs.size() <= arguments().size(),
+      "Expected at most ",
+      arguments().size(),
+      " argument(s) for operator '",
+      name(),
+      "', but received ",
+      inputs.size(),
+      " argument(s). Declaration: ",
+      *this);
+
+  size_t consumed_kwargs = 0;
+  for (const auto pos : c10::irange(arguments().size())) {
+    const auto& argument = arguments()[pos];
+    if (pos < inputs.size()) {
+      checkArg<T>(inputs[pos], argument, pos);
+      continue;
+    }
+    auto it = kwargs.find(argument.name());
+    if (it != kwargs.end()) {
+      checkArg<T>(it->second, argument, nullopt);
+      inputs.push_back(it->second);
+      consumed_kwargs++;
+      continue;
+    }
+    if (argument.default_value()) {
+      inputs.push_back(*argument.default_value());
+      continue;
+    }
+    AT_ERROR( name(), "() is missing value for argument '", argument.name(), "'. 
Declaration: ", + *this); + } + if (consumed_kwargs != kwargs.size()) { + std::vector names; + for(const auto& k : kwargs) { + names.emplace_back(k.first); + } + throw std::runtime_error(findErrorInKwargs(names)); + } +} + +inline FunctionSchema FunctionSchema::cloneWithRemappedTypes( + const std::function type_map) const { + auto update_args = [&](const std::vector& args) { + std::vector new_args; + new_args.reserve(args.size()); + for(const Argument& arg : args) { + new_args.emplace_back(arg.cloneWithType(type_map(arg.type()))); + } + return new_args; + }; + return FunctionSchema( + name(), + overload_name(), + update_args(arguments()), + update_args(returns()), + is_vararg(), + is_varret()); +} + +// covariant subtyping of list of Arguments +inline bool isSubtypeOfList( + ArrayRef child, + ArrayRef parent, + std::ostream* why_not) { + if (child.size() != parent.size()) { + return false; + } + for (const auto i : c10::irange(child.size())) { + const Argument& c = child[i]; + const Argument& p = parent[i]; + if (c.name() != p.name()) { + return false; + } + if (!c.type()->isSubtypeOfExt(*p.type(), why_not)) { + return false; + } + } + return true; +} + +inline bool FunctionSchema::isSubtypeOf( + const FunctionSchema& rhs, + bool as_method, + std::ostream* why_not) const { + size_t start = as_method ? 1 : 0; + // functions are contravariant in arguments but covariant in returns + return isSubtypeOfList( + ArrayRef(rhs.arguments()).slice(start), + ArrayRef(arguments()).slice(start), + why_not) && + isSubtypeOfList(returns(), rhs.returns(), why_not); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/functional.h b/voice_bridge/torch/include/ATen/core/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..6b4f3447f5d48051e83a4664e159303df9db4f4a --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/functional.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include + +namespace c10 { + +// The passed in function must take T by value (T), or by +// const reference (const T&); taking T by non-const reference +// will result in an error like: +// +// error: no type named 'type' in 'class std::result_of' +// +// No explicit template parameters are required. + +// Overload for explicit function and ArrayRef +template +inline auto fmap(const T& inputs, const F& fn) -> std::vector { + std::vector r; + r.reserve(inputs.size()); + for(const auto & input : inputs) + r.push_back(fn(input)); + return r; +} + +// C++ forbids taking an address of a constructor, so here's a workaround... 
+// Overload for constructor (R) application
+template<typename R, typename T>
+inline std::vector<R> fmap(const T& inputs) {
+  std::vector<R> r;
+  r.reserve(inputs.size());
+  for(auto & input : inputs)
+    r.push_back(R(input));
+  return r;
+}
+
+template<typename F, typename T>
+inline std::vector<T> filter(at::ArrayRef<T> inputs, const F& fn) {
+  std::vector<T> r;
+  r.reserve(inputs.size());
+  for(auto & input : inputs) {
+    if (fn(input)) {
+      r.push_back(input);
+    }
+  }
+  return r;
+}
+
+template<typename F, typename T>
+inline std::vector<T> filter(const std::vector<T>& inputs, const F& fn) {
+  return filter<F, T>(static_cast<at::ArrayRef<T>>(inputs), fn);
+}
+
+} // namespace c10
diff --git a/voice_bridge/torch/include/ATen/core/grad_mode.h b/voice_bridge/torch/include/ATen/core/grad_mode.h
new file mode 100644
index 0000000000000000000000000000000000000000..47051525c59beece9e8e11accacd926d9c5e587e
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/grad_mode.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#include
+#include
+
+namespace at {
+  using GradMode = c10::GradMode;
+  using AutoGradMode = c10::AutoGradMode;
+  using NoGradGuard = c10::NoGradGuard;
+}
diff --git a/voice_bridge/torch/include/ATen/core/interned_strings.h b/voice_bridge/torch/include/ATen/core/interned_strings.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc5860ebf2c4e73043bc1dbc56f70832b3b22166
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/interned_strings.h
@@ -0,0 +1,348 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+namespace c10 {
+
+#define FORALL_NS_SYMBOLS(_) \
+  _(namespaces, prim) \
+  _(namespaces, prims) \
+  _(namespaces, nvprims) \
+  _(namespaces, aten) \
+  _(namespaces, cuda) \
+  _(namespaces, onnx) \
+  _(namespaces, attr) \
+  _(namespaces, scope) \
+  _(namespaces, user) \
+  _(namespaces, _caffe2) \
+  _(namespaces, dimname) \
+  _(namespaces, namespaces) \
+  _(prim, Assign) \
+  _(prim, BroadcastingChunk) \
+  _(prim, BroadcastSizes) \
+  _(prim, ReductionSizes) \
+  _(prim, Constant) \
+  _(prim, ChunkSizes) \
+  _(prim, ConstantMKLDNNTensor) \
+  _(prim, BroadcastMKLDNNTensors) \
+  _(prim, MKLDNNGroup) \
+  _(prim, MKLDNNHardSwish) \
+  _(prim, MKLDNNHardSigmoid) \
+  _(prim, MKLDNNHardTanh) \
+  _(prim, MKLDNNClamp) \
+  _(prim, StaticRuntimeCopyOuts) \
+  _(prim, Drop) \
+  _(prim, Eval) \
+  _(prim, Expand) /* onnx */ \
+  _(prim, FusionGroup) \
+  _(prim, CudaFusionGroup) \
+  _(prim, CudaFusionGuard) \
+  _(prim, oneDNNFusionGroup) \
+  _(prim, oneDNNFusionGuard) \
+  _(prim, FunctionalGraph) \
+  _(prim, add_optional) \
+  _(prim, view_copy) \
+  _(prim, reshape_copy) \
+  _(prim, squeeze_copy) \
+  _(prim, unsqueeze_copy) \
+  _(prim, flatten_copy) \
+  _(prim, expand_copy) \
+  _(prim, expand_as_copy) \
+  _(prim, DifferentiableGraph) \
+  _(prim, TensorExprGroup) \
+  _(prim, TensorExprDynamicGroup) \
+  _(prim, StaticSubgraph) \
+  _(prim, If) \
+  _(prim, Jump) /* debug */ \
+  _(prim, JumpNZ) /* debug */ \
+  _(prim, JumpZ) /* debug */ \
+  _(prim, Load) \
+  _(prim, Loop) \
+  _(prim, Param) \
+  _(prim, PackPadded) /* onnx */ \
+  _(prim, PadPacked) /* onnx */ \
+  _(prim, Placeholder) /* debug */ \
+  _(prim, Print) \
+  _(prim, EmptyListLiteral) \
+  _(prim, LegacyTypedConstructor) \
+  _(prim, PythonOp) \
+  _(prim, IgnoredPythonOp) \
+  _(prim, Reverse) \
+  _(prim, Return) \
+  _(prim, ReturnStmt) \
+  _(prim, BreakStmt) \
+  _(prim, ContinueStmt) \
+  _(prim, ComprehensionScope) \
+  _(prim, Store) \
+  _(prim, AutogradZero) \
+  _(prim, AutogradAnyNonZero) \
+  _(prim, AutogradAllNonZero) \
+  _(prim, AutogradAllZero) \
+  _(prim, Starred) \
+  _(prim, TupleConstruct) 
\ + _(prim, TupleUnpack) \ + _(prim, TupleIndex) \ + _(prim, TupleSlice) \ + _(prim, ListConstruct) \ + _(prim, ListUnpack) \ + _(prim, DictConstruct) \ + _(prim, ModuleContainerIndex) \ + _(prim, EnumName) \ + _(prim, EnumValue) \ + _(prim, StringIndex) \ + _(prim, NumToTensor) \ + _(prim, Uninitialized) \ + _(prim, VarConcat) \ + _(prim, VarStack) \ + _(prim, With) \ + _(prim, Enter) \ + _(prim, Exit) \ + _(prim, IfThenElse) \ + _(aten, Bool) \ + _(aten, Int) \ + _(aten, FloatImplicit) \ + _(aten, ComplexImplicit) \ + _(aten, IntImplicit) \ + _(aten, ScalarImplicit) \ + _(aten, Float) \ + _(aten, Complex) \ + _(aten, str) \ + _(aten, Delete) \ + _(prim, device) \ + _(prim, dtype) \ + _(prim, layout) \ + _(prim, id) \ + _(prim, requires_grad) \ + _(prim, MakeTestTensor) /* test */ \ + _(prim, AutogradAdd) \ + _(prim, GradOf) \ + _(aten, grad) \ + _(aten, backward) \ + _(prim, Guard) \ + _(prim, BailOut) \ + _(prim, TypeCheck) \ + _(prim, RequiresGradCheck) \ + _(prim, FallbackGraph) \ + _(prim, FusedConcat) \ + _(prim, ConstantChunk) \ + _(prim, MMTreeReduce) \ + _(prim, MMBatchSide) \ + _(prim, list) \ + _(prim, dict) \ + _(prim, min) \ + _(prim, max) \ + _(prim, abs) \ + _(aten, divmod) \ + _(prim, zip) \ + _(prim, enumerate) \ + _(prim, range) \ + _(prim, rangelist) \ + _(prim, isinstance) \ + _(prim, tolist) \ + _(prim, unchecked_cast) \ + _(aten, _grad_sum_to_size) \ + _(aten, _size_if_not_equal) \ + _(aten, _ncf_unsqueeze) \ + _(aten, warn) \ + _(aten, sorted) \ + _(aten, floordiv) \ + _(aten, __range_length) \ + _(aten, __derive_index) \ + _(aten, __round_to_zero_floordiv) \ + _(aten, is_scripting) \ + _(aten, _unwrap_optional) \ + _(prim, fork) \ + _(prim, forkClosure) \ + _(prim, RaiseException) \ + _(prim, Closure) \ + _(prim, CreateObject) \ + _(prim, SetAttr) \ + _(prim, GetAttr) \ + _(prim, HasAttr) \ + _(prim, profile) \ + _(prim, profile_ivalue) \ + _(prim, AddStatValue) \ + _(prim, TimePoint) \ + _(prim, CallFunction) \ + _(prim, CallMethod) \ + _(prim, LoopContinuation) \ + _(prim, annotate) \ + _(prim, TracedModuleForward) \ + _(prim, TracedFork) \ + _(prim, TracedAttr) \ + _(prim, rpc_async) \ + _(prim, rpc_sync) \ + _(prim, rpc_remote) \ + _(prim, is_cuda) \ + _(aten, append) \ + _(aten, as_tensor) \ + _(aten, adaptive_avg_pool2d_backward) \ + _(aten, dim) \ + _(aten, format) \ + _(aten, percentFormat) \ + _(aten, __not__) \ + _(aten, __is__) \ + _(aten, __isnot__) \ + _(aten, _ger) \ + _(aten, __getitem__) \ + _(aten, _set_item) \ + _(aten, manual_seed) \ + _(aten, device) \ + _(aten, hash) \ + _(aten, len) \ + _(aten, list) \ + _(aten, dict) \ + _(aten, wait) \ + _(aten, save) \ + _(aten, keys) \ + _(aten, ord) \ + _(aten, chr) \ + _(aten, hex) \ + _(aten, oct) \ + _(aten, clear) \ + _(aten, setdefault) \ + _(aten, bin) \ + _(aten, pop) \ + _(aten, insert) \ + _(aten, tensor) \ + _(prim, unchecked_unwrap_optional) \ + _(aten, __contains__) \ + _(prim, BailoutTemplate) \ + _(prim, grad) \ + _(cuda, _set_device) \ + _(cuda, set_stream) \ + _(cuda, _current_device) \ + _(cuda, synchronize) \ + _(aten, has_torch_function) \ + _(aten, is_autocast_enabled) \ + _(aten, is_autocast_cpu_enabled) \ + FORALL_ATEN_BASE_SYMBOLS(_) \ + _(onnx, Add) \ + _(onnx, Concat) \ + _(onnx, Constant) \ + _(onnx, ConstantFill) \ + _(onnx, Div) \ + _(onnx, GRU) \ + _(onnx, Gather) \ + _(onnx, Gemm) \ + _(onnx, LSTM) \ + _(onnx, MatMul) \ + _(onnx, Min) \ + _(onnx, Mul) \ + _(onnx, Pow) \ + _(onnx, RNN) \ + _(onnx, Shape) \ + _(onnx, Size) \ + _(onnx, Slice) \ + _(onnx, Softmax) \ + _(onnx, 
Squeeze) \ + _(onnx, Sub) \ + _(onnx, Transpose) \ + _(onnx, Unsqueeze) \ + _(onnx, Loop) \ + _(onnx, If) \ + _(onnx, Reshape) \ + _(onnx, Expand) \ + _(onnx, Equal) \ + _(onnx, Greater) \ + _(onnx, GreaterOrEqual) \ + _(onnx, Less) \ + _(onnx, LessOrEqual) \ + _(onnx, Not) \ + _(aten, ATen) \ + _(onnx, Split) \ + _(onnx, ConstantOfShape) \ + _(onnx, Cast) \ + _(onnx, Mod) \ + _(onnx, Sqrt) \ + _(onnx, SplitToSequence) \ + _(onnx, SequenceAt) \ + _(onnx, SequenceConstruct) \ + _(onnx, SequenceEmpty) \ + _(onnx, SequenceInsert) \ + _(onnx, SequenceErase) \ + _(onnx, ConcatFromSequence) \ + _(onnx, Identity) \ + _(onnx, SoftmaxCrossEntropyLoss) \ + _(onnx, NegativeLogLikelihoodLoss) \ + _(onnx, LogSoftmax) \ + _(onnx, ReduceL1) \ + _(onnx, ReduceL2) \ + _(onnx, Conv) \ + _(onnx, BatchNormalization) \ + _(onnx, ReduceMean) \ + _(onnx, ReduceProd) \ + _(onnx, Relu) \ + _(onnx, Neg) \ + _(onnx, NonZero) \ + _(onnx, Range) \ + _(onnx, Tile) \ + _(onnx, Where) \ + _(onnx, Optional) \ + _(onnx, OptionalGetElement) \ + _(onnx, OptionalHasElement) \ + FORALL_ATTR_BASE_SYMBOLS(_) \ + _(attr, Subgraph) \ + _(attr, ReverseSubgraph) \ + _(attr, f_real_outputs) \ + _(attr, df_input_vjps) \ + _(attr, df_input_captured_inputs) \ + _(attr, df_input_captured_outputs) \ + _(attr, df_output_vjps) \ + _(attr, axes) \ + _(attr, symbolic_shape_inputs) \ + _(attr, allow_stack_outputs) \ + _(attr, striding_inputs_desc) \ + _(attr, striding_outputs_desc) \ + _(attr, broadcast) \ + _(attr, direction) \ + _(attr, ends) \ + _(attr, inplace) \ + _(attr, input_as_shape) \ + _(attr, is_zero) \ + _(attr, num_none) \ + _(attr, num_present) \ + _(attr, perm) \ + _(attr, starts) \ + _(attr, profiled_type) \ + _(attr, transA) \ + _(attr, transB) \ + _(attr, name) \ + _(attr, module) \ + _(attr, beg) \ + _(attr, idx) \ + _(attr, split) \ + _(attr, slot) \ + _(attr, kinds) \ + _(attr, types) \ + _(attr, scope) \ + _(attr, keepdims) \ + _(attr, cache_id) \ + _(attr, new_axis) \ + _(attr, warn_id) \ + _(attr, output_layouts) \ + _(attr, allowzero) \ + _(attr, seen_none) \ + _(attr, overload_name) + +enum class _keys : unique_t { + #define DEFINE_KEY(ns, s) ns##_##s, + FORALL_NS_SYMBOLS(DEFINE_KEY) + #undef DEFINE_KEY + num_symbols +}; + +#define DEFINE_SYMBOL(ns, s) \ + namespace ns { constexpr Symbol s(static_cast<unique_t>(_keys::ns##_##s)); } +FORALL_NS_SYMBOLS(DEFINE_SYMBOL) +#undef DEFINE_SYMBOL + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/interned_strings_class.h b/voice_bridge/torch/include/ATen/core/interned_strings_class.h new file mode 100644 index 0000000000000000000000000000000000000000..6e57332b99f97161437e3c51f895b1f10fe6be7b --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/interned_strings_class.h @@ -0,0 +1,34 @@ +#include <cstdint> +#include <mutex> +#include <string> +#include <unordered_map> +#include <utility> +#include <vector> +#include <ATen/core/symbol.h> +#include <c10/macros/Macros.h> + +namespace c10 { + +struct TORCH_API InternedStrings { + InternedStrings(); + Symbol symbol(const std::string& s); + std::pair<const char*, const char*> string(Symbol sym); + Symbol ns(Symbol sym); + + private: + // prereq - holding mutex_ + Symbol _symbol(const std::string& s); + std::pair<const char*, const char*> customString(Symbol sym); + std::unordered_map<std::string, Symbol> string_to_sym_; + + struct SymbolInfo { + Symbol ns; + std::string qual_name; + std::string unqual_name; + }; + std::vector<SymbolInfo> sym_to_info_; + + std::mutex mutex_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/ivalue.h b/voice_bridge/torch/include/ATen/core/ivalue.h new file mode 100644 index
0000000000000000000000000000000000000000..122afcba4d8433f5903fc453b3e5ded75c533739 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ivalue.h @@ -0,0 +1,1452 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {}; +namespace jit { +using ::torch::CustomClassHolder; +struct Function; +struct CompilationUnit; +struct Module; +} // namespace jit +} // namespace torch +namespace c10 { +template +class Dict; +template +class List; +template +class IListRef; +struct IValue; +struct ClassType; +struct Type; +class RRefInterface; + +struct ClassType; +using ClassTypePtr = std::shared_ptr; + +TORCH_API bool _fastEqualsForContainer(const IValue& lhs, const IValue& rhs); + +TORCH_API torch::jit::Function* checkObjectSortSchema( + const c10::ClassTypePtr& t, + std::stringstream& why_not); + +// A comparator that checks ordering of two IValues of same type. +typedef std::function IValueComparator; + +TORCH_API IValueComparator getLessThanComparator(const IValue& v); +TORCH_API IValueComparator getGreaterThanComparator(const IValue& v); + +namespace ivalue { +struct Tuple; +struct Future; +struct ConstantString; +struct GenericDict; +struct Object; +struct PyObjectHolder; +struct EnumHolder; +// We need a ComplexHolder because currently the payloads in the Union +// only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big +// to fit in the IValue directly, we indirect complex numbers through an intrusive +// pointer to ComplexHolder (which contains a c10::complex). +struct ComplexHolder : c10::intrusive_ptr_target { + public: + template + ComplexHolder(c10::complex c) { + val = convert>(c); + } + ComplexHolder() {} + c10::complex val; +}; +} // namespace ivalue + +// This is an owning wrapper for a c10::optional> +// that can be implicitly converted to a (non-owning) optional>. +// Its purpose is to be used in generated code to keep the vector alive +// either until the end of a statement (as a temporary), or as a saved arg +// in autograd. +template +struct OptionalArray { + c10::optional> list; + + OptionalArray(){} + OptionalArray(std::vector val) : list(std::move(val)) {} + + // Used when saving an argument for the backwards pass. + OptionalArray& operator=(c10::optional> ref) { + if (ref) { + list = std::vector(ref->begin(), ref->end()); + } else { + list = nullopt; + } + return *this; + } + + // Used when saving an argument for the backwards pass. + OptionalArray& operator=(c10::OptionalArrayRef ref) { + if (ref) { + list = std::vector(ref->begin(), ref->end()); + } else { + list = nullopt; + } + return *this; + } + + operator c10::optional>() { + if (!list) { + return nullopt; + } + return *list; + } + + operator c10::OptionalArrayRef() { + if (!list) { + return nullopt; + } + return *list; + } +}; + +// Capsule is an internal implementation detail of custom C++ classes. We +// define it as an owning wrapper for +// c10::intrusive_ptr This wrapper is here to serve as +// an abstraction of the type erased custom class object pointer. It also allow +// pybind11 to treat this as a standalone class to register as a separate type +// caster, instead of a custom pointer holder which the pointer holder type +// caster try to "unwrap" it automatically. 
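A minimal usage sketch of the OptionalArray wrapper defined above, assuming only headers this file already pulls in; the function and variable names are illustrative, not part of the header:

    #include <ATen/core/ivalue.h>
    #include <vector>

    void optional_array_roundtrip() {
      std::vector<int64_t> sizes = {2, 3, 4};
      c10::OptionalArray<int64_t> saved;
      // Assigning from a non-owning optional ArrayRef copies the elements
      // into the owned vector, so they survive past this statement.
      saved = c10::optional<c10::ArrayRef<int64_t>>(sizes);
      // Converting back yields a non-owning view over the stored copy.
      c10::optional<c10::ArrayRef<int64_t>> view = saved;
      TORCH_INTERNAL_ASSERT(view.has_value() && view->size() == 3);
    }

This mirrors the lifetime pattern described in the comments: own the data while it is saved (e.g. for autograd), lend it out again as an ArrayRef when consumed.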
+struct Capsule { + c10::intrusive_ptr obj_ptr; + explicit Capsule(c10::intrusive_ptr ptr) + : obj_ptr(std::move(ptr)) {} +}; + +// IValue is the generic tagged union used by the interpreter to hold +// all value types. +// It is a 16-byte object with an 8-byte payload and an 8-byte tag. +// The tag is currently 4 bytes to determine the type, and 1 byte +// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs +// retain/release calls. + +#define TORCH_FORALL_TAGS(_) \ + _(None) \ + _(Tensor) \ + _(Storage) \ + _(Double) \ + _(ComplexDouble) \ + _(Int) \ + _(SymInt) \ + _(SymFloat) \ + _(Bool) \ + _(Tuple) \ + _(String) \ + _(Blob) \ + _(GenericList) \ + _(GenericDict) \ + _(Future) \ + _(Device) \ + _(Stream) \ + _(Object) \ + _(PyObject) \ + _(Uninitialized) \ + _(Capsule) \ + _(RRef) \ + _(Quantizer) \ + _(Generator) \ + _(Enum) + +// [doxygen private] +// These methods are not actually private but we don't want to document them, so +// they are marked `@private`, which hides them on the doxygen documentation for +// this page. + +/// IValue (Interpreter Value) is a tagged union over the types +/// supported by the TorchScript interpreter. IValues contain their +/// values as an `IValue::Payload`, which holds primitive types +/// (`int64_t`, `bool`, `double`, `Device`) and `Tensor` as values, +/// and all other types as a `c10::intrusive_ptr`. In order to +/// optimize performance of the destructor and related operations by +/// making the `Tensor` and `c10::intrusive_ptr` paths generate the +/// same code, we represent a null `c10::intrusive_ptr` as +/// `UndefinedTensorImpl::singleton()`, *not* `nullptr`. +/// +/// IValues are used as inputs to and outputs from the TorchScript interpreter. +/// To retrieve the value contained within an IValue, use the `.toX()` methods, +/// where `X` is the type you are trying to get. Note that neither the `.toX()` +/// methods nor the templated `.to` functions do any kind of casting, they +/// only unwrap the contained value. For example: +/// +/// \rst +/// .. code-block:: cpp +/// +/// // Make the IValue +/// torch::IValue my_ivalue(26); +/// std::cout << my_ivalue << "\n"; +/// +/// // Unwrap the IValue +/// int64_t my_int = my_ivalue.toInt(); +/// std::cout << my_int << "\n"; +/// +/// // This will throw an error! +/// // `my_ivalue` is tagged as an int and cannot be used as another type +/// torch::Tensor my_tensor = my_ivalue.toTensor(); +/// \endrst +struct TORCH_API IValue final { + IValue(const IValue& rhs) + : IValue(rhs.payload, rhs.tag) { + if (isIntrusivePtr() && payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr); + } + } + + IValue(IValue&& rhs) noexcept : tag(rhs.tag) { + moveFrom(std::move(rhs)); + } + + /// @private [doxygen private] + ~IValue() { + destroy(); + } + + C10_ALWAYS_INLINE IValue& operator=(IValue&& rhs) & noexcept { + if (&rhs == this) { + return *this; + } + + destroy(); + moveFrom(std::move(rhs)); + return *this; + } + + IValue& operator=(IValue const& rhs) & { + *this = IValue(rhs); + return *this; + } + + void dump() const; + + /** + * Equality comparison. The semantics are the same as Python's `==`: + * 1. Numerical types are compared by value. + * 2. Tensors compute element-wise equality, returning a BoolTensor (see: + * `torch.eq()`) + * 3. Strings are compared by value. + * 4. Sequence types (list, tuple) are compared lexicographically by + * comparing their elements. 
Different sequence types never compare equal. + * 5. Mappings (dict) must have equal (key, value) pairs. + * 6. If not listed above, the default behavior for is to test identity + * equality (e.g. pointer equality). + * + * Why does this return an IValue instead of a bool? Because in PyTorch, + * `tensor1 == tensor2` returns a `BoolTensor`, not a bool. + * + * NOTE: we (like Python) assume that identity equality implies value equality + * for efficiency. + * TODO: need to support customizing equality + */ + IValue equals(const IValue& rhs) const; + /** + * This implements the same semantics as `bool(lhs == rhs)` in Python. which + * is the same as `equals()` except for Tensor types. + */ + TORCH_API friend bool operator==(const IValue& lhs, const IValue& rhs); + TORCH_API friend bool operator!=(const IValue& lhs, const IValue& rhs); + + /** + * Identity comparison. Checks if `this` is the same object as `rhs`. The + * semantics are the same as Python's `is` operator. + * + * NOTE: Like in Python, this operation is poorly defined for primitive types + * like numbers and strings. Prefer to use `==` unless you really want to + * check identity equality. + */ + bool is(const IValue& rhs) const; + + /** + * Hashing for IValues. Returns an IValue-boxed int. + * + * Some notes: + * - Like eager, Tensors are hashed by looking at the pointer. This is not + * strictly correct because two value-equal tensors with different tensor + * pointers will hash differently, but we choose to reproduce the eager + * semantics. + * - Hashing is not defined on all built-in IValue types (e.g. list and + * dict), following Python. Calling `hash()` on these types will throw. + */ + IValue hash() const { + return (int64_t)IValue::hash(*this); + } + // This is defined because `c10::hash` dispatches to a function of this + // signature. See the member function `hash()`. + static size_t hash(const IValue& iv); + + /** + * @private [doxygen private] + * [container equality] + * This is an equality implementation that assumes objects with the same + * identity equal themselves, for efficiency reasons. We primarily have this + * for consistency, because Python does the same thing. This actually + * provokes user-visible changes in behavior due to quirks in torch: + * [tensor1] == [tensor1] -> True (because container equality will first + * compare identity) [tensor1] == [tensor1_copy] -> RuntimeError: + * Boolean value of Tensor with more than one value is ambiguous + */ + TORCH_API friend bool _fastEqualsForContainer( + const IValue& lhs, + const IValue& rhs); + +private: + static bool isAliasOf(const at::Tensor& a, const at::Tensor& b) { + if (a.is_sparse()) { + return isAliasOf(a._values(), b) || isAliasOf(a._indices(), b); + } + if (b.is_sparse()) { + return isAliasOf(a, b._values()) || isAliasOf(a, b._indices()); + } + if (a.is_sparse_csr()) { + return isAliasOf(a.values(), b) || + isAliasOf(a.crow_indices(), b) || + isAliasOf(a.col_indices(), b); + } + if (b.is_sparse_csr()) { + return isAliasOf(a, b.values()) || + isAliasOf(a, b.crow_indices()) || + isAliasOf(a, b.col_indices()); + } + + // Opaque tensors such as the ones constructed by the MKL-DNN backend + // don't have storage so we just compare their TensorImpls. + // TODO: Find way to expose alias info for opaque tensors. 
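// For example: for a COO sparse tensor `s`, isAliasOf(s, s._values()) is
// true, because the recursion above descends into the values/indices
// tensors and bottoms out in `s._values().is_alias_of(s._values())`.
// Storage-less opaque tensors, by contrast, only count as aliasing when
// they share the same TensorImpl, per the fallback below.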
+ if (!a.has_storage() || !b.has_storage()) { + return a.unsafeGetTensorImpl() == b.unsafeGetTensorImpl(); + } + + return a.is_alias_of(b); + } + + template + bool isListOf() const; + +public: + /// @private [doxygen private] + bool isAliasOf(const IValue& rhs) const { + if (this->tag != rhs.tag) { + // Trivially don't alias if the type is different + return false; + } + + // Tensors should be compared based on internal storage + if (this->isTensor()) { + return isAliasOf(this->toTensor(), rhs.toTensor()); + } + + if (!isIntrusivePtr()) { + // Primitive types don't alias anything + return false; + } + + AT_ASSERT(rhs.isIntrusivePtr()); + + // Other types can be compared by their ptr value + return this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr; + } + + /// @private [doxygen private] + size_t use_count() const noexcept { + if (isTensor()) { + return payload.as_tensor.use_count(); + } + + if (!isIntrusivePtrLegacyBehavior()) { + return 1; + } + + if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) { + return 0; + } + return c10::raw::intrusive_ptr::use_count(payload.u.as_intrusive_ptr); + } + + /// @private [doxygen private] + void swap(IValue& rhs) noexcept { + if (isTensor() && rhs.isTensor()) { + std::swap(payload.as_tensor, rhs.payload.as_tensor); + } else if (isTensor()) { + at::Tensor t = std::move(payload.as_tensor); + // As far as I can tell, omitting the usual explicit destructor call + // is not UB in and of itself, and it's a slight perf win. The + // destructor is a no-op, because the moved-from Tensor is + // effectively an intrusive_ptr in the null state, so we don't need + // the behavior for correctness reasons either. Leaving this + // explanatory comment, including commented-out destructor call, to + // make this abundantly clear. + // + // payload.as_tensor.~Tensor(); + payload.u = rhs.payload.u; + new (&rhs.payload.as_tensor) at::Tensor(std::move(t)); + } else if (rhs.isTensor()) { + rhs.swap(*this); + return; + } else { + std::swap(payload.u, rhs.payload.u); + } + std::swap(tag, rhs.tag); + } + + // Accessors for subtypes are arranged together below + // While some of these accessors could be generated through templates, + // we prefer to write them manually for clarity + + IValue(at::TensorBase t) : tag(Tag::Tensor) { + new (&payload.as_tensor) at::Tensor(std::move(t)); + } + bool isTensor() const { + return Tag::Tensor == tag; + } + + private: + // Outlined error path so that toTensor() can be inlined. + [[noreturn]] void reportToTensorTypeError() const; + + public: + at::Tensor toTensor() &&; + at::Tensor& toTensor() &; + const at::Tensor& toTensor() const&; + at::TensorImpl* unsafeToTensorImpl() const { + return payload.as_tensor.unsafeGetTensorImpl(); + } + + IValue(at::Storage s) : tag(Tag::Storage) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(s.unsafeReleaseStorageImpl()); + } + bool isStorage() const { + return Tag::Storage == tag; + } + c10::Storage toStorage() &&; + c10::Storage toStorage() const&; + + const IValue& toIValue() const { + return *this; + } + IValue& toIValue() { + return *this; + } + + /// @private [doxygen private] + IValue(intrusive_ptr blob) + : tag(Tag::Blob) { + // TODO (after Tensor merge) If we pass in a Blob holding a Tensor, extract + // and store it as a Tensor instead. 
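// A sketch of the TODO above (hypothetical, not what the code below does):
// the constructor could check `blob->IsType<at::Tensor>()` and, when true,
// construct a Tag::Tensor IValue from `blob->Get<at::Tensor>()` instead of
// boxing the Blob itself.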
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release()); + } + + /// @private [doxygen private] + bool isBlob() const { + return Tag::Blob == tag; + } + + /// @private [doxygen private] + c10::intrusive_ptr toBlob() &&; + + /// @private [doxygen private] + c10::intrusive_ptr toBlob() const&; + + // Capsule. No new callsites of these APIs should + // be introduced. + static inline IValue make_capsule( + intrusive_ptr blob); + bool isCapsule() const { + return Tag::Capsule == tag; + } + c10::intrusive_ptr toCapsule() &&; + c10::intrusive_ptr toCapsule() const&; + + // Custom C++ classes + template < + typename T, + std::enable_if_t< + std::is_base_of::value, + int> = 0> + IValue(intrusive_ptr custom_class); + bool isCustomClass() const; + template + c10::intrusive_ptr toCustomClass() &&; + template + c10::intrusive_ptr toCustomClass() const&; + + // Tuple + IValue(c10::intrusive_ptr v); + + template < + typename... Args, + std::enable_if_t< + !guts::disjunction< + std::is_lvalue_reference..., + guts::negation>...>::value, + std::nullptr_t> = nullptr> + IValue(const std::tuple& t); + template < + typename... Args, + std::enable_if_t< + !guts::disjunction< + std::is_lvalue_reference..., + guts::negation>...>::value, + std::nullptr_t> = nullptr> + IValue(std::tuple&& t); + bool isTuple() const { + return Tag::Tuple == tag; + } + c10::intrusive_ptr toTuple() &&; + c10::intrusive_ptr toTuple() const&; + C10_NODISCARD ivalue::Tuple& toTupleRef() const; + + // Double + IValue(double d) : tag(Tag::Double) { + payload.u.as_double = d; + } + bool isDouble() const { + return Tag::Double == tag; + } + double toDouble() const { + AT_ASSERT(isDouble()); + return payload.u.as_double; + } + + // ComplexDouble + template + IValue(c10::complex c); + bool isComplexDouble() const { return Tag::ComplexDouble == tag; } + c10::complex toComplexDouble() const; + + // Future + IValue(c10::intrusive_ptr v); + bool isFuture() const { + return Tag::Future == tag; + } + c10::intrusive_ptr toFuture() &&; + c10::intrusive_ptr toFuture() const&; + + // RRef + IValue(c10::intrusive_ptr v); + bool isRRef() const { + return Tag::RRef == tag; + } + c10::intrusive_ptr toRRef() &&; + c10::intrusive_ptr toRRef() const&; + + // Quantizer + IValue(c10::intrusive_ptr v); + bool isQuantizer() const { + return Tag::Quantizer == tag; + } + c10::intrusive_ptr toQuantizer() &&; + c10::intrusive_ptr toQuantizer() const&; + + // Int + IValue(int64_t i) : tag(Tag::Int) { + payload.u.as_int = i; + } + + IValue(c10::SymInt i) { + if (i.is_symbolic()) { + tag = Tag::SymInt; + payload.u.as_intrusive_ptr = i.toSymIntNodeImpl().release(); + } else { + tag = Tag::Int; + payload.u.as_int = i.as_int_unchecked(); + } + } + + bool isSymInt() const { + return Tag::SymInt == tag; + } + + c10::SymInt toSymInt() const; + + IValue(c10::SymFloat i) { + if (i.is_symbolic()) { + tag = Tag::SymFloat; + payload.u.as_intrusive_ptr = i.toSymFloatNodeImpl().release(); + } else { + tag = Tag::Double; + payload.u.as_double = i.as_float_unchecked(); + } + } + + bool isSymFloat() const { + return Tag::SymFloat == tag; + } + + c10::SymFloat toSymFloat() const; + + // allow you to pass literals (3, 4) without ambiguity + IValue(int32_t i) : IValue(static_cast(i)) {} + + bool isInt() const { + return Tag::Int == tag; + } + + int64_t toInt() const { + AT_ASSERT(isInt()); + return payload.u.as_int; + } + + // Bool + IValue(bool b) : tag(Tag::Bool) { +#if defined(__clang__) && defined(__x86_64__) + // Initializing entire payload stops valgrind's from 
reporting + // "jump or move depends on uninitialised value" in IValue copy constructor + // See https://github.com/pytorch/pytorch/issues/37117 + payload.u.as_int = b; +#else + payload.u.as_bool = b; +#endif + } + bool isBool() const { + return Tag::Bool == tag; + } + bool toBool() const { + AT_ASSERT(isBool()); + return payload.u.as_bool; + } + + // IntList + bool isIntList() const; + c10::List toIntList() &&; + c10::List toIntList() const&; + std::vector toIntVector() const; + at::DimVector toDimVector() const; + + // ConstantString + IValue(c10::intrusive_ptr v); + IValue(std::string v); + IValue(const char* v) : IValue(std::string(v)) {} + IValue(c10::string_view v) : IValue(std::string(v)) {}; + bool isString() const { + return Tag::String == tag; + } + c10::intrusive_ptr toString() &&; + c10::intrusive_ptr toString() const&; + const std::string& toStringRef() const; + c10::optional> toOptionalStringRef() + const; + c10::string_view toStringView() const; + + // DoubleList + bool isDoubleList() const; + c10::List toDoubleList() &&; + c10::List toDoubleList() const&; + std::vector toDoubleVector() const; + + // ComplexDoubleList + bool isComplexDoubleList() const; + c10::List> toComplexDoubleList() &&; + c10::List> toComplexDoubleList() const&; + std::vector> toComplexDoubleVector() const; + + // BoolList + bool isBoolList() const; + c10::List toBoolList() &&; + c10::List toBoolList() const&; + + // TensorList + bool isTensorList() const; + c10::List toTensorList() &&; + c10::List toTensorList() const&; + std::vector toTensorVector() const; + + // OptionalTensorList + bool isOptionalTensorList() const; + c10::List> toOptionalTensorList() &&; + c10::List> toOptionalTensorList() const&; + std::vector> toOptionalTensorVector() const; + + // GenericList + IValue(c10::List v); + bool isList() const { + return Tag::GenericList == tag; + } + c10::List toList() &&; + c10::List toList() const&; + c10::ArrayRef toListRef() const; + + // Some template constructors of IValue calls another constructor recursively. + // This SFINAEs the called constructor exists. + template + using enable_if_ivalue_constructible = + std::enable_if_t::value, std::nullptr_t>; + + // The rule for lists is more complicated; the generic constructor is only + // acceptable if your element isn't SymInt. If you do have a SymInt element, + // then you must also, at construction time, check if you can decay the list + // into an int list (this is MANDATORY, as at a use site we may expect + // toIntList to work even if at the call site you had a SymIntArrayRef + // argument). In practice, only SymIntArrayRef is used this way, so we + // didn't bother making it work for the other constructors, we just make sure + // they're not selectable. + template + using enable_if_list_is_ivalue_constructible = + std::enable_if_t::value && + !std::is_same::value, std::nullptr_t>; + + template = nullptr> + IValue(c10::List&& v); + template = nullptr> + IValue(const c10::List& v); + template = nullptr> + IValue(at::ArrayRef v); + template = nullptr> + IValue(const std::vector& v); + template + IValue(std::array v); + + // Manual constructors for lists of symints, which decay to int list if + // possible. 
To avoid ambiguous overload situations, we template them + // to prevent implicit conversions + template + using enable_if_symint = + std::enable_if_t::value, std::nullptr_t>; + + template = nullptr> + IValue(at::ArrayRef v); + template = nullptr> + IValue(at::OptionalArrayRef v); + template = nullptr> + IValue(const std::vector& v); + + template + using enable_if_ilist_is_ivalue_constructible = std::enable_if_t< + std::is_constructible::value && + std::is_constructible::boxed_type>::value && + !std::is_same::value, + std::nullptr_t>; + + template = nullptr> + IValue(c10::IListRef v); + + // GenericDict + IValue(c10::Dict v); + bool isGenericDict() const { + return Tag::GenericDict == tag; + } + c10::Dict toGenericDict() &&; + c10::Dict toGenericDict() const&; + + template + IValue(c10::Dict v); + + template + /// \cond + /// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN + C10_DEPRECATED_MESSAGE( + "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict instead.") + /// \endcond + IValue(std::unordered_map v); + + template = nullptr> + IValue(c10::optional v); + template = nullptr> + IValue(c10::OptionalArrayRef v); + IValue(c10::nullopt_t); + + // ClassType + IValue(c10::intrusive_ptr v); + bool isObject() const { + return tag == Tag::Object; + } + c10::intrusive_ptr toObject() &&; + c10::intrusive_ptr toObject() const&; + ivalue::Object& toObjectRef() const; + + torch::jit::Module toModule() const; + bool isModule() const; + + // PyObject + IValue(c10::intrusive_ptr v); + bool isPyObject() const { + return tag == Tag::PyObject; + } + c10::intrusive_ptr toPyObjectHolder() &&; + c10::intrusive_ptr toPyObjectHolder() const&; + PyObject* toPyObject() const; + + // Enum + explicit IValue(c10::intrusive_ptr v); + bool isEnum() const { + return tag == Tag::Enum; + } + c10::intrusive_ptr toEnumHolder() &&; + c10::intrusive_ptr toEnumHolder() const&; + + // None + IValue() : tag(Tag::None) {} + bool isNone() const { + return Tag::None == tag; + } + std::string toNone() const { + AT_ASSERT(isNone()); + return "None"; + } + + static IValue uninitialized() { + auto i = IValue(); + i.tag = Tag::Uninitialized; + return i; + } + + // Scalar, which gets encoded as either an Int, a Double or a ComplexDouble + IValue(const at::Scalar& s) : IValue() { + // NB: do the symbolic versions first, as isFloatingPoint is true + // for both SymFloat and double + if (s.isSymInt()) { + tag = Tag::SymInt; + payload.u.as_intrusive_ptr = s.toSymInt().toSymIntNodeImpl().release(); + } else if (s.isSymFloat()) { + tag = Tag::SymFloat; + payload.u.as_intrusive_ptr = s.toSymFloat().toSymFloatNodeImpl().release(); + } else if (s.isFloatingPoint()) { + tag = Tag::Double; + payload.u.as_double = s.toDouble(); + } else if (s.isComplex()) { + *this = s.toComplexDouble(); + } else if (s.isBoolean()) { + tag = Tag::Bool; + payload.u.as_bool = s.toBool(); + } else { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(s.isIntegral(false), "Unknown type in Scalar"); + tag = Tag::Int; + payload.u.as_int = s.toLong(); + } + } + + bool isScalar() const { + return isDouble() || isInt() || isComplexDouble() || isBool() || isSymInt() || isSymFloat(); + } + + at::Scalar toScalar() const { + if (isDouble()) + return toDouble(); + else if (isInt()) + return toInt(); + else if (isComplexDouble()) + return toComplexDouble(); + else if (isBool()) + return toBool(); + else if (isSymInt()) + return toSymInt(); + else if (isSymFloat()) + return toSymFloat(); + throw std::runtime_error("IValue is not a 
Scalar"); + } + + // Device + IValue(c10::Device d) : tag(Tag::Device) { + payload.u.as_device.type = d.type(); + payload.u.as_device.index = d.index(); + } + bool isDevice() const { + return Tag::Device == tag; + } + c10::Device toDevice() const { + AT_ASSERT(isDevice()); + return c10::Device(payload.u.as_device.type, payload.u.as_device.index); + } + + //Stream + IValue(c10::Stream stream) + : tag(Tag::Stream) { + payload.u.as_int = stream.pack(); + } + c10::Stream toStream() &&; + c10::Stream toStream() const &; + bool isStream() const { return Tag::Stream == tag; } + + // ScalarType + IValue(ScalarType t) + : IValue(static_cast::type>(t)) {} + at::ScalarType toScalarType() const { + return static_cast(toInt()); + } + + // Layout + IValue(Layout l) + : IValue(static_cast::type>(l)) {} + at::Layout toLayout() const { + return static_cast(toInt()); + } + + // MemoryFormat + IValue(MemoryFormat m) + : IValue(static_cast::type>(m)) {} + at::MemoryFormat toMemoryFormat() const { + return static_cast(toInt()); + } + + // QScheme + IValue(at::QScheme qscheme) : tag(Tag::Int) { + payload.u.as_int = static_cast(qscheme); + } + + at::QScheme toQScheme() const { + return static_cast(toInt()); + } + + // Dimname + IValue(at::Dimname dimname) : IValue(dimname.symbol().toQualString()) {} + + at::Dimname toDimname() const { + return at::Dimname::fromSymbol(Symbol::fromQualString(toStringRef())); + } + + // Generator + IValue(at::Generator g) : tag(Tag::Generator) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl()); + } + bool isGenerator() const { + return Tag::Generator == tag; + } + at::Generator toGenerator() &&; + at::Generator toGenerator() const&; + + // for debugging + std::string tagKind() const { + switch (tag) { +#define DEFINE_CASE(x) \ + case Tag::x: \ + return #x; + TORCH_FORALL_TAGS(DEFINE_CASE) +#undef DEFINE_CASE + } + return "InvalidTag(" + c10::guts::to_string(static_cast(tag)) + ")"; + } + + // generic v.to() implementations + // that can be used in special functions like pop/push + // that use template meta-programming. + // prefer the directly named methods when you can, + // since they are simpler to understand + + // Note: if you get linker errors saying one of these is missing, + // change it to ... && = delete; and you will see better error messages for + // why However, we cannot commit this because some compiler versions barf on + // it. + template + T to() &&; + template + typename c10::detail::ivalue_to_const_ref_overload_return::type to() const&; + + // ToOptional: convert a IValue to the Optional obj that accepts both T and + // None + template + optional toOptional(); + template + optional toOptional() const; + + /// @private [doxygen private] + /// this is a shallow comparison of two IValues to test the object identity + bool isSameIdentity(const IValue& rhs) const; + + // Computes the "official" string representation of an IValue. This produces a + // TorchScript expression that can be used to recreate an IValue with the same + // value (e.g. when we are printing constants in the serializer). + // + // Callers can use `customFormatter` to override how `repr()` prints out an + // IValue. This is useful if you have some other environment where you can + // look up values, and you want to print a reference to that environment (like + // the serializer's constant table). + // + // repr() is not necessarily defined on all objects! 
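// For example (a sketch): a pass-through formatter that always returns
// false defers every value to the default repr() printing, since the
// formatter's boolean result signals whether it handled the value itself:
//
//   IValue v(42);
//   v.repr(std::cout,
//          [](std::ostream&, const IValue&) { return false; });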
+ std::ostream& repr( + std::ostream& stream, + std::function<bool(std::ostream&, const IValue& v)> customFormatter) + const; + + // Computes an "informal" string representation of an IValue. This should be + // used for debugging, or servicing `print()`-like functions. + // This is different from `repr()` in that there is no expectation that we can + // exactly reconstruct an IValue from the output; feel free to use a + // concise/pretty form. + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const IValue& v); + + bool isPtrType() const { + if (isTensor()) { + return payload.as_tensor.defined(); + } + return isIntrusivePtrLegacyBehavior(); + } + + /// @private [doxygen private] + const void* internalToPointer() const { + TORCH_INTERNAL_ASSERT( + isPtrType(), "Can only call internalToPointer() for pointer types"); + if (isTensor()) { + return payload.as_tensor.unsafeGetTensorImpl(); + } else { + return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton() + ? payload.u.as_intrusive_ptr : nullptr; + } + } + + template <typename T = c10::PlatformType> + TypePtr type() const; + + // Detect aliased tensors. + struct HashAliasedIValue { + size_t hashTensor(const at::Tensor& ten) const { + if (ten.is_sparse()) { + // COO sparse tensors have a "values" tensor and an "indices" tensor + // so this will detect overlap of sparse tensors that share a values + // tensor, but not sparse tensors that share an indices tensor. + return hashTensor(ten._values()); + } else if (ten.is_sparse_csr()) { + // CSR sparse tensors have a "values" tensor and "crow_indices"/ + // "col_indices" tensors, so this will detect overlap of CSR tensors + // that share a values tensor, but not CSR tensors that share only + // index tensors. + return hashTensor(ten.values()); + } else if (!ten.has_storage()) { + // Opaque tensors such as the ones constructed by the MKL-DNN backend + // don't have storage so we just use their TensorImpls. + // TODO: Find way to expose alias info for opaque tensors. + return reinterpret_cast<size_t>(ten.unsafeGetTensorImpl()); + } else { + return reinterpret_cast<size_t>( + ten.storage().unsafeGetStorageImpl()); + } + } + size_t operator()(const IValue& val) const { + if (val.isTensor()) { + return hashTensor(val.toTensor()); + } + // If it is not a Tensor, then two mutable IValues alias each other only + // if they are the same pointer. + return val.payload.u.as_int; + } + }; + + struct CompAliasedIValues { + bool operator()(const IValue& lhs, const IValue& rhs) const { + return lhs.isAliasOf(rhs); + } + }; + + using HashAliasedIValues = + std::unordered_set<IValue, HashAliasedIValue, CompAliasedIValues>; + using HashAliasedIValueMap = + std::unordered_map<IValue, IValue, HashAliasedIValue, CompAliasedIValues>; + + // Checks if this and rhs have a subvalue in common. + // [t1,t2] and [t2, t3] returns true. + bool overlaps(const IValue& rhs) const; + + // Inserts all subvalues of this in subValues. + void getSubValues(HashAliasedIValues& subValues) const; + + // Apply visitor to every subvalue. + // TODO: There are several places that recurse over IValue. This is fragile. + // This visitor should be used to recurse over ivalues. + void visit(const std::function<bool(const IValue&)>& visitor) const; + IValue deepcopy() const; + IValue deepcopy(HashAliasedIValueMap& memo) const; + + private: + static c10::intrusive_ptr_target* null_to_undefined_tensor(c10::intrusive_ptr_target* p) { + return p ? p : static_cast<c10::intrusive_ptr_target*>(c10::UndefinedTensorImpl::singleton()); + } + + static bool ptrEqual(const IValue& lhs, const IValue& rhs); + // NOTE: IValue tags are intentionally private. In the future we may encode + // this value differently (e.g.
using NaN boxing), and this would make it more + // costly to determine the tag for all types vs just determining if something + // is a particular type. Instead we want clients to use the `isX` methods when + // possible. If for perf. reasons you really, absolutely, must have a jump + // table, then we can revisit this. + enum class Tag : uint32_t { +#define DEFINE_TAG(x) x, + TORCH_FORALL_TAGS(DEFINE_TAG) +#undef DEFINE_TAG + }; + + template < + class T, + class NullType = c10::detail::intrusive_target_default_null_type> + c10::intrusive_ptr moveToIntrusivePtr(); + template < + typename T, + class NullType = c10::detail::intrusive_target_default_null_type> + c10::intrusive_ptr toIntrusivePtr() const; + + void destroy() { + // We carefully construct this call to both 1) avoid UB by using + // the "wrong" one of as_tensor and as_intrusive_ptr and 2) enable + // the compiler to generate the same code for each case. It is + // surprisingly difficult to get this right. + if (isTensor() || isIntrusivePtr()) { + c10::intrusive_ptr_target* p = isTensor() ? payload.as_tensor.unsafeGetTensorImpl() : payload.u.as_intrusive_ptr; + c10::intrusive_ptr::reclaim(p); + // No need to make this destructor call! + // payload.as_tensor.~Tensor(); + } + } + + C10_ALWAYS_INLINE void moveFrom(IValue&& rhs) noexcept { + if (rhs.isTensor()) { + new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor)); + // As far as I can tell, omitting the usual explicit destructor call + // is not UB in and of itself, and it's a slight perf win. The + // destructor is a no-op, because the moved-from Tensor is + // effectively an intrusive_ptr in the null state, so we don't need + // the behavior for correctness reasons either. Leaving this + // explanatory comment, including commented-out destructor call, to + // make this abundantly clear. + // + // rhs.payload.as_tensor.~Tensor(); + } else { + payload.u = rhs.payload.u; + } + tag = rhs.tag; + rhs.clearToNone(); + } + + void clearToNone() noexcept { + payload.u.as_int = 0; + tag = Tag::None; + } + + bool isIntrusivePtr() const { + switch (tag) { + case Tag::None: + return false; + case Tag::Tensor: + return false; + case Tag::Storage: + return true; + case Tag::Generator: + return true; + case Tag::Double: + return false; + case Tag::ComplexDouble: + return true; + case Tag::Int: + return false; + case Tag::SymInt: + return true; + case Tag::SymFloat: + return true; + case Tag::Bool: + return false; + case Tag::Tuple: + return true; + case Tag::String: + return true; + case Tag::Blob: + return true; + case Tag::GenericList: + return true; + case Tag::GenericDict: + return true; + case Tag::Future: + return true; + case Tag::Device: + return false; + case Tag::Stream: + return false; + case Tag::Object: + return true; + case Tag::PyObject: + return true; + case Tag::Uninitialized: + return false; + case Tag::Capsule: + return true; + case Tag::RRef: + return true; + case Tag::Quantizer: + return true; + case Tag::Enum: + return true; + } + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false, "unexpected tag ", static_cast(tag)); + return false; + } + + // Storage and Generator were treated specially when + // is_intrusive_ptr was stored as explicit state. This getter + // preserves the old behavior for use with WeakIValue for now. 
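// Concretely: an IValue holding an undefined Storage or Generator keeps
// c10::UndefinedTensorImpl::singleton() as its payload, so isIntrusivePtr()
// above answers true while isIntrusivePtrLegacyBehavior() below answers
// false, reproducing the old explicit is_intrusive_ptr state that
// WeakIValue still relies on.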
+ bool isIntrusivePtrLegacyBehavior() const { + if (tag == Tag::Storage || tag == Tag::Generator) { + return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(); + } else { + return isIntrusivePtr(); + } + } + + union Payload { + // [TriviallyCopyablePayload] + // We use a nested union here so that we can make the copy easy + // and efficient in the non-tensor (i.e., trivially copyable) + // case. Specifically, we do not have to do a switch-on-tag to + // figure out which union member to assign; we can just use + // TriviallyCopyablePayload::operator=. + union TriviallyCopyablePayload { + TriviallyCopyablePayload() : as_int(0) {} + int64_t as_int; + double as_double; + bool as_bool; + // Invariant: never nullptr; null state is represented as + // c10::UndefinedTensorImpl::singleton() for consistency of + // representation with Tensor. + c10::intrusive_ptr_target* as_intrusive_ptr; + struct { + DeviceType type; + DeviceIndex index; + } as_device; + } u; + at::Tensor as_tensor; + Payload() : u() {} + ~Payload() {} + }; + + IValue(const Payload& p, Tag t) : tag(t) { + if (isTensor()) { + new (&payload.as_tensor) at::Tensor(p.as_tensor); + } else { + payload.u = p.u; + } + } + + template + struct TagType {}; + + friend MaybeOwnedTraits; + + Payload payload; + Tag tag; + friend struct WeakIValue; +}; + +struct TORCH_API WeakIValue final { + WeakIValue() : tag(IValue::Tag::None), is_intrusive_ptr(false) {} + + WeakIValue(const WeakIValue& rhs) + : payload(rhs.payload), + tag(rhs.tag), + is_intrusive_ptr(rhs.is_intrusive_ptr) { + if (is_intrusive_ptr && payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr); + } + } + WeakIValue(const IValue& rhs) + : tag(rhs.tag), + is_intrusive_ptr(rhs.isIntrusivePtrLegacyBehavior()) { + if (rhs.isTensor()) { + payload.as_intrusive_ptr = rhs.unsafeToTensorImpl(); + is_intrusive_ptr = true; + } else { + payload = rhs.payload.u; + } + if (is_intrusive_ptr) { + if (payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr); + } + } + } + WeakIValue(WeakIValue&& rhs) noexcept : WeakIValue() { + swap(rhs); + } + ~WeakIValue() { + if (is_intrusive_ptr && payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::weak_intrusive_ptr::decref(payload.as_intrusive_ptr); + } + } + WeakIValue& operator=(WeakIValue&& rhs) & noexcept { + WeakIValue(std::move(rhs)).swap(*this); // this also sets rhs to None + return *this; + } + WeakIValue& operator=(WeakIValue const& rhs) & { + WeakIValue(rhs).swap(*this); + return *this; + } + void swap(WeakIValue& rhs) noexcept { + std::swap(payload, rhs.payload); + std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr); + std::swap(tag, rhs.tag); + } + + bool isSameIdentity(const WeakIValue& rhs) const { + return payload.as_int == rhs.payload.as_int && tag == rhs.tag && + is_intrusive_ptr == rhs.is_intrusive_ptr; + } + + IValue lock() const { + if (!is_intrusive_ptr) { + IValue::Payload newPayload; + newPayload.u = payload; + return IValue(newPayload, tag); + } + if (IValue::Tag::Tensor == tag) { + auto temp = c10::weak_intrusive_ptr::reclaim( + static_cast(payload.as_intrusive_ptr)); + c10::intrusive_ptr ip(temp.lock()); + temp.release(); + if (!ip) { + return IValue(); + } else { + return IValue(at::Tensor(std::move(ip))); + } + } else { + auto temp = c10::weak_intrusive_ptr::reclaim( + payload.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton() + 
? nullptr + : payload.as_intrusive_ptr); + IValue::Payload pl; + pl.u.as_intrusive_ptr = temp.lock().release(); + temp.release(); + if (!pl.u.as_intrusive_ptr) { + return IValue(); + } else { + return IValue(pl, tag); + } + } + } + + size_t use_count() const noexcept { + if (!is_intrusive_ptr) { + return 1; + } + auto temp = c10::weak_intrusive_ptr::reclaim( + payload.as_intrusive_ptr); + size_t result = temp.use_count(); + temp.release(); + return result; + } + + size_t weak_use_count() const noexcept { + if (!is_intrusive_ptr) { + return 1; + } + auto temp = c10::weak_intrusive_ptr::reclaim( + payload.as_intrusive_ptr); + size_t result = temp.weak_use_count(); + temp.release(); + return result; + } + size_t hash() const { + return payload.as_int; + } + + private: + using Payload = IValue::Payload::TriviallyCopyablePayload; + Payload payload; + IValue::Tag tag; + bool is_intrusive_ptr; +}; + +// An owning pointer to a type. When the type is class type, it requires a pair +// of shared_ptrs to the class type and its owning CU, so that the class type is +// guaranteed to stay alive as long as we hold this object. +struct TORCH_API StrongTypePtr { + StrongTypePtr( + std::shared_ptr cu, + TypePtr type); + + std::shared_ptr cu_; + TypePtr type_; +}; + +// [Constant Object Weak CompilationUnit Reference] +// A non owning pointer to a type. When a class get inserted as a constant +// into a graph, if we used a strong pointer we would have a circular reference +// from Object -> CompilationUnit and CompilationUnit -> Graph (which owns the +// Constant Object) +struct TORCH_API WeakTypePtr { + WeakTypePtr( + std::weak_ptr cu, + TypePtr type); + + std::weak_ptr cu_; + TypePtr type_; +}; + +// internal build errors with std::variant :/ +struct WeakOrStrongCompilationUnit { + explicit WeakOrStrongCompilationUnit( + std::shared_ptr shared_cu) { + strong_ptr_ = shared_cu; + weak_ptr_ = c10::nullopt; + } + + explicit WeakOrStrongCompilationUnit( + std::weak_ptr weak_cu) { + strong_ptr_ = c10::nullopt; + weak_ptr_ = weak_cu; + } + + std::shared_ptr getStrongRefOrThrow() const { + TORCH_INTERNAL_ASSERT(strong_ptr_ != c10::nullopt); + return *strong_ptr_; + } + + std::weak_ptr getWeakRefOrThrow() const { + TORCH_INTERNAL_ASSERT(weak_ptr_ != c10::nullopt); + return *weak_ptr_; + } + + bool holdingStrongRef() const { + return strong_ptr_ != c10::nullopt; + } + + bool holdingEmptyStrongRef() const { + return holdingStrongRef() && *strong_ptr_ == nullptr; + } + + c10::optional> strong_ptr_; + c10::optional> weak_ptr_; +}; + +// An Object will hold a non-owning Compilation Unit reference if it is a +// Constant in the graph and a Owning reference otherwise +struct TORCH_API WeakOrStrongTypePtr { + explicit WeakOrStrongTypePtr(WeakTypePtr weak) + : cu_(WeakOrStrongCompilationUnit(weak.cu_)) { + type_ = weak.type_; + } + explicit WeakOrStrongTypePtr(StrongTypePtr strong) + : cu_(WeakOrStrongCompilationUnit(strong.cu_)) { + type_ = strong.type_; + } + explicit WeakOrStrongTypePtr(WeakOrStrongCompilationUnit cu, TypePtr type) + : cu_(cu) { + type_ = type; + } + WeakTypePtr asWeakTypePtr() const; + + WeakOrStrongCompilationUnit cu_; + TypePtr type_; + + bool holds_strong_ref() const { + return cu_.holdingStrongRef(); + } + + bool holds_empty_strong_ref() const { + return cu_.holdingEmptyStrongRef(); + } +}; + + +} // namespace c10 + +#include // IWYU pragma: keep diff --git a/voice_bridge/torch/include/ATen/core/ivalue_inl.h b/voice_bridge/torch/include/ATen/core/ivalue_inl.h new file mode 100644 index 
0000000000000000000000000000000000000000..1c3453abb4c882be6621cb0c7f291000752f7ae0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ivalue_inl.h @@ -0,0 +1,2362 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Function; +struct CompilationUnit; +} // namespace jit +TORCH_API bool isCustomClass(const c10::IValue& v); +} // namespace torch +namespace c10 { +struct IValue; +struct ClassType; +struct TupleType; +struct EnumType; +struct InferredType; + +// For custom class __init__ registration, we need to pass in a function +// that looks like this: [](IValue x, args...) + +// However, make_boxed_from_unboxed_functor.h automatically sets the input types +// of the function by introspecting the types of the functor (which is IValue in +// this case). However, we need the type it binds to be Foo. + +// Instead, we pass in a lambda [](ivalue_holder x, args...) from +// which getTypePtr can recover the original class pointer. + +template +struct tagged_capsule { + IValue ivalue; +}; + +template +c10::intrusive_ptr IValue::moveToIntrusivePtr() { + auto t = c10::intrusive_ptr::reclaim( + payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton() + ? NullType::singleton() + : static_cast(payload.u.as_intrusive_ptr)); + clearToNone(); + return t; +} +template +c10::intrusive_ptr IValue::toIntrusivePtr() const { + if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) { + return c10::intrusive_ptr(); + } + c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr); + return c10::intrusive_ptr::reclaim( + static_cast(payload.u.as_intrusive_ptr)); +} + +template +intrusive_ptr static_intrusive_pointer_cast(intrusive_ptr r) { + return intrusive_ptr::reclaim(static_cast(r.release())); +} + +template +intrusive_ptr dynamic_intrusive_pointer_cast(intrusive_ptr r) { + return intrusive_ptr::reclaim(dynamic_cast(r.release())); +} + +inline c10::intrusive_ptr IValue::toFuture() && { + AT_ASSERT(isFuture(), "Expected Future but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toFuture() const& { + AT_ASSERT(isFuture(), "Expected Future but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toRRef() && { + AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toRRef() const& { + AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toQuantizer() && { + AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toQuantizer() const& { + AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toString() && { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toString() const& { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toObject() && { + AT_ASSERT(isObject(), "Expected Object but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toObject() const& { + 
AT_ASSERT(isObject(), "Expected Object but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue:: + toPyObjectHolder() && { + TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toPyObjectHolder() + const& { + TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toEnumHolder() && { + TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toEnumHolder() const& { + TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::complex IValue::toComplexDouble() const { + TORCH_INTERNAL_ASSERT(isComplexDouble(), "Expected ComplexDouble but got ", tagKind()); + auto ptr = toIntrusivePtr(); + return (*ptr).val; +} +inline at::Tensor IValue::toTensor() && { + if (C10_UNLIKELY(!isTensor())) { + reportToTensorTypeError(); + } + auto result = std::move(payload.as_tensor); + // As far as I can tell, omitting the usual explicit destructor call + // is not UB in and of itself, and it's a slight perf win. The + // destructor is a no-op, because the moved-from Tensor is + // effectively an intrusive_ptr in the null state, so we don't need + // the behavior for correctness reasons either. Leaving this + // explanatory comment, including commented-out destructor call, to + // make this abundantly clear. + // + // payload.as_tensor.~Tensor(); + clearToNone(); + return result; +} +inline at::Tensor& IValue::toTensor() & { + if (C10_UNLIKELY(!isTensor())) { + reportToTensorTypeError(); + } + return payload.as_tensor; +} +inline const at::Tensor& IValue::toTensor() const& { + if (C10_UNLIKELY(!isTensor())) { + reportToTensorTypeError(); + } + return payload.as_tensor; +} +inline c10::Storage IValue::toStorage() && { + AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind()); + return c10::Storage( + moveToIntrusivePtr()); +} +inline c10::Storage IValue::toStorage() const& { + AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind()); + return c10::Storage(toIntrusivePtr()); +} +inline c10::Stream IValue::toStream() && { + return c10::Stream::unpack(payload.u.as_int); +} +inline c10::Stream IValue::toStream() const& { + return c10::Stream::unpack(payload.u.as_int); +} +inline c10::intrusive_ptr IValue::toBlob() && { + AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toBlob() const& { + AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind()); + return toIntrusivePtr(); + ; +} +inline c10::intrusive_ptr IValue::toCapsule() && { + TORCH_INTERNAL_ASSERT(isCapsule()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toCapsule() const& { + TORCH_INTERNAL_ASSERT(isCapsule()); + return toIntrusivePtr(); +} +inline at::Generator IValue::toGenerator() && { + AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind()); + return at::Generator(moveToIntrusivePtr()); +} +inline at::Generator IValue::toGenerator() const& { + AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind()); + return at::Generator(toIntrusivePtr()); +} +inline c10::SymInt IValue::toSymInt() const { + AT_ASSERT(isSymInt() || isInt(), "Expected SymInt or int but got ", tagKind()); + if (isSymInt()) { + return c10::SymInt::toSymInt(toIntrusivePtr()); + } else { + return 
c10::SymInt(payload.u.as_int); + } +} + +inline c10::SymFloat IValue::toSymFloat() const { + AT_ASSERT(isSymFloat() || isDouble(), "Expected SymFloat or double but got ", tagKind()); + if (isSymFloat()) { + return c10::SymFloat::toSymFloat(toIntrusivePtr()); + } else { + return c10::SymFloat(payload.u.as_double); + } +} + +namespace ivalue { + +void TORCH_API +checkCustomClassType(const ClassType* expected_type, const Type* actual_type); + +template +using Shared = c10::intrusive_ptr; + +// string +struct TORCH_API ConstantString final : c10::intrusive_ptr_target { + private: + const std::string str_; + + public: + ConstantString(std::string str) : str_(std::move(str)) {} + ConstantString(c10::string_view str) : str_(std::string(str)) {} + static c10::intrusive_ptr create(std::string str_); + static c10::intrusive_ptr create(c10::string_view str_); + static c10::intrusive_ptr create(const char* str_); + + const std::string& string() const { + return str_; + } + c10::string_view string_view() const { + return str_; + } + + operator const std::string&() const { + return string(); + } + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const ConstantString& v); +}; + +struct Future; + +struct TORCH_API TupleElements { + private: + size_t inlineSize_; + // We represent TupleElements this way to save doing a heap + // allocation in the common (at least for unpickling) case where we + // have only 3 elements. We have our own union instead of + // c10::SmallVector because c10::SmallVector always + // stores the begin/end/capacity pointers, which would be a waste of + // space in our use case. + union { + std::vector elementsVector_; + // Don't want to declare a std::array because the convenient + // iteration and size members are a footgun in this case -- the + // actual size of the array may be smaller than 3! + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) + IValue elementsInline_[3]; + }; + + void destroyInline() { + for (const auto ii : c10::irange(inlineSize_)) { + elementsInline_[ii].~IValue(); + } + } + public: + + using iterator = IValue*; + using const_iterator = const IValue*; + + TupleElements() : inlineSize_(0) { + new (&elementsVector_) std::vector(); + } + + explicit TupleElements(std::vector elements) + : inlineSize_(0), elementsVector_(std::move(elements)) {} + + explicit TupleElements(c10::ArrayRef elements) + : inlineSize_(elements.size() <= 3 ? 
elements.size() : 0) { + switch (inlineSize_) { + case 3: + new (&elementsInline_[2]) IValue(elements[2]); + C10_FALLTHROUGH; + case 2: + new (&elementsInline_[1]) IValue(elements[1]); + C10_FALLTHROUGH; + case 1: + new (&elementsInline_[0]) IValue(elements[0]); + break; + case 0: + new (&elementsVector_) std::vector(elements.begin(), elements.end()); + break; + } + } + + explicit TupleElements(IValue&& e1) + : inlineSize_(1) { + new (&elementsInline_[0]) IValue(std::move(e1)); + } + + explicit TupleElements(IValue&& e1, IValue&& e2) + : inlineSize_(2) { + new (&elementsInline_[0]) IValue(std::move(e1)); + new (&elementsInline_[1]) IValue(std::move(e2)); + } + + explicit TupleElements(IValue&& e1, IValue&& e2, IValue&& e3) + : inlineSize_(3) { + new (&elementsInline_[0]) IValue(std::move(e1)); + new (&elementsInline_[1]) IValue(std::move(e2)); + new (&elementsInline_[2]) IValue(std::move(e3)); + } + + ~TupleElements() { + if (inlineSize_) { + destroyInline(); + } else { + elementsVector_.~vector(); + } + } + + // It would be nice to make this noncopyable to prevent people from + // writing code like `auto output = + // forward(...).toTupleRef().elements()` (which does refcount bumps on + // each element, unlike the more efficient but verbose + // ``` + // auto outputIntrusivePtr = forward(...).toTuple(); + // const auto& output = outputIntrusivePtr->elements(); + // ``` + // ), but there is simply an overwhelming amount of code that does + // it the inefficient way. + // See also operator std::vector below. + TupleElements(const TupleElements& rhs) + : inlineSize_(rhs.inlineSize_) { + if (rhs.inlineSize_) { + for (const auto ii : c10::irange(inlineSize_)) { + new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]); + } + } else { + new (&elementsVector_) std::vector(rhs.elementsVector_); + } + } + + TupleElements& operator=(const TupleElements& rhs) { + if (inlineSize_) { + if (rhs.inlineSize_) { + for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) { + elementsInline_[ii] = rhs.elementsInline_[ii]; + } + if (rhs.inlineSize_ > inlineSize_) { + for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]); + } + } else { + for (const auto ii : c10::irange(rhs.inlineSize_, inlineSize_)) { + elementsInline_[ii].~IValue(); + } + } + } else { + destroyInline(); + new (&elementsVector_) std::vector(rhs.elementsVector_); + } + } else { + if (rhs.inlineSize_) { + elementsVector_.~vector(); + for (const auto ii : c10::irange(rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]); + } + } else { + elementsVector_ = rhs.elementsVector_; + } + } + inlineSize_ = rhs.inlineSize_; + return *this; + } + + TupleElements(TupleElements&& rhs) noexcept + : inlineSize_(rhs.inlineSize_) { + if (inlineSize_) { + for (const auto ii : c10::irange(inlineSize_)) { + new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii])); + } + } else { + new (&elementsVector_) std::vector(std::move(rhs.elementsVector_)); + } + } + + TupleElements& operator=(TupleElements&& rhs) noexcept { + if (inlineSize_) { + if (rhs.inlineSize_) { + for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) { + elementsInline_[ii] = std::move(rhs.elementsInline_[ii]); + } + if (rhs.inlineSize_ > inlineSize_) { + for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii])); + } + } else { + for (const auto ii : 
c10::irange(rhs.inlineSize_, inlineSize_)) { + elementsInline_[ii].~IValue(); + } + } + } else { + destroyInline(); + new (&elementsVector_) std::vector(std::move(rhs.elementsVector_)); + } + } else { + if (rhs.inlineSize_) { + elementsVector_.~vector(); + for (const auto ii : c10::irange(rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii])); + } + } else { + elementsVector_ = std::move(rhs.elementsVector_); + } + } + inlineSize_ = rhs.inlineSize_; + return *this; + } + + C10_NODISCARD c10::ArrayRef asArrayRef() const { + if (inlineSize_) { + return c10::ArrayRef(elementsInline_, inlineSize_); + } else { + return elementsVector_; + } + } + + // Mimic implicit conversion from std::vector to ArrayRef. + operator c10::ArrayRef() const { + return asArrayRef(); + } + + static size_t hash(const TupleElements& v) { + return c10::hash>()(v.asArrayRef()); + } + + void setContents(std::vector&& contents) { + if (inlineSize_) { + destroyInline(); + new (&elementsVector_) std::vector(std::move(contents)); + inlineSize_ = 0; + } else { + elementsVector_ = std::move(contents); + } + } + + C10_NODISCARD bool empty() const { + return inlineSize_ ? false : elementsVector_.empty(); + } + + C10_NODISCARD size_t size() const { + return inlineSize_ ? inlineSize_ : elementsVector_.size(); + } + + C10_NODISCARD IValue& operator[](size_t idx) { + if (inlineSize_) { + return elementsInline_[idx]; + } else { + return elementsVector_[idx]; + } + } + + C10_NODISCARD const IValue& operator[](size_t idx) const { + if (inlineSize_) { + return elementsInline_[idx]; + } else { + return elementsVector_[idx]; + } + } + + C10_NODISCARD IValue& at(size_t idx) { + if (inlineSize_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3); + TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_); + return elementsInline_[idx]; + } else { + return elementsVector_.at(idx); + } + } + + C10_NODISCARD const IValue& at(size_t idx) const { + if (inlineSize_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3); + TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_); + return elementsInline_[idx]; + } else { + TORCH_CHECK(idx < elementsVector_.size(), "TupleElements: invalid index Index = ", idx, "; Length = ", elementsVector_.size()); + return elementsVector_.at(idx); + } + } + + C10_NODISCARD iterator begin() { + if (inlineSize_) { + return elementsInline_; + } else { + return elementsVector_.data(); + } + } + + C10_NODISCARD iterator end() { + if (inlineSize_) { + return elementsInline_ + inlineSize_; + } else { + return elementsVector_.data() + elementsVector_.size(); + } + } + + C10_NODISCARD const_iterator begin() const { + if (inlineSize_) { + return elementsInline_; + } else { + return elementsVector_.data(); + } + } + + C10_NODISCARD const_iterator end() const { + if (inlineSize_) { + return elementsInline_ + inlineSize_; + } else { + return elementsVector_.data() + elementsVector_.size(); + } + } + + C10_NODISCARD const_iterator cbegin() const { + return begin(); + } + + C10_NODISCARD const_iterator cend() const { + return end(); + } + + C10_NODISCARD std::vector vec() const & { + return asArrayRef().vec(); + } + + C10_NODISCARD IValue& back() { + return *(end() - 1); + } + + C10_NODISCARD const IValue& back() const { + return *(end() - 1); + } + + C10_NODISCARD std::vector vec() && { + std::vector result; + result.reserve(size()); + for (auto&& iv : *this) { + 
result.push_back(std::move(iv)); + } + return result; + } + + // More compatibility shims for the overwhelming amount of code that + // likes to copy tuple elements into a vector; see comment above the + // copy constructor. + operator std::vector() const & { + return vec(); + } + + operator std::vector() && { + return vec(); + } +}; + +template +struct TupleTypeFactory {}; + +template <> +struct TORCH_API TupleTypeFactory { + static TupleTypePtr create(std::vector types) { + return TupleType::create(std::move(types)); + } + static TupleTypePtr fallback(const Type& type); +}; + +template <> +struct TORCH_API TupleTypeFactory { + static DynamicTypePtr create(std::vector elemTypes); + static DynamicTypePtr fallback(const Type&); +}; + +struct TORCH_API Tuple : c10::intrusive_ptr_target { + private: + TupleElements elements_; + mutable c10::TypePtr type_; // lazily computed for unnamed tuples + + public: + // named tuples have additional type information, so we + // directly create them tagged + static c10::intrusive_ptr createNamed( + std::vector elements_, + c10::TypePtr type_) { + return c10::make_intrusive(std::move(elements_), std::move(type_)); + } + + static c10::intrusive_ptr createNamed( + TupleElements elements_, + std::shared_ptr type_) { + return c10::make_intrusive(std::move(elements_), std::move(type_)); + } + + static c10::intrusive_ptr createNamed( + std::initializer_list elements_, + std::shared_ptr type_) { + return createNamed(TupleElements(c10::ArrayRef(elements_)), std::move(type_)); + } + + // MSVC apparently can't disambiguate the other two overloads of + // create when passed an initializer_list without this. + static c10::intrusive_ptr create(std::initializer_list elements_) { + return create(c10::ArrayRef(elements_)); + } + + static c10::intrusive_ptr create(std::vector elements_) { + return c10::make_intrusive(std::move(elements_)); + } + + static c10::intrusive_ptr create(TupleElements elements_) { + return c10::make_intrusive(std::move(elements_)); + } + + static c10::intrusive_ptr create(c10::ArrayRef elements_) { + return create(TupleElements(elements_)); + } + + static c10::intrusive_ptr create(IValue e1) { + return c10::make_intrusive(std::move(e1)); + } + + static c10::intrusive_ptr create(IValue e1, IValue e2) { + return c10::make_intrusive(std::move(e1), std::move(e2)); + } + + static c10::intrusive_ptr create(IValue e1, IValue e2, IValue e3) { + return c10::make_intrusive(std::move(e1), std::move(e2), std::move(e3)); + } + + private: + // Workaround inability to use `>` operator in template argument list. + template + static constexpr bool hasMoreThanThreeArgs() { + return sizeof...(Args) > 3; + } + + public: + template + static c10::intrusive_ptr create(Args&&... elements_) { + switch (sizeof...(Args)) { + case 1: + case 2: + case 3: + return create(IValue(std::forward(elements_))...); + default: + return create( + std::vector{IValue(std::forward(elements_))...}); + } + } + + // Again, it would be nice to make this noncopyable, but there's a + // lot of extant code that copies Tuples. 
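+  // A minimal caller-side sketch of the two access patterns discussed above
+  // (`forward(...)` is an illustrative stand-in for any call returning a
+  // Tuple-holding IValue, not part of this header):
+  //
+  //   // Borrowing: one refcount bump for the Tuple, none per element.
+  //   auto tuplePtr = forward(...).toTuple();
+  //   const auto& elems = tuplePtr->elements();
+  //
+  //   // Copying: each element is copied via the conversion operators above.
+  //   std::vector<IValue> copied = forward(...).toTupleRef().elements();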
+ // Tuple(const Tuple& rhs) = delete; + + const TupleElements& elements() const& { + return elements_; + } + + TupleElements elements() && { + return std::move(elements_); + } + + void setElements(std::vector&& elements) { + elements_.setContents(std::move(elements)); + } + + void setElements(TupleElements&& elements) { + elements_ = std::move(elements); + } + + void unsafeSetElement(size_t idx, const IValue& element) { + elements_[idx] = element; + } + + void unsafeSetElement(size_t idx, IValue&& element) { + elements_[idx] = std::move(element); + } + + size_t size() const { + return elements_.size(); + } + + template + std::shared_ptr type() const { + if (!type_) { + type_ = TupleTypeFactory::create(fmap(elements(), [&](const IValue& v) { + return v.type(); + })); + } + if (auto t = type_->cast()) { + return t; + } + return TupleTypeFactory::fallback(*type_); + } + + static size_t hash(const Tuple& t) { + return c10::get_hash(t.elements()); + } + + TORCH_API friend bool operator==( + const ivalue::Tuple& lhs, + const ivalue::Tuple& rhs); + + private: + // NOTE: If we try to avoid the overloads without + // `std::shared_ptr type` by defaulting it to nullptr, we + // end up having to call (part of) the shared_ptr destructor for + // `type` even though we should know statically it won't do + // anything. + explicit Tuple(std::vector elements) + : elements_(std::move(elements)){} + + explicit Tuple(std::vector elements, c10::TypePtr type) + : elements_(std::move(elements)), type_(std::move(type)) {} + + explicit Tuple(TupleElements&& elements) + : elements_(std::move(elements)) {} + + explicit Tuple(TupleElements&& elements, std::shared_ptr type) + : elements_(std::move(elements)), type_(std::move(type)) {} + + explicit Tuple(IValue&& e1) + : elements_(std::move(e1)) {} + + explicit Tuple(IValue&& e1, std::shared_ptr type) + : elements_(std::move(e1)), type_(std::move(type)) {} + + explicit Tuple(IValue&& e1, IValue&& e2) + : elements_(std::move(e1), std::move(e2)) {} + + explicit Tuple(IValue&& e1, IValue&& e2, std::shared_ptr type) + : elements_(std::move(e1), std::move(e2)), type_(std::move(type)) {} + + explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3) + : elements_(std::move(e1), std::move(e2), std::move(e3)) {} + + explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3, std::shared_ptr type) + : elements_(std::move(e1), std::move(e2), std::move(e3)), type_(std::move(type)) {} + + friend class c10::intrusive_ptr; +}; + +struct Object; +struct PyObjectHolder; +struct EnumHolder; +} // namespace ivalue + +// Future +struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target { + private: + // Keep this private in order to force users to go through make_intrusive and + // thus prevent creating a Future that's not held by an intrusive_ptr. + explicit Future(TypePtr type, std::vector devices={}) + : type_(std::move(type)), + impl_(getTypeOfDevices(devices)), + devices_(sortAndDeduplicateDevices(impl_, std::move(devices))) {} + + friend c10::intrusive_ptr; + + public: + Future(const Future&) = delete; + Future(Future&&) = delete; + Future& operator=(const Future&) = delete; + Future& operator=(Future&&) = delete; + + struct TORCH_API FutureError final : public std::exception { + explicit FutureError(std::string&& error_msg_) + : error_msg(std::move(error_msg_)) {} + + FutureError() = default; + + const char* what() const noexcept override { + return error_msg.c_str(); + } + + std::string error_msg; + }; + + /** + * Wait on the future until it completes. 
+   */
+  void wait() {
+    std::unique_lock<std::mutex> lock(mutex_);
+    finished_cv_.wait(lock, [&]() -> bool { return completed_; });
+    synchronizeWithCurrentStreams();
+  }
+
+  /**
+   * Wait on the future until it completes and throw an
+   * exception if an error exists.
+   */
+  void waitAndThrow() {
+    wait();
+
+    if (eptr_) {
+      std::rethrow_exception(eptr_);
+    }
+  }
+
+  /**
+   * Explicitly mark the future as completed with the output value. Optionally,
+   * the storages for all tensors in IValue can be passed as well. The DataPtrs
+   * of these storages are used to synchronize CUDA streams. If storages isn't
+   * given we will attempt to extract them from the value, if we need to (this
+   * happens if a non-empty set of devices was given to the constructor). Thus
+   * one only needs to provide storages when 1) they cannot be extracted through
+   * IValue::getSubValues() or through pickling in the case of a Python object;
+   * or when 2) customized storage extraction is more efficient.
+   */
+  using WeakStorage = c10::weak_intrusive_ptr<c10::StorageImpl>;
+  void markCompleted(
+      IValue value,
+      c10::optional<std::vector<WeakStorage>> storages = c10::nullopt) {
+    // Start by performing all steps that can throw, before setting any field.
+    // Do this before even acquiring the mutex, because extractStorages might
+    // acquire the GIL, which could lead to a lock inversion with our mutex.
+    // See https://github.com/pytorch/pytorch/issues/58239.
+    std::vector<WeakStorage> actualStorages;
+    std::vector<c10::Device> usedDevices;
+    try {
+      // FIXME We should always extract DataPtrs, in order to catch the case of
+      // users using CUDA values but forgetting to set devices, which currently
+      // leads to a silent synchronization/correctness issue. However, as this
+      // might worsen perf in CPU-only cases, we should only do so after careful
+      // benchmarks.
+      if (impl_.type() != c10::kCPU) {
+        actualStorages =
+            storages.has_value() ? std::move(*storages) : extractStorages(value);
+        usedDevices = getDevicesOfStorages(impl_, actualStorages);
+        ensureIsSubsetOfDevices(usedDevices, devices_);
+      }
+    } catch (const std::exception&) {
+      setError(std::current_exception());
+      return;
+    }
+
+    std::unique_lock<std::mutex> lock(mutex_);
+    TORCH_CHECK(
+        !completed(),
+        "Attempting to mark a completed Future as complete again. Note that "
+        "a Future can only be marked completed once.");
+
+    // Only set value_ and completed_ flag once all checks and preparation steps
+    // have returned successfully to allow for proper error propagation.
+    value_ = std::move(value);
+    completed_ = true;
+
+    currentDevice_ = impl_.getDevice();
+    storages_ = std::move(actualStorages);
+    for (const c10::Device& device : usedDevices) {
+      c10::Event event(impl_.type());
+      event.record(impl_.getStream(device));
+      events_.push_back(std::move(event));
+    }
+
+    std::vector<std::function<void(Future&)>> cbs;
+    cbs.swap(callbacks_);
+    lock.unlock();
+
+    finished_cv_.notify_all();
+    for (auto& callback : cbs) {
+      invokeCallback(std::move(callback));
+    }
+  }
+
+  void markCompleted() {
+    markCompleted(IValue{});
+  }
+
+  void setError(std::exception_ptr eptr) {
+    std::unique_lock<std::mutex> lock(mutex_);
+    setErrorInternal(std::move(eptr), lock);
+  }
+
+  void setErrorIfNeeded(std::exception_ptr eptr) {
+    std::unique_lock<std::mutex> lock(mutex_);
+    if (completed_) {
+      // This should be rare and shouldn't cause log spew. It's important to
+      // log errors, and that's why we have this log here.
+ std::string msg = c10::str( + "Skipping setting following error on the Future since " + "it is already marked completed (this is not necessarily " + "an error):\n", + tryRetrieveErrorMessageInternal(eptr)); + if (eptr_) { + msg += c10::str( + ", \nOriginal exception:\n", + tryRetrieveErrorMessageInternal(eptr_)); + } + LOG(INFO) << msg; + return; + } else { + setErrorInternal(std::move(eptr), lock); + } + } + + // Get the result of the current future. + IValue value() { + std::unique_lock lock(mutex_); + AT_ASSERT(completed()); + if (eptr_) { + std::rethrow_exception(eptr_); + } + return value_; + } + + // This accessor should only be used if we know that the future is + // completed() with no error. + const IValue& constValue() const { + std::unique_lock lock(mutex_); + AT_ASSERT(completed()); + TORCH_INTERNAL_ASSERT( + !eptr_, + "value() accessor should only be used when future is not completed with ", + "an error, but future had the following error: ", + tryRetrieveErrorMessageInternal(eptr_) + ); + return value_; + } + + // This accessor should only be used if we know that the future is + // completed() with no error. + const std::vector& storages() const { + std::unique_lock lock(mutex_); + AT_ASSERT(completed()); + AT_ASSERT(!eptr_); + return storages_; + } + + /** + * Add a callback to the future. + * The callbacks will be executed once the future completes. + * If the future has already completed, + * this function will execute the callback immediately. + */ + template + void addCallback(T callback) { +#if __cpp_lib_is_invocable >= 201703 + static_assert( + std::is_invocable_r::value, + "The callback must have signature void(Future&)"); +#endif + std::unique_lock lock(mutex_); + if (completed()) { + lock.unlock(); + invokeCallback(std::move(callback)); + return; + } + callbacks_.emplace_back(std::move(callback)); + } + + /** + * Add a callback to the future, and return another Future to hold the return + * value of the callback. This is necessary when the callback provider needs + * to know for sure when the callback has finished. 
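+   *
+   * For reference, a minimal caller-side sketch of the chaining this enables
+   * (illustrative only; assumes an already-created int-typed future `fut`):
+   *
+   *   c10::intrusive_ptr<ivalue::Future> next = fut->then(
+   *       [](ivalue::Future& parent) -> IValue {
+   *         return parent.value().toInt() + 1;
+   *       },
+   *       IntType::get());
+   *   next->wait();  // `next` completes only after the callback has run.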
+ */ + template + c10::intrusive_ptr then(T callback, TypePtr type) { + using IValueWithStorages = std::tuple>; +#if __cpp_lib_is_invocable >= 201703 + static_assert( + guts::disjunction< + std::is_invocable_r, + std::is_invocable_r>::value, + "The callback must have signature IValue(Future&) or " + "std::tuple>(Future&)"); +#endif + auto childFut = createInstance(std::move(type)); + addCallback([childFut, + cb = std::move(callback)](Future& parentFut) mutable { + try { + guts::if_constexpr, + IValueWithStorages>::value>( + [&](auto identity) { + IValue value; + std::vector storages; + std::tie(value, storages) = identity(cb)(parentFut); + childFut->markCompleted(std::move(value), std::move(storages)); + }, + [&](auto identity) { + childFut->markCompleted(identity(cb)(parentFut)); + }); + } catch (std::exception&) { + childFut->setError(std::current_exception()); + } + }); + return childFut; + } + + template + c10::intrusive_ptr thenAsync(T callback, TypePtr type) { +#if __cpp_lib_is_invocable >= 201703 + static_assert( + std::is_invocable_r, T, Future&>::value, + "The callback must have signature c10::intrusive_ptr(Future&)"); +#endif + auto childFut = createInstance(std::move(type)); + addCallback( + [childFut, cb = std::move(callback)](Future& parentFut) mutable { + c10::intrusive_ptr intermediateFut; + try { + intermediateFut = cb(parentFut); + } catch (std::exception&) { + childFut->setError(std::current_exception()); + return; + } + intermediateFut->addCallback( + [childFut = std::move(childFut)](Future& intermediateFut) { + if (intermediateFut.hasError()) { + childFut->setError(intermediateFut.exception_ptr()); + } else { + childFut->markCompleted( + intermediateFut.value(), intermediateFut.storages()); + } + }); + }); + return childFut; + } + + // Tries to retrieve the error message from std::exception_ptr. + std::string tryRetrieveErrorMessage() const { + TORCH_CHECK(hasError(), "No error present on the future."); + std::unique_lock lock(mutex_); + return tryRetrieveErrorMessageInternal(eptr_); + } + + // Check if the current future has completed + bool completed() const { + return completed_; + } + + bool hasValue() const { + std::unique_lock lock(mutex_); + return completed_ && !eptr_; + } + + bool hasError() const { + std::unique_lock lock(mutex_); + return eptr_ ? true : false; + } + + std::exception_ptr exception_ptr() const { + std::unique_lock lock(mutex_); + return eptr_; + } + + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const Future& v); + + TypePtr elementType() const { + return type_; + } + + const std::vector& devices() const { + return devices_; + } + + // This method should be used when one intends to manually create a child + // future, for example when implementing a customized version of then(). + c10::intrusive_ptr createInstance(at::TypePtr type) { + return c10::make_intrusive(std::move(type), devices_); + } + + private: + + // This method should always be used when invoking a callback (regardless of + // how/when that happens) as it will ensure that the proper "environment" is + // set up before running the callback, as in, it will set up the CUDA streams, + // synchronize them with the value, and so on (if needed). 
+ template + void invokeCallback(T callback) { +#if __cpp_lib_is_invocable >= 201703 + static_assert( + std::is_invocable_r::value, + "The callback must have signature void(Future&)"); +#endif + + c10::OptionalDeviceGuard deviceGuard(currentDevice_); + + std::vector streams; + for (const c10::Device& device : devices_) { + streams.push_back(impl_.getStreamFromGlobalPool(device)); + } + c10::MultiStreamGuard streamGuard(streams); + synchronizeWithCurrentStreams(); + + callback(*this); + } + + // This method should be called before this future's value is used, as it + // ensures that the CUDA streams that are "current" at the callsite properly + // synchronize with the value. + void synchronizeWithCurrentStreams() { + for (c10::Event& event : events_) { + event.block(impl_.getStream(event.device())); + } + + for (const WeakStorage& weak_storage : storages_) { + c10::intrusive_ptr storage = weak_storage.lock(); + if (!storage) { + continue; + } + if (!storage->device().is_cpu()) { + impl_.recordDataPtrOnStream( + storage->data_ptr(), impl_.getStream(storage->device())); + } + } + } + + void setErrorInternal( + std::exception_ptr eptr, + std::unique_lock& lock) { + TORCH_CHECK( + !eptr_, + "Error already set on this Future: ", + tryRetrieveErrorMessageInternal(eptr_), + ", trying to set error: ", + tryRetrieveErrorMessageInternal(eptr)); + TORCH_INTERNAL_ASSERT(!completed(), "Future is already marked completed"); + completed_ = true; + eptr_ = std::move(eptr); + + std::vector> cbs; + cbs.swap(callbacks_); + lock.unlock(); + + finished_cv_.notify_all(); + for (auto& callback : cbs) { + invokeCallback(std::move(callback)); + } + } + + // Tries to retrieve the error message from std::exception_ptr. + std::string tryRetrieveErrorMessageInternal(std::exception_ptr eptr) const { + try { + std::rethrow_exception(eptr); + } catch (const std::exception& e) { + return e.what(); + } catch (...) { + return "Unknown Exception Type"; + } + } + + // Defined in ivalue.cpp. 
+ static std::vector extractStorages( + const at::IValue& value); + + static std::vector getDevicesOfStorages( + const c10::impl::VirtualGuardImpl& impl, + const std::vector& storages) { + c10::DeviceIndex deviceCount = impl.deviceCount(); + std::vector isDeviceUsed(deviceCount, false); + for (const WeakStorage& weak_storage : storages) { + c10::intrusive_ptr storage = weak_storage.lock(); + if (!storage) { + continue; + } + c10::Device device = storage->device(); + if (!device.is_cpu()) { + TORCH_CHECK_VALUE( + device.type() == impl.type(), + "Expected all data ptrs to be on a device of type ", + impl.type(), + ", got one on device ", + device); + isDeviceUsed[device.index()] = true; + } + } + std::vector devices; + for (c10::DeviceIndex idx = 0; idx < deviceCount; idx++) { + if (isDeviceUsed[idx]) { + devices.emplace_back(impl.type(), idx); + } + } + return devices; + } + + static std::string formatSetOfDevices( + const std::vector& devices) { + if (devices.empty()) { + return "(none)"; + } + std::ostringstream oss; + oss << devices[0]; + for (const auto idx : c10::irange(1, devices.size())) { + if (idx == devices.size() - 1) { + oss << " and "; + } else { + oss << ", "; + } + oss << devices[idx]; + } + return oss.str(); + } + + static c10::DeviceType getTypeOfDevices( + const std::vector& devices) { + if (devices.empty()) { + return c10::kCPU; + } + c10::DeviceType deviceType = devices[0].type(); + for (const auto idx : c10::irange(1, devices.size())) { + TORCH_CHECK_VALUE( + devices[idx].type() == deviceType, + "Expected all devices to be of the same type, but got a mismatch between ", + devices[0], + " and ", + devices[idx]); + } + return deviceType; + } + + // We need devices to be sorted in order to use ensureIsSubsetOfDevices. + static std::vector sortAndDeduplicateDevices( + const c10::impl::VirtualGuardImpl& /*impl*/, + std::vector devices) { + std::sort( + devices.begin(), devices.end(), + [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); }); + // Deduplicate by compacting. + size_t targetIdx = 0; + for (const auto sourceIdx : c10::irange(devices.size())) { + TORCH_CHECK_VALUE( + devices[sourceIdx].has_index(), + "Expected devices to have indices, got ", devices[sourceIdx]); + if (targetIdx > 0 && devices[targetIdx - 1].index() == devices[sourceIdx].index()) { + // It's a duplicate, skip it. + continue; + } + if (sourceIdx != targetIdx) { + devices[targetIdx] = devices[sourceIdx]; + } + targetIdx++; + } + // If there were duplicates there's now a gap at the end: trim it. Resizing + // requires the item type to be default-constructible (which c10::Device is + // not) because in principle it could be required to create new items. Since + // we know we'll shrink the vector, we provide a custom dummy value instead. + devices.resize(targetIdx, c10::Device(c10::kCPU)); + return devices; + } + + static void ensureIsSubsetOfDevices( + const std::vector& subset, + const std::vector& superset) { + // We assume the devices in both vectors have the same consistent type, and + // their indices are unique and sorted. 
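+    // Illustration with hypothetical values: subset = {cuda:1, cuda:3} and
+    // superset = {cuda:0, cuda:1, cuda:2} leaves excessDevices = {cuda:3}
+    // after the set_difference below, so the TORCH_CHECK_VALUE fires.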
+ std::vector excessDevices; + std::set_difference( + subset.begin(), + subset.end(), + superset.begin(), + superset.end(), + std::back_inserter(excessDevices), + [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); }); + TORCH_CHECK_VALUE( + excessDevices.empty(), + "The result contained tensors residing on device(s) ", + formatSetOfDevices(excessDevices), + " which are not among the expected device(s) ", + formatSetOfDevices(superset)); + } + + mutable std::mutex mutex_; + std::atomic_bool completed_ = {false}; // is this future complete + std::condition_variable finished_cv_; + + IValue value_; // when finished the value + TypePtr type_; + std::vector> callbacks_; + std::exception_ptr eptr_; + + // An upcast pointer to a virtual class which allows us to manipulate events, + // streams, ... in a generic way, without an explicit dependency on CUDA. + const c10::impl::VirtualGuardImpl impl_; + + // The device that was current when markCompleted was called, which we'll + // restore when invoking callbacks. It's optional because we'll only store it + // if the future completes successfully. + optional currentDevice_; + + // The events that correspond to the completion of the async I/O kernels. They + // are recorded on the appropriate streams when the future is marked completed + // and can then be queried/waited/blocked on. There is one event for each + // distinct device on which the value's tensors reside. + std::vector events_; + + // A cached version of the storages extracted from the value when the future + // is first marked completed. + std::vector storages_; + + // The bounding set of devices that this future, and any of its children, is + // allowed to use. This is a superset of the set of devices used by the events + // above. We need this to know what streams (for which devices) to set as + // current when invoking a callback, thus allowing the callback to use devices + // that the parent future didn't use. This field is set to the value provided + // in the constructor and will be "inherited" by all child futures. + const std::vector devices_; +}; + +// Input is a list of Futures with the same target type. +// Output is a Future to the List of completed Futures. +TORCH_API intrusive_ptr collectAll( + c10::List> srcs); +// Input is a List of Futures with the same target type. +// Output is a Future that will be updated with a seen value. +TORCH_API intrusive_ptr collectAny( + c10::List> srcs); + +// User-defined object. +struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target { + public: + // In general, class types hold a shared_ptr to its owning CompilationUnit, + // so that its type and methods do not get deallocated while the class exists. + // However, the CompilationUnit holds ownership of the type's graphs, so + // inserting a constant object into a Graph would create a reference cycle if + // that constant object held a shared_ptr to its CU. 
+  // For these objects we instantiate them with non-owning references to their CU
+  Object(WeakOrStrongTypePtr type, size_t numSlots) : type_(std::move(type)) {
+    slots_.resize(numSlots);
+  }
+
+  Object(StrongTypePtr type, size_t numSlots)
+      : type_(WeakOrStrongTypePtr(std::move(type))) {
+    slots_.resize(numSlots);
+  }
+
+  static c10::intrusive_ptr<Object> create(
+      WeakOrStrongTypePtr type,
+      size_t numSlots) {
+    return c10::make_intrusive<Object>(std::move(type), numSlots);
+  }
+
+  static c10::intrusive_ptr<Object> create(
+      StrongTypePtr type,
+      size_t numSlots) {
+    return c10::make_intrusive<Object>(std::move(type), numSlots);
+  }
+
+  static c10::intrusive_ptr<Object> create(ClassTypePtr classType, size_t numSlots);
+
+  /**
+   * Slot API.
+   *
+   * Attributes are stored as a simple vector so that lookups are fast at
+   * runtime. A "slot" is just an index into that vector, which can be computed
+   * statically if you have access to the class type. Use this API if you are
+   * writing compiler stuff.
+   */
+  void setSlot(size_t slot, IValue v) {
+    if (slot >= slots_.size()) {
+      // for module types, it is possible that the members of the class have
+      // expanded after the object was created. In this case, we expand
+      // the slots to the right size
+      resizeObject(slot);
+    }
+    slots_[slot] = std::move(v);
+  }
+
+  const IValue& getSlot(size_t slot) const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(slot < slots_.size());
+    // NOTE: This lookup is fairly hot, so we use unchecked access to the
+    // vector. Errors should still be detectable with ASan.
+    return slots_[slot];
+  }
+
+  void unsafeRemoveSlot(size_t slot) {
+    TORCH_CHECK(slot < slots_.size());
+    slots_.erase(slots_.begin() + slot);
+  }
+
+  /**
+   * Attribute API.
+   *
+   * Wrappers around the slot stuff so that users can access attributes
+   * directly. Use this API if you are a user.
+   *
+   * Note: Unlike in Python, TorchScript must make a distinction between
+   * attributes (which are IValues) and methods (which are Methods). If you
+   * want a method, use `obj.type()->getMethod()`
+   */
+  IValue getAttr(const std::string& name) const;
+  void setAttr(const std::string& name, IValue v);
+  // Remove attribute by name; the caller is responsible for
+  // the safety of this operation.
+  // We didn't remove the attribute in the type because the type
+  // might be shared by multiple objects.
+  // Therefore, after removing an attribute, the object is in an inconsistent
+  // state, with more attribute types in its Type than attribute slots; the
+  // user needs to make the object consistent by removing the attribute from
+  // the type as well.
+  void unsafeRemoveAttr(const std::string& name);
+
+  std::string name() const;
+
+  const std::vector<IValue>& slots() const {
+    return slots_;
+  }
+  std::shared_ptr<ClassType> type() const;
+
+  std::shared_ptr<torch::jit::CompilationUnit> compilation_unit() {
+    if (type_.holds_strong_ref()) {
+      return type_.cu_.getStrongRefOrThrow();
+    } else {
+      auto weak_ptr = type_.cu_.getWeakRefOrThrow();
+      return std::shared_ptr<torch::jit::CompilationUnit>(weak_ptr);
+    }
+  }
+
+  c10::intrusive_ptr<Object> copy_to_weak_compilation_ref() const;
+
+  void unsafe_make_weak_compilation_ref() {
+    type_ = WeakOrStrongTypePtr(type_.asWeakTypePtr());
+  }
+
+  c10::intrusive_ptr<Object> copy() const;
+
+  c10::intrusive_ptr<Object> deepcopy() const;
+
+  c10::intrusive_ptr<Object> deepcopy(IValue::HashAliasedIValueMap& memo) const;
+
+  bool is_weak_compilation_ref() const {
+    return !type_.holds_strong_ref();
+  }
+
+  bool is_empty_strong_compilation_ref() const {
+    return type_.holds_empty_strong_ref();
+  }
+
+ private:
+  void resizeObject(size_t slot);
+  WeakOrStrongTypePtr type_;
+  std::vector<IValue> slots_;
+};
+
+// virtual ivalue PyObjectHolder that holds a py::object; we make this virtual
+// because the py::object and refcounting logic should happen in
+// libtorch_python; see the concrete implementation in python_ivalue.h
+struct ivalue::PyObjectHolder : c10::intrusive_ptr_target {
+ public:
+  virtual PyObject* getPyObject() = 0;
+  virtual c10::InferredType tryToInferType() = 0;
+  virtual IValue toIValue(const TypePtr& type, c10::optional<int32_t> N = c10::nullopt) = 0;
+  virtual std::string toStr() = 0;
+  virtual std::vector<at::Tensor> extractTensors() = 0;
+
+  virtual ~PyObjectHolder(){};
+};
+
+struct ivalue::EnumHolder : c10::intrusive_ptr_target {
+ public:
+  EnumHolder(std::shared_ptr<EnumType> type, std::string name, IValue value)
+      : type_(std::move(type)),
+        name_(std::move(name)),
+        value_(std::move(value)) {}
+
+  bool is(const ivalue::EnumHolder& rhs) {
+    return *this == rhs;
+  }
+
+  friend bool operator==(
+      const ivalue::EnumHolder& lhs,
+      const ivalue::EnumHolder& rhs);
+
+  TORCH_API friend std::ostream& operator<<(
+      std::ostream& out,
+      const EnumHolder& v);
+
+  TORCH_API const std::string qualifiedClassName() const;
+
+  const std::string unqualifiedClassName() const;
+
+  const std::string& name() const {
+    return name_;
+  }
+
+  const IValue& value() const {
+    return value_;
+  }
+
+  std::shared_ptr<EnumType> type() const {
+    return type_;
+  }
+
+ private:
+  std::shared_ptr<EnumType> type_;
+  std::string name_;
+  IValue value_;
+};
+
+#undef TORCH_FORALL_TAGS
+
+namespace detail {
+
+struct _guarded_unsigned_long_unique_dummy final {
+  _guarded_unsigned_long_unique_dummy(int64_t){};
+};
+using _guarded_unsigned_long = std::conditional_t<
+    std::is_same<unsigned long, uint32_t>::value ||
+        std::is_same<unsigned long, uint64_t>::value,
+    _guarded_unsigned_long_unique_dummy,
+    unsigned long>;
+
+} // namespace detail
+
+inline ivalue::Object& IValue::toObjectRef() const {
+  AT_ASSERT(isObject(), "Expected Object but got ", tagKind());
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), "Attempted to create null reference");
+  return *static_cast<c10::ivalue::Object*>(payload.u.as_intrusive_ptr);
+}
+
+// note: when adding a DEFINE_TO case here you should also add a
+// toX method to IValue. These named methods are much more discoverable
+// than the to templated function.
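+// For instance, DEFINE_TO(bool, toBool) below expands, in essence, to:
+//
+//   template <>
+//   inline bool IValue::to<bool>() && {
+//     return static_cast<bool>(std::move(*this).toBool());
+//   }
+//   template <>
+//   inline bool IValue::to<bool>() const& {
+//     return static_cast<bool>(this->toBool());
+//   }
+//
+// (The const& overload really returns ivalue_to_const_ref_overload_return<T>::type,
+// which collapses to plain T for value types like bool.)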
+
+#define DEFINE_TO(T, method_name)                          \
+  template <>                                              \
+  inline T IValue::to<T>()&& {                             \
+    return static_cast<T>(std::move(*this).method_name()); \
+  }                                                        \
+  template <>                                              \
+  inline c10::detail::ivalue_to_const_ref_overload_return<T>::type IValue::to<T>() const& { \
+    typedef c10::detail::ivalue_to_const_ref_overload_return<T>::type return_type;          \
+    return static_cast<return_type>(this->method_name());  \
+  }
+
+DEFINE_TO(at::Tensor, toTensor)
+DEFINE_TO(at::Storage, toStorage)
+DEFINE_TO(c10::Stream, toStream)
+DEFINE_TO(float, toDouble)
+DEFINE_TO(double, toDouble)
+DEFINE_TO(c10::complex<double>, toComplexDouble)
+DEFINE_TO(unsigned char, toInt)
+DEFINE_TO(signed char, toInt)
+DEFINE_TO(unsigned short, toInt)
+DEFINE_TO(short, toInt)
+DEFINE_TO(int, toInt)
+DEFINE_TO(uint32_t, toInt)
+DEFINE_TO(uint64_t, toInt)
+DEFINE_TO(detail::_guarded_unsigned_long, toInt)
+DEFINE_TO(int64_t, toInt)
+DEFINE_TO(bool, toBool)
+DEFINE_TO(c10::intrusive_ptr<caffe2::Blob>, toBlob);
+DEFINE_TO(c10::intrusive_ptr<ivalue::ConstantString>, toString)
+DEFINE_TO(c10::intrusive_ptr<ivalue::Object>, toObject)
+DEFINE_TO(at::Scalar, toScalar)
+DEFINE_TO(c10::List<int64_t>, toIntList)
+DEFINE_TO(c10::List<double>, toDoubleList)
+DEFINE_TO(c10::List<c10::complex<double>>, toComplexDoubleList)
+DEFINE_TO(c10::List<bool>, toBoolList)
+DEFINE_TO(c10::List<at::Tensor>, toTensorList)
+DEFINE_TO(c10::impl::GenericList, toList)
+DEFINE_TO(c10::impl::GenericDict, toGenericDict)
+DEFINE_TO(c10::intrusive_ptr<ivalue::Tuple>, toTuple)
+DEFINE_TO(std::string, toStringRef)
+DEFINE_TO(c10::string_view, toStringView)
+DEFINE_TO(c10::intrusive_ptr<ivalue::Future>, toFuture)
+DEFINE_TO(c10::intrusive_ptr<c10::RRefInterface>, toRRef)
+DEFINE_TO(c10::intrusive_ptr<at::Quantizer>, toQuantizer)
+DEFINE_TO(IValue, toIValue)
+DEFINE_TO(c10::Device, toDevice)
+DEFINE_TO(at::ScalarType, toScalarType)
+DEFINE_TO(at::Layout, toLayout)
+DEFINE_TO(at::MemoryFormat, toMemoryFormat)
+DEFINE_TO(at::QScheme, toQScheme)
+DEFINE_TO(at::Dimname, toDimname)
+DEFINE_TO(at::Generator, toGenerator)
+DEFINE_TO(c10::SymInt, toSymInt)
+DEFINE_TO(c10::SymFloat, toSymFloat)
+
+template <typename T>
+struct _fake_type {};
+
+// generic_to<T> converts an IValue from a generic list or generic dict
+// to a concrete list/dict type like List<E>, Dict<...> or optional<T>.
+// Note that in the case of lists, this only works for IValue-based lists,
+// i.e. not for int64_t, double, ...
+// generic_to<T> is an implementation detail of IValue::to<T> and not
+// supposed to be called directly.
+// The _fake_type<T> parameter allows us to overload
+// based on the return type.
+template <class Elem>
+// TODO this is deprecated but we don't throw a warning because a lot of ops in
+// native_functions.yaml still return std::vector.
+// C10_DEPRECATED_MESSAGE("IValues based on std::vector<T> are potentially slow
+// and deprecated. Please use torch::List<T> instead.")
+std::vector<Elem> generic_to(IValue ivalue, _fake_type<std::vector<Elem>>) {
+  // We need to do a deep copy of the vector because there might be other
+  // references to this same IValue that also use the list. We can't just
+  // move the elements out.
+ auto list = std::move(ivalue).to>(); + std::vector result; + result.reserve(list.size()); + for (Elem v : list) { + result.push_back(std::move(v)); + } + return result; +} + +template +c10::intrusive_ptr IValue::toCustomClass() && { + static_assert( + std::is_base_of::value == true, + "toCustomClass requires that template parameter T must inherit " + "from torch::CustomClassHolder"); + auto obj = toObject(); + TORCH_CHECK( + obj->slots().size() == 1, + "Tried to cast IValue to custom class but it did " + "not contain a custom class!"); + const auto* expected_type = c10::getCustomClassType>().get(); + ivalue::checkCustomClassType(expected_type, type().get()); + auto userObj = + c10::static_intrusive_pointer_cast(obj->getSlot(0).toCapsule()); + return userObj; +} + +template +c10::intrusive_ptr IValue::toCustomClass() const& { + static_assert( + std::is_base_of::value == true, + "toCustomClass requires that template parameter T must inherit " + "from torch::CustomClassHolder"); + auto obj = toObject(); + TORCH_CHECK( + obj->slots().size() == 1, + "Tried to cast IValue to custom class but it did " + "not contain a custom class!"); + const auto* expected_type = c10::getCustomClassType>().get(); + ivalue::checkCustomClassType(expected_type, type().get()); + auto userObj = + c10::static_intrusive_pointer_cast(obj->getSlot(0).toCapsule()); + return userObj; +} + +template +T generic_to(IValue ivalue, _fake_type) { + using ElemType = typename std::remove_pointer::type::element_type; + return std::move(ivalue).toCustomClass(); +} + +template +tagged_capsule generic_to(IValue ivalue, _fake_type>) { + return tagged_capsule{std::move(ivalue)}; +} + +template +c10::List generic_to(IValue ivalue, _fake_type>) { + return impl::toTypedList(std::move(ivalue).toList()); +} + +template +static T createVectorLikeFromList(const c10::detail::ListImpl* impl) { + T result; + result.reserve(impl->list.size()); + for (size_t i = 0, N = impl->list.size(); i < N; ++i) { + result.push_back(impl->list[i].to()); + } + return result; +} + +template +static std::vector createVectorFromList(const c10::detail::ListImpl* impl) { + return createVectorLikeFromList>(impl); +} + +template +std::vector createVectorFromList(const c10::List& impl) { + std::vector result; + result.reserve(impl.size()); + for (size_t i = 0, N = impl.size(); i < N; ++i) { + result.push_back(impl[i]); + } + return result; +} + +template +OptionalArray generic_to(IValue ivalue, _fake_type>) { + if (ivalue.isNone()) { + return {}; + } + return createVectorFromList( + std::move(ivalue).to>() + ); +} + +namespace detail { +template +std::array generic_to_array( + IValue ivalue, + _fake_type>, + std::index_sequence) { + // We need to do a deep copy of the array because there might be other + // references to this same IValue that also use the list. We can't just + // move the elements out. + auto list = std::move(ivalue).to>(); + TORCH_CHECK( + list.size() == sizeof...(I), + "Tried to convert a List with ", + list.size(), + " elements to a fixed-size array of size ", + sizeof...(I)); + return {list[I]...}; +} +} // namespace detail + +template +std::array generic_to( + IValue ivalue, + _fake_type> ft) { + return detail::generic_to_array(ivalue, ft, std::make_index_sequence()); +} + +template +c10::Dict generic_to( + IValue ivalue, + _fake_type>) { + return impl::toTypedDict(std::move(ivalue).toGenericDict()); +} + +template +C10_DEPRECATED_MESSAGE( + "IValues based on std::unordered_map are slow and deprecated. 
Please use c10::Dict instead.") +std::unordered_map generic_to( + IValue ivalue, + _fake_type>) { + std::unordered_map specialized_dict; + + for (const auto& item : std::move(ivalue).toGenericDict()) { + specialized_dict[item.key().template to()] = item.value().template to(); + } + + return specialized_dict; +} + +template +c10::optional generic_to(IValue ivalue, _fake_type>) { + if (ivalue.isNone()) { + return c10::nullopt; + } + return std::move(ivalue).to(); +} + +namespace detail { +template +Tuple generic_to_tuple_impl( + const ivalue::TupleElements& t, + std::index_sequence) { + return std::make_tuple( + t[INDEX].to::type>()...); +} +} // namespace detail + +template < + typename... Args, + typename Indices = std::make_index_sequence, + std::enable_if_t< + !guts::disjunction< + std::is_lvalue_reference..., + guts::negation>...>::value, + std::nullptr_t> = nullptr> +std::tuple generic_to(IValue ivalue, _fake_type>) { + const auto& vals = ivalue.toTupleRef().elements(); + TORCH_CHECK(vals.size() == sizeof...(Args)); + return detail::generic_to_tuple_impl>(vals, Indices{}); +} + +template +inline T IValue::to() && { + return generic_to(std::move(*this), _fake_type{}); +} + +template <> +inline c10::optional IValue::to() && { + // In the default implementation, the IValue is destroyed with std::move. + // But if the unboxed type is optional we cannot destroy + // the IValue. + return generic_to(*this, _fake_type>{}); +} + +template +inline typename c10::detail::ivalue_to_const_ref_overload_return::type IValue::to() const& { + return generic_to(*this, _fake_type{}); +} + +inline c10::List IValue::toIntList() && { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toIntList() const& { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline std::vector IValue::toIntVector() const { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toIntVector on null intrusive_ptr IValue"); + return createVectorFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline at::DimVector IValue::toDimVector() const { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toDimVector on null intrusive_ptr IValue"); + return createVectorLikeFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List IValue::toDoubleList() && { + AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toDoubleList() const& { + AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline std::vector IValue::toDoubleVector() const { + AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toDoubleVector on null intrusive_ptr IValue"); + return createVectorFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List> IValue::toComplexDoubleList() && { + AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind()); + return c10::List>(moveToIntrusivePtr()); +} +inline c10::List> IValue::toComplexDoubleList() 
const& { + AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind()); + return c10::List>(toIntrusivePtr()); +} +inline std::vector> IValue::toComplexDoubleVector() const { + AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toComplexDoubleVector on null intrusive_ptr IValue"); + return createVectorFromList>( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List IValue::toBoolList() && { + AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toBoolList() const& { + AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline c10::List IValue::toTensorList() && { + AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toTensorList() const& { + AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline std::vector IValue::toTensorVector() const { + AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toTensorVector on null intrusive_ptr IValue"); + return createVectorFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List> IValue::toOptionalTensorList() && { + AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); + return c10::List>(moveToIntrusivePtr()); +} +inline c10::List> IValue::toOptionalTensorList() const& { + AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); + return c10::List>(toIntrusivePtr()); +} +inline std::vector> IValue::toOptionalTensorVector() const { + AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toOptionalTensorVector on null intrusive_ptr IValue"); + return createVectorFromList>( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List IValue::toList() && { + AT_ASSERT(isList(), "Expected GenericList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toList() const& { + AT_ASSERT(isList(), "Expected GenericList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline c10::ArrayRef IValue::toListRef() const { + AT_ASSERT(isList(), "Expected GenericList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toListRef on null intrusive_ptr IValue"); + return static_cast(payload.u.as_intrusive_ptr) + ->list; +} +inline c10::Dict IValue::toGenericDict() && { + AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind()); + return c10::Dict(moveToIntrusivePtr()); +} +inline c10::Dict IValue::toGenericDict() const& { + AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind()); + return c10::Dict(toIntrusivePtr()); +} +inline c10::intrusive_ptr IValue::toTuple() && { + AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toTuple() const& { + AT_ASSERT(isTuple(), "Expected Tuple but got ", 
tagKind()); + return toIntrusivePtr(); +} +inline ivalue::Tuple& IValue::toTupleRef() const { + AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toTupleRef on null intrusive_ptr IValue"); + return *static_cast( + payload.u.as_intrusive_ptr); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Tuple) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} +template < + typename... Args, + std::enable_if_t< + !guts::disjunction< + std::is_lvalue_reference..., + guts::negation>...>::value, + std::nullptr_t>> +inline IValue::IValue(const std::tuple& t) + : IValue( + std::move(c10::guts::apply(c10::ivalue::Tuple::create, t))) { +} + +template < + typename... Args, + std::enable_if_t< + !guts::disjunction< + std::is_lvalue_reference..., + guts::negation>...>::value, + std::nullptr_t>> +inline IValue::IValue(std::tuple&& t) + : IValue( + std::move(c10::guts::apply(c10::ivalue::Tuple::create, std::move(t)))) { +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::String) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} +inline IValue::IValue(std::string v) + : IValue(ivalue::ConstantString::create(std::move(v))) {} + +inline IValue::IValue(c10::impl::GenericList v) + : tag(Tag::GenericList) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release()); +} + +template > +inline IValue::IValue(c10::List&& v) : IValue(impl::toList(std::move(v))) {} +template > +inline IValue::IValue(const c10::List& v) : IValue(impl::toList(v)) {} +template > +inline IValue::IValue(at::ArrayRef v) : IValue(c10::List()) { + auto list = to>(); + list.reserve(v.size()); + for (const auto& e : v) { + list.push_back(e); + } +} +template > +inline IValue::IValue(at::ArrayRef v) : IValue() { + auto vi = c10::asIntArrayRefSlowOpt(v); + if (vi.has_value()) { + // This list is entirely integers; ensure it is typed as + // an IntList so toIntList works + *this = IValue(*vi); + } else { + // This list has SymInts; type it as a SymInt + *this = IValue(impl::toList(c10::List())); + auto list = to>(); + list.reserve(v.size()); + for (const auto& e : v) { + list.push_back(e); + } + } +} +template > +inline IValue::IValue(at::OptionalArrayRef mb_v) : IValue() { + if (!mb_v.has_value()) return; + *this = IValue(*mb_v); +} +template > +inline IValue::IValue(const std::vector& v) : IValue() { + *this = IValue(at::ArrayRef(v)); +} +template > +inline IValue::IValue(const std::vector& v) : IValue(c10::List()) { + auto list = to>(); + list.reserve(v.size()); + for (const auto& e : v) { + list.push_back(e); + } +} +template > +inline IValue::IValue(c10::OptionalArrayRef v) : IValue() { + if (v.has_value()) { + *this = IValue(std::move(*v)); + } +} + +template +inline IValue::IValue(std::array v) : IValue(c10::List()) { + auto list = to>(); + list.reserve(v.size()); + for (auto& e : v) { + list.push_back(std::move(e)); + } +} + +template > +inline IValue::IValue(c10::IListRef v) : IValue() { + constexpr bool boxed_type_constructs_ivalue = + std::is_constructible::boxed_type>::value; + // First, we try to use the boxed value. + // If we fail (either it's not in the boxed state, or its boxed type + // can not construct an IValue), we fallback to copying the list. 
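+  // e.g. (hypothetical values): an IListRef<at::Tensor> backed by a boxed
+  // c10::List<at::Tensor> takes the first branch below and reuses the boxed
+  // list directly, with no per-element copy; one backed by a plain ArrayRef
+  // is copied element by element into a fresh c10::List instead.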
+ if (boxed_type_constructs_ivalue && v.isBoxed()) { + *this = IValue(impl::toList(v.toBoxed())); + } else { + c10::List list; + list.reserve(v.size()); + for (const auto& t : v) { + list.push_back(t); + } + *this = IValue(impl::toList(std::move(list))); + } +} + +inline IValue::IValue(c10::impl::GenericDict v) + : tag(Tag::GenericDict) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release()); +} +template +inline IValue::IValue(c10::Dict v) + : IValue(impl::toGenericDict(std::move(v))) {} + +template +inline IValue::IValue(std::unordered_map v) + : IValue(Dict()) { + auto dict = to>(); + dict.reserve(v.size()); + for (auto& e : v) { + dict.insert(std::move(e.first), std::move(e.second)); + } +} + +template > +inline IValue::IValue(c10::optional v) : IValue() { + if (v.has_value()) { + *this = IValue(std::move(*v)); + } +} + +inline IValue::IValue(c10::nullopt_t) : IValue() {} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Object) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::PyObject) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Enum) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue IValue::make_capsule( + intrusive_ptr blob) { + IValue iv; + iv.tag = Tag::Capsule; + iv.payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release()); + return iv; +} + +template < + typename T, + std::enable_if_t::value, int>> +IValue::IValue(c10::intrusive_ptr custom_class) { + auto classType = []() { + try { + return c10::getCustomClassType>(); + } catch (const c10::Error&) { + throw c10::Error( + "Trying to instantiate a class that isn't a registered custom class: " + + std::string(c10::util::get_fully_qualified_type_name()), + ""); + } + }(); + auto ivalue_obj = c10::ivalue::Object::create(std::move(classType), /* numSlots */1); + ivalue_obj->setSlot(0, IValue::make_capsule(std::move(custom_class))); + payload.u.as_intrusive_ptr = null_to_undefined_tensor(ivalue_obj.release()); + tag = Tag::Object; +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Future) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::RRef) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Quantizer) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +template +inline IValue::IValue(c10::complex c) + : tag(Tag::ComplexDouble) { + auto v = c10::make_intrusive(c); + payload.u.as_intrusive_ptr = v.release(); +} + +inline const std::string& IValue::toStringRef() const { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toStringRef on null intrusive_ptr IValue"); + return static_cast( + payload.u.as_intrusive_ptr) + ->string(); +} +inline c10::optional> IValue:: + toOptionalStringRef() const { + if (isNone()) { + return c10::nullopt; + } + AT_ASSERT(isString(), "Expected optional but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toOptionalStringRef on null intrusive_ptr IValue"); + return std::reference_wrapper( + static_cast(payload.u.as_intrusive_ptr) + 
->string()); +} + +inline c10::string_view IValue::toStringView() const { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toStringView on null intrusive_ptr IValue"); + return static_cast( + payload.u.as_intrusive_ptr) + ->string_view(); +} + +inline PyObject* IValue::toPyObject() const { + return toPyObjectHolder()->getPyObject(); +} + +template +inline optional IValue::toOptional() { + if (this->isNone()) { + return nullopt; + } + return this->to(); +} + +template +inline optional IValue::toOptional() const { + if (this->isNone()) { + return nullopt; + } + return this->to(); +} + +inline bool IValue::isCustomClass() const { + return torch::isCustomClass(*this); +} + +inline bool IValue::isSameIdentity(const IValue& rhs) const { + // We choose to not use memcmp for payload check due to potential random + // padding characters on union type + + // Semantics: + // 1. Immutable primitive values of the same type (Int, Double, None, Bool, + // Str) return value equality + // 2. If it is a tensor type, we need to take undefined tensor into account + // 3. Undefined_tensor is None and vice versa should be true + // 4. If it is a reference type (i.e. isIntrusivePtr()), then is True when + // the pointed-to object is the same. + // 5. False for all other comparisons. + if (this->isNone() && rhs.isNone()) { + return true; + } else if (this->isBool() && rhs.isBool()) { + // for bool type, do equality check + return this->toBool() == rhs.toBool(); + } else if (this->isTensor() && rhs.isTensor()) { + return this->payload.as_tensor.is_same(rhs.payload.as_tensor); + } else if (this->isTensor() && rhs.isNone()) { + // special case: undefined tensor and None are the same identity + return !this->payload.as_tensor.defined(); + } else if (this->isNone() && rhs.isTensor()) { + // special case: undefined tensor and None are the same identity + return !rhs.payload.as_tensor.defined(); + } else if (this->isInt() && rhs.isInt()) { + return this->toInt() == rhs.toInt(); + } else if (this->isDouble() && rhs.isDouble()) { + return this->toDouble() == rhs.toDouble(); + } else if (this->isString() && rhs.isString()) { + return this->toStringRef() == rhs.toStringRef(); + } else { + // for objects holding in IValue, do shallow compare on pointer address to + // testify the identity + return this->isIntrusivePtr() && rhs.isIntrusivePtr() && + this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr; + } +} + +namespace ivalue { +namespace detail { + +template +IValue from_(T&& x, std::true_type) { + return IValue(std::forward(x)); +} +template +IValue from_(c10::intrusive_ptr x, std::false_type) { + return IValue(std::move(x)); +} +template +IValue from_(T&& /*x*/, std::false_type) { + static_assert( + guts::false_t::value, + "You are calling from with a type that it doesn't support, and isn't a potential custom class (ie: is an intrusive_ptr)"); + return IValue(); +} +} // namespace detail + +template +IValue from(T&& x) { + return detail::from_( + std::forward(x), typename std::is_constructible::type{}); +} + +} // namespace ivalue + + +template <> +struct MaybeOwnedTraits { + using owned_type = IValue; + using borrow_type = IValue; + + static borrow_type createBorrow(const owned_type& from) { + if (!from.isPtrType()) { + return from; + } + if (from.isTensor()) { + return IValue(MaybeOwnedTraits::createBorrow(from.toTensor())); + } else { + return IValue(from.payload, 
from.tag); + } + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.clearToNone(); + if (!rhs.isPtrType()) { + lhs = rhs; + } else if (rhs.isTensor()) { + lhs = IValue(MaybeOwnedTraits::createBorrow(rhs.toTensor())); + } else { + lhs = IValue(rhs.payload, rhs.tag); + } + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.clearToNone(); + } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type&) { + return true; + } +}; + +template <> +struct IValue::TagType { + static TORCH_API c10::TypePtr get(const IValue&); +}; + +template <> +struct IValue::TagType { + static TORCH_API c10::TypePtr get(const IValue&); +}; + +template +TypePtr IValue::type() const { + return IValue::TagType::get(*this); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/ivalue_to.h b/voice_bridge/torch/include/ATen/core/ivalue_to.h new file mode 100644 index 0000000000000000000000000000000000000000..52af58083596939106fe120abfbd360f0068db67 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/ivalue_to.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace at { +class Tensor; +} // namespace at + +namespace c10 { +struct IValue; +namespace detail { +// Determine the return type of `IValue::to() const &`. It's a const +// reference when possible and a copy otherwise. It is in this +// separate header so that List can use it as well. +template +struct ivalue_to_const_ref_overload_return { + using type = T; +}; + +template<> +struct ivalue_to_const_ref_overload_return { + using type = const at::Tensor&; +}; + +template<> +struct ivalue_to_const_ref_overload_return { + using type = const std::string&; +}; + +template<> +struct ivalue_to_const_ref_overload_return { + using type = const IValue&; +}; + +} // namespace detail +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/jit_type.h b/voice_bridge/torch/include/ATen/core/jit_type.h new file mode 100644 index 0000000000000000000000000000000000000000..e554bd586272fe6efaea21863f31016f307cab52 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/jit_type.h @@ -0,0 +1,2351 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Function; +} // namespace jit +} // namespace torch + +namespace c10 { + +template +class Dict; +struct IValue; +struct FunctionSchema; +struct NamedType; +using OptNameList = c10::optional>; + +void standardizeVectorForUnion(std::vector& reference, std::vector* to_fill); +void standardizeVectorForUnion(std::vector* to_flatten); + +inline bool is_contiguous_strides( + const IntArrayRef sizes, + const IntArrayRef strides) { + int n_dim = static_cast(sizes.size()); + if (n_dim == 0) { + return true; + } + + if (strides[n_dim - 1] != 1) { + return false; + } + + for (int i = n_dim - 2; i >= 0; i--) { + if (strides[i] != strides[i + 1] * sizes[i + 1]) { + return false; + } + } + return true; +} + +struct AnyType; +using AnyTypePtr = SingletonTypePtr; +// Any is the top of the type hierarchy, all other types are subtypes +// T <: Any, forall T +struct TORCH_API AnyType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + 
return "Any"; + } + static const TypeKind Kind = TypeKind::AnyType; + // global singleton + static AnyTypePtr get(); + + private: + AnyType() : Type(TypeKind::AnyType) {} +}; + +inline std::string toString(const Type& type) { + return type.str(); +} + +// Shim for compatibility with code that uses TypePtr. +inline std::string toString(const TypePtr& typePtr) { + return toString(*typePtr); +} + +inline bool operator!=(const Type& lhs, const Type& rhs) { + return !(lhs == rhs); +} + +// common base for all types that have a single sub element +// e.g. Future[T], Optional[T], List[T] +template +struct SingleElementType : public SharedType { + static const TypeKind Kind = K; + + const TypePtr& getElementType() const { + return elem; + } + + bool hasFreeVariables() const override { + return getElementType()->hasFreeVariables(); + } + + at::ArrayRef containedTypes() const override { + return elem; + } + + bool equals(const Type& rhs) const override { + if (auto rhs_ = rhs.cast()) { + return *getElementType() == *rhs_->getElementType(); + } + return false; + } + + protected: + SingleElementType(TypePtr elem) : SharedType(Kind), elem(std::move(elem)) { + if (!this->elem) { + throw std::runtime_error(c10::str( + "Can not create ", typeKindToString(Kind), " with None type")); + } + } + + private: + TypePtr elem; +}; + +struct UnionType; +using UnionTypePtr = std::shared_ptr; +struct TORCH_API UnionType : public SharedType { + friend struct Type; + + static const TypeKind Kind = TypeKind::UnionType; + + bool isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const override; + + std::string str() const override; + + static UnionTypePtr create(std::vector reference); + + bool equals(const Type& rhs) const override; + + bool isUnionType() const override { + return true; + } + + at::ArrayRef containedTypes() const override { + return types_; + } + + // For testing purposes only + at::ArrayRef getTypes() const { + return types_; + } + + TypePtr createWithContained(std::vector contained_types) const override { + return create(std::move(contained_types)); + } + + bool canHoldType(const Type& type) const; + + bool hasFreeVariables() const override { + return has_free_variables_; + } + + c10::optional toOptional() const; + + c10::optional subtractTypeSet(std::vector& to_subtract) const; + + protected: + explicit UnionType(std::vector types, TypeKind kind=TypeKind::UnionType); + std::string annotation_str_impl(TypePrinter printer = nullptr) const override; + std::string unionStr( + TypePrinter printer = nullptr, + bool is_annotation_str = false) const; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool has_free_variables_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector types_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool can_hold_none_; + +}; + +struct OptionalType; +using OptionalTypePtr = std::shared_ptr; +// This type represents an optional type. There is one `Optional` for +// each element type. 
`Optional[T]` can accept both `T` and +// `None`(`c10::nullopt` in C++) +// Subtype hierarchy for Optional: +// - Optional[T] <: Optional[R] iff T <: R +// - T <: Optional[R] if T <: R +// - None <: Optional[T] for all T +// - Optional[T] == Union[T, None] for all T +struct TORCH_API OptionalType : public UnionType { + static OptionalTypePtr create(TypePtr contained); + + static const TypeKind Kind = TypeKind::OptionalType; + + friend struct Type; + + bool equals(const Type& rhs) const override; + + const TypePtr& getElementType() const { + return contained_; + } + + at::ArrayRef containedTypes() const override { + return contained_; + } + + std::string str() const override { + std::stringstream ss; + ss << getElementType()->str() << "?"; + return ss.str(); + } + + TypePtr createWithContained( + std::vector contained_types) const override { + AT_ASSERT(contained_types.size() == 1); + return create(std::move(contained_types[0])); + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + bool isUnionType() const override { + return true; + } + + // common cast Optional[Tensor] for undefined tensor type + static TypePtr ofTensor(); + // + // global singleton + static TypePtr get(TypePtr inner); + + private: + explicit OptionalType(TypePtr contained); + + TypePtr contained_; + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + std::stringstream ss; + ss << "Optional[" << getElementType()->annotation_str(printer) << "]"; + return ss.str(); + } +}; + +template +inline c10::optional merge_primitive( + const c10::optional& a, + const c10::optional& b) { + if (a.has_value() && b.has_value() && a.value() == b.value()) { + return a; + } + return c10::optional{}; +} + +// If we see `a + b + c` and know that a, b, and c are the same size and have +// two dimensions (WxH), then we can generate a fused kernel for them. That +// fused kernel would likely have indexing math to handling both the W and H +// dimensions. However, if we knew the WxH dimensions were contiguous, we can +// pretend like we only have a single dimension, simplifying the indexing logic. +// This can be performed even if the dimensions are transposed, +// as long as a, b, and c are transposed in the same way. +// We'd like to have the compiler be able to do this dimensionality reduction, +// but simply knowing sizes is not enough. +// We can extend profiling to also record stride information. 
+// Rather than recording specific strides,
+// we can simply order the strides from smallest to largest with
+// `stride_indices`. A contiguity marker on the smallest stride (c0) indicates
+// that the stride is precisely 1; otherwise, a contiguity marker means that
+// $stride_n = size_{n-1}*stride_{n-1}$
+struct TORCH_API Stride {
+  Stride() {}
+  Stride(
+      const c10::optional<size_t>& stride_index,
+      c10::optional<bool> contiguous,
+      const c10::optional<size_t>& stride)
+      : stride_index_(stride_index), contiguous_(contiguous), stride_(stride) {}
+
+  bool operator==(const Stride& b) const {
+    return stride_index_ == b.stride_index_ && contiguous_ == b.contiguous_ &&
+        stride_ == b.stride_;
+  }
+
+  bool isComplete() const {
+    return stride_index_ && contiguous_ && stride_;
+  }
+
+  c10::optional<size_t> stride_index_;
+  c10::optional<bool> contiguous_;
+  c10::optional<size_t> stride_;
+};
+
+template <>
+inline c10::optional<Stride> merge_primitive(
+    const c10::optional<Stride>& a,
+    const c10::optional<Stride>& b) {
+  c10::optional<Stride> left = a;
+  c10::optional<Stride> right = b;
+  if (!left.has_value()) {
+    left = {Stride()};
+  }
+  if (!right.has_value()) {
+    right = {Stride()};
+  }
+
+  auto merged_index =
+      merge_primitive(left->stride_index_, right->stride_index_);
+  auto merged_cont = merge_primitive(left->contiguous_, right->contiguous_);
+  auto merged_stride = merge_primitive(left->stride_, right->stride_);
+  auto r = Stride(merged_index, merged_cont, merged_stride);
+  // normalize
+  if (!r.stride_index_.has_value() && !r.contiguous_.has_value() &&
+      !r.stride_.has_value()) {
+    return c10::optional<Stride>{};
+  }
+
+  return r;
+}
+
+struct TORCH_API ShapeSymbol {
+  // needed for use in `std::map`
+  ShapeSymbol() : value_(-1) {}
+  // is this symbol a fixed/static dimension
+  bool is_static() const {
+    return value_ >= 0;
+  };
+  bool operator==(const ShapeSymbol& b) const {
+    return value_ == b.value_;
+  }
+  bool operator<(const ShapeSymbol& b) const {
+    return value_ < b.value_;
+  }
+
+  static ShapeSymbol fromStaticSize(int64_t val) {
+    return ShapeSymbol(val);
+  }
+  int64_t static_size() const {
+    TORCH_CHECK(is_static());
+    return value_;
+  };
+
+  int64_t value() const {
+    return value_;
+  };
+
+  static ShapeSymbol newSymbol() {
+    return fromStaticSize(-static_cast<int64_t>(++num_symbols));
+  };
+  friend TORCH_API std::ostream& operator<<(
+      std::ostream& os,
+      const ShapeSymbol& s);
+
+ private:
+  ShapeSymbol(int64_t val) : value_(val) {}
+  int64_t value_;
+  static std::atomic<size_t> num_symbols;
+};
+
+inline ShapeSymbol merge_primitive(
+    const ShapeSymbol& a,
+    const ShapeSymbol& b) {
+  if (a.is_static() && b.is_static() && a == b) {
+    return a;
+  }
+  return ShapeSymbol::newSymbol();
+}
+
+// Shape of a Tensor represented with ShapeSymbol's. Unranked, ranked unknown
+// dims, partially known and fully known shapes are all supported.
+struct TORCH_API SymbolicShape {
+  // Unranked shape constructor.
+  SymbolicShape() : dims_(c10::nullopt) {}
+
+  // Known rank but unknown dimensions.
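+  /* Editor's note: an illustrative sketch, not part of the original header.
+     The constructors below are typically used like this (values hypothetical):
+
+       SymbolicShape unranked;                          // rank and dims unknown
+       SymbolicShape ranked(c10::optional<size_t>(3));  // rank 3, dims unknown
+       SymbolicShape mixed({c10::optional<int64_t>(2),
+                            c10::nullopt,
+                            c10::optional<int64_t>(4)}); // [2, *, 4]
+       SymbolicShape concrete(c10::IntArrayRef{2, 3, 4}); // fully known
+
+     The rank-only constructor directly below fills each dimension with a
+     fresh ShapeSymbol. */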
+ SymbolicShape(c10::optional rank) : dims_(c10::nullopt) { + if(!rank) { + return; + } + + std::vector shape_symbols; + shape_symbols.reserve(*rank); + for(size_t i = 0; i < *rank; ++i) { + shape_symbols.push_back(ShapeSymbol::newSymbol()); + } + dims_ = shape_symbols; + } + + // Mix of known and unknown ranks + SymbolicShape(const std::vector>& dims) { + std::vector shape_symbols; + shape_symbols.reserve(dims.size()); + for(c10::optional dim: dims) { + if(!dim) { + shape_symbols.push_back(ShapeSymbol::newSymbol()); + } else { + shape_symbols.push_back(ShapeSymbol::fromStaticSize(*dim)); + } + } + dims_ = shape_symbols; + } + + void dump() const; + + SymbolicShape(std::vector dims) : dims_(std::move(dims)) {} + + SymbolicShape(c10::IntArrayRef dims) { + std::vector shape_symbols; + shape_symbols.reserve(dims.size()); + for(int64_t dim : dims) { + shape_symbols.push_back(ShapeSymbol::fromStaticSize(dim)); + } + dims_ = shape_symbols; + } + + ShapeSymbol operator[](size_t i) const { + if (!dims_) { + throw std::runtime_error("Rank isn't fixed"); + } + return (*dims_).at(i); + } + + ShapeSymbol at(size_t i) const { + if (!dims_) { + throw std::runtime_error("Rank isn't fixed"); + } + return (*dims_).at(i); + } + + // Returns rank or nullopt in case of unranked shape. + c10::optional rank() const { + if(!dims_) { + return c10::nullopt; + } + return dims_->size(); + } + + c10::optional> sizes() const { + return dims_; + } + + c10::optional> symbolicDims() const { + if (!dims_) { + return c10::nullopt; + } + auto symbolic_dims = std::vector(); + for (const ShapeSymbol& s : *dims_) { + symbolic_dims.push_back(!s.is_static()); + } + return symbolic_dims; + } + + // Checks whether the shape is fully defined/complete, ie. rank and sizes + // of every dimension are known. + bool isComplete() const { + if(!dims_) { + return false; + } + for(auto d : *dims_) { + if(!d.is_static()) { + return false; + } + } + return true; + } + + // Create new SymbolicShape that is result of merging self and another + // SymbolicShape. Only dimensions that are static and equal will be + // preserved. + // If either of two shapes are of unknown rank or they have unmatching rank, + // result will be unranked. 
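+  /* Editor's note: an illustrative sketch, not part of the original header.
+     Merging keeps only dimensions that are static and equal on both sides:
+
+       SymbolicShape a(c10::IntArrayRef{2, 3});
+       SymbolicShape b(c10::IntArrayRef{2, 4});
+       SymbolicShape m = a.merge(b);  // rank 2; m[0] is still static (2),
+                                      // m[1] becomes a fresh symbol
+
+     Merging with an unranked shape, or across mismatched ranks, yields an
+     unranked result. */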
+ SymbolicShape merge(const SymbolicShape& other) const; + + friend bool operator==(const SymbolicShape& lhs, const SymbolicShape& rhs) { + return lhs.dims_ == rhs.dims_; + } + + friend bool operator!=(const SymbolicShape& lhs, const SymbolicShape& rhs) { + return !(lhs == rhs); + } + + private: + c10::optional> dims_; +}; + +namespace detail { +inline bool isComplete(const Stride& s) { + return s.isComplete(); +} + +template +inline bool isComplete(const T& /*t*/) { + return true; +} +} + +template +struct VaryingShape { + using ListOfOptionalElements = std::vector>; + VaryingShape(const std::vector& vec) + : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} + + VaryingShape(c10::ArrayRef vec) + : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} + + VaryingShape(c10::optional size = c10::nullopt) : dims_(c10::nullopt) { + if (size) { + dims_ = ListOfOptionalElements(*size); + } + } + + VaryingShape(ListOfOptionalElements dims) : dims_(std::move(dims)) {} + + VaryingShape(size_t size) : VaryingShape(c10::optional(size)) {} + + bool operator==(const VaryingShape& other) const { + return dims_ == other.dims_; + } + + const c10::optional &operator[](size_t i) const { + if (!dims_) { + throw std::runtime_error("Rank isn't fixed"); + } + return (*dims_).at(i); + } + + c10::optional size() const { + if (!dims_) { + return c10::nullopt; + } + const auto& dims = dims_.value(); + return dims.size(); + } + + const c10::optional& sizes() const { + return dims_; + } + + TORCH_API VaryingShape merge(const VaryingShape& other) const; + + c10::optional> concrete_sizes() const { + if (!dims_) { + return c10::nullopt; + } + std::vector sizes; + for (auto d : *dims_) { + if (!d) { + return c10::nullopt; + } + sizes.push_back(d.value()); + } + return sizes; + } + + bool isComplete() const { + if (!dims_) { + return false; + } + for (auto d : *dims_) { + if (!d || !detail::isComplete(*d)) { + return false; + } + } + return true; + } + + private: + c10::optional dims_; +}; + +struct TensorType; +// TODO: investigate making this SingletonOrSharedTypePtr +using TensorTypePtr = std::shared_ptr; +// This type represents a single Tensor with a specific size +struct TORCH_API TensorType : public SharedType { + static TensorTypePtr create(const at::Tensor& t); + + // used by TensorType::create(size_t dim) which in turn used by + // shape_analysis.cpp + static TensorTypePtr create( + c10::optional scalar_type, + c10::optional device, + const VaryingShape& sizes, + const VaryingShape& strides, + c10::optional requires_grad, + c10::optional undefined = false, + bool tensor_contiguity = false); + + static TensorTypePtr create( + c10::optional scalar_type, + c10::optional device, + const SymbolicShape& sizes, + const VaryingShape& stride_, + c10::optional requires_grad, + c10::optional undefined = false); + + static TensorTypePtr create( + c10::optional scalar_type, + c10::optional device, + c10::optional dim, + c10::optional requires_grad); + + // overloaded create variadic template argument as it could not distinguish + // initializer list + static TensorTypePtr createContiguous( + at::ScalarType scalar_type, + at::Device device, + at::IntArrayRef sizes); + + static TypePtr fromNumberType(const Type& typ); + static TypePtr fromBoolType(); + + c10::optional dim() const { + return sizes().size(); + } + + VaryingShape sizes() const; + + VaryingShape strides() const; + + const VaryingShape& stride_properties() const { + return strides_; + } + + c10::optional device() const { + return device_; + 
} + c10::optional scalarType() const { + return scalar_type_; + } + c10::optional requiresGrad() const { + return requires_grad_; + } + bool requires_grad() const override { + return requires_grad_ ? *requires_grad_ : true; + } + + bool equals(const Type& rhs) const override; + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + std::string str() const override; + + std::string repr_str() const override { + if (isInferredType()) { + return str() + " (inferred)"; + } else { + return str(); + } + } + + c10::optional numel() const { + size_t prod = 1; + const auto& shape = sizes(); + + for (size_t i = 0; i < shape.size(); i++) { + if (!shape[i]) { + return c10::optional{}; + } + prod *= shape[i].value(); + } + return prod; + } + + TensorTypePtr withRequiresGrad(c10::optional s) { + auto copy = clone(); + copy->requires_grad_ = s; + return copy; + } + + TensorTypePtr withScalarType(c10::optional st) { + auto copy = clone(); + copy->scalar_type_ = st; + return copy; + } + + TensorTypePtr withDim(c10::optional d) { + auto copy = clone(); + // withDim is only used by the legacy executor + // that only cares about the rank, so create dummy symbols)) : + copy->sizes_ = SymbolicShape(d); + copy->strides_ = VaryingShape(d); + return copy; + } + + TensorTypePtr withStrides(VaryingShape sstrides) const { + auto cloned = clone(); + cloned->strides_ = sstrides; + return cloned; + } + + TensorTypePtr withSizesStrides( + at::IntArrayRef sizes, + at::IntArrayRef strides) const { + auto cloned = clone(); + auto ssizes = SymbolicShape(sizes); + cloned->sizes_ = ssizes; + cloned->strides_ = computeStrideProps(sizes, strides); + return cloned; + } + + TensorTypePtr withSymbolicShapes(SymbolicShape ssizes) const { + auto cloned = clone(); + cloned->sizes_ = std::move(ssizes); + return cloned; + } + + TensorTypePtr withSizes(at::IntArrayRef sizes) const { + return withSizesStrides( + sizes, contiguousStridesOf(sizes)); + } + + TensorTypePtr withDevice(const c10::optional device) const { + auto copy = clone(); + copy->device_ = device; + return copy; + } + + TensorTypePtr dimensionedOnly() const { + auto copy = clone(); + copy->sizes_ = SymbolicShape(sizes().size()); + copy->strides_ = VaryingShape(sizes().size()); + return copy; + } + + TensorTypePtr contiguous() const { + auto cloned = clone(); + TORCH_INTERNAL_ASSERT(sizes().concrete_sizes().has_value()); + auto strides = computeStrideProps( + *sizes().concrete_sizes(), + contiguousStridesOf(*sizes().concrete_sizes())); + cloned->strides_ = strides; + return cloned; + } + + const SymbolicShape& symbolic_sizes() const; + + TensorTypePtr merge(const TensorType& other, bool merge_sizes = true) const; + + bool matchTensor(const at::Tensor& t); + + // is all information about the type specified except for autograd? + // This replaces the notion of a 'CompleteTensorType' that used to exist + // in the type-hierarchy. Excluding require_grad and undefined allows + // this to match the old behavior. 
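+  /* Editor's note: an illustrative sketch, not part of the original header.
+     A type built from a concrete tensor is complete; erasing its sizes is not:
+
+       at::Tensor t = at::zeros({2, 3});
+       auto full = TensorType::create(t);      // scalar type, device, sizes,
+                                               // strides all known
+       auto ranked = full->dimensionedOnly();  // rank only
+       bool c1 = full->isComplete();           // true
+       bool c2 = ranked->isComplete();         // false
+  */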
+ bool isComplete() const { + return scalar_type_ && device_ && sizes_.isComplete() && strides_.isComplete(); + } + + bool isInferredType() const { + return is_inferred_; + } + + static TensorTypePtr getInferred() { + static auto valueInferred = TensorType::create( + /*scalar_type=*/{}, + /*device=*/{}, + /*sizes=*/SymbolicShape(), + /*stride=*/VaryingShape{}, + /*requires_grad=*/{}, + /*undefined=*/false); + valueInferred->is_inferred_ = true; + return valueInferred; + } + + // this property is used by GuardElimination + // please see `checkInputs` for more details + bool isSummarized() const { + return !(isComplete() && requiresGrad().has_value() && + undefined().has_value()); + } + + TensorTypePtr withUndefined() { + auto r = clone(); + r->undefined_ = true; + return r; + } + + TensorTypePtr withPossiblyUndefined() { + auto r = clone(); + r->undefined_ = c10::nullopt; + return r; + } + + c10::optional undefined() const { return undefined_; } + + static const TensorTypePtr& get(); + + static const TypeKind Kind = TypeKind::TensorType; + + static std::vector contiguousStridesOf( + at::IntArrayRef in_sizes, + at::MemoryFormat memory_format = MemoryFormat::Contiguous) { + auto contiguous_fn = [](const at::IntArrayRef& sizes, + const std::vector& dim_order) { + std::vector strides(sizes.size()); + if (sizes.empty()) // zero-dim case + return strides; + + strides[dim_order[0]] = 1; + for (size_t i = 1; i < dim_order.size(); i++) { + auto cur_dim = dim_order[i]; + auto pre_dim = dim_order[i - 1]; + strides[cur_dim] = strides[pre_dim] * sizes[pre_dim]; + } + return strides; + }; + + std::vector dim_order(in_sizes.size()); + if (memory_format == MemoryFormat::ChannelsLast) { + dim_order = {1, 3, 2, 0}; + } else if (memory_format == MemoryFormat::ChannelsLast3d) { + dim_order = {1, 4, 3, 2, 0}; + } else { + auto ndims = in_sizes.size(); + for (size_t i = 0; i < ndims; i++) { + dim_order[i] = ndims - i - 1; // Reverse + } + } + return contiguous_fn(in_sizes, dim_order); + } + + private: + TensorType( + c10::optional scalar_type, + c10::optional device, + const SymbolicShape& sizes, + const VaryingShape& strides, + c10::optional requires_grad, + c10::optional undefined = false); + + TensorTypePtr clone() const { + return TensorTypePtr(new TensorType( + scalar_type_, device_, sizes_, strides_, requires_grad_, undefined_)); + } + + static VaryingShape computeStrideProps( + at::IntArrayRef sizes, + at::IntArrayRef strides, + bool tensor_contiguity = false); + + c10::optional scalar_type_; + c10::optional device_; + SymbolicShape sizes_; + VaryingShape strides_; + c10::optional requires_grad_; + // we exploit the fact certain tensors must be zero in the autograd to + // optimize gradient computation. Such zero tensors are currently implemented + // with `UndefinedTensorImpl.` They can be handled only by special operators + // (e.g. `AutogradAdd`) and their `Tensor::defined()` property returns false. + // Normally, `undefined_` is set to false, unless a type was created + // with `withUndefined` + // This will also mean that `undefined` tensors will fail + // `subtypeOf(TensorType::get())` check + // undefined_ may become `c10::nullopt` if the tensor was observed to be both + // defined and undefined. However, no tensor type starts out with + // `undefined_` set to `c10::nullopt` + c10::optional undefined_; + // Represents whether or not this type was inferred. 
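+  /* Editor's note: an illustrative aside, not part of the original header.
+     TensorType::getInferred() (above) returns the one TensorType with this
+     flag set; it marks types that were guessed rather than annotated:
+
+       auto t = TensorType::getInferred();
+       // t->isInferredType() == true
+       // t->repr_str() == "Tensor (inferred)"
+  */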
+  bool is_inferred_ = false;
+};
+
+struct ListType;
+using ListTypePtr = std::shared_ptr<ListType>;
+struct TORCH_API ListType
+    : public SingleElementType<TypeKind::ListType, ListType> {
+  // It's not exactly a singleton, but there should be exactly one instance of
+  // List[T] for every T
+  friend struct Type;
+  template <typename... T>
+  static ListTypePtr create(T&&... all) {
+    return ListTypePtr(
+        new ListType(std::forward<T>(all)...)); // NOLINT(modernize-make-shared)
+  }
+
+  std::string str() const override {
+    std::stringstream ss;
+    ss << getElementType()->str() << "[]";
+    return ss.str();
+  }
+  TypePtr createWithContained(
+      std::vector<TypePtr> contained_types) const override {
+    return create(std::move(contained_types.at(0)));
+  }
+
+  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
+
+  // global singleton
+  // Given an inner type T and an identifier,
+  // this function will return the global singleton type pointer
+  // for the type List<T>.
+  // The extra "identifier" argument is needed because we have multiple container types
+  // that all re-use this function (List, array, etc.)
+  static TypePtr get(std::string identifier, TypePtr inner);
+
+  // common cast List[Tensor]
+  static ListTypePtr ofTensors();
+  static ListTypePtr ofOptionalTensors();
+  static ListTypePtr ofInts();
+  static ListTypePtr ofFloats();
+  static ListTypePtr ofComplexDoubles();
+  static ListTypePtr ofBools();
+  static ListTypePtr ofStrings();
+
+ private:
+  ListType(TypePtr elem) : SingleElementType(std::move(elem)) {}
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    std::stringstream ss;
+    ss << "List[" << getElementType()->annotation_str(printer) << "]";
+    return ss.str();
+  }
+};
+
+struct DictType;
+using DictTypePtr = std::shared_ptr<DictType>;
+struct TORCH_API DictType : public SharedType {
+  friend struct Type;
+  static const TypeKind Kind = TypeKind::DictType;
+
+  static DictTypePtr create(TypePtr key, TypePtr value) {
+    auto kind = key->kind();
+    if (auto dyn = key->castRaw<DynamicType>()) {
+      kind = dyn->dynamicKind();
+    }
+    switch (kind) {
+      case TypeKind::AnyType:
+      case TypeKind::IntType:
+      case TypeKind::BoolType:
+      case TypeKind::FloatType:
+      case TypeKind::ComplexType:
+      case TypeKind::StringType:
+      case TypeKind::TensorType:
+      case TypeKind::DeviceObjType:
+        return DictTypePtr(new DictType(std::move(key), std::move(value)));
+      default:
+        AT_ERROR(
+            "Cannot create dict for key type '",
+            key->str(),
+            "', only int, float, complex, Tensor, device and string keys are supported");
+    }
+  }
+
+  // aligned with the format in FunctionSchema
+  std::string str() const override {
+    std::stringstream ss;
+    ss << "Dict(" << getKeyType()->str() << ", " << getValueType()->str()
+       << ")";
+    return ss.str();
+  }
+
+  TypePtr createWithContained(
+      std::vector<TypePtr> contained_types) const override {
+    if (contained_types.size() != 2) {
+      throw std::runtime_error("Expected 2 contained types");
+    }
+    return create(std::move(contained_types.at(0)), std::move(contained_types.at(1)));
+  }
+
+  const TypePtr& getKeyType() const {
+    return types.at(0);
+  }
+
+  const TypePtr& getValueType() const {
+    return types.at(1);
+  }
+
+  bool hasFreeVariables() const override {
+    return has_free_variables;
+  }
+
+  at::ArrayRef<TypePtr> containedTypes() const override {
+    return types;
+  }
+
+  bool equals(const Type& rhs) const override {
+    if (auto* dict_rhs = rhs.castRaw<DictType>()) {
+      return *getKeyType() == *(dict_rhs->getKeyType()) &&
+          *getValueType() == *(dict_rhs->getValueType());
+    }
+    return false;
+  }
+
+  // global singleton
+  // Given key and value types and an identifier,
+  // this function will return the global singleton type pointer
+  // for the type Dict<K, V>.
+  // The extra "identifier" argument is needed because we have multiple container types
+  // that all re-use this function (Dict and unordered_map)
+  static TypePtr get(std::string identifier, TypePtr key, TypePtr val);
+
+ private:
+  DictType(TypePtr key, TypePtr value)
+      : SharedType(TypeKind::DictType),
+        has_free_variables(
+            key->hasFreeVariables() || value->hasFreeVariables()) {
+    types.reserve(2);
+    types.push_back(std::move(key));
+    types.push_back(std::move(value));
+  }
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    std::stringstream ss;
+    ss << "Dict[" << getKeyType()->annotation_str(printer) << ", "
+       << getValueType()->annotation_str(printer) << "]";
+    return ss.str();
+  }
+
+  std::vector<TypePtr> types;
+  bool has_free_variables;
+};
+
+struct FutureType;
+using FutureTypePtr = std::shared_ptr<FutureType>;
+
+struct TORCH_API FutureType
+    : public SingleElementType<TypeKind::FutureType, FutureType> {
+  friend struct Type;
+  template <typename... T>
+  static FutureTypePtr create(TypePtr elem) {
+    return FutureTypePtr(
+        new FutureType(std::move(elem))); // NOLINT(modernize-make-shared)
+  }
+
+  std::string str() const override {
+    std::stringstream ss;
+    ss << "Future(" << getElementType()->str() << ")";
+    return ss.str();
+  }
+  TypePtr createWithContained(
+      std::vector<TypePtr> contained_types) const override {
+    return create(std::move(contained_types.at(0)));
+  }
+
+  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
+    if (Type::isSubtypeOfExt(rhs, why_not)) {
+      return true;
+    }
+    if (auto rhs_ = rhs.castRaw<FutureType>()) {
+      return getElementType()->isSubtypeOfExt(*rhs_->getElementType(), why_not);
+    }
+    return false;
+  }
+
+ private:
+  FutureType(TypePtr elem) : SingleElementType(std::move(elem)) {}
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    std::stringstream ss;
+    ss << "Future[" << getElementType()->annotation_str(printer) << "]";
+    return ss.str();
+  }
+};
+
+struct RRefType;
+using RRefTypePtr = std::shared_ptr<RRefType>;
+
+struct TORCH_API RRefType
+    : public SingleElementType<TypeKind::RRefType, RRefType> {
+  friend struct Type;
+  template <typename... T>
+  static RRefTypePtr create(TypePtr elem) {
+    return RRefTypePtr(
+        new RRefType(std::move(elem))); // NOLINT(modernize-make-shared)
+  }
+
+  std::string str() const override {
+    std::stringstream ss;
+    ss << "RRef(" << getElementType()->str() << ")";
+    return ss.str();
+  }
+  TypePtr createWithContained(
+      std::vector<TypePtr> contained_types) const override {
+    return create(std::move(contained_types.at(0)));
+  }
+
+ private:
+  RRefType(TypePtr elem) : SingleElementType(std::move(elem)) {}
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    std::stringstream ss;
+    ss << "RRef[" << getElementType()->annotation_str(printer) << "]";
+    return ss.str();
+  }
+};
+
+// Any should never appear in a named type like a class, namedtuple or
+// interface. If it does, then dynamic type information will be lost in the
+// Pickler, leading to hard-to-track-down bugs that will only occur
+// after saving or loading a model. This is because we rely on the
+// static types in named types to reconstruct type tags of loaded
+// values. Lifting this restriction requires solving the serialization
+// problem first.
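+/* Editor's note: an illustrative sketch, not part of the original header.
+   The helper declared below is intended to be called wherever an attribute is
+   added to a named type; the call site shown here is hypothetical:
+
+     // e.g. while adding an attribute `name` of type `type` to a class:
+     checkNoAny(*this, "attribute", name, type);  // throws if `type` contains
+                                                  // AnyType anywhere inside
+*/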
+TORCH_API void checkNoAny(
+    const Type& base,
+    const char* what,
+    const std::string& attrname,
+    const TypePtr& attrtype);
+
+struct TupleType;
+using TupleTypePtr = std::shared_ptr<TupleType>;
+using NameList = std::vector<std::string>;
+// This type represents a Tuple
+struct TORCH_API TupleType : public NamedType {
+
+  static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
+      const std::vector<std::string>& field_names,
+      const std::vector<TypePtr>& field_types,
+      std::vector<IValue>& field_defaults);
+
+  static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
+      const std::vector<std::string>& field_names,
+      const std::vector<TypePtr>& field_types);
+
+  static TupleTypePtr createNamed(const c10::optional<c10::QualifiedName>& name,
+      const std::vector<c10::string_view>& field_names,
+      const std::vector<TypePtr>& field_types);
+
+  static TupleTypePtr create(
+      std::vector<TypePtr> types) {
+    return TupleTypePtr(new TupleType(
+        std::move(types),
+        c10::nullopt,
+        nullptr)); // NOLINT(modernize-make-shared)
+  }
+  static TupleTypePtr create() {
+    return create({});
+  }
+
+  at::ArrayRef<TypePtr> elements() const {
+    return elements_;
+  }
+
+  bool equals(const Type& rhs) const override;
+  bool isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const override;
+
+  std::string str() const override;
+  bool hasFreeVariables() const override {
+    return has_free_variables_;
+  }
+  at::ArrayRef<TypePtr> containedTypes() const override {
+    return elements_;
+  }
+  TypePtr createWithContained(
+      std::vector<TypePtr> contained_types) const override {
+    return std::shared_ptr<TupleType>(
+        new TupleType(std::move(contained_types), name(), schema()));
+  }
+  const std::shared_ptr<FunctionSchema>& schema() const {
+    return schema_;
+  }
+  c10::optional<std::vector<c10::string_view>> names() const;
+
+  static const TypeKind Kind = TypeKind::TupleType;
+
+ private:
+  template <typename S>
+  static TupleTypePtr createWithSpec(
+      const c10::optional<c10::QualifiedName>& name,
+      const std::vector<S>& field_names,
+      const std::vector<TypePtr>& field_types,
+      std::vector<IValue>& field_defaults);
+
+  TupleType(
+      std::vector<TypePtr> elements_,
+      c10::optional<c10::QualifiedName> name,
+      std::shared_ptr<FunctionSchema> schema);
+
+  bool compare(
+      const Type& rhs,
+      std::function<bool(const Type&, const Type&)> fn) const {
+    if (rhs.kind() != kind()) {
+      return false;
+    }
+
+    const auto& l_elements = elements();
+    const auto& r_elements = rhs.castRaw<TupleType>()->elements();
+    if (l_elements.size() != r_elements.size())
+      return false;
+    for (size_t i = 0; i < l_elements.size(); ++i) {
+      if (!fn(*l_elements[i], *r_elements[i]))
+        return false;
+    }
+    return true;
+  }
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override;
+
+  std::vector<TypePtr> elements_;
+  bool has_free_variables_;
+  std::shared_ptr<FunctionSchema> schema_;
+};
+
+// the common supertype of all Enums, only used in operator registration.
+// EnumType <: AnyEnumType for all Enums
+struct AnyEnumType;
+using AnyEnumTypePtr = SingletonTypePtr<AnyEnumType>;
+struct TORCH_API AnyEnumType final : public Type {
+  bool equals(const Type& rhs) const override {
+    return rhs.kind() == kind();
+  }
+  std::string str() const override {
+    return "AnyEnumType";
+  }
+  static const TypeKind Kind = TypeKind::AnyEnumType;
+  // global singleton
+  static AnyEnumTypePtr get();
+ private:
+  AnyEnumType()
+      : Type(TypeKind::AnyEnumType) {}
+};
+
+struct NumberType;
+using NumberTypePtr = SingletonTypePtr<NumberType>;
+// This type represents a Python number
+// Subtype hierarchy for Number Types (NumberType as the base type):
+// IntType <: NumberType
+// FloatType <: NumberType
+// ComplexType <: NumberType
+//
+// WARNING: if you add a new subtype of NumberType that is not
+// represented by a global singleton, you need to change NumberTypePtr
+// to a SingletonOrSharedTypePtr<NumberType> and deal with NumberType needing to
+// both inherit and not inherit from SharedType!
+struct TORCH_API NumberType : public Type {
+  bool equals(const Type& rhs) const override;
+
+  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
+
+  std::string str() const override {
+    return "Scalar"; // match what PythonArgParser says for clarity
+  }
+  static const TypeKind Kind = TypeKind::NumberType;
+  // global singleton
+  static NumberTypePtr get();
+
+ protected:
+  NumberType(TypeKind kind = TypeKind::NumberType) : Type(kind) {}
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    (void)printer; // Suppress unused variable warning
+    return "number"; // technically not a valid python type, but
+                     // we need to use it when parsing back in annotations
+                     // for implicit conversions
+  }
+};
+
+struct FloatType;
+using FloatTypePtr = SingletonTypePtr<FloatType>;
+// This type represents a Python float number
+struct TORCH_API FloatType : public NumberType {
+  bool equals(const Type& rhs) const override {
+    return rhs.kind() == kind();
+  }
+  std::string str() const override {
+    return "float";
+  }
+  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
+    // NOLINTNEXTLINE(bugprone-parent-virtual-call)
+    return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not);
+  }
+  static const TypeKind Kind = TypeKind::FloatType;
+  // global singleton
+  static FloatTypePtr get();
+
+ private:
+  FloatType() : NumberType(TypeKind::FloatType) {}
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    (void)printer; // Suppress unused variable warning
+    return "float";
+  }
+};
+
+struct ComplexType;
+using ComplexTypePtr = SingletonTypePtr<ComplexType>;
+// This type represents a Python complex number
+struct TORCH_API ComplexType : public NumberType {
+  bool equals(const Type& rhs) const override {
+    return rhs.kind() == kind();
+  }
+  std::string str() const override {
+    return "complex";
+  }
+  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override {
+    // NOLINTNEXTLINE(bugprone-parent-virtual-call)
+    return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not);
+  }
+  static const TypeKind Kind = TypeKind::ComplexType;
+  // global singleton
+  static ComplexTypePtr get();
+
+ private:
+  ComplexType() : NumberType(TypeKind::ComplexType) {}
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    (void)printer; // Suppress unused variable warning
+    return "complex";
+  }
+};
+
+// We need to introduce `SymIntType` to represent the `SymInt` type
+// used in function
schemas e.g. `aten::narrow_copy(... SymInt length) +// `SymInt` will be used to enable tracing arithmetic operations on +// dimension values. Please see [SymInt.h] for more information +struct SymIntType; +using SymIntTypePtr = SingletonTypePtr; +struct TORCH_API SymIntType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "SymInt"; + } + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + // TODO: will become a Union[SymIntNodeImpl|int] in the near future + return "int"; + } + static const TypeKind Kind = TypeKind::SymIntType; + // global singleton + static SymIntTypePtr get(); + + private: + SymIntType() : Type(TypeKind::SymIntType) {} +}; + +struct SymFloatType; +using SymFloatTypePtr = SingletonTypePtr; +struct TORCH_API SymFloatType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "SymFloat"; + } + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + return "float"; + } + static const TypeKind Kind = TypeKind::SymFloatType; + // global singleton + static SymFloatTypePtr get(); + + private: + SymFloatType() : Type(TypeKind::SymFloatType) {} +}; + +struct IntType; +using IntTypePtr = SingletonTypePtr; +// This type represents a Python int number +struct TORCH_API IntType : public NumberType { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "int"; + } + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override { + // NOLINTNEXTLINE(bugprone-parent-virtual-call) + return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not); + } + static const TypeKind Kind = TypeKind::IntType; + // global singleton + static IntTypePtr get(); + + private: + IntType() : NumberType(TypeKind::IntType) {} + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + (void)printer; // Suppress unused variable warning + return "int"; + } +}; + +struct BoolType; +using BoolTypePtr = SingletonTypePtr; +// This node represents a Python bool value +struct TORCH_API BoolType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "bool"; + } + static const TypeKind Kind = TypeKind::BoolType; + // global singleton + static BoolTypePtr get(); + + private: + BoolType() : Type(TypeKind::BoolType) {} +}; + +struct StringType; +using StringTypePtr = SingletonTypePtr; +// This type represents a Python string +struct TORCH_API StringType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + // we only use "str" (not "string") in both FunctionSchema and script + return annotation_str(); + } + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + (void)printer; // Suppress unused variable warning + return "str"; + } + static const TypeKind Kind = TypeKind::StringType; + // global singleton + static StringTypePtr get(); + + private: + StringType() : Type(TypeKind::StringType) {} +}; + +struct StorageType; +using StorageTypePtr = SingletonTypePtr; +struct TORCH_API StorageType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return annotation_str(); + } + 
std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + (void)printer; // Suppress unused variable warning + return "Storage"; + } + static const TypeKind Kind = TypeKind::StorageType; + // global singleton + static StorageTypePtr get(); + + private: + StorageType() : Type(TypeKind::StorageType) {} +}; + +struct FunctionType; +using FunctionTypePtr = std::shared_ptr; +struct TORCH_API FunctionType : public NamedType { + static FunctionTypePtr create(torch::jit::Function* function) { + return FunctionTypePtr( + new FunctionType(function)); // NOLINT(modernize-make-shared) + } + bool equals(const Type& rhs) const override { + if (auto func_type = rhs.cast()) { + return func_type->function_ == function_; + } + + return false; + } + std::string str() const override { + return "Function"; + } + torch::jit::Function* function() const { + return function_; + } + static const TypeKind Kind = TypeKind::FunctionType; + + private: + FunctionType(torch::jit::Function* function); + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + (void)printer; // Suppress unused variable warning + const auto& n = name().value(); + return n.qualifiedName(); + } + torch::jit::Function* function_; +}; + +struct NoneType; +using NoneTypePtr = SingletonTypePtr; +// This type represents a Python None +struct TORCH_API NoneType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "NoneType"; + } + bool isSubtypeOfExt(const Type& rhs, std::ostream *why_not) const override; + + static const TypeKind Kind = TypeKind::NoneType; + // global singleton + static NoneTypePtr get(); + + private: + NoneType() : Type(TypeKind::NoneType) {} +}; + +struct GeneratorType; +using GeneratorTypePtr = SingletonTypePtr; +// This type represents a Generator +struct TORCH_API GeneratorType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Generator"; + } + static const TypeKind Kind = TypeKind::GeneratorType; + // global singleton + static GeneratorTypePtr get(); + + private: + GeneratorType() : Type(TypeKind::GeneratorType) {} +}; + +struct QuantizerType; +using QuantizerTypePtr = SingletonTypePtr; +// This type represents a Quantizer +struct TORCH_API QuantizerType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Quantizer"; + } + static const TypeKind Kind = TypeKind::QuantizerType; + // global singleton + static QuantizerTypePtr get(); + + private: + QuantizerType() : Type(TypeKind::QuantizerType) {} +}; + +struct QSchemeType; +using QSchemeTypePtr = SingletonTypePtr; +// This type represents a QScheme +struct TORCH_API QSchemeType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "QScheme"; + } + static const TypeKind Kind = TypeKind::QSchemeType; + // global singleton + static QSchemeTypePtr get(); + + private: + QSchemeType() : Type(TypeKind::QSchemeType) {} +}; + +struct DeviceObjType; +using DeviceObjTypePtr = SingletonTypePtr; +// This type represents a Device +struct TORCH_API DeviceObjType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Device"; + } + static const TypeKind Kind = TypeKind::DeviceObjType; + // 
global singleton + static DeviceObjTypePtr get(); + + private: + DeviceObjType() : Type(TypeKind::DeviceObjType) {} +}; + +struct StreamObjType; +using StreamObjTypePtr = SingletonTypePtr; +// This type represents a Generator +struct TORCH_API StreamObjType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Stream"; + } + static const TypeKind Kind = TypeKind::StreamObjType; + // global singleton + static StreamObjTypePtr get(); + +private: + StreamObjType() : Type(TypeKind::StreamObjType) {} +}; + +struct VarType; +using VarTypePtr = std::shared_ptr; +// This type represents a type variable, used in FunctionSchema +struct VarType : public SharedType { + static VarTypePtr create(std::string name_) { + return VarTypePtr(new VarType(std::move(name_))); + } + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return name(); + } + const std::string& name() const { + return name_; + } + bool hasFreeVariables() const override { + return true; + } + static const TypeKind Kind = TypeKind::VarType; + + private: + VarType(std::string name_) + : SharedType(TypeKind::VarType), name_(std::move(name_)) {} + std::string name_; +}; + +struct CapsuleType; +using CapsuleTypePtr = SingletonTypePtr; +// This type represents a Python Capsule. +// It does not appear in the IR and is only used during runtime +struct TORCH_API CapsuleType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Capsule"; + } + static const TypeKind Kind = TypeKind::CapsuleType; + // global singleton + static CapsuleTypePtr get(); +private: + CapsuleType() + : Type(TypeKind::CapsuleType) {} +}; + +struct PyObjectType; +using PyObjectTypePtr = SingletonTypePtr; +// This type represents a PyObject Type +struct TORCH_API PyObjectType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "PyObject"; + } + static const TypeKind Kind = TypeKind::PyObjectType; + // global singleton + static PyObjectTypePtr get(); +private: + PyObjectType() + : Type(TypeKind::PyObjectType) {} +}; + +enum class TypeVerbosity { + None, + Type, + TypeAndStride, + Full, + Symbolic, + Default = Full, +}; + +TORCH_API TypeVerbosity type_verbosity(); + +TORCH_API std::ostream& operator<<(std::ostream& out, const Type& t); +template +TORCH_API std::ostream& operator<<( + std::ostream& out, + const VaryingShape& t); +TORCH_API std::ostream& operator<<(std::ostream& os, const SymbolicShape& s); +TORCH_API std::ostream& operator<<(std::ostream& os, const ShapeSymbol& s); +TORCH_API std::ostream& operator<<(std::ostream& os, const Stride& s); +// what is the type, ignoring extra size/shape information? +// e.g. Tensor(2x3) -> Dynamic, and Tuple(Tensor(2x3),...) -> Tuple(Dynamic,...) + +// `unshapedType` is used to remove Tensor subtypes. We treat all Tensor +// subtypes as simply "Tensor"; we also create a new version of any +// container types in which internal Tensors have undergone the same +// operation. This is used for type comparisons between two Tensor types +// (`unshapedType` means that we don't falsely return `false` for e.g. +// Tensors of different dimensions). It's also used in the alias +// analysis pass. +// Be careful with calls because this can be very slow. 
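+/* Editor's note: an illustrative sketch, not part of the original header.
+   Examples of what `unshapedType` (defined below) produces:
+
+     Tensor(2x3)            -> Tensor
+     List[Tensor(2x3)]      -> List[Tensor]
+     Dict[str, Tensor(2x3)] -> Dict[str, Tensor]
+     int                    -> int  (unchanged; no Tensor inside)
+
+   Or, in code (types assembled by hand for the example):
+
+     TypePtr t = TensorType::createContiguous(at::kFloat, at::kCPU, {2, 3});
+     TypePtr u = unshapedType(ListType::create(t));  // List[Tensor]
+*/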
If calling this +// on a graph, use `EraseShapeInformation` in shape_analysis.h +inline TypePtr unshapedType(const TypePtr& type) { + if (type->isSubtypeOf(*TensorType::get())) { + return TensorType::get(); + } + at::ArrayRef contained = type->containedTypes(); + if (contained.empty()) { + return type; + } + return type->withContained(fmap(type->containedTypes(), unshapedType)); +} + +inline TypePtr TensorType::fromNumberType(const Type& typ) { + if (typ.isSubtypeOf(*IntType::get())) { + return TensorType::createContiguous(at::kLong, at::kCPU, {}); + } else if (typ.isSubtypeOf(*FloatType::get())) { + return TensorType::createContiguous(at::kDouble, at::kCPU, {}); + } else if (typ.isSubtypeOf(*BoolType::get())) { + return TensorType::createContiguous(at::kBool, at::kCPU, {}); + } else if (typ.kind() == NumberType::Kind) { + return TensorType::create(c10::nullopt, at::kCPU, {}, c10::nullopt); + } + TORCH_CHECK(false, "Unknown number type: ", typ.str()); +} +inline TypePtr TensorType::fromBoolType() { + return TensorType::createContiguous(at::kBool, at::kCPU, {}); +} + +inline c10::optional tryScalarTypeFromJitType(const Type& type) { + if (type == *FloatType::get()) { + return at::typeMetaToScalarType(c10::get_default_dtype()); + } else if (type == *IntType::get()) { + return at::ScalarType::Long; + } else if (type == *BoolType::get()) { + return at::ScalarType::Bool; + } + return c10::nullopt; +} + +inline at::ScalarType scalarTypeFromJitType(const Type& type) { + auto result = tryScalarTypeFromJitType(type); + TORCH_CHECK( + result, + "Add new condition, expected Float, Complex, Int, or Bool but got", + type.str()); + return *result; +} + +// Attempt to find the correct supertype of the two types `t1` and `t2`. +// If no supertype is found, then nullopt will be returned if +// `default_to_union` is false, and `Union[t1, t2]` will be returned +// if it is true. If `t1 == t2`, or `t1` is a type refinement of `t2`, +// then `t2` will be returned (and vice versa). +// +// Two different tensortypes will return dynamic. +// +// Currently we chose not to support returning a NumberType for +// two types from the set of {FloatType, IntType, ComplexType}, because +// there is a lack of operator support for NumberType. +// +// If `type_hint` is an `InterfaceType`, then we can use that as a +// potential supertype for `ClassType`s in the list. Otherwise, we have +// no way to find and use some common interface type +TORCH_API c10::optional unifyTypes( + const TypePtr& t1, + const TypePtr& t2, + bool default_to_union = false, + TypePtr type_hint = nullptr); + +TORCH_API c10::optional unifyTypeList( + at::ArrayRef elements, + std::ostream& why_not, + bool default_to_union = false, + TypePtr type_hint = nullptr); + +namespace detail { +template +struct getTypePtr_ final { + static decltype(auto) call() { + return ([]() { + try { + return getCustomClassType(); + } catch(const c10::Error&) { + TORCH_CHECK( + false, + "Type ", + c10::util::get_fully_qualified_type_name(), + " could not be converted to any of the known types." 
+ ); + } + }()); + } +}; + +template +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return getTypePtr_::call(); + } +}; + +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return AnyType::get(); + } +}; + +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return TensorType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StorageType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StreamObjType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return FloatType::get(); + } +}; +template <> +struct getTypePtr_> final { + static decltype(auto) call() { + return ComplexType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; + +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return SymIntType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; + +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return SymFloatType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return FloatType::get(); + } +}; + +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return DeviceObjType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return BoolType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return NumberType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return QSchemeType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return TypeFactory::create( + TypeFactory::get()); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StringType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StringType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StringType::get(); + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per vector" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = ListType::get("vector", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per ArrayRef" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = ListType::get("ArrayRef", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_ final { + static const auto& call() { + static auto type = ListType::create(getMaybeFakeTypePtr_::call()); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per List" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. 
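+  /* Editor's note: an illustrative aside, not part of the original header.
+     These specializations are what let c10::getTypePtr<T>() map C++ types to
+     JIT types, caching one singleton type object per container instantiation:
+
+       auto t1 = c10::getTypePtr<std::vector<int64_t>>();       // List[int]
+       auto t2 = c10::getTypePtr<c10::optional<std::string>>(); // Optional[str]
+  */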
+ static auto type = ListType::get("List", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + static auto type = ListType::get("List", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per array" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + // (Concatenating the length onto the end of the string because we want a unique + // type_ptr created for every std::array type). + static auto type = ListType::get(std::string("array") + std::to_string(N), inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_key_type = getMaybeFakeTypePtr_::call(); + static auto inner_val_type = getMaybeFakeTypePtr_::call(); + // The "per unordered_map" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = DictType::get("unordered_map", inner_key_type, inner_val_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_key_type = getMaybeFakeTypePtr_::call(); + static auto inner_val_type = getMaybeFakeTypePtr_::call(); + // The "per Dict" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = DictType::get("Dict", inner_key_type, inner_val_type); + return type; + } +}; + +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per optional" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = OptionalType::get(inner_type); + return type; + } +}; + + +template<> +struct getTypePtr_ final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per optional" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = OptionalType::get(inner_type); + return type; + } +}; + +template +struct getMaybeFakeTypePtr_ final { + static const auto& call() { + // The "per optional" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto inner_type = getMaybeFakeTypePtr_::call(); + static auto type = OptionalType::get(inner_type); + return type; + } +}; + +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto type = ([]() { + std::vector contained_types = { + (getMaybeFakeTypePtr_::call())... 
+ }; + return TupleType::create(std::move(contained_types)); + })(); + return type; + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return NoneType::get(); + } +}; +} // namespace detail +template +inline decltype(auto) getTypePtr() { + // TODO: static_assert that a templated function exists, and throw a friendly + // error message if not + return detail::getMaybeFakeTypePtr_::call(); +} + +template +inline TypePtr getTypePtrCopy() { + // TODO: static_assert that a templated function exists, and throw a friendly + // error message if not + return getTypePtr(); +} + +template +inline decltype(auto) getFakeTypePtr() { + return detail::getMaybeFakeTypePtr_::call(); +} + +template +inline TypePtr getFakeTypePtrCopy() { + return getFakeTypePtr(); +} + +using TypeEnv = std::unordered_map; +struct MatchTypeReturn { + MatchTypeReturn(std::string reason) : reason_(std::move(reason)) {} + static MatchTypeReturn Success() { + return MatchTypeReturn(); + } + bool success() const { + return !reason_.has_value(); + } + const std::string& reason() const { + return reason_.value(); + } + + private: + MatchTypeReturn() + : reason_(c10::nullopt) {} + c10::optional reason_; // is there is no match, this contains the reason +}; + +// attempt to match the type variables in formal to actual, adding them to type_env. +// If no match is possible this returns a MatchTypeReturn with r.success() == false +// and a r.reason() that describes why it could not match. +// note: It is possible to successfully match a formal, but for type variables +// in the formal to still not be defined. In particular, None matches Optional[T] +// but does not define the value of T. +TORCH_API MatchTypeReturn +matchTypeVariables(const TypePtr& formal, const TypePtr& actual, TypeEnv& type_env); + +// replace type variables appearing in `type` with the values in +// `type_env`. Returns nullptr if a variable used in `type` +// does not appear in `type_env` +TORCH_API TypePtr tryEvalTypeVariables(const TypePtr& type, TypeEnv& type_env); + +TORCH_API bool elementTypeCanBeInferredFromMembers(const TypePtr& elem_type); + +struct InterfaceType; +using InterfaceTypePtr = std::shared_ptr; + +// Interfaces are a list of abstract methods that a class might meet. +// If a class provides those methods, it implicitly meets the interface. + +// Subtype relations for Interface with ClassType: +// lhs (ClassType or InterfaceType) is a subtype of rhs if: +// 1. lhs methods are a superset of rhs methods +// 2. if rhs is module interface, the lhs must be module interface or module itself +struct TORCH_API InterfaceType : public NamedType { + static InterfaceTypePtr create( + QualifiedName qualifiedName, bool is_module=false); + + bool equals(const Type& rhs) const override { + if (auto user_rhs = rhs.castRaw()) { + return isSubTypeImpl(*this, *user_rhs, nullptr) && + isSubTypeImpl(*user_rhs, *this, nullptr); + } + return false; + } + + std::string str() const override { + return std::string("InterfaceType<") + name()->name() + ">"; + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + // try to find a method of this interface, + // returns nullptr if not found. 
+  const FunctionSchema* getMethod(const std::string& name) const;
+  void addMethod(FunctionSchema schema);
+  const std::vector<FunctionSchema>& methods() const {
+    return *methods_;
+  }
+
+  bool is_module() const override {
+    return is_module_;
+  }
+  static const TypeKind Kind = TypeKind::InterfaceType;
+  ~InterfaceType() override;
+ private:
+  InterfaceType(QualifiedName name, bool is_module);
+  static bool isSubTypeImpl(
+      const InterfaceType& lhs,
+      const InterfaceType& rhs,
+      std::ostream* why_not);
+
+  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
+    (void)printer; // Suppress unused variable warning
+    return name()->qualifiedName();
+  }
+
+  // shared_ptr so that this header does not have to depend on
+  // FunctionSchema.h
+  std::shared_ptr<std::vector<FunctionSchema>> methods_;
+  // flag to distinguish if it's an interface type from a module or not
+  bool is_module_;
+};
+
+template <TypeKind K>
+struct EnumerationType : public Type {
+  static const TypeKind Kind = K;
+
+  bool equals(const Type& rhs) const override {
+    return rhs.kind() == kind();
+  }
+
+ protected:
+  EnumerationType() : Type(Kind) {}
+};
+
+// WARNING: These enumeration types below DO NOT actually get parsed out
+// from the logical schema strings, instead they are mapped as ints. To
+// observe these types, use real_type() instead of type() on Argument
+
+struct ScalarTypeType;
+using ScalarTypeTypePtr = SingletonTypePtr<ScalarTypeType>;
+struct TORCH_API ScalarTypeType : public EnumerationType<TypeKind::ScalarTypeType> {
+  std::string str() const override {
+    return "ScalarType";
+  }
+  static const TypeKind Kind = TypeKind::ScalarTypeType;
+  // global singleton
+  static ScalarTypeTypePtr get();
+
+ private:
+  ScalarTypeType() : EnumerationType() {}
+};
+
+struct MemoryFormatType;
+using MemoryFormatTypePtr = SingletonTypePtr<MemoryFormatType>;
+struct TORCH_API MemoryFormatType : public EnumerationType<TypeKind::MemoryFormatType> {
+  std::string str() const override {
+    return "MemoryFormat";
+  }
+  static const TypeKind Kind = TypeKind::MemoryFormatType;
+  // global singleton
+  static MemoryFormatTypePtr get();
+
+ private:
+  MemoryFormatType() : EnumerationType() {}
+};
+
+struct LayoutType;
+using LayoutTypePtr = SingletonTypePtr<LayoutType>;
+struct TORCH_API LayoutType : public EnumerationType<TypeKind::LayoutType> {
+  std::string str() const override {
+    return "Layout";
+  }
+  static const TypeKind Kind = TypeKind::LayoutType;
+  // global singleton
+  static LayoutTypePtr get();
+
+ private:
+  LayoutType() : EnumerationType() {}
+};
+
+namespace detail {
+template <>
+struct getMaybeFakeTypePtr_<c10::ScalarType, false> final {
+  static decltype(auto) call() {
+    return ScalarTypeType::get();
+  }
+};
+template <>
+struct getMaybeFakeTypePtr_<c10::Layout, false> final {
+  static decltype(auto) call() {
+    return LayoutType::get();
+  }
+};
+template <>
+struct getMaybeFakeTypePtr_<c10::MemoryFormat, false> final {
+  static decltype(auto) call() {
+    return MemoryFormatType::get();
+  }
+};
+template <>
+struct getMaybeFakeTypePtr_<c10::ScalarType, true> final {
+  static decltype(auto) call() {
+    return IntType::get();
+  }
+};
+template <>
+struct getMaybeFakeTypePtr_<c10::Layout, true> final {
+  static decltype(auto) call() {
+    return IntType::get();
+  }
+};
+template <>
+struct getMaybeFakeTypePtr_<c10::MemoryFormat, true> final {
+  static decltype(auto) call() {
+    return IntType::get();
+  }
+};
+} // namespace detail
+
+// the common supertype of all lists,
+// List[T] <: AnyList for all T
+struct AnyListType;
+using AnyListTypePtr = SingletonTypePtr<AnyListType>;
+struct TORCH_API AnyListType : public Type {
+  bool equals(const Type& rhs) const override {
+    return rhs.kind() == kind();
+  }
+  std::string str() const override {
+    return "list";
+  }
+  static const TypeKind Kind = TypeKind::AnyListType;
+  // global singleton
+  static AnyListTypePtr get();
+private:
+  AnyListType()
+  : Type(TypeKind::AnyListType) {}
+};
+
+// the common supertype of all tuples,
+// Tuple[T...] <: AnyTuple for all T
+struct AnyTupleType;
+using AnyTupleTypePtr = SingletonTypePtr<AnyTupleType>;
+struct TORCH_API AnyTupleType : public Type {
+  bool equals(const Type& rhs) const override {
+    return rhs.kind() == kind();
+  }
+
+  std::string str() const override {
+    return "tuple";
+  }
+  static const TypeKind Kind = TypeKind::AnyTupleType;
+
+  // global singleton
+  static AnyTupleTypePtr get();
+private:
+  AnyTupleType()
+  : Type(TypeKind::AnyTupleType) {}
+};
+
+// the common supertype of all classes,
+// ClassType <: AnyClassType for all classes
+struct AnyClassType;
+using AnyClassTypePtr = SingletonTypePtr<AnyClassType>;
+struct TORCH_API AnyClassType : public Type {
+  bool equals(const Type& rhs) const override {
+    return rhs.kind() == kind();
+  }
+  std::string str() const override {
+    return "AnyClassType";
+  }
+  static const TypeKind Kind = TypeKind::AnyClassType;
+  // global singleton
+  static AnyClassTypePtr get();
+private:
+  AnyClassType()
+  : Type(TypeKind::AnyClassType) {}
+};
+
+template<>
+inline typename detail::CastReturnType<NamedType>::type Type::cast<NamedType>() {
+  if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
+      kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
+    return std::static_pointer_cast<NamedType>(static_cast<NamedType*>(this)->shared_from_this());
+  }
+  return nullptr;
+}
+
+template<>
+inline typename detail::CastConstReturnType<NamedType>::type Type::cast<NamedType>() const {
+  if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
+      kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
+    return std::static_pointer_cast<const NamedType>(static_cast<const NamedType*>(this)->shared_from_this());
+  }
+  return nullptr;
+}
+
+template<>
+inline const NamedType* Type::castRaw<NamedType>() const {
+  if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType ||
+      kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) {
+    return static_cast<const NamedType*>(this);
+  }
+  return nullptr;
+}
+
+// Used as a return type when inferring the IValue type of a Python object.
+struct InferredType {
+  /* implicit */ InferredType(TypePtr type) : type_(std::move(type)) {}
+  /* implicit */ InferredType(std::string reason)
+  : type_(nullptr), reason_(std::move(reason)) {}
+  TypePtr type() const {
+    TORCH_INTERNAL_ASSERT(
+        type_,
+        "Tried to get the type from an InferredType but the type is null. 
", + "Reason: ", + reason_); + return type_; + } + bool success() const { + return type_ != nullptr; + } + const std::string& reason() const { + TORCH_INTERNAL_ASSERT(!type_); + return reason_; + } + +private: + TypePtr type_; + std::string reason_; +}; + +TORCH_API bool containsAnyType(const TypePtr& type); + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/jit_type_base.h b/voice_bridge/torch/include/ATen/core/jit_type_base.h new file mode 100644 index 0000000000000000000000000000000000000000..beb553eb935a27a46d180331b22e6e5ab51ba805 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/jit_type_base.h @@ -0,0 +1,710 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +#define C10_FORALL_TYPES(_) \ + _(AnyType) \ + _(EnumType) \ + _(AnyEnumType) \ + _(TensorType) \ + _(StorageType) \ + _(TupleType) \ + _(ListType) \ + _(DictType) \ + _(NumberType) \ + _(FloatType) \ + _(ComplexType) \ + _(FutureType) \ + _(RRefType) \ + _(IntType) \ + _(NoneType) \ + _(StringType) \ + _(GeneratorType) \ + _(QuantizerType) \ + _(BoolType) \ + _(OptionalType) \ + _(VarType) \ + _(DeviceObjType) \ + _(StreamObjType) \ + _(FunctionType) \ + _(ClassType) \ + _(PyObjectType) \ + _(CapsuleType) \ + _(InterfaceType) \ + _(QSchemeType) \ + _(ScalarTypeType) \ + _(LayoutType) \ + _(MemoryFormatType) \ + _(AnyListType) \ + _(AnyTupleType) \ + _(AnyClassType) \ + _(SymIntType) \ + _(SymFloatType) \ + _(UnionType) \ + _(DynamicType) + +enum class TypeKind { +#define DEFINE_TYPE(T) T, + C10_FORALL_TYPES(DEFINE_TYPE) +#undef DEFINE_TYPE +}; + +TORCH_API const char* typeKindToString(TypeKind kind); + +struct Type; +struct SharedType; + +// Use this to customize how a Type is printed using `annotation_str()`. If +// c10::nullopt is returned, `annotation_str()` falls through to its default +// implementation. 
+using TypePrinter = std::function(const Type&)>; + +namespace detail { +template +struct IsSingletonType : public std::integral_constant {}; +} // namespace detail +#define TORCH_DECLARE_SINGLETON(Type) \ + struct Type; \ + namespace detail { \ + template <> struct IsSingletonType : public std::integral_constant {}; \ + } + +TORCH_DECLARE_SINGLETON(AnyType); +TORCH_DECLARE_SINGLETON(AnyEnumType); +TORCH_DECLARE_SINGLETON(NumberType); +TORCH_DECLARE_SINGLETON(FloatType); +TORCH_DECLARE_SINGLETON(ComplexType); +TORCH_DECLARE_SINGLETON(IntType); +TORCH_DECLARE_SINGLETON(BoolType); +TORCH_DECLARE_SINGLETON(StringType); +TORCH_DECLARE_SINGLETON(StorageType); +TORCH_DECLARE_SINGLETON(NoneType); +TORCH_DECLARE_SINGLETON(GeneratorType); +TORCH_DECLARE_SINGLETON(QuantizerType); +TORCH_DECLARE_SINGLETON(QSchemeType); +TORCH_DECLARE_SINGLETON(DeviceObjType); +TORCH_DECLARE_SINGLETON(StreamObjType); +TORCH_DECLARE_SINGLETON(CapsuleType); +TORCH_DECLARE_SINGLETON(PyObjectType); +TORCH_DECLARE_SINGLETON(ScalarTypeType); +TORCH_DECLARE_SINGLETON(LayoutType); +TORCH_DECLARE_SINGLETON(MemoryFormatType); +TORCH_DECLARE_SINGLETON(AnyListType); +TORCH_DECLARE_SINGLETON(AnyTupleType); +TORCH_DECLARE_SINGLETON(AnyClassType); + +namespace detail { +template +struct CastReturnType { + using type = std::shared_ptr; +}; + +template +struct CastReturnType::value>::type> { + using type = SingletonTypePtr; +}; + +template +struct CastConstReturnType { + using type = std::shared_ptr; +}; + +template +struct CastConstReturnType::value>::type> { + using type = SingletonTypePtr; +}; + +template +struct as_shared_type { + using type = SharedType*; +}; + +template +struct as_shared_type { + using type = const SharedType *; +}; +} // namespace detail + +struct TORCH_API Type { + friend TORCH_API bool operator==(const Type& lhs, const Type& rhs); + private: + TypeKind kind_; + + protected: + Type(TypeKind kind) : kind_(kind) {} + + virtual std::string annotation_str_impl(TypePrinter /*printer*/) const { + return str(); + } + // a == b + virtual bool equals(const Type& rhs) const = 0; + // a == b <=> b == a + virtual bool symmetric() const { + return true; + } + + public: + template + class SingletonOrSharedTypePtr { + public: + using element_type = typename std::shared_ptr::element_type; + + SingletonOrSharedTypePtr() = default; + + /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr x) + : repr_(std::move(x)) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr x) + : repr_(std::move(x)) {} + + /* implicit */ SingletonOrSharedTypePtr(std::nullptr_t) + : repr_(nullptr) {} + + /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr p) + : repr_(p) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr p) + : repr_(SingletonTypePtr(p.get())) {} + + + // We need to support construction from T* for pybind. The problem + // is that it's not clear if we are supposed to be taking shared + // ownership or not. + // + // Case 1: if T is known statically to derive from SharedType, we should use + // shared_from_this() and take shared_ownership. + // + // Case 2: if T is exactly Type, we need to do a dynamic_cast to + // check if it's a SharedType and do the right thing. + // + // Case 3: Otherwise, T is not a SharedType. (debug-check this + // assumption!) Use a singleton pointer. 
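+  // Sketch of the three cases (editor's illustration; the pointers are hypothetical):
+  //
+  //   TupleType* t = ...;                       // Case 1: statically a SharedType,
+  //   SingletonOrSharedTypePtr<TupleType> a(t); //   takes shared ownership
+  //   Type* p = ...;                            // Case 2: exactly Type,
+  //   SingletonOrSharedTypePtr<Type> b(p);      //   dynamic_cast decides at runtime
+  //   AnyType* s = ...;                         // Case 3: not a SharedType,
+  //   SingletonOrSharedTypePtr<AnyType> c(s);   //   stored as a raw singleton pointer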
+ + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) : SingletonOrSharedTypePtr(static_cast::type>(p)->shared_from_this()) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) { + if (auto* shared_p = dynamic_cast::type>(p)) { + repr_ = Repr(shared_p->shared_from_this()); + } else { + repr_ = Repr(p); + } + } + + template ::value && !std::is_base_of::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) + : repr_(p) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(dynamic_cast::type>(p) == nullptr); + } + + SingletonOrSharedTypePtr(const SingletonOrSharedTypePtr&) = default; + SingletonOrSharedTypePtr(SingletonOrSharedTypePtr&&) noexcept = default; + SingletonOrSharedTypePtr& operator=(const SingletonOrSharedTypePtr&) = default; + SingletonOrSharedTypePtr& operator=(SingletonOrSharedTypePtr&&) noexcept = default; + + T* get() const { + return repr_.isSharedAndNonNull() ? repr_.shared_.repr_.get() : static_cast(repr_.rawRepr().first); + } + + operator bool() const { + return repr_.isNonNull(); + } + + bool operator==(std::nullptr_t) const { + return !repr_.isNonNull(); + } + + bool operator!=(std::nullptr_t) const { + return repr_.isNonNull(); + } + + template , void>::value, bool> = true> + U& operator*() const { + return *get(); + } + + T* operator->() const { + return get(); + } + + private: + // NOTE: SharedPtrWrapper exists to work around a baffling bug in + // nvcc; see comment in destroy() below. + struct SharedPtrWrapper { + SharedPtrWrapper(std::shared_ptr &&x) + : repr_(std::move(x)) {} + std::shared_ptr repr_; + }; + union Repr { + Repr() : Repr(nullptr) {} + + explicit Repr(std::shared_ptr x) + : shared_(std::move(x)) {} + + explicit Repr(std::nullptr_t) + : singletonRepr_(nullptr) {} + + explicit Repr(SingletonTypePtr p) + : singletonRepr_(p.get()) {} + + ~Repr() { + destroy(); + } + + // NOTE: the only non-UB way to access our null state is through + // rawRepr(), because our copy operation doesn't preserve which + // union member is active for null pointers. 
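+      // Editor's note on the layout (inferred from the accessors below): both
+      // union members begin with a pointer-sized field, and SingletonRepr keeps
+      // its second word (unused_) null. rawRepr().nullIfSingleton_ therefore
+      // doubles as the discriminant: non-null means the shared_ptr member is active.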
+ Repr(const Repr& rhs) { + if (rhs.isSharedAndNonNull()) { + new (&shared_) SharedPtrWrapper(rhs.shared_); + } else { + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + } + + Repr(Repr&& rhs) noexcept { + if (rhs.isSharedAndNonNull()) { + new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); + } else { + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + } + + Repr& operator=(const Repr& rhs) { + if (&rhs == this) { + return *this; + } + if (rhs.isSharedAndNonNull()) { + if (isSharedAndNonNull()) { + shared_ = rhs.shared_; + } else { + new (&shared_) SharedPtrWrapper(rhs.shared_); + } + } else { + if (isSharedAndNonNull()) { + destroy(); + } + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + return *this; + } + + Repr& operator=(Repr&& rhs) noexcept { + if (&rhs == this) { + return *this; + } + if (rhs.isSharedAndNonNull()) { + if (isSharedAndNonNull()) { + shared_ = std::move(rhs.shared_); + } else { + new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); + } + } else { + if (isSharedAndNonNull()) { + destroy(); + } + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + return *this; + } + + SharedPtrWrapper shared_; + + struct SingletonRepr { + explicit SingletonRepr(T* s) : singleton_(s) {} + T* singleton_; + void* unused_ = nullptr; + } singletonRepr_; + struct RawRepr { + void* first; + void* nullIfSingleton_; + }; + + // It is UB to read the singleton part of Repr if it was + // constructed as a shared_ptr and vice versa, but memcpying out + // the representation is always OK, so here's an accessor to obey + // the letter of the law. + RawRepr rawRepr() const { + RawRepr repr; + memcpy(&repr, reinterpret_cast(this), sizeof(RawRepr)); + return repr; + } + + bool isNonNull() const { + auto repr = rawRepr(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(repr.nullIfSingleton_ == nullptr || repr.first != nullptr); + return repr.first != nullptr; + } + + bool isSharedAndNonNull() const { + return rawRepr().nullIfSingleton_ != nullptr; + } + + private: + void destroy() { + if (isSharedAndNonNull()) { + // Without SharedPtrWrapper, this line would read + // `shared_.~shared_ptr()` and nvcc would complain with + // "error: expected primary-expression before '>' token" + // referring to the "t" in "shared_ptr". SharedPtrWrapper + // exists to work around this compiler bug. + shared_.~SharedPtrWrapper(); + } + } + } repr_; + }; + + using TypePtr = SingletonOrSharedTypePtr; + using Ptr = TypePtr; + using ElementType = Type; + + // subtyping relation. By default, we return true for the case + // when the type is exactly equal or if this <: T where rhs = Optional[T] + + // if this returns false and the why_not stream is non-null, it contains + // additional details that describe why this is not a subtype of 'rhs'. + // This additional information should only contain details that are not + // obvious from the annotation_str() that describes the type. For instance it + // is clear that `int <: str` is false but not clear why `Foo <: InterfaceBar` + // might be false. 
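+  // A typical call site looks like this (editor's sketch; `actual` and
+  // `expected` are hypothetical TypePtrs):
+  //
+  //   std::ostringstream why_not;
+  //   if (!actual->isSubtypeOfExt(*expected, &why_not)) {
+  //     TORCH_CHECK(false, "type mismatch: ", why_not.str());
+  //   }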
+ virtual bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const; + virtual bool is_module() const; + bool isSubtypeOf(const Type& rhs) const { + return isSubtypeOfExt(rhs, nullptr); + } + // Compatibility shims to accommodate existing code that passes shared_ptrs + // around. Ideally, we would just delete this, but it should be harmless. + template + typename std::enable_if::value, bool>::type + isSubtypeOf(const std::shared_ptr& rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOf(const SingletonOrSharedTypePtr& rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOf(SingletonTypePtr rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(const SingletonOrSharedTypePtr& rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(const std::shared_ptr& rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(SingletonTypePtr rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + // How this type will appear in FunctionSchema declarations + virtual std::string str() const = 0; + + // How this type will appear as if it were a type annotation in Python + // which is sometimes different than how it appears in declarations (e.g. + // int[] vs List[int]) + // + // Takes a custom printer that users can pass in to customize the output of + // this method. + std::string annotation_str(TypePrinter printer) const { + if (printer) { + // the printer can return nullopt to fall through to the default impl + if (auto renamed = printer(*this)) { + return *renamed; + } + } + return annotation_str_impl(printer); + } + std::string annotation_str() const { + // Overload instead of define a default value for `printer` to help + // debuggers out. + return annotation_str(nullptr); + } + + // Returns a human readable string that includes additional information like + // "type is inferred rather than explictly defined" to help construct more + // user-friendly messages. + virtual std::string repr_str() const { + return annotation_str(); + } + + TypeKind kind() const { + return kind_; + } + + virtual bool isUnionType() const { + return false; + } + + virtual bool requires_grad() const { + for (const auto& ct : containedTypes()) { + if (ct->requires_grad()) { + return true; + } + } + return false; + } + + // Dynamically cast this object to the subclass indicated by the + // template variable, returning nullptr if the cast is invalid. 
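+  // For example (editor's sketch; `t` is a hypothetical TypePtr):
+  //
+  //   if (auto list = t->cast<ListType>()) {
+  //     use(list->getElementType());
+  //   }
+  //
+  // expect<T>() below asserts instead of returning nullptr, and castRaw<T>()
+  // returns a raw pointer without touching any refcount.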
+ template ::value, bool> = true> + typename detail::CastReturnType::type cast() { + if (T::Kind == kind()) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastReturnType::type cast() { + if (T::Kind == kind()) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get()); + return typename detail::CastReturnType::type(static_cast(this)); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastConstReturnType::type cast() const { + if (T::Kind == kind()) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastConstReturnType::type cast() const { + if (T::Kind == kind()) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get()); + return typename detail::CastConstReturnType::type(static_cast(this)); + } + return nullptr; + } + template + T* castRaw() { + if (T::Kind == kind()) { + return static_cast(this); + } + return nullptr; + } + template + const T* castRaw() const { + if (T::Kind == kind()) { + return static_cast(this); + } + return nullptr; + } + template + auto expect() { + auto r = cast(); + AT_ASSERT(r); + return r; + } + template + auto expect() const { + auto r = cast(); + AT_ASSERT(r); + return r; + } + template + T& expectRef() { + auto* r = castRaw(); + AT_ASSERT(r); + return *r; + } + template + const T& expectRef() const { + auto* r = castRaw(); + AT_ASSERT(r); + return *r; + } + virtual ~Type() = default; + virtual bool hasFreeVariables() const { + return false; + } + // list of types this type contains, e.g. for a List then element type of a + // list for a tuple, the types of the tuple elements + virtual at::ArrayRef containedTypes() const { + return {}; + } + virtual TypePtr containedType(size_t i) const { + return containedTypes().at(i); + } + virtual size_t containedTypeSize() const { + return containedTypes().size(); + } + // create a new version of this type, replacing its contained types with + // contained_types + TypePtr withContained(std::vector contained_types); + // per-type constructor, you only need to override this if the + // containedTypes() is not empty + virtual TypePtr createWithContained( + std::vector /*contained_types*/) const { + AT_ERROR( + "type with contained types did not overload createWithContained: ", + str()); + } + +}; + +template +using SingletonOrSharedTypePtr = Type::SingletonOrSharedTypePtr; + + +template +bool operator==(const SingletonOrSharedTypePtr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonOrSharedTypePtr& x, const std::shared_ptr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const std::shared_ptr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonOrSharedTypePtr& x, const SingletonTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonTypePtr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const SingletonOrSharedTypePtr& y) { + return !(x == y); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const std::shared_ptr& y) { + return !(x == y); +} + +template +bool operator!=(const std::shared_ptr& x, const SingletonOrSharedTypePtr& 
y) { + return !(x == y); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const SingletonTypePtr& y) { + return !(x == y); +} + +template +bool operator!=(const SingletonTypePtr& x, const SingletonOrSharedTypePtr& y) { + return !(x == y); +} + +using TypePtr = SingletonOrSharedTypePtr; +using ConstTypePtr = SingletonOrSharedTypePtr; + +// Explicitly enable MaybeOwned>, rather than allowing +// MaybeOwned to be used for any type right away. +template +struct MaybeOwnedTraits> + : public MaybeOwnedTraitsGenericImpl> {}; + +// Base class for Types that are guaranteed to be owned by std::shared_ptr. +struct TORCH_API SharedType : public Type, public std::enable_shared_from_this { + using Type::Type; +}; + +inline TypePtr Type::withContained(std::vector contained_types) { + auto current_contained = containedTypes(); + // Types with no contained_types don't need this call. Check before calling! + // + // (We can't support this efficiently because types without + // contained types may be singletons, in which case + // shared_from_this will crash; we would have to provide a virtual + // typeptr_from_this or isSingleton.) + TORCH_INTERNAL_ASSERT(!current_contained.empty() && current_contained.size() == contained_types.size()); + if (current_contained.equals(contained_types)) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return createWithContained(std::move(contained_types)); +} + + +TORCH_API inline bool operator==(const Type& lhs, const Type& rhs) { + if (C10_UNLIKELY(!rhs.symmetric())) { + return rhs.equals(lhs); + } + return lhs.equals(rhs); +} + +struct NamedType; +using NamedTypePtr = std::shared_ptr; +using ConstNamedTypePtr = std::shared_ptr; + +struct TORCH_API NamedType : public SharedType { + NamedType(TypeKind tk, c10::optional name) + : SharedType(tk), name_(std::move(name)) { + TORCH_INTERNAL_ASSERT( + tk == TypeKind::TupleType || tk == TypeKind::FunctionType || + tk == TypeKind::ClassType || tk == TypeKind::InterfaceType || + tk == TypeKind::EnumType, + "If you add a new kind of NamedType, ", + "please update the cast specialization and this assert"); + } + + // Fully qualified name of type + // Looks like: "foo.bar.Baz". + const c10::optional& name() const { + return name_; + } + + private: + c10::optional name_; +}; + +} // namespace c10 + +namespace std { +template +struct hash> { + size_t operator()(const c10::SingletonOrSharedTypePtr& x) const { + return std::hash()(x.get()); + } +}; +} // namespace std diff --git a/voice_bridge/torch/include/ATen/core/op_registration/adaption.h b/voice_bridge/torch/include/ATen/core/op_registration/adaption.h new file mode 100644 index 0000000000000000000000000000000000000000..3112a206bb4e1f81c8cb7bb4f1c154f6d73076b6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/op_registration/adaption.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include +#include + +/* + * [Note: hacky wrapper removal for optional tensor] + * + * The kernel implementation takes an optional tensor marked in the schema as + * Tensor? but the C++ function takes Tensor instead of the optional + * expected by the dispatcher. 
+ *
+ * To remove the hacky wrapper, the C++ function is changed to take
+ * optional<Tensor> and unwrap the Tensor value at the beginning of
+ * the function, e.g.:
+ *   > c10::MaybeOwned<Tensor> weight_maybe_owned =
+ *   >     at::borrow_from_optional_tensor(weight_opt);
+ *   > const Tensor& weight = *weight_maybe_owned;
+ *
+ * We may want to make the kernel handle optional<Tensor> directly without
+ * going through the creation of a default-constructed Tensor in
+ * at::borrow_from_optional_tensor.
+ */
+
+/*
+ * [Note: hacky wrapper removal for TensorOptions]
+ *
+ * The kernel implementation takes a TensorOptions argument but the dispatcher
+ * expects separate arguments for dtype, layout, device, pin_memory.
+ *
+ * To remove the hacky wrapper, the kernel implementation is changed to take
+ * the 4 arguments (dtype, layout, device, pin_memory), and assemble the
+ * TensorOptions value at the beginning of the function, e.g.:
+ *   > TensorOptions options = TensorOptions().dtype(dtype).layout(layout)
+ *   >     .device(device).pinned_memory(pin_memory);
+ *
+ * We may want to make the kernel handle these parameters directly without going
+ * through the creation of a TensorOptions value.
+ */
+
+namespace c10 {
+namespace impl {
+
+TORCH_API void common_device_check_failure(optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName);
+
+inline void check_and_update_common_device(optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
+  // TODO: Remove this once the following issue is addressed:
+  // https://github.com/pytorch/pytorch/issues/57380
+  if (!tensor.defined()) {
+    return;
+  }
+
+  if (!common_device.has_value()) {
+    common_device = tensor.device();
+    return;
+  }
+
+  if (C10_UNLIKELY(common_device != tensor.device())) {
+    common_device_check_failure(common_device, tensor, methodName, argName);
+  }
+}
+
+inline void check_and_update_common_device(optional<Device>& common_device, const optional<at::Tensor>& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
+  if (tensor.has_value()) {
+    check_and_update_common_device(common_device, tensor.value(), methodName, argName);
+  }
+}
+
+inline void check_and_update_common_device(optional<Device>& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
+  for (const auto& tensor : tensors) {
+    check_and_update_common_device(common_device, tensor, methodName, argName);
+  }
+}
+
+inline void check_and_update_common_device(optional<Device>& common_device, const List<optional<at::Tensor>>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
+  for (const auto& tensor : tensors) {
+    check_and_update_common_device(common_device, tensor, methodName, argName);
+  }
+}
+} // namespace impl
+} // namespace c10
diff --git a/voice_bridge/torch/include/ATen/core/op_registration/infer_schema.h b/voice_bridge/torch/include/ATen/core/op_registration/infer_schema.h
new file mode 100644
index 0000000000000000000000000000000000000000..2938e2a8d564ed307df253f6a56cc85562dfbb40
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/core/op_registration/infer_schema.h
@@ -0,0 +1,161 @@
+#pragma once
+
+/**
+ * This file contains functionality to take a C++ function and infer its
+ * c10::FunctionSchema.
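+ *
+ * For example (an editor's sketch; `my_op` is hypothetical):
+ *
+ *   > Tensor my_op(const Tensor& a, int64_t b);
+ *   > FunctionSchema s =
+ *   >     c10::inferFunctionSchemaFlattenedReturns<decltype(my_op)>();
+ *
+ * infers a schema shaped like "(Tensor _0, int _1) -> Tensor" (argument names
+ * are autogenerated placeholders; the exact rendering may differ).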
+ */
+
+#include <ATen/core/function_schema.h>
+#include <c10/util/C++17.h>
+#include <c10/util/Metaprogramming.h>
+
+namespace c10 {
+namespace detail {
+
+namespace infer_schema {
+
+/// The templated inference code creates `ArgumentDef` instead of `Argument`,
+/// because that can be constructed at compile time and has a much smaller
+/// binary size than having calls to `Argument` constructors in the template.
+/// Creating `Argument` objects from `ArgumentDef` can then be done at
+/// runtime in a non-templated way.
+struct ArgumentDef final {
+  using GetTypeFn = TypePtr();
+  GetTypeFn* getTypeFn;
+  GetTypeFn* getFakeTypeFn;
+  constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {}
+  explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {}
+};
+
+template<bool V>
+struct bool_t {};
+template<> struct bool_t<true> : std::true_type {};
+template<> struct bool_t<false> : std::false_type {};
+
+/// Checks the static C++ types `Types` for correctness to catch common error cases.
+template <class... Types>
+constexpr int checkStaticTypes() {
+  // Give nice error messages for some of the common error cases.
+  // Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT
+  static_assert(guts::conjunction<
+      bool_t<!std::is_integral<Types>::value || std::is_same<Types, int64_t>::value || std::is_same<Types, bool>::value>...
+    >::value, "INVALID TYPE: Only int64_t and bool are supported as an integral argument type");
+  static_assert(guts::conjunction<
+      bool_t<!std::is_same<Types, float>::value>...
+    >::value, "INVALID TYPE: float is not supported as an argument type, use double instead");
+  return 0;
+}
+
+template <typename... Ts, size_t... Is>
+constexpr std::array<ArgumentDef, sizeof...(Ts)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
+  return (
+    // Check types for common errors
+    checkStaticTypes<Ts...>(),
+
+    // Create the return value
+    std::array<ArgumentDef, sizeof...(Ts)>{
+      ArgumentDef(&getTypePtrCopy<std::decay_t<Ts>>, &getFakeTypePtrCopy<std::decay_t<Ts>>)...}
+  );
+}
+
+/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
+/// as template arguments.
+template<class ParameterTypes> struct createArguments final {};
+template<class... ParameterTypes>
+struct createArguments<guts::typelist::typelist<ParameterTypes...>> final {
+  static constexpr std::array<ArgumentDef, sizeof...(ParameterTypes)> call() {
+    return createArgumentVectorFromTypes<ParameterTypes...>(
+        std::make_index_sequence<sizeof...(ParameterTypes)>()
+    );
+  }
+};
+
+/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified
+/// as a tuple (i.e. in the way c10 kernels return values).
+/// It can be a tuple<A, B, C> if there's three output arguments with types A, B, C.
+/// It can be an empty tuple<>, or void for kernels that don't return anything.
+/// It can be a single type A (i.e. no tuple) for the case where a kernel just
+/// returns one value.
+template<class ReturnTypeTuple, class Enable = void> struct createReturns final {};
+
+template<class... ReturnTypes>
+struct createReturns<std::tuple<ReturnTypes...>, void> final {
+  static constexpr std::array<ArgumentDef, sizeof...(ReturnTypes)> call() {
+    return createArgumentVectorFromTypes<ReturnTypes...>(
+        std::make_index_sequence<sizeof...(ReturnTypes)>()
+    );
+  }
+};
+
+template<class ReturnType>
+struct createReturns<ReturnType, std::enable_if_t<!std::is_same<void, ReturnType>::value && !guts::is_instantiation_of<std::tuple, ReturnType>::value>> final {
+  static constexpr std::array<ArgumentDef, 1> call() {
+    return createReturns<std::tuple<ReturnType>>::call();
+  }
+};
+
+template<>
+struct createReturns<void, void> final {
+  static constexpr std::array<ArgumentDef, 0> call() {
+    return createReturns<std::tuple<>>::call();
+  }
+};
+
+template <typename ReturnType>
+struct createSingleReturn {
+  static constexpr std::array<ArgumentDef, 1> call() {
+    return createArgumentVectorFromTypes<ReturnType>(std::make_index_sequence<1>());
+  }
+};
+
+C10_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
+C10_API FunctionSchema make_function_schema(c10::ArrayRef<ArgumentDef> arguments, c10::ArrayRef<ArgumentDef> returns);
+
+/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a
+/// function.
Flattens std::tuple returns into multiple return types +template +FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. + constexpr auto arguments = createArguments::call(); + constexpr auto returns = createReturns::call(); + + return make_function_schema(arguments, returns); +} + +/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a +/// function. Preserves std::tuple returns as a Tuple return type +template +FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. + constexpr auto arguments = createArguments::call(); + constexpr auto returns = createSingleReturn::call(); + + return make_function_schema(std::move(name), std::move(overload_name), arguments, returns); +} + +} +} + +template +FunctionSchema inferFunctionSchemaFlattenedReturns() { + return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns>(); +} + +template +FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) { + return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn>(std::move(name), std::move(overload_name)); +} + +TORCH_API c10::optional findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified); + +} diff --git a/voice_bridge/torch/include/ATen/core/op_registration/op_allowlist.h b/voice_bridge/torch/include/ATen/core/op_registration/op_allowlist.h new file mode 100644 index 0000000000000000000000000000000000000000..6e77c565388156db9f14ff607b4c1bc42240d278 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/op_registration/op_allowlist.h @@ -0,0 +1,199 @@ +#pragma once + +// TODO: unify to C10_MOBILE. In theory this header could be used in OSS. +#ifdef TEMPLATE_SELECTIVE_BUILD +#include +#endif + +/** + * This header implements functionality to build PyTorch with only a certain + * set of operators (+ dependencies) included. + * + * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these + * two ops will be included in your build. The allowlist records operators + * only, no overloads; if you include aten::add, all overloads of aten::add + * will be included. + * + * Internally, this is done by removing the operator registration calls + * using compile time programming, and the linker will then prune all + * operator functions that weren't registered. + * See Note [Selective build] for more details + * + * WARNING: The allowlist mechanism doesn't work for all ways you could go about + * registering an operator. If the dispatch key / operator name is not + * sufficiently obvious at compile time, then the allowlisting mechanism + * will fail (and the operator will be included in the binary anyway). 
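+ *
+ * Example (sketch): a build configured with
+ *   -DTORCH_OPERATOR_WHITELIST="aten::add;aten::mul"
+ * keeps every overload of aten::add and aten::mul and lets the linker prune
+ * the registrations (and thus the kernels) of all other operators.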
+ */ + +#include +#include +#include + + +#if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) +#include +#endif + +namespace c10 { + +namespace impl { + +constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare + +/** + * In selective build mode returns true/false depending on whether a build + * feature is available or not. + * + * In instrumenting mode (tracing mode), always returns true, and doesn't + * trigger any side effects. + */ +constexpr bool is_build_feature_available(const char* name) { +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) + // Selective Build mode. +#if !defined(TORCH_BUILD_FEATURE_ALLOWLIST) + (void)name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST), + name); +#endif + +#else + // Instrumenting mode. + (void)name; + return true; +#endif +} + +[[noreturn]] void build_feature_required_feature_not_available(const char* feature); + +/** + * Use BUILD_FEATURE_REQUIRED macro in user-code. + * + * In selective build mode becomes a no-op if the build feature passed + * in is available. If not available, throws an exception (c10::Error). + * The compiler is able to perform dead code elimination for code + * following this method if the build feature is not available. + * + * In instrumenting mode (tracing mode), registers (as a side effect) + * the presence of this specific build feature being triggered. + */ +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode + +#if defined(TORCH_BUILD_FEATURE_ALLOWLIST) +#define BUILD_FEATURE_REQUIRED(NAME) \ + if (!c10::impl::is_build_feature_available(NAME)) { \ + ::c10::impl::build_feature_required_feature_not_available(NAME); \ + } +#else // Everything trivially selected +#define BUILD_FEATURE_REQUIRED(NAME) + +#endif + +#else // trace mode +#define BUILD_FEATURE_REQUIRED(NAME) \ + RECORD_FUNCTION_WITH_SCOPE( \ + at::RecordScope::BUILD_FEATURE, \ + std::string(NAME), \ + {}); +#endif + +// Use this macro, and not is_build_feature_available +#define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME) + +// returns true iff allowlist contains item +// allowlist_contains("a;bc;d", "bc") == true +constexpr bool allowlist_contains(string_view allowlist, string_view item) { + //Choose a really big value for next so that if something goes wrong + //this code will blow up in a hopefully detectable way. + size_t next = std::numeric_limits::max(); + for (size_t cur = 0; cur <= allowlist.size(); cur = next) { + next = allowlist.find(';', cur); + if (next != string_view::npos) { + if (allowlist.substr(cur, next - cur).compare(item) == 0) { + return true; + } + next++; + } else { + if (allowlist.substr(cur).compare(item) == 0) { + return true; + } + break; + } + } + return false; +} + +// Returns true iff the given op name is on the allowlist +// and should be registered +constexpr bool op_allowlist_check(string_view op_name) { + assert(op_name.find("::") != string_view::npos); + // Use assert() instead of throw() due to a gcc bug. 
See: + // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function + // https://github.com/fmtlib/fmt/issues/682 + assert(op_name.find("(") == string_view::npos); +#if !defined(TORCH_OPERATOR_WHITELIST) + // If the TORCH_OPERATOR_WHITELIST parameter is not defined, + // all ops are to be registered + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_OPERATOR_WHITELIST), + // This function is majorly used for mobile selective build with + // root operators, where the overload is included in the allowlist. + op_name); + // // Strip overload name (as allowlist doesn't contain overloads) + // // Another function based on this may be added when there's usage + // // on op names without overload. + // OperatorNameView::parse(op_name).name); +#endif +} + +// Returns true iff the given schema string is on the allowlist +// and should be registered +constexpr bool schema_allowlist_check(string_view schema) { +#if defined(TORCH_FORCE_SCHEMA_REGISTRATION) + return true; +#else + return op_allowlist_check(schema.substr(0, schema.find("("))); +#endif +} + +// Returns true iff the given custom class name is on the allowlist +// and should be registered +constexpr bool custom_class_allowlist_check(string_view custom_class_name) { +#if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST) + // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined, + // all custom classes are to be registered + (void)custom_class_name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST), + custom_class_name); +#endif +} + +// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST. +// Add this API to pass arbitrary allowlist. +constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) { + return allowlist_contains(allowlist, schema.substr(0, schema.find("("))); +} + +// Returns true iff the given dispatch key is on the allowlist +// and should be registered. When we turn this on, the list of valid +// mobile dispatch keys is hard coded (but you need to make sure +// that you have the correct set of dispatch keys for this). +constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) { +#ifdef C10_MOBILE + return true; + // Disabled for now: to be enabled later! + // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll; +#else + return true; +#endif +} + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/op_registration/op_registration.h b/voice_bridge/torch/include/ATen/core/op_registration/op_registration.h new file mode 100644 index 0000000000000000000000000000000000000000..8cfb22c5ac849b3eafee8f3481938cce16736898 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/op_registration/op_registration.h @@ -0,0 +1,596 @@ +#pragma once + +/** + * Include this file if you want to register operators. It includes all + * functionality needed to do so for you. + */ + +#include +#include +#include +#include +#include +#include +#include +#if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD) +#include +#endif +#include + +namespace c10 { + +namespace detail { +// The first argument of the schema might be of type DispatchKeySet, in which case we remove it. +// We do this because every argument in a function schema is expected to be convertable +// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of. 
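+// For example (editor's sketch; `my_kernel` is hypothetical): a kernel declared as
+//   Tensor my_kernel(c10::DispatchKeySet ks, const Tensor& a);
+// is registered with the schema inferred from `(const Tensor&) -> Tensor`; the
+// keyset is supplied by the dispatcher itself, not by callers.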
+// See Note [Plumbing Keys Through The Dispatcher] +template +std::unique_ptr inferFunctionSchemaFromFunctor() { + using func_type = typename c10::remove_DispatchKeySet_arg_from_func::func_type; + return std::make_unique(inferFunctionSchemaFlattenedReturns()); +} +} + +/** + * An instance of this class handles the registration for one or more operators. + * Make sure you keep the RegisterOperators instance around since it will + * deregister the operator it's responsible for in its destructor. + * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ +class TORCH_API RegisterOperators final { +public: + RegisterOperators(); + ~RegisterOperators(); + + RegisterOperators(const RegisterOperators&) = delete; + RegisterOperators& operator=(const RegisterOperators&) = delete; + RegisterOperators(RegisterOperators&&) noexcept; + RegisterOperators& operator=(RegisterOperators&&) noexcept; + + class TORCH_API Options final { + public: + Options(const Options&) = delete; + Options(Options&&) noexcept = delete; + Options& operator=(const Options&) = delete; + Options& operator=(Options&&) noexcept = delete; + + // internal-only for registering stack based kernels + template + Options&& kernel(DispatchKey dispatch_key) && { + return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction(), nullopt, nullptr); + } + + // internal-only for registering stack based catch-all kernels + template + Options&& catchAllKernel() && { + return std::move(*this).kernel(c10::nullopt, KernelFunction::makeFromBoxedFunction(), nullopt, nullptr); + } + + // internal only for registering caffe2 ops + Options&& schema(FunctionSchema&& schema) { + TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration."); + schemaOrName_ = c10::make_right(std::move(schema)); + return std::move(*this); + } + + /** + * Use this to specify the schema for an operator. You can also specify + * the operator name only to have the function signature part of the + * schema be inferred from the kernel function. + * + * Example: + * + * > // Infer function signature from my_kernel_cpu + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * > + * > + * > // Explicitly specify full schema + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op(Tensor a) -> Tensor") + * > .kernel(DispatchKey::CPU)); + */ + Options&& schema(const std::string& schemaOrName) { + TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration."); + + #if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD) + throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build."); + #else + schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName); + #endif + + return std::move(*this); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. 
+ * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, "some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) must match one of the constructors of Functor."); + + return std::move(*this).kernel( + std::move(dispatch_key), + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel("some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. 
Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) must match one of the constructors of Functor."); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + std::move(dispatch_key), + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel() && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) 
API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + std::move(dispatch_key), + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel(FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. A functor kernel with cache gets a new instance of + // its cache each time the kernel is looked up from the dispatch table. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // So, instead of making users having to think about it (including the thread-safety + // issues this causes), let's just forbid stateful lambdas altogether. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + std::move(dispatch_key), + KernelFunction::makeFromUnboxedLambda(std::forward(functor)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. 
+ * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel([] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> catchAllKernel(Lambda&& lambda) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // This would be a likely source for unexpected race conditions, so we forbid it. + // If a kernel really needs global state, they can just have regular global state + // in their .cpp file next to the kernel lambda. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && { + TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration."); + aliasAnalysisKind_ = aliasAnalysisKind; + return std::move(*this); + } + + private: + Options&& kernel(c10::optional dispatch_key, KernelFunction&& func, c10::optional cpp_signature, std::unique_ptr&& inferred_function_schema) && { + KernelRegistrationConfig config; + config.dispatch_key = dispatch_key; + config.func = std::move(func); + config.cpp_signature = std::move(cpp_signature); + config.inferred_function_schema = std::move(inferred_function_schema); + kernels.push_back(std::move(config)); + return std::move(*this); + } + + Options() + : schemaOrName_(c10::nullopt) + , kernels() + , aliasAnalysisKind_(c10::nullopt) + {} + + // KernelRegistrationConfig accumulates all information from the config + // parameters passed to a RegisterOperators::op() call into one object. + struct KernelRegistrationConfig final { + KernelRegistrationConfig() + : dispatch_key(c10::nullopt) + , func() + , cpp_signature(c10::nullopt) + , inferred_function_schema(nullptr) + {} + + c10::optional dispatch_key; + KernelFunction func; + c10::optional cpp_signature; + std::unique_ptr inferred_function_schema; + }; + + c10::optional> schemaOrName_; + + std::vector kernels; + optional aliasAnalysisKind_; + friend class RegisterOperators; + friend class Library; + }; + + /** + * Call this to get an instance of registration options, which + * can be passed to a call to RegisterOperators::op() to specify + * these options for the operator registration. + * See class doc comment for examples. 
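[Editor's note: a sketch combining the stateless-lambda catchAllKernel() and the aliasAnalysis() option shown above; the operator name and the PURE_FUNCTION choice are illustrative assumptions.]

#include <ATen/core/op_registration/op_registration.h>

// The lambda must be capture-free: the static_asserts above reject
// stateful lambdas because a capture would be shared across all lookups.
static auto registry = c10::RegisterOperators().op(
    c10::RegisterOperators::options()
        .schema("my_namespace::twice(Tensor a) -> Tensor")
        .catchAllKernel([](const at::Tensor& a) -> at::Tensor { return a + a; })
        .aliasAnalysis(c10::AliasAnalysisKind::PURE_FUNCTION));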
+ */ + static Options options() { + return {}; + } + + /** + * Call this to register an operator. See class doc comment for examples. + */ + RegisterOperators&& op(Options&& options) && { + checkSchemaAndRegisterOp_(std::move(options)); + return std::move(*this); + } + + // Regular mutator version of the && version above + RegisterOperators& op(Options&& options) & { + checkSchemaAndRegisterOp_(std::move(options)); + return *this; + } + + /** + * This is a shorthand for RegisterOperators::op(Options) where you can + * specify the operator schema outside of the options parameter. + * See class doc comment for examples. + */ + RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && { + return std::move(*this).op(std::move(options).schema(schemaOrName)); + } + + // internal only for registering caffe2 ops + RegisterOperators&& op(FunctionSchema schema, Options&& options) && { + return std::move(*this).op(std::move(options).schema(std::move(schema))); + } + + template + explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options()) + : RegisterOperators() { + std::move(*this).op(schemaOrName, std::forward(func), std::move(options)); + } + + /** + * This API registers an operator based on a kernel function pointer. + * + * Given a kernel + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", &my_kernel_cpu); + * + * If your kernel is small and the overhead of calling it matters, + * then this API might be the wrong choice since the following API + * has a slightly lower overhead for calling into the kernel: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + * + * Or, alternatively, write your kernel as a functor: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction. + std::enable_if_t::value && !std::is_same::value, RegisterOperators&&> + op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && { + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + /** + * This API registers an operator based on a kernel lambda. 
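[Editor's note: the function-pointer shorthand documented above, in minimal form; my_kernel_cpu is the doc comment's own hypothetical kernel, and the namespaced name is an assumption. With no explicit schema, the schema is inferred from the kernel's C++ signature.]

#include <ATen/core/op_registration/op_registration.h>

namespace {
at::Tensor my_kernel_cpu(at::Tensor a, at::Tensor b) { return a + b; }
} // namespace

// Registers a catch-all kernel; the full schema is inferred from the
// signature of my_kernel_cpu.
static auto registry = c10::RegisterOperators()
    .op("my_namespace::my_op", &my_kernel_cpu);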
+ * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", [] (Tensor a, Tensor b) {...}); + * + * This is equivalent to: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .catchAllKernel([] (Tensor a, Tensor b) {...})); + * + */ + template + // enable_if: only enable it if Lambda is actually a stateless lambda + std::enable_if_t::value && guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + template + C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.") + // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda + std::enable_if_t::value && !guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + +private: + void checkSchemaAndRegisterOp_(Options&& config); + + static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options); + void checkNoDuplicateKernels_(const Options& options); + void registerOp_(Options&& options); + + std::vector registrars_; +}; + +} // namespace c10 + +namespace torch { + // Old-style API + using RegisterOperators = c10::RegisterOperators; +} diff --git a/voice_bridge/torch/include/ATen/core/operator_name.h b/voice_bridge/torch/include/ATen/core/operator_name.h new file mode 100644 index 0000000000000000000000000000000000000000..6440a695b55ec45e8d2346911fed2dc767e0afa3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/operator_name.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +// TODO: consider storing namespace separately too +struct OperatorName final { + std::string name; + std::string overload_name; + OperatorName(std::string name, std::string overload_name) + : name(std::move(name)), overload_name(std::move(overload_name)) {} + + // TODO: These two functions below are slow! 
Fix internal data structures so + // I don't have to manually reconstruct the namespaces! + + // Return the namespace of this OperatorName, if it exists. The + // returned string_view is only live as long as the OperatorName + // exists and name is not mutated + c10::optional getNamespace() const { + auto pos = name.find("::"); + if (pos == std::string::npos) { + return c10::nullopt; + } else { + return c10::make_optional(c10::string_view(name.data(), pos)); + } + } + + // Returns true if we successfully set the namespace + bool setNamespaceIfNotSet(const char* ns) { + if (!getNamespace().has_value()) { + const auto ns_len = strlen(ns); + const auto old_name_size = name.size(); + name.resize(ns_len + 2 + old_name_size); + // Shift current value of name to the end of the new space. + name.replace(name.size() - old_name_size, old_name_size, name, 0, old_name_size); + name.replace(0, ns_len, ns, ns_len); + name[ns_len] = ':'; + name[ns_len + 1] = ':'; + return true; + } else { + return false; + } + } +}; + +// Non-owning view of an OperatorName. Unlike OperatorName, most of +// its functions are constexpr, so it can be used for compile time +// computations +struct OperatorNameView final { + c10::string_view name; + c10::string_view overload_name; + constexpr OperatorNameView(c10::string_view name, c10::string_view overload_name) + : name(name), overload_name(overload_name) {} + // Parses strings like "foo.overload" and also "foo" + constexpr static OperatorNameView parse(c10::string_view full_name) { + auto i = full_name.find('.'); + if (i == c10::string_view::npos) { + return OperatorNameView(full_name, c10::string_view()); + } else { + return OperatorNameView(full_name.substr(0, i), full_name.substr(i + 1)); + } + } +}; + +inline bool operator==(const OperatorName& lhs, const OperatorName& rhs) { + return lhs.name == rhs.name && lhs.overload_name == rhs.overload_name; +} + +inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) { + return !operator==(lhs, rhs); +} + +TORCH_API std::string toString(const OperatorName& opName); +TORCH_API std::ostream& operator<<(std::ostream&, const OperatorName&); + +} // namespace c10 + +namespace std { + template <> + struct hash<::c10::OperatorName> { + size_t operator()(const ::c10::OperatorName& x) const { + return std::hash()(x.name) ^ (~ std::hash()(x.overload_name)); + } + }; +} diff --git a/voice_bridge/torch/include/ATen/core/qualified_name.h b/voice_bridge/torch/include/ATen/core/qualified_name.h new file mode 100644 index 0000000000000000000000000000000000000000..b8065d9d5085f75d1e55292ca3e23517d622d87f --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/qualified_name.h @@ -0,0 +1,161 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +// Represents a name of the form "foo.bar.baz" +struct QualifiedName { + QualifiedName() {} + + // `name` can be a dotted string, like "foo.bar.baz", or just a bare name. + /* implicit */ QualifiedName(const std::string& name) { + TORCH_CHECK(!name.empty()); + // split the string into its atoms. 
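[Editor's note: a behavior sketch for the OperatorName helpers defined in operator_name.h above; the asserted values follow directly from the code as shown.]

#include <ATen/core/operator_name.h>
#include <cassert>

void operator_name_demo() {
  c10::OperatorName op("add", /*overload_name=*/"Tensor");
  assert(!op.getNamespace().has_value());    // "add" contains no "::"
  assert(op.setNamespaceIfNotSet("aten"));   // rewrites name in place
  assert(op.name == "aten::add");
  assert(!op.setNamespaceIfNotSet("prim"));  // no-op: namespace already set
}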
+ size_t startSearchFrom = 0; + size_t pos = name.find(delimiter_, startSearchFrom); + + while (pos != std::string::npos) { + auto atom = name.substr(startSearchFrom, pos - startSearchFrom); + TORCH_INTERNAL_ASSERT( + atom.size() > 0, "Invalid name for qualified name: '", name, "'"); + atoms_.push_back(std::move(atom)); + startSearchFrom = pos + 1; + pos = name.find(delimiter_, startSearchFrom); + } + + auto finalAtom = name.substr(startSearchFrom, pos - startSearchFrom); + TORCH_INTERNAL_ASSERT( + finalAtom.size() > 0, "Invalid name for qualified name: '", name, "'"); + atoms_.emplace_back(std::move(finalAtom)); + + cacheAccessors(); + } + + explicit QualifiedName(std::vector atoms) { + for (const auto& atom : atoms) { + TORCH_CHECK(!atom.empty(), "Atom cannot be empty"); + TORCH_CHECK( + atom.find(delimiter_) == std::string::npos, + "Delimiter not allowed in atom"); + } + atoms_ = std::move(atoms); + cacheAccessors(); + } + // Unnecessary copy. Ideally we'd use something like std::string_view. + /* implicit */ QualifiedName(const char* name) + : QualifiedName(std::string(name)) {} + + // `name` must be a bare name (no dots!) + explicit QualifiedName(const QualifiedName& prefix, std::string name) { + TORCH_INTERNAL_ASSERT(!name.empty()); + TORCH_INTERNAL_ASSERT(name.find(delimiter_) == std::string::npos); + atoms_.insert(atoms_.begin(), prefix.atoms_.begin(), prefix.atoms_.end()); + atoms_.push_back(std::move(name)); + + cacheAccessors(); + } + + // Is `this` a prefix of `other`? + // For example, "foo.bar" is a prefix of "foo.bar.baz" + bool isPrefixOf(const QualifiedName& other) const { + const auto& thisAtoms = atoms_; + const auto& otherAtoms = other.atoms_; + + if (thisAtoms.size() > otherAtoms.size()) { + // Can't be a prefix if it's bigger + return false; + } + for (const auto i : c10::irange(thisAtoms.size())) { + if (thisAtoms[i] != otherAtoms[i]) { + return false; + } + } + return true; + } + + // The fully qualified name, like "foo.bar.baz" + const std::string& qualifiedName() const { + return qualifiedName_; + } + + // The leading qualifier, like "foo.bar" + const std::string& prefix() const { + return prefix_; + } + + // The base name, like "baz" + const std::string& name() const { + return name_; + } + + const std::vector& atoms() const { + return atoms_; + } + + bool operator==(const QualifiedName& other) const { + return this->qualifiedName_ == other.qualifiedName_; + } + + bool operator!=(const QualifiedName& other) const { + return !(*this == other); + } + + private: + static constexpr char delimiter_ = '.'; + + // Helper for cacheAccessors() below. + template + std::string join(char delimiter, const T& v) { + std::string out; + size_t reserve = 0; + for (const auto& e : v) { + reserve += e.size() + 1; + } + out.reserve(reserve); + for (const auto i : c10::irange(v.size())) { + if (i != 0) { + out.push_back(delimiter); + } + out.append(v[i]); + } + return out; + } + + void cacheAccessors() { + qualifiedName_ = join(delimiter_, atoms_); + if (atoms_.size() > 1) { + ArrayRef view(atoms_); + const auto prefixView = view.slice(0, view.size() - 1); + prefix_ = join(delimiter_, prefixView); + } + + if (atoms_.size() >= 1) { + name_ = atoms_.back(); + } + } + + // The actual list of names, like "{foo, bar, baz}" + std::vector atoms_; + + /* + * Cached accessors, derived from `atoms_`. 
+ */ + std::string qualifiedName_; + std::string prefix_; + std::string name_; +}; +} // namespace c10 + +namespace std { +template <> +struct hash { + size_t operator()(const c10::QualifiedName& n) const noexcept { + return std::hash()(n.qualifiedName()); + } +}; +} // namespace std diff --git a/voice_bridge/torch/include/ATen/core/rref_interface.h b/voice_bridge/torch/include/ATen/core/rref_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..95f7ff9e9e2f72acc944d1142948eab6735cb28a --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/rref_interface.h @@ -0,0 +1,40 @@ +#pragma once + +#include +#include + +namespace c10 { + +struct Type; +using worker_id_t = int16_t; + +// This abstract class contains only user-facing APIs, and will be shared +// between jit and distributed to implement TorchScript support. +class C10_EXPORT RRefInterface : public c10::intrusive_ptr_target { + public: + RRefInterface() = default; + // RRef is made NOT copyable NOT movable to prevent messing up reference + // counting. + RRefInterface(const RRefInterface& other) = delete; + RRefInterface(RRefInterface&& other) = delete; + RRefInterface& operator=(RRefInterface&& other) = delete; + + virtual ~RRefInterface() = default; + + // returns the worker id of the owner + virtual worker_id_t owner() const = 0; + + // returns the worker name of the owner + virtual std::string ownerName() const = 0; + + // Returns true if this is the ``OwnerRRef`` + virtual bool isOwner() const = 0; + + // Returns true if this is an ``OwnerRRef`` or if this ``UserRRef`` has been + // confirmed by its owner. + virtual bool confirmedByOwner() const = 0; + + virtual const TypePtr type() const = 0; +}; + +} diff --git a/voice_bridge/torch/include/ATen/core/stack.h b/voice_bridge/torch/include/ATen/core/stack.h new file mode 100644 index 0000000000000000000000000000000000000000..1695e5995ab6922e37e6c1aba8fda811c0b528ad --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/stack.h @@ -0,0 +1,200 @@ +#pragma once + +#include + +#include +#include +#include + +// TODO move this to c10 namespace + +namespace torch { +namespace jit { + +using c10::IValue; +using Stack = std::vector; + +class Operation { + template + using accepts = std::is_constructible, F&&>; + + public: + template ::value, int> = 0> + C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register operator instead.") + Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { + raw(&stack); + }) {} + + template ::value && + !std::is_same, Operation>::value, int> = 0> + Operation(F&& op): op_(std::forward(op)) {} + + Operation(std::nullptr_t) noexcept {} + + explicit operator bool() const noexcept { + return op_ ? true : false; + } + + void operator()(Stack& stack) { + op_(stack); + } + + template + T* target() noexcept { + return op_.target(); + } + + private: + std::function op_; +}; + +// An operation with N inputs and M outputs pops the last N inputs off +// the stack and pushes its M inputs onto the stack +// before: I0, I1, ... IN <- stack.back() +// after: O0, O1, ... OM +// operations are defined this way so that ownership of inputs can be +// transferred to the operation and it can incrementally drop ownership of +// tensors when they become unneeded. 
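[Editor's note: a usage sketch for QualifiedName from qualified_name.h above; the accessor values follow from cacheAccessors() as shown.]

#include <ATen/core/qualified_name.h>
#include <cassert>

void qualified_name_demo() {
  c10::QualifiedName n("foo.bar.baz");  // split on '.' into atoms
  assert(n.qualifiedName() == "foo.bar.baz");
  assert(n.prefix() == "foo.bar");      // cached at construction time
  assert(n.name() == "baz");
  assert(c10::QualifiedName("foo.bar").isPrefixOf(n));
}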
For large operations, like 'run an entire +// subgraph', this functionality is very important for minimizing gpu memory +// usage return value is the relative 'offset' to jump to for the next +// operation: +// pc += 1 + offset +// so a return value of 0 goes to the next instruction + +// treat the last N elements of the stack as a list, looking up +// element i +static inline IValue& peek(Stack& stack, size_t i, size_t N) { + return *(stack.end() - N + i); +} +static inline IValue& peek(Stack* stack, size_t i, size_t N) { + return peek(*stack, i, N); +} +static inline const IValue& peek(const Stack& stack, size_t i, size_t N) { + return *(stack.end() - N + i); +} +static inline const IValue& peek(const Stack* stack, size_t i, size_t N) { + return peek(*stack, i, N); +} +// treat the last N elements of the stack as a list, looking up the +// slice starting at index i and having length len +static inline at::ArrayRef peekSlice( + const Stack& stack, + size_t i, + size_t len, + size_t N) { + return at::ArrayRef(stack).slice(stack.size() - N + i, len); +} +static inline at::ArrayRef last(const Stack& stack, size_t N) { + return peekSlice(stack, 0, N, N); +} +static inline at::ArrayRef last(const Stack* stack, size_t N) { + return last(*stack, N); +} +static inline void drop(Stack& stack, size_t n) { + stack.erase(stack.end() - n, stack.end()); +} +static inline void drop(Stack* stack, size_t n) { + drop(*stack, n); +} +static inline IValue pop(Stack& stack) { + auto r = std::move(stack.back()); + stack.pop_back(); + return r; +} +static inline IValue pop(Stack* stack) { + return pop(*stack); +} +static inline std::vector pop(Stack& stack, size_t n) { + std::vector result; + result.reserve(n); + for (const auto i : c10::irange(n)) { + result.push_back(std::move(peek(stack, i, n))); + } + drop(stack, n); + return result; +} + +// variadic pop: +// int64_t a; at::Tensor b; +// pop(stack, a, b); +// equivalent to: +// b = pop(stack).toTensor(); +// a = pop(stack).toInt(); +template +static inline void pop(Stack& stack, Types&... args) { + size_t i = 0; + constexpr size_t N = sizeof...(args); + (void)std::initializer_list{ + (args = std::move(peek(stack, i++, N)).template to(), 0)...}; + drop(stack, N); +} +template +static inline void pop(Stack* stack, Types&... args) { + pop(*stack, args...); +} +template +static inline void push_one(Stack& stack, Type&& arg) { + stack.emplace_back(std::forward(arg)); +} + +static inline void push_one(Stack& stack, c10::TensorOptions options) { + stack.emplace_back(c10::typeMetaToScalarType(options.dtype())); + stack.emplace_back(options.layout()); + stack.emplace_back(options.device()); + stack.emplace_back(options.pinned_memory()); +} + +template +static inline void push(Stack& stack, Types&&... args) { + (void)std::initializer_list{(push_one(stack, std::forward(args)), 0)...}; +} +template +static inline void push(Stack* stack, Types&&... args) { + return push(*stack, std::forward(args)...); +} +template +static inline void push_list_elements(Stack& stack, const c10::List& elements) { + for (T elem : elements) { + stack.push_back(std::move(elem)); + } +} + +// The packer here is carefully written not to make any unnecessary +// copies. 
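[Editor's note: a small sketch of the stack helpers above; the assignment order of the variadic pop matches the doc comment's "equivalent to" example.]

#include <ATen/core/stack.h>

void stack_demo() {
  torch::jit::Stack stack;
  torch::jit::push(stack, 1.0, 2.0);             // stack: [1.0, 2.0]
  double b = torch::jit::pop(stack).toDouble();  // 2.0 (LIFO)
  double a = torch::jit::pop(stack).toDouble();  // 1.0

  // Variadic pop: pop(stack, x, y) is equivalent to
  // y = pop(stack).to<int64_t>(); x = pop(stack).to<int64_t>();
  torch::jit::push(stack, int64_t(3), int64_t(4));
  int64_t x = 0, y = 0;
  torch::jit::pop(stack, x, y);                  // x == 3, y == 4
  (void)a; (void)b;
}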
+ +// pack takes the return values of aten functions pushes them onto the stack +template +inline void pack(Stack& stack, T&& v) { + stack.emplace_back(std::forward(v)); +} +template +inline void pack(Stack* stack, T&& v) { + pack(*stack, std::forward(v)); +} + +template +struct TuplePacker { + // NB: *Not* a universal reference. + static void execute(Stack& stack, std::tuple&& t) { + // NB: The move here does not "destroy" the entire tuple, that is + // not what std::move does; only the particular tuple index + // processed here gets stolen. + pack(stack, std::get(std::move(t))); + TuplePacker::execute(stack, std::move(t)); + } +}; + +template +struct TuplePacker<0, Args...> { + static void execute(Stack& /*stack*/, std::tuple&& /*t*/){}; +}; + +template +inline void pack(Stack& stack, std::tuple&& t) { + TuplePacker::execute(stack, std::move(t)); +} + +} // namespace jit +} // namespace torch diff --git a/voice_bridge/torch/include/ATen/core/symbol.h b/voice_bridge/torch/include/ATen/core/symbol.h new file mode 100644 index 0000000000000000000000000000000000000000..04d480b51e3170e54db1df7050e23fceecd114f9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/symbol.h @@ -0,0 +1,147 @@ +#pragma once +#include +#include +#include // For std::hash +#include + + +namespace c10 { + +// 'prim' symbols are synthetic operators that occur only in the IR +// and don't have corresponding implementations in ATen. + +// 'onnx' symbols correspond to ONNX operators. Their semantics +// are defined in https://github.com/onnx/onnx/blob/master/docs/Operators.md +// The particular version we are targeting is specified by '_onnx_opset_version' +// in torch.onnx.symbolic_helper +// +// In general, most ONNX operators won't get an entry here, because they +// are handled from the Python end. However, you may occasionally need +// to intern an ONNX symbol here so that you can conveniently write an +// optimization on ONNX operations. + +// 'attr' symbols are attribute keys. They are shared between both ONNX and ATen +// operators (you disambiguate their meaning by looking at the operator itself). +// In general, you only need to define attribute keys that are used by +// onnx or prim; ATen attributes are automatically generated in FORALL_ATTR_BASE_SYMBOLS. + +// Note [Symbol allocation] +// ~~~~~~~~~~~~~~~~~~~~~~~~ +// +// 1. Symbol namespace is split up into namespaces. +// +// 2. The intended access pattern for built-in symbols is onnx::MatMul +// in the c10 namespace (this is a Symbol). +// + +// Built-in constant definition strategy: +// - Enum is the most convenient way to generate a contiguous sequence +// of numbers for an identifier. +// - However, an enum gives you a fresh type. We want onnx::MatMul to +// be type Symbol, not some random enum type! +// - Therefore, after using enums to generate the sequence of integers, +// we then declare constexpr Symbols to get everything the actual Symbol +// type we want. Symbols must be constexpr to be valid to be "case"ed on. + +using unique_t = uint32_t; + +const std::string& domain_prefix(); + +// A Symbol is like an interned string, but with a little extra +// structure; it is namespaced via SymbolNamespace and the resulting +// intern pointers support efficient namespace testing. 
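[Editor's note: a sketch of pack() with a tuple, per the TuplePacker recursion above: elements are moved onto the stack in index order, so the last tuple element ends up on top.]

#include <ATen/core/stack.h>
#include <tuple>

void pack_demo() {
  torch::jit::Stack stack;
  torch::jit::pack(stack, std::make_tuple(int64_t(1), 2.5));
  // stack is now [1, 2.5]; stack.back().toDouble() == 2.5.
}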
+struct TORCH_API Symbol { + explicit constexpr Symbol() : value(0) {}; + explicit constexpr Symbol(unique_t uniq) + : value(uniq) {} + + // Get a Symbol for a qualified string like "attr::bar" + static Symbol fromQualString(const std::string & s); + + // Get a Symbol from a domain and an unqualified string like "org.pytorch.attr" and "bar" + static Symbol fromDomainAndUnqualString(const std::string & d, const std::string & s); + + // Constructors for our various namespaced strings. This will construct + // the appropriate namespaced string, e.g., "attr::foo" for the + // argument "foo", and then attempt to intern it. DO NOT USE THIS + // with a string literal; attr::foo should be available in that case + // (and if it's not, you should add it to the built-ins list above.) + static Symbol attr(const std::string & s); + static Symbol aten(const std::string & s); + static Symbol cuda(const std::string & s); + static Symbol onnx(const std::string & s); + static Symbol prim(const std::string & s); + static Symbol user(const std::string & s); + static Symbol caffe2(const std::string & s); + static Symbol dimname(const std::string & s); + // TODO: eliminate me + static Symbol scope(const std::string & s); + + bool is_attr() const; + bool is_aten() const; + bool is_cuda() const; + bool is_prim() const; + bool is_prims() const; + bool is_nvprims() const; + bool is_onnx() const; + bool is_user() const; + bool is_caffe2() const; + bool is_dimname() const; + + // So we can switch on this + constexpr operator unique_t() const { + return value; + } + + Symbol ns() const; + + // Give a string corresponding to the unqualified version of this name, e.g., + // "mm". Use this in a context where the intended namespace of the string is + // obvious; this is a *lossy* conversion. + const char * toUnqualString() const; + + // Give a string corresponding to the qualified version of this name, + // e.g., "aten::mm". This string format is made available to Python bindings + // (so we know how to parse it.) + const char * toQualString() const; + + // This describes a symbol in a case where humans read it. At the moment it's + // the same as toQualString. This has to be a const char* returned because + // a lot of printf style macros use it. + const char * toDisplayString() const; + + // Give a string corresponding to the domain name for the symbol, + // e.g., "org.pytorch.aten". 
+ std::string domainString() const; + +private: + + explicit Symbol(Symbol ns, const std::string & s); + unique_t value; +}; + +static inline bool operator==(Symbol lhs, Symbol rhs) { + return static_cast(lhs) == static_cast(rhs); +} + +inline Symbol Symbol::attr(const std::string & s) { return Symbol::fromQualString("attr::" + s); } +inline Symbol Symbol::aten(const std::string & s) { return Symbol::fromQualString("aten::" + s); } +inline Symbol Symbol::cuda(const std::string & s) { return Symbol::fromQualString("cuda::" + s); } +inline Symbol Symbol::onnx(const std::string & s) { return Symbol::fromQualString("onnx::" + s); } +inline Symbol Symbol::prim(const std::string & s) { return Symbol::fromQualString("prim::" + s); } +inline Symbol Symbol::scope(const std::string & s) { return Symbol::fromQualString("scope::" + s); } +inline Symbol Symbol::user(const std::string & s) { return Symbol::fromQualString("user::" + s); } +inline Symbol Symbol::caffe2(const std::string & s) { return Symbol::fromQualString("_caffe2::" + s); } +inline Symbol Symbol::dimname(const std::string & s) { return Symbol::fromQualString("dimname::" + s); } + +} // namespace c10 + +// make symbol behave like an integer in hash tables +namespace std { +template <> +struct hash { + size_t operator()(c10::Symbol s) const { + return std::hash()(static_cast(s)); + } +}; +} diff --git a/voice_bridge/torch/include/ATen/core/type_factory.h b/voice_bridge/torch/include/ATen/core/type_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..5718f79efff2a516fb8f9f9dfba782c0ca9c259b --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/type_factory.h @@ -0,0 +1,108 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace c10 { + +template +struct TORCH_API TypeFactoryBase {}; + +template <> +struct TORCH_API TypeFactoryBase { + template + static c10::DynamicTypePtr create(TypePtr ty, Args&&... args) { + return std::make_shared( + c10::DynamicTypeTrait::tagValue(), + c10::DynamicType::Arguments(c10::ArrayRef( + {std::move(ty), std::forward(args)...}))); + } + template + static c10::DynamicTypePtr create(std::vector types) { + return std::make_shared( + c10::DynamicTypeTrait::tagValue(), + c10::DynamicType::Arguments(types)); + } + static c10::DynamicTypePtr createNamedTuple( + const std::string& name, + const std::vector& fields, + const std::vector& types) { + return std::make_shared( + c10::DynamicType::Tag::Tuple, + name, + c10::DynamicType::Arguments(fields, types)); + } + template + C10_ERASE static c10::DynamicTypePtr createNamed(const std::string& name) { + return std::make_shared( + c10::DynamicTypeTrait::tagValue(), + name, + c10::DynamicType::Arguments{}); + } + template + C10_ERASE static c10::DynamicTypePtr get() { + return DynamicTypeTrait::getBaseType(); + } + static const std::unordered_map& basePythonTypes(); +}; + +using DynamicTypeFactory = TypeFactoryBase; + +// Helper functions for constructing DynamicTypes inline. +template < + typename T, + std::enable_if_t::isBaseType, int> = 0> +C10_ERASE DynamicTypePtr dynT() { + return DynamicTypeFactory::get(); +} + +template < + typename T, + typename... Args, + std::enable_if_t::isBaseType, int> = 0> +C10_ERASE DynamicTypePtr dynT(Args&&... args) { + return DynamicTypeFactory::create(std::forward(args)...); +} + +template <> +struct TORCH_API TypeFactoryBase { + template + static c10::TypePtr create(TypePtr ty, Args&&... 
args) { + return T::create(std::move(ty), std::forward(args)...); + } + template + static c10::TypePtr create(std::vector types) { + return T::create(std::move(types)); + } + static c10::TypePtr createNamedTuple( + const std::string& name, + const std::vector& fields, + const std::vector& types); + template + C10_ERASE static c10::TypePtr createNamed(const std::string& name) { + return T::create(name); + } + static const std::unordered_map& basePythonTypes(); + template + C10_ERASE static c10::TypePtr get() { + return T::get(); + } +}; + +using DefaultTypeFactory = TypeFactoryBase; + +using PlatformType = +#ifdef C10_MOBILE + c10::DynamicType +#else + c10::Type +#endif + ; + +using TypeFactory = TypeFactoryBase; + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/type_ptr.h b/voice_bridge/torch/include/ATen/core/type_ptr.h new file mode 100644 index 0000000000000000000000000000000000000000..cfe7d8dac2515665fb655d4b5df44b99047d8e4d --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/type_ptr.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include + +#include +#include + +namespace c10 { + +// Compatibility wrapper around a raw pointer so that existing code +// written to deal with a shared_ptr can keep working. +template +class SingletonTypePtr { + public: + /* implicit */ SingletonTypePtr(T* p) : repr_(p) {} + + // We need this to satisfy Pybind11, but it shouldn't be hit. + explicit SingletonTypePtr(std::shared_ptr) { TORCH_CHECK(false); } + + using element_type = typename std::shared_ptr::element_type; + + template , void>::value, bool> = true> + T& operator*() const { + return *repr_; + } + + T* get() const { + return repr_; + } + + T* operator->() const { + return repr_; + } + + operator bool() const { + return repr_ != nullptr; + } + + private: + T* repr_; +}; + +template +bool operator==(SingletonTypePtr lhs, SingletonTypePtr rhs) { + return (void*)lhs.get() == (void*)rhs.get(); +} + +template +bool operator!=(SingletonTypePtr lhs, SingletonTypePtr rhs) { + return !(lhs == rhs); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/core/typeid.h b/voice_bridge/torch/include/ATen/core/typeid.h new file mode 100644 index 0000000000000000000000000000000000000000..5967c0a1659aadd9225d3f16f13275879c8bdbc9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/core/typeid.h @@ -0,0 +1 @@ +#include diff --git a/voice_bridge/torch/include/ATen/cpp_custom_type_hack.h b/voice_bridge/torch/include/ATen/cpp_custom_type_hack.h new file mode 100644 index 0000000000000000000000000000000000000000..ff5310b809684eeb3fb22f035162afd6f3d91fb5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpp_custom_type_hack.h @@ -0,0 +1,106 @@ +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP 
STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP + +// YOU ARE IN THE WRONG PLACE! TURN BACK NOW! + +// This code was a temporary hack to enable embedding arbitrary C++ structures +// into Tensors. THIS IS UNSAFE AND IS NOT SUPPORTED. IF YOU USE THIS CODE, +// IT __WILL__ BREAK. + +// This code has been superseded by custom classes: +// https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html + +// Please use custom classes and **DO NOT ADD MORE CALLSITES TO THINGS DEFINED +// IN THIS FILE**. + +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP +// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP + +#include +#include + +namespace at { +namespace cpp_custom_type_hack { + +template +[[deprecated( + "Use custom classes instead: " + "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] bool +isa(const Tensor& packed) { + return (packed.scalar_type() == kByte) && + (packed.storage().data_ptr().get_deleter() == + caffe2::TypeMeta::Make().deleteFn()); +} + +template +[[deprecated( + "Use custom classes instead: " + "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] T& +cast(const Tensor& packed) { + TORCH_CHECK( + packed.scalar_type() == kByte, "Expected temporary cpp type wrapper"); + TORCH_CHECK( + packed.storage().data_ptr().get_deleter() == + caffe2::TypeMeta::Make().deleteFn(), + "Expected temporary cpp type wrapper of type ", + caffe2::TypeMeta::TypeName()); + return *reinterpret_cast(packed.storage().data_ptr().get()); +} + +template +[[deprecated( + "Use custom classes instead: " + "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] Tensor +create(std::unique_ptr ptr, TensorOptions options) { + 
// None of this should trace, so turn off Tracer dispatching + at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove + at::tracer::impl::NoTracerDispatchMode tracer_guard; + + // We store this instance away in a Tensor and register a deleter function + // so that we do not leak memory. On the other side, we pull out the storage's + // data_ptr and get the right typed pointer. + void* raw_ptr = ptr.release(); + at::DataPtr at_ptr( + raw_ptr, raw_ptr, caffe2::TypeMeta::Make().deleteFn(), at::kCPU); + + // size doesn't really matter, but we can align it to the actual size + // returning variables because one likely want to use this hack from python + auto retval = at::empty({sizeof(T)}, options.device(kCPU).dtype(at::kByte)); + retval.storage().set_data_ptr_noswap(std::move(at_ptr)); + return retval; +} + +} // namespace cpp_custom_type_hack +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cpu/FlushDenormal.h b/voice_bridge/torch/include/ATen/cpu/FlushDenormal.h new file mode 100644 index 0000000000000000000000000000000000000000..ca6820fc638696d83bce526bdab73ab9bbdee070 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/FlushDenormal.h @@ -0,0 +1,14 @@ +/// Flush-To-Zero and Denormals-Are-Zero mode +/// +/// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass +/// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64 +/// and some x86 CPUs. They result in reduced precision for values near zero, +/// but increased performance. +/// +/// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz + +namespace at { namespace cpu { + +bool set_flush_denormal(bool on); + +}} // namespace at::cpu diff --git a/voice_bridge/torch/include/ATen/cpu/vec/functional.h b/voice_bridge/torch/include/ATen/cpu/vec/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..388b3170d5b55a8c4bdd3af4ff982397fb323cb6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/functional.h @@ -0,0 +1,4 @@ +#pragma once + +#include +#include diff --git a/voice_bridge/torch/include/ATen/cpu/vec/functional_base.h b/voice_bridge/torch/include/ATen/cpu/vec/functional_base.h new file mode 100644 index 0000000000000000000000000000000000000000..44d39028b990d1817a6ade73efea950c58d48dca --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/functional_base.h @@ -0,0 +1,320 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
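[Editor's note: a minimal sketch for at::cpu::set_flush_denormal from FlushDenormal.h above; the return-value convention (true when the mode could be enabled) is an assumption based on the header's comment, not stated in the patch.]

#include <ATen/cpu/FlushDenormal.h>

void ftz_demo() {
  // Enable FTZ/DAZ: denormal values near zero are flushed to zero,
  // trading precision for speed on supported x86 CPUs.
  bool enabled = at::cpu::set_flush_denormal(true);
  (void)enabled;
}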
+// See Note [Do not compile initializers with AVX] + +#include +#include + +namespace at { namespace vec { + +// slow path +template +inline scalar_t vec_reduce_all( + const Op& vec_fun, + vec::Vectorized acc_vec, + int64_t size) { + using Vec = vec::Vectorized; + scalar_t acc_arr[Vec::size()]; + acc_vec.store(acc_arr); + for (const auto i : c10::irange(1, size)) { + std::array acc_arr_next = {0}; + acc_arr_next[0] = acc_arr[i]; + Vec acc_vec_next = Vec::loadu(acc_arr_next.data()); + acc_vec = vec_fun(acc_vec, acc_vec_next); + } + acc_vec.store(acc_arr); + return acc_arr[0]; +} + +template +struct VecReduceAllSIMD { + static inline scalar_t apply(const Op& vec_fun, Vectorized acc_vec) { + return vec_reduce_all(vec_fun, acc_vec, Vectorized::size()); + } +}; + +#if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) +#if defined(CPU_CAPABILITY_AVX2) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, Vectorized acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + // 128-bit shuffle + Vec v1 = _mm256_permute2f128_ps(v, v, 0x1); + v = vec_fun(v, v1); + // 64-bit shuffle + v1 = _mm256_shuffle_ps(v, v, 0x4E); + v = vec_fun(v, v1); + // 32-bit shuffle + v1 = _mm256_shuffle_ps(v, v, 0xB1); + v = vec_fun(v, v1); + return _mm256_cvtss_f32(v); + } +}; +#endif // defined(CPU_CAPABILITY_AVX2) +#if defined(CPU_CAPABILITY_AVX512) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, Vectorized acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + // 256-bit shuffle + Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E); + v = vec_fun(v, v1); + // 128-bit shuffle + v1 = _mm512_shuffle_f32x4(v, v, 0xB1); + v = vec_fun(v, v1); + // 64-bit shuffle + v1 = _mm512_shuffle_ps(v, v, 0x4E); + v = vec_fun(v, v1); + // 32-bit shuffle + v1 = _mm512_shuffle_ps(v, v, 0xB1); + v = vec_fun(v, v1); + return _mm512_cvtss_f32(v); + } +}; +#endif // defined(CPU_CAPABILITY_AVX512) +#endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) + +template +inline scalar_t vec_reduce_all(const Op& vec_fun, Vectorized acc_vec) { + return VecReduceAllSIMD::apply(vec_fun, acc_vec); +} + +template +inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) + return vec_reduce_all(vec_fun, Vec::loadu(data, size), size); + int64_t d = Vec::size(); + Vec acc_vec = Vec::loadu(data); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + acc_vec = vec_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(vec_fun, acc_vec); +} + +// similar to reduce_all, but reduces into two outputs +template +inline std::pair reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, + const scalar_t* data, int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + auto loaded_data = Vec::loadu(data, size); + return std::pair( + vec_reduce_all(vec_fun1, loaded_data, size), + vec_reduce_all(vec_fun2, loaded_data, size)); + } + int64_t d = Vec::size(); + Vec acc_vec1 = Vec::loadu(data); + Vec acc_vec2 = Vec::loadu(data); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + acc_vec1 = vec_fun1(acc_vec1, data_vec); + acc_vec2 = vec_fun2(acc_vec2, data_vec); + } + if (size - d > 0) { + Vec data_vec = 
Vec::loadu(data + d, size - d); + acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d); + acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d); + } + return std::pair( + vec_reduce_all(vec_fun1, acc_vec1), + vec_reduce_all(vec_fun2, acc_vec2)); +} + +template +inline scalar_t map_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) + return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size); + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + data_vec = map_fun(data_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + data_vec = map_fun(data_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template +inline scalar_t map2_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + Vec data_vec = Vec::loadu(data, size); + Vec data2_vec = Vec::loadu(data2, size); + data_vec = map_fun(data_vec, data2_vec); + return vec_reduce_all(red_fun, data_vec, size); + } + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + Vec data2_vec = Vec::loadu(data2 + d); + data_vec = map_fun(data_vec, data2_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + Vec data2_vec = Vec::loadu(data2 + d, size - d); + data_vec = map_fun(data_vec, data2_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template +inline scalar_t map3_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + const scalar_t* data3, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + Vec data_vec = Vec::loadu(data, size); + Vec data2_vec = Vec::loadu(data2, size); + Vec data3_vec = Vec::loadu(data3, size); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + return vec_reduce_all(red_fun, data_vec, size); + } + + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + Vec data2_vec = Vec::loadu(data2 + d); + Vec data3_vec = Vec::loadu(data3 + d); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + Vec data2_vec = Vec::loadu(data2 + d, size - d); + Vec data3_vec = Vec::loadu(data3 + d, size - d); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template +inline void map( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec output_vec = vec_fun(Vec::loadu(input_data + d)); + 
output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d)); + output_vec.store(output_data + d, size - d); + } +} + +template +inline void map2( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + const scalar_t* input_data2, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(input_data + d); + Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec output_vec = vec_fun(data_vec, data_vec2); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(input_data + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec output_vec = vec_fun(data_vec, data_vec2); + output_vec.store(output_data + d, size - d); + } +} + +template +inline void map3( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec1 = Vec::loadu(input_data1 + d); + Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec data_vec3 = Vec::loadu(input_data3 + d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); + output_vec.store(output_data + d, size - d); + } +} + +template +inline void map4( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + const scalar_t* input_data4, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec1 = Vec::loadu(input_data1 + d); + Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec data_vec3 = Vec::loadu(input_data3 + d); + Vec data_vec4 = Vec::loadu(input_data4 + d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); + Vec data_vec4 = Vec::loadu(input_data4 + d, size - d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); + output_vec.store(output_data + d, size - d); + } +} + +}} // namespace at::vec diff --git a/voice_bridge/torch/include/ATen/cpu/vec/functional_bfloat16.h b/voice_bridge/torch/include/ATen/cpu/vec/functional_bfloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..acb77ccaa491d43dfe06f414ab47ca6818e98837 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/functional_bfloat16.h @@ -0,0 +1,500 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
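[Editor's note: a usage sketch for at::vec::map from functional_base.h above, assuming float data; it processes full Vec::size() lanes per iteration and finishes with one masked tail load/store, exactly as map() is written.]

#include <ATen/cpu/vec/functional.h>
#include <vector>

void vec_map_demo() {
  std::vector<float> x(100, 1.5f), y(100, 0.0f);
  using Vec = at::vec::Vectorized<float>;
  // y[i] = 2 * x[i], vectorized across the whole buffer.
  at::vec::map([](Vec v) { return v * Vec(2.0f); },
               y.data(), x.data(), static_cast<int64_t>(x.size()));
}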
+// See Note [Do not compile initializers with AVX] + +#include + +namespace at { namespace vec { + +// BFloat16 specification +template struct VecScalarType { using type = scalar_t; }; +template <> struct VecScalarType { using type = float; }; + +// This is different from at::acc_type since we only need to specialize BFloat16 +template +using vec_scalar_t = typename VecScalarType::type; + +// Note that we already have specialized member of Vectorized for BFloat16 +// so the following functions would run smoothly: +// using Vec = Vectorized; +// Vec one = Vec(BFloat16(1)); +// vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N); +// +// Then why we still need to specialize "funtional"? +// If we do specialization at Vectorized<> level, the above example would need 3 pairs of +// conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/". +// If we do specialization at vec::map<>() level, we have only 1 pair of conversion +// of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only. +// +// The following BFloat16 functionality will only do data type conversion for input +// and output vector (reduce functionality will only convert the final scalar back to bf16). +// Compared to Vectorized<> specialization, +// 1. better performance since we have less data type conversion; +// 2. less rounding error since immediate results are kept in fp32; +// 3. accumulation done on data type of fp32. +// +// If you plan to extend this file, please ensure adding unit tests at +// aten/src/ATen/test/vec_test_all_types.cpp +// +template +inline BFloat16 reduce_all(const Op& vec_fun, const BFloat16* data, int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + if (size > fVec::size()) { + data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(vec_fun, data_fvec0, fVec::size()); + } else { + return vec_reduce_all(vec_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + fVec acc_fvec0, acc_fvec1; + std::tie(acc_fvec0, acc_fvec1) = convert_bfloat16_float(acc_bvec); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); + acc_fvec1 = vec_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + if (size - d > fVec::size()) { + acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(vec_fun, acc_fvec0); +} + +template +inline std::pair reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, + const BFloat16* data, int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + if (size > 
fVec::size()) { + fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size()); + fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size()); + return std::pair( + vec_reduce_all(vec_fun1, acc1_fvec, fVec::size()), + vec_reduce_all(vec_fun2, acc2_fvec, fVec::size())); + } else { + return std::pair( + vec_reduce_all(vec_fun1, data_fvec0, size), + vec_reduce_all(vec_fun2, data_fvec0, size)); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + fVec acc1_fvec0, acc1_fvec1; + std::tie(acc1_fvec0, acc1_fvec1) = convert_bfloat16_float(acc_bvec); + fVec acc2_fvec0, acc2_fvec1; + std::tie(acc2_fvec0, acc2_fvec1) = convert_bfloat16_float(acc_bvec); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); + acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1); + acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); + acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + if (size - d > fVec::size()) { + acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); + acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size()); + acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); + acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size()); + } else { + acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d); + acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d); + } + } + acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1); + acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1); + return std::pair( + vec_reduce_all(vec_fun1, acc1_fvec0), + vec_reduce_all(vec_fun2, acc2_fvec0)); +} + +template +inline BFloat16 map_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const BFloat16* data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + fVec acc_fvec0, acc_fvec1; + std::tie(acc_fvec0, acc_fvec1) = convert_bfloat16_float(acc_bvec); + acc_fvec0 = map_fun(acc_fvec0); + acc_fvec1 = map_fun(acc_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = 
convert_bfloat16_float(data_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template +inline BFloat16 map2_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const BFloat16* data, + const BFloat16* data2, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2, size); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + fVec acc_fvec0, acc_fvec1; + std::tie(acc_fvec0, acc_fvec1) = convert_bfloat16_float(acc_bvec); + bVec acc2_bvec = bVec::loadu(data2); + fVec acc2_fvec0, acc2_fvec1; + std::tie(acc2_fvec0, acc2_fvec1) = convert_bfloat16_float(acc2_bvec); + acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0); + acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d, size - d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template +inline BFloat16 map3_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const BFloat16* data, + const BFloat16* data2, + const BFloat16* data3, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec 
data_bvec = bVec::loadu(data, size); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2, size); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3, size); + fVec data3_fvec0, data3_fvec1; + std::tie(data3_fvec0, data3_fvec1) = convert_bfloat16_float(data3_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + fVec acc_fvec0, acc_fvec1; + std::tie(acc_fvec0, acc_fvec1) = convert_bfloat16_float(acc_bvec); + bVec acc2_bvec = bVec::loadu(data2); + fVec acc2_fvec0, acc2_fvec1; + std::tie(acc2_fvec0, acc2_fvec1) = convert_bfloat16_float(acc2_bvec); + bVec acc3_bvec = bVec::loadu(data3); + fVec acc3_fvec0, acc3_fvec1; + std::tie(acc3_fvec0, acc3_fvec1) = convert_bfloat16_float(acc3_bvec); + acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0); + acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3 + d); + fVec data3_fvec0, data3_fvec1; + std::tie(data3_fvec0, data3_fvec1) = convert_bfloat16_float(data3_bvec); + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d, size - d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3 + d, size - d); + fVec data3_fvec0, data3_fvec1; + std::tie(data3_fvec0, data3_fvec1) = convert_bfloat16_float(data3_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template +inline void map( + const Op& vec_fun, + BFloat16* output_data, + const BFloat16* input_data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(input_data + d); + fVec 
data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(input_data + d, size - d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template +inline void map2( + const Op& vec_fun, + BFloat16* output_data, + const BFloat16* input_data, + const BFloat16* input_data2, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(input_data + d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); + bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(input_data + d, size - d); + fVec data_fvec0, data_fvec1; + std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); + bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template +inline void map3( + const Op& vec_fun, + BFloat16* output_data, + const BFloat16* input_data1, + const BFloat16* input_data2, + const BFloat16* input_data3, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data1_bvec = bVec::loadu(input_data1 + d); + fVec data1_fvec0, data1_fvec1; + std::tie(data1_fvec0, data1_fvec1) = convert_bfloat16_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d); + fVec data3_fvec0, data3_fvec1; + std::tie(data3_fvec0, data3_fvec1) = convert_bfloat16_float(data3_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1); + bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data1_bvec = bVec::loadu(input_data1 + d, size - d); + fVec data1_fvec0, data1_fvec1; + std::tie(data1_fvec0, data1_fvec1) = convert_bfloat16_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); + fVec data2_fvec0, data2_fvec1; + std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec); + bVec 
data3_bvec = bVec::loadu(input_data3 + d, size - d);
+    fVec data3_fvec0, data3_fvec1;
+    std::tie(data3_fvec0, data3_fvec1) = convert_bfloat16_float(data3_bvec);
+    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
+    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
+    bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1);
+    output_bvec.store(output_data + d, size - d);
+  }
+}
+
+template <typename Op>
+inline void map4(
+    const Op& vec_fun,
+    BFloat16* output_data,
+    const BFloat16* input_data1,
+    const BFloat16* input_data2,
+    const BFloat16* input_data3,
+    const BFloat16* input_data4,
+    int64_t size) {
+  using bVec = vec::Vectorized<BFloat16>;
+  using fVec = vec::Vectorized<float>;
+  int64_t d = 0;
+  for (; d < size - (size % bVec::size()); d += bVec::size()) {
+    bVec data1_bvec = bVec::loadu(input_data1 + d);
+    fVec data1_fvec0, data1_fvec1;
+    std::tie(data1_fvec0, data1_fvec1) = convert_bfloat16_float(data1_bvec);
+    bVec data2_bvec = bVec::loadu(input_data2 + d);
+    fVec data2_fvec0, data2_fvec1;
+    std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec);
+    bVec data3_bvec = bVec::loadu(input_data3 + d);
+    fVec data3_fvec0, data3_fvec1;
+    std::tie(data3_fvec0, data3_fvec1) = convert_bfloat16_float(data3_bvec);
+    bVec data4_bvec = bVec::loadu(input_data4 + d);
+    fVec data4_fvec0, data4_fvec1;
+    std::tie(data4_fvec0, data4_fvec1) = convert_bfloat16_float(data4_bvec);
+    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
+    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
+    bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1);
+    output_bvec.store(output_data + d);
+  }
+  if (size - d > 0) {
+    bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
+    fVec data1_fvec0, data1_fvec1;
+    std::tie(data1_fvec0, data1_fvec1) = convert_bfloat16_float(data1_bvec);
+    bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
+    fVec data2_fvec0, data2_fvec1;
+    std::tie(data2_fvec0, data2_fvec1) = convert_bfloat16_float(data2_bvec);
+    bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
+    fVec data3_fvec0, data3_fvec1;
+    std::tie(data3_fvec0, data3_fvec1) = convert_bfloat16_float(data3_bvec);
+    bVec data4_bvec = bVec::loadu(input_data4 + d, size - d);
+    fVec data4_fvec0, data4_fvec1;
+    std::tie(data4_fvec0, data4_fvec1) = convert_bfloat16_float(data4_bvec);
+    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
+    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
+    bVec output_bvec = convert_float_bfloat16(output_fvec0, output_fvec1);
+    output_bvec.store(output_data + d, size - d);
+  }
+}
+
+}} // namespace at::vec
diff --git a/voice_bridge/torch/include/ATen/cpu/vec/intrinsics.h b/voice_bridge/torch/include/ATen/cpu/vec/intrinsics.h
new file mode 100644
index 0000000000000000000000000000000000000000..a82a8ef1a69457d4800f6c3de277c82f61dfa03c
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cpu/vec/intrinsics.h
@@ -0,0 +1,43 @@
+#pragma once
+#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+/* GCC or clang-compatible compiler, targeting x86/x86-64 */
+#include <x86intrin.h>
+#elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
+/* Clang-compatible compiler, targeting arm neon */
+#include <arm_neon.h>
+#elif defined(_MSC_VER)
+/* Microsoft C/C++-compatible compiler */
+#include <intrin.h>
+#if _MSC_VER <= 1900
+#define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2))
+#define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4))
+#define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8))
+#define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16))
+#endif
+#elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
+/* GCC-compatible compiler, targeting ARM with NEON */
+#include <arm_neon.h>
+#if defined (MISSING_ARM_VLD1)
+#include <ATen/cpu/vec/vec256/missing_vld1_neon.h>
+#elif defined (MISSING_ARM_VST1)
+#include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
+#endif
+#elif defined(__GNUC__) && defined(__IWMMXT__)
+/* GCC-compatible compiler, targeting ARM with WMMX */
+#include <mmintrin.h>
+#elif defined(__s390x__)
+// targets Z/architecture
+// we will include vecintrin later
+#elif (defined(__GNUC__) || defined(__xlC__)) && \
+  (defined(__VEC__) || defined(__ALTIVEC__))
+/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
+#include <altivec.h>
+/* We need to undef those tokens defined by <altivec.h> to avoid conflicts
+   with the C++ types. => Can still use __bool/__vector */
+#undef bool
+#undef vector
+#undef pixel
+#elif defined(__GNUC__) && defined(__SPE__)
+/* GCC-compatible compiler, targeting PowerPC with SPE */
+#include <spe.h>
+#endif
diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec.h b/voice_bridge/torch/include/ATen/cpu/vec/vec.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d39142aad91c63f012f4bd318eeccd4b8717d56
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cpu/vec/vec.h
@@ -0,0 +1,36 @@
+#pragma once
+
+#if defined(CPU_CAPABILITY_AVX512)
+#include <ATen/cpu/vec/vec512/vec512.h>
+#else
+#include <ATen/cpu/vec/vec256/vec256.h>
+#endif
+
+namespace at {
+namespace vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
+  __at_align__ bool buffer[x.size()];
+  x.ne(Vectorized<int8_t>(0)).store(buffer);
+
+  Vectorized<bool> ret;
+  static_assert(x.size() == ret.size(), "");
+  std::memcpy(ret, buffer, ret.size() * sizeof(bool));
+  return ret;
+}
+
+template <>
+inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
+  // See NOTE [Loading boolean values]
+  return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
+}
+
+template <>
+inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
+  // See NOTE [Loading boolean values]
+  return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count));
+}
+
+}}} // namespace at::vec::CPU_CAPABILITY
diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h
new file mode 100644
index 0000000000000000000000000000000000000000..5540c8bc782faedbadb0794142580bad1207afc0
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h
@@ -0,0 +1,452 @@
+/* Workaround for missing vld1_*_x2 and vst1_*_x2 intrinsics in gcc-7.
*/ + +__extension__ extern __inline uint8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u8_x2 (const uint8_t *__a) +{ + uint8x8x2_t ret; + asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s8_x2 (const int8_t *__a) +{ + int8x8x2_t ret; + asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u16_x2 (const uint16_t *__a) +{ + uint16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s16_x2 (const int16_t *__a) +{ + int16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u32_x2 (const uint32_t *__a) +{ + uint32x2x2_t ret; + asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s32_x2 (const int32_t *__a) +{ + int32x2x2_t ret; + asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_u64_x2 (const uint64_t *__a) +{ + uint64x1x2_t ret; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_s64_x2 (const int64_t *__a) +{ + int64x1x2_t ret; + __builtin_aarch64_simd_oi __o; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f16_x2 (const float16_t *__a) +{ + float16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float32x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f32_x2 (const float32_t *__a) +{ + float32x2x2_t ret; + asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_f64_x2 (const float64_t *__a) +{ + float64x1x2_t ret; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly8x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p8_x2 (const poly8_t *__a) +{ + poly8x8x2_t ret; + asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly16x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p16_x2 (const poly16_t *__a) +{ + poly16x4x2_t ret; + asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly64x1x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1_p64_x2 (const poly64_t *__a) +{ 
+ poly64x1x2_t ret; + asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u8_x2 (const uint8_t *__a) +{ + uint8x16x2_t ret; + asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s8_x2 (const int8_t *__a) +{ + int8x16x2_t ret; + asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u16_x2 (const uint16_t *__a) +{ + uint16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s16_x2 (const int16_t *__a) +{ + int16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u32_x2 (const uint32_t *__a) +{ + uint32x4x2_t ret; + asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s32_x2 (const int32_t *__a) +{ + int32x4x2_t ret; + asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline uint64x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_u64_x2 (const uint64_t *__a) +{ + uint64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline int64x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_s64_x2 (const int64_t *__a) +{ + int64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f16_x2 (const float16_t *__a) +{ + float16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float32x4x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f32_x2 (const float32_t *__a) +{ + float32x4x2_t ret; + asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline float64x2x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_f64_x2 (const float64_t *__a) +{ + float64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly8x16x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p8_x2 (const poly8_t *__a) +{ + poly8x16x2_t ret; + asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly16x8x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p16_x2 (const poly16_t *__a) +{ + poly16x8x2_t ret; + asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +__extension__ extern __inline poly64x2x2_t 
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vld1q_p64_x2 (const poly64_t *__a) +{ + poly64x2x2_t ret; + asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); + return ret; +} + +/* vst1x2 */ + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s64_x2 (int64_t * __a, int64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u64_x2 (uint64_t * __a, uint64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f64_x2 (float64_t * __a, float64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s8_x2 (int8_t * __a, int8x8x2_t val) +{ + asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p8_x2 (poly8_t * __a, poly8x8x2_t val) +{ + asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s16_x2 (int16_t * __a, int16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p16_x2 (poly16_t * __a, poly16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_s32_x2 (int32_t * __a, int32x2x2_t val) +{ + asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u8_x2 (uint8_t * __a, uint8x8x2_t val) +{ + asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u16_x2 (uint16_t * __a, uint16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_u32_x2 (uint32_t * __a, uint32x2x2_t val) +{ + asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f16_x2 (float16_t * __a, float16x4x2_t val) +{ + asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_f32_x2 (float32_t * __a, float32x2x2_t val) +{ + asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1_p64_x2 (poly64_t * __a, poly64x1x2_t val) +{ + asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) +vst1q_s8_x2 (int8_t * __a, int8x16x2_t val) +{ + asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t val) +{ + asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s16_x2 (int16_t * __a, int16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s32_x2 (int32_t * __a, int32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_s64_x2 (int64_t * __a, int64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t val) +{ + asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f16_x2 (float16_t * __a, float16x8x2_t val) +{ + asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f32_x2 (float32_t * __a, float32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f64_x2 (float64_t * __a, float64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t val) +{ + asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); +} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..711d16f9b231f0de8ef7950de809337027b1b2ee --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h @@ -0,0 +1,8 @@ +/* 
Workaround for missing vst1q_f32_x2 in gcc-8. */ + +__extension__ extern __inline void +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vst1q_f32_x2 (float32_t * __a, float32x4x2_t val) +{ + asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); +} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256.h new file mode 100644 index 0000000000000000000000000000000000000000..98ec588137ce33c1ac05e376f1ae37db4f3d9a02 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256.h @@ -0,0 +1,227 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include + +#include +#if !(defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_ZVECTOR)) +#include +#include +#include +#include +#include +#include +#include +#include +#elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX) +#include +#else +#include +#include +#endif + +#include +#include +#include +#include +#include + +namespace at { +namespace vec { + +// Note [CPU_CAPABILITY namespace] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// This header, and all of its subheaders, will be compiled with +// different architecture flags for each supported set of vector +// intrinsics. So we need to make sure they aren't inadvertently +// linked together. We do this by declaring objects in an `inline +// namespace` which changes the name mangling, but can still be +// accessed as `at::vec`. +inline namespace CPU_CAPABILITY { + +inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { + stream << val.val_; + return stream; +} +inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { + stream << static_cast(val.val_); + return stream; +} +inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { + stream << static_cast(val.val_); + return stream; +} + +template +std::ostream& operator<<(std::ostream& stream, const Vectorized& vec) { + T buf[Vectorized::size()]; + vec.store(buf); + stream << "vec["; + for (int i = 0; i != Vectorized::size(); i++) { + if (i != 0) { + stream << ", "; + } + stream << buf[i]; + } + stream << "]"; + return stream; +} + + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm256_castpd_ps(src); +} + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm256_castps_pd(src); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +std::enable_if_t> +inline gather(const double* base_addr, const Vectorized& vindex) { + return _mm256_i64gather_pd(base_addr, vindex, scale); +} + +template +std::enable_if_t> +inline gather(const float* base_addr, const Vectorized& vindex) { + return _mm256_i32gather_ps(base_addr, vindex, scale); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template +std::enable_if_t> +inline mask_gather(const Vectorized& src, const double* base_addr, + const Vectorized& vindex, const Vectorized& mask) { + return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale); +} + +template +std::enable_if_t> +inline mask_gather(const Vectorized& src, const float* base_addr, + const Vectorized& vindex, const Vectorized& mask) { + return _mm256_mask_i32gather_ps(src, base_addr, 
vindex, mask, scale); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +// Only works for inputs in the range: [-2^51, 2^51] +// From: https://stackoverflow.com/a/41148578 +template<> +Vectorized +inline convert_to_int_of_same_size(const Vectorized &src) { + auto x = _mm256_add_pd(src, _mm256_set1_pd(0x0018000000000000)); + return _mm256_sub_epi64( + _mm256_castpd_si256(x), + _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000)) + ); +} + +template<> +Vectorized +inline convert_to_int_of_same_size(const Vectorized &src) { + return _mm256_cvttps_epi32(src); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template <> +std::pair, Vectorized> +inline interleave2(const Vectorized& a, const Vectorized& b) { + // inputs: + // a = {a0, a1, a3, a3} + // b = {b0, b1, b2, b3} + + // swap lanes: + // a_swapped = {a0, a1, b0, b1} + // b_swapped = {a2, a3, b2, b3} + auto a_swapped = _mm256_permute2f128_pd(a, b, 0b0100000); // 0, 2. 4 bits apart + auto b_swapped = _mm256_permute2f128_pd(a, b, 0b0110001); // 1, 3. 4 bits apart + + // group cols crossing lanes: + // return {a0, b0, a1, b1} + // {a2, b2, a3, b3} + return std::make_pair(_mm256_permute4x64_pd(a_swapped, 0b11011000), // 0, 2, 1, 3 + _mm256_permute4x64_pd(b_swapped, 0b11011000)); // 0, 2, 1, 3 +} + +template <> +std::pair, Vectorized> +inline interleave2(const Vectorized& a, const Vectorized& b) { + // inputs: + // a = {a0, a1, a2, a3, a4, a5, a6, a7} + // b = {b0, b1, b2, b3, b4, b5, b6, b7} + + // swap lanes: + // a_swapped = {a0, a1, a2, a3, b0, b1, b2, b3} + // b_swapped = {a4, a5, a6, a7, b4, b5, b6, b7} + // TODO: can we support caching this? + auto a_swapped = _mm256_permute2f128_ps(a, b, 0b0100000); // 0, 2. 4 bits apart + auto b_swapped = _mm256_permute2f128_ps(a, b, 0b0110001); // 1, 3. 4 bits apart + + // group cols crossing lanes: + // return {a0, b0, a1, b1, a2, b2, a3, b3} + // {a4, b4, a5, b5, a6, b6, a7, b7} + const __m256i group_ctrl = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7); + return std::make_pair(_mm256_permutevar8x32_ps(a_swapped, group_ctrl), + _mm256_permutevar8x32_ps(b_swapped, group_ctrl)); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template <> +std::pair, Vectorized> +inline deinterleave2(const Vectorized& a, const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1} + // b = {a2, b2, a3, b3} + + // group cols crossing lanes: + // a_grouped = {a0, a1, b0, b1} + // b_grouped = {a2, a3, b2, b3} + auto a_grouped = _mm256_permute4x64_pd(a, 0b11011000); // 0, 2, 1, 3 + auto b_grouped = _mm256_permute4x64_pd(b, 0b11011000); // 0, 2, 1, 3 + + // swap lanes: + // return {a0, a1, a2, a3} + // {b0, b1, b2, b3} + return std::make_pair(_mm256_permute2f128_pd(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart + _mm256_permute2f128_pd(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart +} + +template <> +std::pair, Vectorized> +inline deinterleave2(const Vectorized& a, const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1, a2, b2, a3, b3} + // b = {a4, b4, a5, b5, a6, b6, a7, b7} + + // group cols crossing lanes: + // a_grouped = {a0, a1, a2, a3, b0, b1, b2, b3} + // b_grouped = {a4, a5, a6, a7, b4, b5, b6, b7} + // TODO: can we support caching this? 
+ const __m256i group_ctrl = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7); + auto a_grouped = _mm256_permutevar8x32_ps(a, group_ctrl); + auto b_grouped = _mm256_permutevar8x32_ps(b, group_ctrl); + + // swap lanes: + // return {a0, a1, a2, a3, a4, a5, a6, a7} + // {b0, b1, b2, b3, b4, b5, b6, b7} + return std::make_pair(_mm256_permute2f128_ps(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart + _mm256_permute2f128_ps(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart +} + +#endif // (defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..15d8ac269e3d2c292861c2357e0b8796674e5c32 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h @@ -0,0 +1,821 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +#include +#endif + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wignored-qualifiers" + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +static inline void cvtbf16_fp32(const __m128i& a, __m256& o) { + o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16)); +} + +static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) { + __m128i lo = _mm256_extractf128_si256(a, 0); + __m128i hi = _mm256_extractf128_si256(a, 1); + cvtbf16_fp32(lo, o1); + cvtbf16_fp32(hi, o2); +} +static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) { + __m256i lo = _mm256_castps_si256(a); + __m256i hi = _mm256_castps_si256(b); + __m256i nan = _mm256_set1_epi32(0xffff); + __m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q)); + __m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q)); + __m256i ones = _mm256_set1_epi32(0x1); + __m256i vec_bias = _mm256_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones); + auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_lo = _mm256_add_epi32(t_lo, vec_bias); + t_hi = _mm256_add_epi32(t_hi, vec_bias); + // input += rounding_bias; + t_lo = _mm256_add_epi32(t_lo, lo); + t_hi = _mm256_add_epi32(t_hi, hi); + // input = input >> 16; + t_lo = _mm256_srli_epi32(t_lo, 16); + t_hi = _mm256_srli_epi32(t_hi, 16); + // Check NaN before converting back to bf16 + t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo); + t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi); + + t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4] + return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11 01 10 00 +} + +static inline __m256i merge_compare_result(const __m256& a, const __m256& b) { + __m256i lo = _mm256_castps_si256(a); + __m256i hi = _mm256_castps_si256(b); + lo = _mm256_srli_epi32(lo, 16); + hi = _mm256_srli_epi32(hi, 16); + auto out = _mm256_packus_epi32(lo, hi); + return _mm256_permute4x64_epi64(out, 0xd8); +} + +template <> class Vectorized { +private: + __m256i values; +public: + using value_type = uint16_t; + using size_type = int; + static constexpr size_type size() { + return 16; + } + Vectorized() {} + Vectorized(__m256i v) : values(v) {} + 
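+  // Editor's note (illustration only, not upstream code): each lane holds the
+  // raw bf16 bit pattern, i.e. the upper 16 bits of the equivalent fp32 value.
+  // For example 1.0f = 0x3F800000 is stored as 0x3F80, and cvtfp32_bf16 above
+  // rounds to nearest-even: bits 0x3F818000 (exactly halfway between 0x3F81
+  // and 0x3F82) round up to the even pattern 0x3F82 via the 0x7FFF + lsb bias.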
Vectorized(BFloat16 val) { + value_type uw = val.x; + values = _mm256_set1_epi16(uw); + } + Vectorized(BFloat16 val1, BFloat16 val2, BFloat16 val3, BFloat16 val4, + BFloat16 val5, BFloat16 val6, BFloat16 val7, BFloat16 val8, + BFloat16 val9, BFloat16 val10, BFloat16 val11, BFloat16 val12, + BFloat16 val13, BFloat16 val14, BFloat16 val15, BFloat16 val16) { + values = _mm256_setr_epi16( + val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x, + val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x); + } + operator __m256i() const { + return values; + } + BFloat16& operator[](int idx) = delete; + const BFloat16& operator[](int idx) const = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0)); + return _mm256_movemask_epi8(cmp); + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int16_t count) { + __at_align__ int16_t tmp_values[size()]; + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int16_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi16(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi16(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi16(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi16(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi16(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi16(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi16(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi16(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi16(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi16(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi16(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi16(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi16(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi16(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi16(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi16(b.values, 15); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, + const Vectorized& b, const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(BFloat16 base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized set(const Vectorized& a, + const Vectorized& b, int64_t count = 
size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + Vectorized map(const __m256 (*const vop)(__m256)) const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + const auto o1 = vop(lo); + const auto o2 = vop(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized abs() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + const auto mask = _mm256_set1_ps(-0.f); + const auto o1 = _mm256_andnot_ps(mask, lo); + const auto o2 = _mm256_andnot_ps(mask, hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized angle() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto angle_lambda = [](__m256 values) { + const auto zero_vec = _mm256_set1_ps(0.f); + const auto nan_vec = _mm256_set1_ps(NAN); + const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_ps(c10::pi); + + const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask); + angle = _mm256_blendv_ps(angle, nan_vec, nan_mask); + return angle; + }; + auto o1 = angle_lambda(lo); + auto o2 = angle_lambda(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return map(Sleef_acosf8_u10); + } + Vectorized asin() const { + return map(Sleef_asinf8_u10); + } + Vectorized atan() const { + return map(Sleef_atanf8_u10); + } + Vectorized atan2(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(b.values, b1, b2); + auto o1 = Sleef_atan2f8_u10(lo, b1); + auto o2 = Sleef_atan2f8_u10(hi, b2); + return cvtfp32_bf16(o1, o2); + } + Vectorized copysign(const Vectorized &sign) const { + // copy sign bit (0x8000) from sign and remaining bits from values + __m256i mask_value = _mm256_set1_epi32(~0x80008000); + __m256i mask_signbit = _mm256_set1_epi32(0x80008000); + return Vectorized( + _mm256_or_si256( + _mm256_and_si256(values, mask_value), + _mm256_and_si256(sign, mask_signbit))); + } + Vectorized erf() const { + return map(Sleef_erff8_u10); + } + Vectorized erfc() const { + return map(Sleef_erfcf8_u15); + } + Vectorized erfinv() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_erfinv(tmp1[i]); + tmp2[i] = calc_erfinv(tmp2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized exp() const { + return map(Sleef_expf8_u10); + } + Vectorized expm1() const { + return map(Sleef_expm1f8_u10); + } + Vectorized fmod(const Vectorized & q) const { + __m256 x_lo, x_hi; + cvtbf16_fp32(values, x_lo, x_hi); 
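+    // Editor's note (illustration only, not upstream code): like most ops in
+    // this class, fmod widens to two fp32 vectors, computes there, and narrows
+    // back to bf16. Lane-wise it matches std::fmod and keeps the sign of x,
+    // e.g. fmod(5.5f, 2.0f) == 1.5f and fmod(-5.5f, 2.0f) == -1.5f.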
+ __m256 q_lo, q_hi; + cvtbf16_fp32(q.values, q_lo, q_hi); + auto o1 = Sleef_fmodf8(x_lo, q_lo); + auto o2 = Sleef_fmodf8(x_hi, q_hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized hypot(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(b.values, b1, b2); + auto o1 = Sleef_hypotf8_u05(lo, b1); + auto o2 = Sleef_hypotf8_u05(hi, b2); + return cvtfp32_bf16(o1, o2); + } + Vectorized i0() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_i0(tmp1[i]); + tmp2[i] = calc_i0(tmp2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized i0e() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + constexpr auto sz = size(); + __at_align__ float tmp1[sz / 2], tmp2[sz / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + + for (auto i = decltype(sz){0}; i < sz / 2; i++) { + tmp1[i] = calc_i0e(tmp1[i]); + tmp2[i] = calc_i0e(tmp2[i]); + } + const auto o1 = _mm256_loadu_ps(tmp1); + const auto o2 = _mm256_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized igamma(const Vectorized &x) const { + __m256 lo, hi; + __m256 xlo, xhi; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm256_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + + Vectorized igammac(const Vectorized &x) const { + __m256 lo, hi; + __m256 xlo, xhi; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm256_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized log() const { + return map(Sleef_logf8_u10); + } + Vectorized log2() const { + return map(Sleef_log2f8_u10); + } + Vectorized log10() const { + return map(Sleef_log10f8_u10); + } + Vectorized log1p() const { + return map(Sleef_log1pf8_u10); + } + Vectorized frac() const; + Vectorized sin() const { + return map(Sleef_sinf8_u10); + } + Vectorized sinh() const { + return map(Sleef_sinhf8_u10); + } + Vectorized cos() const { + return map(Sleef_cosf8_u10); + } + Vectorized cosh() const { + return map(Sleef_coshf8_u10); + } + Vectorized ceil() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm256_ceil_ps(lo); + auto o2 = _mm256_ceil_ps(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized floor() const { + __m256 lo, hi; + 
cvtbf16_fp32(values, lo, hi); + auto o1 = _mm256_floor_ps(lo); + auto o2 = _mm256_floor_ps(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized neg() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto mask = _mm256_set1_ps(-0.f); + auto o1 = _mm256_xor_ps(mask, lo); + auto o2 = _mm256_xor_ps(mask, hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized round() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return cvtfp32_bf16(o1, o2); + } + Vectorized tan() const { + return map(Sleef_tanf8_u10); + } + Vectorized tanh() const { + return map(Sleef_tanhf8_u10); + } + Vectorized trunc() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + return cvtfp32_bf16(o1, o2); + } + Vectorized lgamma() const { + return map(Sleef_lgammaf8_u10); + } + Vectorized sqrt() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm256_sqrt_ps(lo); + auto o2 = _mm256_sqrt_ps(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized reciprocal() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto ones = _mm256_set1_ps(1); + auto o1 = _mm256_div_ps(ones, lo); + auto o2 = _mm256_div_ps(ones, hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized rsqrt() const { + __m256 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto ones = _mm256_set1_ps(1); + auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo)); + auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi)); + return cvtfp32_bf16(o1, o2); + } + Vectorized pow(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(b.values, b1, b2); + auto o1 = Sleef_powf8_u10(lo, b1); + auto o2 = Sleef_powf8_u10(hi, b2); + return cvtfp32_bf16(o1, o2); + } + + Vectorized inline operator>(const Vectorized& other) const; + Vectorized inline operator<(const Vectorized& other) const; + Vectorized inline operator>=(const Vectorized& other) const; + Vectorized inline operator<=(const Vectorized& other) const; + Vectorized inline operator==(const Vectorized& other) const; + Vectorized inline operator!=(const Vectorized& other) const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template +Vectorized static inline bfloat16_binary_op_as_fp32(const Vectorized& a, const Vectorized& b, Op op) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvtfp32_bf16(o1, o2); +} + +template +Vectorized static inline bfloat16_compare_as_fp32(const Vectorized& a, const Vectorized& b, Op op) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return merge_compare_result(o1, o2); +} + +Vectorized inline Vectorized::operator>(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m256 x, __m256 y) { + return _mm256_cmp_ps(x, y, _CMP_GT_OQ); + }); +} +Vectorized inline 
Vectorized::operator<(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m256 x, __m256 y) { + return _mm256_cmp_ps(x, y, _CMP_LT_OQ); + }); +} +Vectorized inline Vectorized::operator>=(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m256 x, __m256 y) { + return _mm256_cmp_ps(x, y, _CMP_GE_OQ); + }); +} +Vectorized inline Vectorized::operator<=(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m256 x, __m256 y) { + return _mm256_cmp_ps(x, y, _CMP_LE_OQ); + }); +} +Vectorized inline Vectorized::operator==(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m256 x, __m256 y) { + return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); + }); +} +Vectorized inline Vectorized::operator!=(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m256 x, __m256 y) { + return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); + }); +} + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return bfloat16_binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return bfloat16_binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return bfloat16_binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); }); +} +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return bfloat16_binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); }); +} + +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_si256(a, b); +} +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_si256(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto max_lo = _mm256_max_ps(a_lo, b_lo); + auto max_hi = _mm256_max_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
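+  // Editor's note (illustration only, not upstream code): _CMP_UNORD_Q sets a
+  // lane to all-ones (0xFFFFFFFF) iff either input lane is NaN, else to zero.
+  // OR-ing all-ones into the max yields the all-ones bit pattern, which is a
+  // NaN (exponent all ones, mantissa non-zero), while OR-ing zero is a no-op.
+  // That is how NaNs propagate here, e.g. maximum(NaN, 1.0f) == NaN.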
+ auto o1 = _mm256_or_ps(max_lo, nan_lo); + auto o2 = _mm256_or_ps(max_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto min_lo = _mm256_min_ps(a_lo, b_lo); + auto min_hi = _mm256_min_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + auto o1 = _mm256_or_ps(min_lo, nan_lo); + auto o2 = _mm256_or_ps(min_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + __m256 max_lo, max_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(min), min_lo, min_hi); + cvtbf16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo)); + auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi)); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 max_lo, max_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, a_lo); + auto o2 = _mm256_min_ps(max_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(min), min_lo, min_hi); + auto o1 = _mm256_max_ps(min_lo, a_lo); + auto o2 = _mm256_max_ps(min_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, BFloat16* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = _mm256_loadu_ps(&src[i]); + __m256 b = _mm256_loadu_ps(&src[i + 8]); + + __m256i bf = cvtfp32_bf16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, BFloat16* dst, int64_t n) { + auto load_float = [](const double *src) -> __m256 { + // Load one float vector from an array of doubles + __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src)); + __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4)); + return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = load_float(&src[i]); + __m256 b = load_float(&src[i + 8]); + + __m256i bf = cvtfp32_bf16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const 
Vectorized& c) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + __m256 c_lo, c_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + cvtbf16_fp32(__m256i(c), c_lo, c_hi); + auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_bf16(o1, o2); +} + +inline std::tuple, Vectorized> convert_bfloat16_float(const Vectorized& a) { + __m256 o1, o2; + cvtbf16_fp32(__m256i(a), o1, o2); + return std::make_tuple(o1, o2); +} + +inline Vectorized convert_float_bfloat16(const Vectorized& a, const Vectorized& b) { + return cvtfp32_bf16(__m256(a), __m256(b)); +} + + +#else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +inline std::tuple, Vectorized> convert_bfloat16_float(const Vectorized& a) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float arr[K]; + __at_align__ BFloat16 arr2[K]; + a.store(arr2); + convert(arr2, arr, K); + return std::make_tuple( + Vectorized::loadu(arr), + Vectorized::loadu(arr + Vectorized::size())); +} + +inline Vectorized convert_float_bfloat16(const Vectorized& a, const Vectorized& b) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float arr[K]; + __at_align__ BFloat16 arr2[K]; + a.store(arr); + b.store(arr + Vectorized::size()); + convert(arr, arr2, K); + return Vectorized::loadu(arr2); +} + +#endif // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out) { + auto values = _mm_loadu_si128(reinterpret_cast(data)); + __m256 out_values; + cvtbf16_fp32(values, out_values); + out = out_values; +} + +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out1, Vectorized& out2) { + auto vec = Vectorized::loadu(data); + __m256 out1_values, out2_values; + cvtbf16_fp32(vec, out1_values, out2_values); + out1 = out1_values; + out2 = out2_values; +} +#else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out) { + __at_align__ float values[Vectorized::size()]; + for (const auto k : c10::irange(Vectorized::size())) { + values[k] = data[k]; + } + out = Vectorized::loadu(values); +} + +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out1, Vectorized& out2) { + load_fp32_from_bf16(data, out1); + data += Vectorized::size(); + load_fp32_from_bf16(data, out2); +} +#endif + +}}} + +#pragma GCC diagnostic pop diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h new file mode 100644 index 0000000000000000000000000000000000000000..487233bc3c4079f32ecbbcb024056fc73a358a60 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h @@ -0,0 +1,445 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
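A minimal scalar model of the widen/operate/narrow round trip every bfloat16 kernel in the header above relies on: bf16 is the top half of an IEEE-754 float, so widening is a 16-bit shift and narrowing is a round-to-nearest-even on bit 16 (the same rounding cvtfp32_bf16 performs with vector integer ops). The helper names here are illustrative, not part of the header.

    #include <cstdint>
    #include <cstring>

    // bf16 -> fp32: place the 16 payload bits in the high half of a float.
    static float bf16_to_fp32(uint16_t h) {
      uint32_t bits = static_cast<uint32_t>(h) << 16;
      float f;
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }

    // fp32 -> bf16: round to nearest even on the dropped 16 bits,
    // quieting NaNs instead of rounding them.
    static uint16_t fp32_to_bf16(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      if ((bits & 0x7FFFFFFFu) > 0x7F800000u) {
        return static_cast<uint16_t>((bits >> 16) | 0x0040u);  // quiet NaN
      }
      bits += 0x7FFFu + ((bits >> 16) & 1u);  // nearest-even rounding bias
      return static_cast<uint16_t>(bits >> 16);
    }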
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +template <> class Vectorized> { +private: + __m256d values; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 2; + } + Vectorized() {} + Vectorized(__m256d v) : values(v) {} + Vectorized(c10::complex val) { + double real_value = val.real(); + double imag_value = val.imag(); + values = _mm256_setr_pd(real_value, imag_value, + real_value, imag_value); + } + Vectorized(c10::complex val1, c10::complex val2) { + values = _mm256_setr_pd(val1.real(), val1.imag(), + val2.real(), val2.imag()); + } + operator __m256d() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + static_assert (mask > -1 && mask < 4, "Unexpected mask value"); + switch (mask) { + case 0: + return a; + case 1: + return _mm256_blend_pd(a.values, b.values, 0x03); + case 2: + return _mm256_blend_pd(a.values, b.values, 0x0c); + case 3: break; + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values); + return _mm256_blendv_pd(a.values, b.values, mask_); + + } + template + static Vectorized> arange(c10::complex base = 0., step_t step = static_cast(1)) { + return Vectorized>(base, + base + step); + } + static Vectorized> set(const Vectorized>& a, const Vectorized>& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_pd(reinterpret_cast(ptr)); + + __at_align__ double tmp_values[2*size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm256_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[2*size()]; + _mm256_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + __m256d abs_2_() const { + auto val_2 = _mm256_mul_pd(values, values); // a*a b*b + return _mm256_hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b + } + __m256d abs_() const { + return _mm256_sqrt_pd(abs_2_()); // abs abs + } + Vectorized> abs() const { + const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm256_and_pd(abs_(), real_mask); // abs 0 + } + __m256d angle_() const { + //angle = atan2(b/a) + auto b_a = _mm256_permute_pd(values, 0x05); // b a + return Sleef_atan2d4_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + auto angle = _mm256_permute_pd(angle_(), 0x05); // angle 90-angle + return _mm256_and_pd(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm256_setzero_pd(); + auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ); + auto abs_val = Vectorized(abs); + + auto div = values / abs_val.values; // x / abs(x) + + return blendv(div, zero, mask); + } + __m256d real_() const { + const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm256_and_pd(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m256d imag_() const { + const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF)); + return _mm256_and_pd(values, imag_mask); + } + Vectorized> imag() const { + return _mm256_permute_pd(imag_(), 0x05); //b a + } + __m256d conj_() const { + const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0); + return _mm256_xor_pd(values, sign_mask); // a -b + } + Vectorized> conj() const { + return conj_(); + } + Vectorized> log() const { + // Most trigonomic ops use the log() op to improve complex number performance. 
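+  // ([sketch] the asin and atan kernels below lean on this: they reduce to
+  //  logarithms via asin z = -i*ln(iz + sqrt(1 - z^2)) and
+  //  atan z = i/2 * ln((i + z)/(i - z)), so log()'s accuracy dominates both.)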
+ return map(std::log); + } + Vectorized> log2() const { + const __m256d log2_ = _mm256_set1_pd(std::log(2)); + return _mm256_div_pd(log(), log2_); + } + Vectorized> log10() const { + const __m256d log10_ = _mm256_set1_pd(std::log(10)); + return _mm256_div_pd(log(), log10_); + } + Vectorized> log1p() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m256d one = _mm256_set1_pd(1); + + auto conj = conj_(); + auto b_a = _mm256_permute_pd(conj, 0x05); //-b a + auto ab = _mm256_mul_pd(conj, b_a); //-ab -ab + auto im = _mm256_add_pd(ab, ab); //-2ab -2ab + + auto val_2 = _mm256_mul_pd(values, values); // a*a b*b + auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05)); // a*a-b*b b*b-a*a + re = _mm256_sub_pd(one, re); + + auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm256_add_pd(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj(); //-i*ln() + } + Vectorized> acos() const { + // acos(x) = pi/2 - asin(x) + constexpr auto pi_2d = c10::pi / 2; + const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0); + return _mm256_sub_pd(pi_2, asin()); + } + Vectorized> atan() const; + Vectorized> atan2(const Vectorized>&) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erf() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erfc() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expd4_u10(values); //exp(a) exp(b) + exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosd4_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05), + sin_cos.x, 0x0A); //cos(b) sin(b) + return _mm256_mul_pd(exp, cos_sin); + } + Vectorized> expm1() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm256_ceil_pd(values); + } + Vectorized> floor() const { + return _mm256_floor_pd(values); + } + Vectorized> hypot(const Vectorized> &) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> igamma(const Vectorized> &) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> igammac(const Vectorized> &) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> neg() const { + auto zero = _mm256_setzero_pd(); + return _mm256_sub_pd(zero, values); + } + Vectorized> nextafter(const Vectorized> &) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> round() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return 
sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ); + } + Vectorized> operator!=(const Vectorized>& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ); + } + Vectorized> operator<(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; + Vectorized> lt(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> le(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> gt(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> ge(const Vectorized>&) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, const Vectorized> &b) { + return _mm256_add_pd(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, const Vectorized> &b) { + return _mm256_sub_pd(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm256_mul_pd(a, b); //ac bd + + auto d_c = _mm256_permute_pd(b, 0x05); //d c + d_c = _mm256_xor_pd(sign_mask, d_c); //d -c + auto ad_bc = _mm256_mul_pd(a, d_c); //ad -bc + + auto ret = _mm256_hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() + //im = (bc - ad)/abs_2() + const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0); + auto ac_bd = _mm256_mul_pd(a, b); //ac bd + + auto d_c = _mm256_permute_pd(b, 0x05); //d c + d_c = _mm256_xor_pd(sign_mask, d_c); //-d c + auto ad_bc = _mm256_mul_pd(a, d_c); //-ad bc + + auto re_im = _mm256_hadd_pd(ac_bd, ad_bc);//ac + bd bc - ad + return _mm256_div_pd(re_im, b.abs_2_()); +} + +// reciprocal. Implement this here so we can use multiplication. 
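+// ([sketch] derivation: 1/(c + di) = (c - di)/((c + di)(c - di)) = (c - di)/(c^2 + d^2),
+//  so the kernel only has to flip the sign of the imaginary lane and divide
+//  by abs_2_().)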
+inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
+  //re + im*i = (a + bi)  / (c + di)
+  //re = (ac + bd)/abs_2() = c/abs_2()
+  //im = (bc - ad)/abs_2() = -d/abs_2()
+  const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
+  auto c_d = _mm256_xor_pd(sign_mask, values);    //c       -d
+  return _mm256_div_pd(c_d, abs_2_());
+}
+
+inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
+  // atan(x) = i/2 * ln((i + z)/(i - z))
+  const __m256d i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0);
+  const Vectorized i_half = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5);
+
+  auto sum = Vectorized(_mm256_add_pd(i, values));  // a        1+b
+  auto sub = Vectorized(_mm256_sub_pd(i, values));  // -a       1-b
+  auto ln = (sum/sub).log();                        // ln((i + z)/(i - z))
+  return i_half*ln;                                 // i/2*ln()
+}
+
+template <>
+Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
+  auto abs_a = a.abs_2_();
+  auto abs_b = b.abs_2_();
+  auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ);
+  auto max = _mm256_blendv_pd(a, b, mask);
+  // Exploit the fact that all-ones is a NaN.
+  auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
+  return _mm256_or_pd(max, isnan);
+}
+
+template <>
+Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
+  auto abs_a = a.abs_2_();
+  auto abs_b = b.abs_2_();
+  auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ);
+  auto min = _mm256_blendv_pd(a, b, mask);
+  // Exploit the fact that all-ones is a NaN.
+  auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
+  return _mm256_or_pd(min, isnan);
+}
+
+template <>
+Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
+  return _mm256_and_pd(a, b);
+}
+
+template <>
+Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
+  return _mm256_or_pd(a, b);
+}
+
+template <>
+Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
+  return _mm256_xor_pd(a, b);
+}
+
+inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
+  return (*this == other) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
+}
+
+inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
+  return (*this != other) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
+}
+
+#endif
+
+}}}
diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h
new file mode 100644
index 0000000000000000000000000000000000000000..4093022a7e34971150042755d150bcafaed32040
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h
@@ -0,0 +1,483 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
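The complex product kernels in this header and in the double variant above implement (a + bi)(c + di) = (ac - bd) + (ad + bc)i with one multiply, one sign-flipped permute, a second multiply, and a horizontal subtract. A scalar sketch of the same dataflow; the function name is illustrative.

    #include <utility>

    // Interleaved (re, im) pair, multiplied the way the AVX2 kernel is wired:
    // ac_bd holds {a*c, b*d}; ad_bc holds {a*d, -b*c}; the horizontal
    // subtract then yields {ac - bd, ad + bc}.
    static std::pair<float, float> complex_mul(float a, float b, float c, float d) {
      float ac = a * c, bd = b * d;     // _mm256_mul_ps(a, b)
      float ad = a * d, nbc = b * -c;   // multiply by the sign-flipped permute {d, -c}
      return {ac - bd, ad - nbc};       // hsub: {ac - bd, ad + bc}
    }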
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +template <> class Vectorized> { +private: + __m256 values; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m256 v) : values(v) {} + Vectorized(c10::complex val) { + float real_value = val.real(); + float imag_value = val.imag(); + values = _mm256_setr_ps(real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value + ); + } + Vectorized(c10::complex val1, c10::complex val2, c10::complex val3, c10::complex val4) { + values = _mm256_setr_ps(val1.real(), val1.imag(), + val2.real(), val2.imag(), + val3.real(), val3.imag(), + val4.real(), val4.imag() + ); + } + operator __m256() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + static_assert(mask > -1 && mask < 16, "Unexpected mask range"); + switch (mask) { + case 0: + return a; + case 1: + return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011 + case 2: + return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100 + case 3: + return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111 + case 4: + return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000 + case 5: + return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011 + case 6: + return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100 + case 7: + return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111 + case 8: + return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000 + case 9: + return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011 + case 10: + return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100 + case 11: + return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111 + case 12: + return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000 + case 13: + return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011 + case 14: + return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100 + default: break; + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values); + return _mm256_blendv_ps(a.values, b.values, mask_); + + } + template + static Vectorized> arange(c10::complex base = 0., step_t step = static_cast(1)) { + return Vectorized>(base, + base + step, + base + c10::complex(2)*step, + base + c10::complex(3)*step); + } + static Vectorized> set(const Vectorized>& a, const Vectorized>& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_ps(reinterpret_cast(ptr)); + + __at_align__ float 
tmp_values[2*size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm256_load_ps(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + float tmp_values[2*size()]; + _mm256_storeu_ps(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + __m256 abs_2_() const { + auto val_2 = _mm256_mul_ps(values, values); // a*a b*b + auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b + return _mm256_permute_ps(ret, 0xD8); + } + __m256 abs_() const { + return _mm256_sqrt_ps(abs_2_()); // abs abs + } + Vectorized> abs() const { + const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm256_and_ps(abs_(), real_mask); // abs 0 + } + __m256 angle_() const { + //angle = atan2(b/a) + auto b_a = _mm256_permute_ps(values, 0xB1); // b a + return Sleef_atan2f8_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle + return _mm256_and_ps(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm256_setzero_ps(); + auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ); + auto abs_val = Vectorized(abs); + + auto div = values / abs_val.values; // x / abs(x) + + return _mm256_blendv_ps(div, zero, mask); + } + __m256 real_() const { + const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm256_and_ps(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m256 imag_() const { + const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF)); + return _mm256_and_ps(values, imag_mask); + } + Vectorized> imag() const { + return _mm256_permute_ps(imag_(), 0xB1); //b a + } + __m256 conj_() const { + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + return _mm256_xor_ps(values, sign_mask); // a -b + } + Vectorized> conj() const { + return conj_(); + } + Vectorized> log() const { + // Most trigonomic ops use the log() op to improve complex number performance. 
+ return map(std::log); + } + Vectorized> log2() const { + const __m256 log2_ = _mm256_set1_ps(std::log(2)); + return _mm256_div_ps(log(), log2_); + } + Vectorized> log10() const { + const __m256 log10_ = _mm256_set1_ps(std::log(10)); + return _mm256_div_ps(log(), log10_); + } + Vectorized> log1p() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m256 one = _mm256_set1_ps(1); + + auto conj = conj_(); + auto b_a = _mm256_permute_ps(conj, 0xB1); //-b a + auto ab = _mm256_mul_ps(conj, b_a); //-ab -ab + auto im = _mm256_add_ps(ab, ab); //-2ab -2ab + + auto val_2 = _mm256_mul_ps(values, values); // a*a b*b + auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a + re = _mm256_permute_ps(re, 0xD8); + re = _mm256_sub_ps(one, re); + + auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm256_add_ps(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj(); //-i*ln() + } + Vectorized> acos() const { + return map(std::acos); + } + Vectorized> atan() const; + Vectorized> atan2(const Vectorized>& /*b*/) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erf() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erfc() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expf8_u10(values); //exp(a) exp(b) + exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosf8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1), + sin_cos.x, 0xAA); //cos(b) sin(b) + return _mm256_mul_ps(exp, cos_sin); + } + Vectorized> expm1() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm256_ceil_ps(values); + } + Vectorized> floor() const { + return _mm256_floor_ps(values); + } + Vectorized> hypot(const Vectorized>& /*b*/) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> igamma(const Vectorized>& /*x*/) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> igammac(const Vectorized>& /*x*/) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> neg() const { + auto zero = _mm256_setzero_ps(); + return _mm256_sub_ps(zero, values); + } + Vectorized> nextafter(const Vectorized>& /*b*/) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> round() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ 
c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ); + } + Vectorized> operator!=(const Vectorized>& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ); + } + Vectorized> operator<(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; + Vectorized> lt(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> le(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> gt(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> ge(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, const Vectorized> &b) { + return _mm256_add_ps(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, const Vectorized> &b) { + return _mm256_sub_ps(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm256_mul_ps(a, b); //ac bd + + auto d_c = _mm256_permute_ps(b, 0xB1); //d c + d_c = _mm256_xor_ps(sign_mask, d_c); //d -c + auto ad_bc = _mm256_mul_ps(a, d_c); //ad -bc + + auto ret = _mm256_hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc + ret = _mm256_permute_ps(ret, 0xD8); + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() + //im = (bc - ad)/abs_2() + const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0); + auto ac_bd = _mm256_mul_ps(a, b); //ac bd + + auto d_c = _mm256_permute_ps(b, 0xB1); //d c + d_c = _mm256_xor_ps(sign_mask, d_c); //-d c + auto ad_bc = _mm256_mul_ps(a, d_c); //-ad bc + + auto re_im = _mm256_hadd_ps(ac_bd, ad_bc);//ac + bd bc - ad + re_im = _mm256_permute_ps(re_im, 0xD8); + return _mm256_div_ps(re_im, b.abs_2_()); +} + +// reciprocal. Implement this here so we can use multiplication. 
+inline Vectorized> Vectorized>::reciprocal() const { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = d/abs_2() + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto c_d = _mm256_xor_ps(sign_mask, values); //c -d + return _mm256_div_ps(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm256_add_ps(i, values)); // a 1+b + auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm256_blendv_ps(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_ps(max, isnan); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm256_blendv_ps(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_ps(min, isnan); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, const Vectorized>& b) { + return _mm256_and_ps(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, const Vectorized>& b) { + return _mm256_or_ps(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, const Vectorized>& b) { + return _mm256_xor_ps(a, b); +} + +inline Vectorized> Vectorized>::eq( + const Vectorized>& other) const { + return (*this == other) & Vectorized>(_mm256_set1_ps(1.0f)); +} + +inline Vectorized> Vectorized>::ne( + const Vectorized>& other) const { + return (*this != other) & Vectorized>(_mm256_set1_ps(1.0f)); +} + +#endif + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_double.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_double.h new file mode 100644 index 0000000000000000000000000000000000000000..138daf3f588af729711a4eb6a90a9704b1e22c51 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_double.h @@ -0,0 +1,419 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
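For complex lanes, the maximum/minimum specializations above order by squared modulus (abs_2_) rather than by value, then propagate NaN exactly like the real kernels. A scalar model of that policy, assuming <complex> and <cmath>; the name is illustrative.

    #include <cmath>
    #include <complex>

    // Pick the operand with the larger |z|^2; if either modulus compares
    // unordered (a NaN crept in), return NaN in both components, matching
    // the all-ones OR in the vector kernel.
    static std::complex<float> max_by_modulus(std::complex<float> a,
                                              std::complex<float> b) {
      float na = std::norm(a), nb = std::norm(b);  // |z|^2, as abs_2_() computes
      if (std::isnan(na) || std::isnan(nb)) {
        return {std::nanf(""), std::nanf("")};
      }
      return na < nb ? b : a;
    }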
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +template <> class Vectorized { +private: + __m256d values; +public: + using value_type = double; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m256d v) : values(v) {} + Vectorized(double val) { + values = _mm256_set1_pd(val); + } + Vectorized(double val1, double val2, double val3, double val4) { + values = _mm256_setr_pd(val1, val2, val3, val4); + } + operator __m256d() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm256_blend_pd(a.values, b.values, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_pd(a.values, b.values, mask.values); + } + template + static Vectorized arange(double base = 0., step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_pd(reinterpret_cast(ptr)); + + + __at_align__ double tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(double)); + return _mm256_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[size()]; + _mm256_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(double)); + } + } + const double& operator[](int idx) const = delete; + double& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ); + return _mm256_movemask_pd(cmp); + } + Vectorized isnan() const { + return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q); + } + Vectorized map(double (*const f)(double)) const { + __at_align__ double tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm256_set1_pd(-0.f); + return _mm256_andnot_pd(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm256_set1_pd(0.f); + const auto nan_vec = _mm256_set1_pd(NAN); + const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_pd(c10::pi); + + const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask); + angle = _mm256_blendv_pd(angle, nan_vec, nan_mask); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_pd(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosd4_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asind4_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atand4_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2d4_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignd4(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erfd4_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcd4_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expd4_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1d4_u10(values)); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodd4(values, q)); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotd4_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); 
+ } + return loadu(tmp); + } + Vectorized log() const { + return Vectorized(Sleef_logd4_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2d4_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10d4_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pd4_u10(values)); + } + Vectorized sin() const { + return Vectorized(Sleef_sind4_u10(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhd4_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosd4_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshd4_u10(values)); + } + Vectorized ceil() const { + return _mm256_ceil_pd(values); + } + Vectorized floor() const { + return _mm256_floor_pd(values); + } + Vectorized frac() const; + Vectorized neg() const { + return _mm256_xor_pd(_mm256_set1_pd(-0.), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterd4(values, b)); + } + Vectorized round() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tand4_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhd4_u10(values)); + } + Vectorized trunc() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammad4_u10(values)); + } + Vectorized sqrt() const { + return _mm256_sqrt_pd(values); + } + Vectorized reciprocal() const { + return _mm256_div_pd(_mm256_set1_pd(1), values); + } + Vectorized rsqrt() const { + return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powd4_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ); + } + + Vectorized operator!=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ); + } + + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ); + } + + Vectorized operator<=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ); + } + + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ); + } + + Vectorized operator>=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_pd(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_pd(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mul_pd(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm256_div_pd(a, b); +} + +// frac. Implement this here so we can use subtraction. 
+inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + Vectorized max = _mm256_max_pd(a, b); + Vectorized isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + return _mm256_or_pd(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + Vectorized min = _mm256_min_pd(a, b); + Vectorized isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + return _mm256_or_pd(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm256_min_pd(max, _mm256_max_pd(min, a)); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm256_max_pd(min, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm256_min_pd(max, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_pd(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_pd(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_pd(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0); +} + +template <> +inline void convert(const double* src, double* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i)); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +#ifdef CPU_CAPABILITY_AVX2 +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm256_fmadd_pd(a, b, c); +} +#endif + +#endif + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_float.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_float.h new file mode 100644 index 0000000000000000000000000000000000000000..6981676c92c8cc85d05f0a9e2ea9c7f7c1075366 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_float.h @@ -0,0 +1,424 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
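Each class's loadu(ptr, count) overload handles ragged tails the same way: zero a scratch buffer (with a loop, for the codegen reason the in-code comments give), memcpy only count elements, then issue a full-width load so the unused lanes are well-defined zeros. A standalone sketch of the float case, assuming AVX and <immintrin.h>; the name is illustrative.

    #include <cstring>
    #include <immintrin.h>

    static __m256 loadu_partial(const float* src, int count) {
      alignas(32) float tmp[8];
      for (int i = 0; i < 8; i++) tmp[i] = 0.0f;  // zero-fill, loop on purpose
      std::memcpy(tmp, src, count * sizeof(float));
      return _mm256_load_ps(tmp);                 // lanes >= count read as 0.0f
    }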
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +template <> class Vectorized { +private: + __m256 values; +public: + using value_type = float; + using size_type = int; + static constexpr size_type size() { + return 8; + } + Vectorized() {} + Vectorized(__m256 v) : values(v) {} + Vectorized(float val) { + values = _mm256_set1_ps(val); + } + Vectorized(float val1, float val2, float val3, float val4, + float val5, float val6, float val7, float val8) { + values = _mm256_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8); + } + operator __m256() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm256_blend_ps(a.values, b.values, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_ps(a.values, b.values, mask.values); + } + template + static Vectorized arange(float base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_ps(reinterpret_cast(ptr)); + __at_align__ float tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(float)); + return _mm256_loadu_ps(tmp_values); + } + void store(void* ptr, int64_t count = size()) const { + if (count == size()) { + _mm256_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + float tmp_values[size()]; + _mm256_storeu_ps(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(float)); + } + } + const float& operator[](int idx) const = delete; + float& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ); + return _mm256_movemask_ps(cmp); + } + Vectorized isnan() const { + return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q); + } + Vectorized map(float (*const f)(float)) const { + __at_align__ float tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm256_set1_ps(-0.f); + return _mm256_andnot_ps(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm256_set1_ps(0.f); + const auto nan_vec = _mm256_set1_ps(NAN); + const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_ps(c10::pi); + + const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask); + angle = _mm256_blendv_ps(angle, nan_vec, nan_mask); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_ps(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosf8_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asinf8_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atanf8_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2f8_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignf8(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erff8_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcf8_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expf8_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1f8_u10(values)); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodf8(values, q)); + } + Vectorized log() const { + return Vectorized(Sleef_logf8_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2f8_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10f8_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pf8_u10(values)); + } + Vectorized frac() const; + Vectorized sin() const { + return Vectorized(Sleef_sinf8_u10(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhf8_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosf8_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshf8_u10(values)); + } + Vectorized ceil() const { + return _mm256_ceil_ps(values); + } + 
Vectorized floor() const { + return _mm256_floor_ps(values); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotf8_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized neg() const { + return _mm256_xor_ps(_mm256_set1_ps(-0.f), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterf8(values, b)); + } + Vectorized round() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tanf8_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhf8_u10(values)); + } + Vectorized trunc() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammaf8_u10(values)); + } + Vectorized sqrt() const { + return _mm256_sqrt_ps(values); + } + Vectorized reciprocal() const { + return _mm256_div_ps(_mm256_set1_ps(1), values); + } + Vectorized rsqrt() const { + return _mm256_div_ps(_mm256_set1_ps(1), _mm256_sqrt_ps(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powf8_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ); + } + + Vectorized operator!=(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ); + } + + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_LT_OQ); + } + + Vectorized operator<=(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_LE_OQ); + } + + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_GT_OQ); + } + + Vectorized operator>=(const Vectorized& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_GE_OQ); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_ps(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_ps(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mul_ps(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm256_div_ps(a, b); +} + +// frac. 
Implement this here so we can use subtraction
+inline Vectorized<float> Vectorized<float>::frac() const {
+  return *this - this->trunc();
+}
+
+// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
+  Vectorized<float> max = _mm256_max_ps(a, b);
+  Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
+  // Exploit the fact that all-ones is a NaN.
+  return _mm256_or_ps(max, isnan);
+}
+
+// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
+  Vectorized<float> min = _mm256_min_ps(a, b);
+  Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
+  // Exploit the fact that all-ones is a NaN.
+  return _mm256_or_ps(min, isnan);
+}
+
+template <>
+Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
+  return _mm256_min_ps(max, _mm256_max_ps(min, a));
+}
+
+template <>
+Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
+  return _mm256_min_ps(max, a);
+}
+
+template <>
+Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
+  return _mm256_max_ps(min, a);
+}
+
+template <>
+Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
+  return _mm256_and_ps(a, b);
+}
+
+template <>
+Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
+  return _mm256_or_ps(a, b);
+}
+
+template <>
+Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
+  return _mm256_xor_ps(a, b);
+}
+
+inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
+  return (*this == other) & Vectorized<float>(1.0f);
+}
+
+inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
+  return (*this != other) & Vectorized<float>(1.0f);
+}
+
+inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
+  return (*this > other) & Vectorized<float>(1.0f);
+}
+
+inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
+  return (*this >= other) & Vectorized<float>(1.0f);
+}
+
+inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
+  return (*this < other) & Vectorized<float>(1.0f);
+}
+
+inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
+  return (*this <= other) & Vectorized<float>(1.0f);
+}
+
+template <>
+inline void convert(const float* src, float* dst, int64_t n) {
+  int64_t i;
+#pragma unroll
+  for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
+    _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i));
+  }
+#pragma unroll
+  for (; i < n; i++) {
+    dst[i] = src[i];
+  }
+}
+
+
+template <>
+Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
+  return _mm256_fmadd_ps(a, b, c);
+}
+
+#endif
+
+}}}
diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbd349083636b8ec570eda079f1a0c636ab07d90
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h
@@ -0,0 +1,832 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX]
+
+#include
+#include
+#include
+
+#if defined(__aarch64__) && defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
+#include
+#endif
+
+// Sleef offers vectorized versions of some transcendentals
+// such as sin, cos, tan etc.
+// However for now opting for STL, since we are not building
+// with Sleef for mobile yet.
+
+namespace at {
+namespace vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+// Right now contains only the aarch64 implementation.
+// aarch32 is not currently supported, for the following two reasons.
+// 1. Due to differences between the aarch32 and aarch64 ISAs, intrinsics
+//    that work for aarch64 don't work for aarch32.
+// 2. Android NDK r21 has problems with compiling aarch32.
+//    Clang segfaults.
+//    https://github.com/android/ndk/issues/1248
+//    https://bugs.llvm.org/show_bug.cgi?id=45824
+// Most likely we will do aarch32 support with inline asm.
+#if defined(__aarch64__)
+
+#ifdef __BIG_ENDIAN__
+#error "Big endian is not supported."
+#endif
+
+#if defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
+#define USE_SLEEF(sleef_code, non_sleef_code) sleef_code
+#else
+#define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code
+#endif
+
+template <int index, bool mask_val>
+struct BlendRegs {
+  static float32x4_t impl(
+    const float32x4_t& a, const float32x4_t& b, float32x4_t& res);
+};
+
+template <int index>
+struct BlendRegs<index, true>{
+  static float32x4_t impl(
+    const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
+    return vsetq_lane_f32(vgetq_lane_f32(b, index), res, index);
+  }
+};
+
+template <int index>
+struct BlendRegs<index, false>{
+  static float32x4_t impl(
+    const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
+    return vsetq_lane_f32(vgetq_lane_f32(a, index), res, index);
+  }
+};
+
+template <> class Vectorized<float> {
+private:
+  float32x4x2_t values;
+public:
+  using value_type = float;
+  using size_type = int;
+  static constexpr size_type size() {
+    return 8;
+  }
+  Vectorized() {}
+  Vectorized(float32x4x2_t v) : values(v) {}
+  Vectorized(float val) : values{vdupq_n_f32(val), vdupq_n_f32(val) } {}
+  Vectorized(float val0, float val1, float val2, float val3,
+         float val4, float val5, float val6, float val7) :
+         values{val0, val1, val2, val3, val4, val5, val6, val7} {}
+  Vectorized(float32x4_t val0, float32x4_t val1) : values{val0, val1} {}
+  operator float32x4x2_t() const {
+    return values;
+  }
+  template <int64_t mask>
+  static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
+    Vectorized<float> vec;
+    // 0.
+    vec.values.val[0] =
+      BlendRegs<0, (mask & 0x01)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    vec.values.val[0] =
+      BlendRegs<1, (mask & 0x02)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    vec.values.val[0] =
+      BlendRegs<2, (mask & 0x04)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    vec.values.val[0] =
+      BlendRegs<3, (mask & 0x08)!=0>::impl(
+          a.values.val[0], b.values.val[0], vec.values.val[0]);
+    // 1.
+    vec.values.val[1] =
+      BlendRegs<0, (mask & 0x10)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] =
+      BlendRegs<1, (mask & 0x20)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] =
+      BlendRegs<2, (mask & 0x40)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    vec.values.val[1] =
+      BlendRegs<3, (mask & 0x80)!=0>::impl(
+          a.values.val[1], b.values.val[1], vec.values.val[1]);
+    return vec;
+  }
+  static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
+      const Vectorized<float>& mask) {
+    // TODO
+    // NB: This requires that each 32-bit lane of the mask be either all
+    // zeros or all ones. We could assert that, but it would affect
+    // performance.
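+    // For example, a mask produced by a lane-wise comparison such as
+    // (a < b) satisfies this: every lane is 0x00000000 or 0xFFFFFFFF, which
+    // is exactly what the bit-wise vbslq_f32 selection below relies on
+    // (a set bit takes the bit from b, a clear bit takes it from a).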
+ Vectorized vec(mask.values); + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + template + static Vectorized arange(float base = 0.f, step_t step = static_cast(1)) { + const Vectorized base_vec(base); + const Vectorized step_vec(step); + const Vectorized step_sizes(0, 1, 2, 3, 4, 5, 6, 7); + return fmadd(step_sizes, step_vec, base_vec); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + { + Vectorized vec; + static uint32x4_t mask_low = {0xFFFFFFFF, 0x0, 0x0, 0x0}; + vec.values.val[0] = vreinterpretq_f32_u32(mask_low); + vec.values.val[1] = a.values.val[1]; + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + return vec; + } + case 2: + { + Vectorized vec; + static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0}; + vec.values.val[0] = vreinterpretq_f32_u32(mask_low); + vec.values.val[1] = a.values.val[1]; + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + return vec; + } + case 3: + { + Vectorized vec; + static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0}; + vec.values.val[0] = vreinterpretq_f32_u32(mask_low); + vec.values.val[1] = a.values.val[1]; + vec.values.val[0] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + return vec; + } + case 4: + return Vectorized(b.values.val[0], a.values.val[1]); + case 5: + { + Vectorized vec; + static uint32x4_t mask_high = {0xFFFFFFFF, 0x0, 0x0, 0x0}; + vec.values.val[0] = b.values.val[0]; + vec.values.val[1] = vreinterpretq_f32_u32(mask_high); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + case 6: + { + Vectorized vec; + static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0}; + vec.values.val[0] = b.values.val[0]; + vec.values.val[1] = vreinterpretq_f32_u32(mask_high); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + case 7: + { + Vectorized vec; + static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0}; + vec.values.val[0] = b.values.val[0]; + vec.values.val[1] = vreinterpretq_f32_u32(mask_high); + vec.values.val[1] = vbslq_f32( + vreinterpretq_u32_f32(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) { + return vld1q_f32_x2(reinterpret_cast(ptr)); + } + else if (count == (size() >> 1)) { + Vectorized res; + res.values.val[0] = vld1q_f32(reinterpret_cast(ptr)); + res.values.val[1] = vdupq_n_f32(0.f); + return res; + } + else { + __at_align__ float tmp_values[size()]; + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(float)); + return vld1q_f32_x2(reinterpret_cast(tmp_values)); + } + } + void store(void* ptr, int64_t count = size()) const { + if (count == size()) { + vst1q_f32_x2(reinterpret_cast(ptr), values); + } + else if (count == (size() >> 1)) { + vst1q_f32(reinterpret_cast(ptr), values.val[0]); + } + else { + float 
tmp_values[size()]; + vst1q_f32_x2(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(float)); + } + } + inline const float32x4_t& get_low() const { + return values.val[0]; + } + inline float32x4_t& get_low() { + return values.val[0]; + } + inline const float32x4_t& get_high() const { + return values.val[1]; + } + inline float32x4_t& get_high() { + return values.val[1]; + } + // Very slow implementation of indexing. + // Only required because vec256_qint refers to this. + // Once we specialize that implementation for ARM + // this should be removed. TODO (kimishpatel) + float operator[](int idx) const { + __at_align__ float tmp[size()]; + store(tmp); + return tmp[idx]; + } + float operator[](int idx) { + __at_align__ float tmp[size()]; + store(tmp); + return tmp[idx]; + } + // For boolean version where we want to if any 1/all zero + // etc. can be done faster in a different way. + int zero_mask() const { + __at_align__ float tmp[size()]; + store(tmp); + int mask = 0; + for (int i = 0; i < size(); ++ i) { + if (tmp[i] == 0.f) { + mask |= (1 << i); + } + } + return mask; + } + Vectorized isnan() const { + __at_align__ float tmp[size()]; + __at_align__ float res[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + if (_isnan(tmp[i])) { + std::memset(static_cast(&res[i]), 0xFF, sizeof(float)); + } else { + std::memset(static_cast(&res[i]), 0, sizeof(float)); + } + } + return loadu(res); + }; + Vectorized map(float (*const f)(float)) const { + __at_align__ float tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + return Vectorized(vabsq_f32(values.val[0]), vabsq_f32(values.val[1])); + } + Vectorized angle() const { + auto zero = Vectorized(0); + auto pi = Vectorized(c10::pi); + auto tmp = blendv(zero, pi, *this < zero); + return blendv(tmp, *this, isnan()); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return Vectorized(0.f); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return USE_SLEEF( + Vectorized(Sleef_acosf4_u10(values.val[0]), Sleef_acosf4_u10(values.val[1])), + map(std::acos) + ); + } + Vectorized asin() const { + return USE_SLEEF( + Vectorized(Sleef_asinf4_u10(values.val[0]), Sleef_asinf4_u10(values.val[1])), + map(std::asin) + ); + } + Vectorized atan() const { + return USE_SLEEF( + Vectorized(Sleef_atanf4_u10(values.val[0]), Sleef_atanf4_u10(values.val[1])), + map(std::atan) + ); + } + Vectorized atan2(const Vectorized &exp) const { + USE_SLEEF( + { + return Vectorized(Sleef_atan2f4_u10(values.val[0], exp.values.val[0]), + Sleef_atan2f4_u10(values.val[1], exp.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_exp[size()]; + store(tmp); + exp.store(tmp_exp); + for (const auto i : c10::irange(size())) { + tmp[i] = std::atan2(tmp[i], tmp_exp[i]); + } + return loadu(tmp); + } + ) + } + Vectorized copysign(const Vectorized &sign) const { + USE_SLEEF( + { + return Vectorized(Sleef_copysignf4(values.val[0], sign.values.val[0]), + Sleef_copysignf4(values.val[1], sign.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_sign[size()]; + store(tmp); + sign.store(tmp_sign); + for (size_type i = 0; i < size(); i++) { + tmp[i] = std::copysign(tmp[i], tmp_sign[i]); + } + return loadu(tmp); + } + ) + } + Vectorized erf() const { + return USE_SLEEF( + Vectorized(Sleef_erff4_u10(values.val[0]), 
Sleef_erff4_u10(values.val[1])), + map(std::erf); + ); + } + Vectorized erfc() const { + return USE_SLEEF( + Vectorized(Sleef_erfcf4_u15(values.val[0]), Sleef_erfcf4_u15(values.val[1])), + map(std::erfc) + ); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return USE_SLEEF( + Vectorized(Sleef_expf4_u10(values.val[0]), Sleef_expf4_u10(values.val[1])), + map(std::exp) + ); + } + Vectorized expm1() const { + return USE_SLEEF( + Vectorized(Sleef_expm1f4_u10(values.val[0]), Sleef_expm1f4_u10(values.val[1])), + map(std::expm1) + ); + } + Vectorized fmod(const Vectorized& q) const { + USE_SLEEF( + { + return Vectorized(Sleef_fmodf4(values.val[0], q.values.val[0]), + Sleef_fmodf4(values.val[1], q.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_q[size()]; + store(tmp); + q.store(tmp_q); + for (const auto i : c10::irange(size())) { + tmp[i] = std::fmod(tmp[i], tmp_q[i]); + } + return loadu(tmp); + } + ) + } + Vectorized hypot(const Vectorized &b) const { + USE_SLEEF( + { + return Vectorized(Sleef_hypotf4_u05(values.val[0], b.values.val[0]), + Sleef_hypotf4_u05(values.val[1], b.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_b[size()]; + store(tmp); + b.store(tmp_b); + for (const auto i : c10::irange(size())) { + tmp[i] = std::hypot(tmp[i], tmp_b[i]); + } + return loadu(tmp); + } + ) + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized log() const { + return USE_SLEEF( + Vectorized(Sleef_logf4_u10(values.val[0]), Sleef_logf4_u10(values.val[1])), + map(std::log) + ); + } + Vectorized log10() const { + return USE_SLEEF( + Vectorized(Sleef_log10f4_u10(values.val[0]), Sleef_log10f4_u10(values.val[1])), + map(std::log10) + ); + } + Vectorized log1p() const { + return USE_SLEEF( + Vectorized(Sleef_log1pf4_u10(values.val[0]), Sleef_log1pf4_u10(values.val[1])), + map(std::log1p) + ); + } + Vectorized log2() const { + return USE_SLEEF( + Vectorized(Sleef_log2f4_u10(values.val[0]), Sleef_log2f4_u10(values.val[1])), + map(std::log2) + ); + } + Vectorized nextafter(const Vectorized &b) const { + USE_SLEEF( + { + return Vectorized(Sleef_nextafterf4(values.val[0], b.values.val[0]), + Sleef_nextafterf4(values.val[1], b.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_b[size()]; + store(tmp); + b.store(tmp_b); + for (const auto i : c10::irange(size())) { + tmp[i] = std::nextafter(tmp[i], tmp_b[i]); + } + return loadu(tmp); + } + ) + } + Vectorized frac() const; + Vectorized sin() const { + return USE_SLEEF( + Vectorized(Sleef_sinf4_u10(values.val[0]), Sleef_sinf4_u10(values.val[1])), + map(std::sin) + ); + } + Vectorized sinh() const { + return USE_SLEEF( + Vectorized(Sleef_sinhf4_u10(values.val[0]), Sleef_sinhf4_u10(values.val[1])), + map(std::sinh) + ); + } + Vectorized cos() const { + return USE_SLEEF( + Vectorized(Sleef_cosf4_u10(values.val[0]), 
Sleef_cosf4_u10(values.val[1])), + map(std::cos) + ); + } + Vectorized cosh() const { + return USE_SLEEF( + Vectorized(Sleef_coshf4_u10(values.val[0]), Sleef_coshf4_u10(values.val[1])), + map(std::cosh) + ); + } + Vectorized ceil() const { + return map(at::native::ceil_impl); + } + Vectorized floor() const { + return map(at::native::floor_impl); + } + Vectorized neg() const { + return Vectorized( + vnegq_f32(values.val[0]), + vnegq_f32(values.val[1])); + } + Vectorized round() const { + // We do not use std::round because we would like to round midway numbers to the nearest even integer. + return map(at::native::round_impl); + } + Vectorized tan() const { + return USE_SLEEF( + Vectorized(Sleef_tanf4_u10(values.val[0]), Sleef_tanf4_u10(values.val[1])), + map(std::tan) + ); + } + Vectorized tanh() const { + return USE_SLEEF( + Vectorized(Sleef_tanhf4_u10(values.val[0]), Sleef_tanhf4_u10(values.val[1])), + map(std::tanh) + ); + } + Vectorized trunc() const { + float32x4_t r0 = vrndq_f32(values.val[0]); + float32x4_t r1 = vrndq_f32(values.val[1]); + return Vectorized(r0, r1); + } + Vectorized lgamma() const { + return USE_SLEEF( + Vectorized(Sleef_lgammaf4_u10(values.val[0]), Sleef_lgammaf4_u10(values.val[1])), + map(std::lgamma) + ); + } + Vectorized sqrt() const { + return Vectorized( + vsqrtq_f32(values.val[0]), + vsqrtq_f32(values.val[1])); + } + Vectorized reciprocal() const { + auto r0 = vdivq_f32(vdupq_n_f32(1.0f), values.val[0]); + auto r1 = vdivq_f32(vdupq_n_f32(1.0f), values.val[1]); + return Vectorized(r0, r1); + } + Vectorized rsqrt() const { + return this->sqrt().reciprocal(); + } + Vectorized pow(const Vectorized &exp) const { + USE_SLEEF( + { + return Vectorized(Sleef_powf4_u10(values.val[0], exp.values.val[0]), + Sleef_powf4_u10(values.val[1], exp.values.val[1])); + }, + { + __at_align__ float tmp[size()]; + __at_align__ float tmp_exp[size()]; + store(tmp); + exp.store(tmp_exp); + for (const auto i : c10::irange(size())) { + tmp[i] = std::pow(tmp[i], tmp_exp[i]); + } + return loadu(tmp); + } + ) + } + Vectorized operator==(const Vectorized& other) const { + float32x4_t r0 = + vreinterpretq_f32_u32(vceqq_f32(values.val[0], other.values.val[0])); + float32x4_t r1 = + vreinterpretq_f32_u32(vceqq_f32(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator!=(const Vectorized& other) const { + float32x4_t r0 = vreinterpretq_f32_u32( + vmvnq_u32(vceqq_f32(values.val[0], other.values.val[0]))); + float32x4_t r1 = vreinterpretq_f32_u32( + vmvnq_u32(vceqq_f32(values.val[1], other.values.val[1]))); + return Vectorized(r0, r1); + } + + Vectorized operator<(const Vectorized& other) const { + float32x4_t r0 = + vreinterpretq_f32_u32(vcltq_f32(values.val[0], other.values.val[0])); + float32x4_t r1 = + vreinterpretq_f32_u32(vcltq_f32(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator<=(const Vectorized& other) const { + float32x4_t r0 = + vreinterpretq_f32_u32(vcleq_f32(values.val[0], other.values.val[0])); + float32x4_t r1 = + vreinterpretq_f32_u32(vcleq_f32(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator>(const Vectorized& other) const { + float32x4_t r0 = + vreinterpretq_f32_u32(vcgtq_f32(values.val[0], other.values.val[0])); + float32x4_t r1 = + vreinterpretq_f32_u32(vcgtq_f32(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator>=(const Vectorized& other) const { + float32x4_t r0 = + 
vreinterpretq_f32_u32(vcgeq_f32(values.val[0], other.values.val[0])); + float32x4_t r1 = + vreinterpretq_f32_u32(vcgeq_f32(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vaddq_f32(a.get_low(), b.get_low()); + float32x4_t r1 = vaddq_f32(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vsubq_f32(a.get_low(), b.get_low()); + float32x4_t r1 = vsubq_f32(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vmulq_f32(a.get_low(), b.get_low()); + float32x4_t r1 = vmulq_f32(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vdivq_f32(a.get_low(), b.get_low()); + float32x4_t r1 = vdivq_f32(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vmaxq_f32(a.get_low(), b.get_low()); + float32x4_t r1 = vmaxq_f32(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. 
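+// (Unlike the AVX2 versions of these functions, no explicit unordered-compare
+// trick is needed here: the NEON vmaxq_f32/vminq_f32 instructions already
+// return NaN whenever either input lane is NaN.)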
+template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vminq_f32(a.get_low(), b.get_low()); + float32x4_t r1 = vminq_f32(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return minimum(max, maximum(min, a)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return minimum(max, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return maximum(min, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vreinterpretq_f32_u32(vandq_u32( + vreinterpretq_u32_f32(a.get_low()), + vreinterpretq_u32_f32(b.get_low()))); + float32x4_t r1 = vreinterpretq_f32_u32(vandq_u32( + vreinterpretq_u32_f32(a.get_high()), + vreinterpretq_u32_f32(b.get_high()))); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vreinterpretq_f32_u32(vorrq_u32( + vreinterpretq_u32_f32(a.get_low()), + vreinterpretq_u32_f32(b.get_low()))); + float32x4_t r1 = vreinterpretq_f32_u32(vorrq_u32( + vreinterpretq_u32_f32(a.get_high()), + vreinterpretq_u32_f32(b.get_high()))); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + float32x4_t r0 = vreinterpretq_f32_u32(veorq_u32( + vreinterpretq_u32_f32(a.get_low()), + vreinterpretq_u32_f32(b.get_low()))); + float32x4_t r1 = vreinterpretq_f32_u32(veorq_u32( + vreinterpretq_u32_f32(a.get_high()), + vreinterpretq_u32_f32(b.get_high()))); + return Vectorized(r0, r1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +template <> +inline void convert(const float* src, int32_t* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i))); + vst1q_s32(dst + i + 4, vcvtq_s32_f32(vld1q_f32(src + i + 4))); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int32_t* src, float* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i))); + vst1q_f32(dst + i + 4, vcvtq_f32_s32(vld1q_s32(src + i + 4))); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + float32x4_t r0 = vfmaq_f32(c.get_low(), a.get_low(), b.get_low()); + float32x4_t r1 = vfmaq_f32(c.get_high(), a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +#endif /* defined(aarch64) */ 
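+// Usage sketch (hypothetical helper, not part of the upstream header): a
+// SAXPY kernel written purely against the Vectorized<float> interface above.
+// It uses only loadu/store/fmadd plus a scalar tail, so the same source
+// works unchanged with the AVX2 and NEON specializations.
+inline void saxpy_sketch(float a, const float* x, float* y, int64_t n) {
+  using Vec = Vectorized<float>;
+  const Vec va(a);  // broadcast the scalar into all lanes
+  int64_t i = 0;
+  // Vectorized body: Vec::size() lanes per step; fmadd(a, b, c) = a * b + c.
+  for (; i + Vec::size() <= n; i += Vec::size()) {
+    fmadd(va, Vec::loadu(x + i), Vec::loadu(y + i)).store(y + i);
+  }
+  for (; i < n; ++i) {  // scalar tail for the remainder
+    y[i] = a * x[i] + y[i];
+  }
+}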
+ +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_int.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_int.h new file mode 100644 index 0000000000000000000000000000000000000000..0cc36d590019d84897e9f18b0b99e0ab1b3d9618 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_int.h @@ -0,0 +1,1138 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#include + +namespace at { +namespace vec { +inline namespace CPU_CAPABILITY { + +#ifdef CPU_CAPABILITY_AVX2 + +struct Vectorizedi { +protected: + __m256i values; + + static inline __m256i invert(const __m256i& v) { + const auto ones = _mm256_set1_epi64x(-1); + return _mm256_xor_si256(ones, v); + } +public: + Vectorizedi() {} + Vectorizedi(__m256i v) : values(v) {} + operator __m256i() const { + return values; + } +}; + +#else + +struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined + +#endif // CPU_CAPABILITY_AVX2 + +#ifdef CPU_CAPABILITY_AVX2 + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int64_t; + using size_type = int; + static constexpr size_type size() { + return 4; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int64_t v) { values = _mm256_set1_epi64x(v); } + Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4) { + values = _mm256_setr_epi64x(val1, val2, val3, val4); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ int64_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi64(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi64(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi64(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi64(b.values, 3); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int64_t base = 0, step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ int64_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int64_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. 
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int64_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int64_t)); + } + } + const int64_t& operator[](int idx) const = delete; + int64_t& operator[](int idx) = delete; + Vectorized abs() const { + auto zero = _mm256_set1_epi64x(0); + auto is_larger = _mm256_cmpgt_epi64(zero, values); + auto inverse = _mm256_xor_si256(values, is_larger); + return _mm256_sub_epi64(inverse, is_larger); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi64x(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi64(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi64(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi64(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi64(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi64(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi64(other.values, values)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int32_t; + static constexpr int size() { + return 8; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int32_t v) { values = _mm256_set1_epi32(v); } + Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4, + int32_t val5, int32_t val6, int32_t val7, int32_t val8) { + values = _mm256_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + return _mm256_blend_epi32(a, b, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int32_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int32_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return 
_mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int32_t count) { + __at_align__ int32_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int32_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int32_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int32_t)); + } + } + const int32_t& operator[](int idx) const = delete; + int32_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm256_abs_epi32(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi32(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi32(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi32(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi32(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi32(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi32(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi32(other.values, values)); + } + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +inline void convert(const int32_t *src, float *dst, int64_t n) { + int64_t i; + // int32_t and float have same size +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_vec = _mm256_loadu_si256(reinterpret_cast(src + i)); + auto output_vec = _mm256_cvtepi32_ps(input_vec); + _mm256_storeu_ps(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int32_t *src, double *dst, int64_t n) { + int64_t i; + // int32_t has half the size of double +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_128_vec = _mm_loadu_si128(reinterpret_cast(src + i)); + auto output_vec = _mm256_cvtepi32_pd(input_128_vec); + 
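+    // _mm256_cvtepi32_pd widens four int32 lanes (one 128-bit half) into
+    // four doubles, which is why this loop advances four elements at a time
+    // rather than eight.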
_mm256_storeu_pd(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int16_t; + static constexpr int size() { + return 16; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int16_t v) { values = _mm256_set1_epi16(v); } + Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4, + int16_t val5, int16_t val6, int16_t val7, int16_t val8, + int16_t val9, int16_t val10, int16_t val11, int16_t val12, + int16_t val13, int16_t val14, int16_t val15, int16_t val16) { + values = _mm256_setr_epi16(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi16(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi16(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi16(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi16(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi16(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi16(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi16(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi16(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi16(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi16(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi16(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi16(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi16(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi16(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi16(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi16(b.values, 15); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int16_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int16_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static 
Vectorized loadu(const void* ptr, int16_t count) { + __at_align__ int16_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int16_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + const int16_t& operator[](int idx) const = delete; + int16_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm256_abs_epi16(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi16(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi16(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi16(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi16(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi16(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi16(other.values, values)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int8_t; + static constexpr int size() { + return 32; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int8_t v) { values = _mm256_set1_epi8(v); } + Vectorized(int8_t val1, int8_t val2, int8_t val3, int8_t val4, + int8_t val5, int8_t val6, int8_t val7, int8_t val8, + int8_t val9, int8_t val10, int8_t val11, int8_t val12, + int8_t val13, int8_t val14, int8_t val15, int8_t val16, + int8_t val17, int8_t val18, int8_t val19, int8_t val20, + int8_t val21, int8_t val22, int8_t val23, int8_t val24, + int8_t val25, int8_t val26, int8_t val27, int8_t val28, + int8_t val29, int8_t val30, int8_t val31, int8_t val32) { + values = _mm256_setr_epi8(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16, + val17, val18, val19, val20, val21, val22, val23, val24, + val25, val26, val27, val28, val29, 
val30, val31, val32); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ int8_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi8(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi8(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi8(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi8(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi8(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi8(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi8(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi8(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi8(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi8(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi8(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi8(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi8(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi8(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi8(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi8(b.values, 15); + if (mask & 0x010000) + tmp_values[16] = _mm256_extract_epi8(b.values, 16); + if (mask & 0x020000) + tmp_values[17] = _mm256_extract_epi8(b.values, 17); + if (mask & 0x040000) + tmp_values[18] = _mm256_extract_epi8(b.values, 18); + if (mask & 0x080000) + tmp_values[19] = _mm256_extract_epi8(b.values, 19); + if (mask & 0x100000) + tmp_values[20] = _mm256_extract_epi8(b.values, 20); + if (mask & 0x200000) + tmp_values[21] = _mm256_extract_epi8(b.values, 21); + if (mask & 0x400000) + tmp_values[22] = _mm256_extract_epi8(b.values, 22); + if (mask & 0x800000) + tmp_values[23] = _mm256_extract_epi8(b.values, 23); + if (mask & 0x1000000) + tmp_values[24] = _mm256_extract_epi8(b.values, 24); + if (mask & 0x2000000) + tmp_values[25] = _mm256_extract_epi8(b.values, 25); + if (mask & 0x4000000) + tmp_values[26] = _mm256_extract_epi8(b.values, 26); + if (mask & 0x8000000) + tmp_values[27] = _mm256_extract_epi8(b.values, 27); + if (mask & 0x10000000) + tmp_values[28] = _mm256_extract_epi8(b.values, 28); + if (mask & 0x20000000) + tmp_values[29] = _mm256_extract_epi8(b.values, 29); + if (mask & 0x40000000) + tmp_values[30] = _mm256_extract_epi8(b.values, 30); + if (mask & 0x80000000) + tmp_values[31] = _mm256_extract_epi8(b.values, 31); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int8_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step, + base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step, + base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step, + base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step, + base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int8_t count = size()) { + switch (count) { + case 0: + return 
a; + case 1: + return blend<0x1>(a, b); + case 2: + return blend<0x3>(a, b); + case 3: + return blend<0x7>(a, b); + case 4: + return blend<0xF>(a, b); + case 5: + return blend<0x1F>(a, b); + case 6: + return blend<0x3F>(a, b); + case 7: + return blend<0x7F>(a, b); + case 8: + return blend<0xFF>(a, b); + case 9: + return blend<0x1FF>(a, b); + case 10: + return blend<0x3FF>(a, b); + case 11: + return blend<0x7FF>(a, b); + case 12: + return blend<0xFFF>(a, b); + case 13: + return blend<0x1FFF>(a, b); + case 14: + return blend<0x3FFF>(a, b); + case 15: + return blend<0x7FFF>(a, b); + case 16: + return blend<0xFFFF>(a, b); + case 17: + return blend<0x1FFFF>(a, b); + case 18: + return blend<0x3FFFF>(a, b); + case 19: + return blend<0x7FFFF>(a, b); + case 20: + return blend<0xFFFFF>(a, b); + case 21: + return blend<0x1FFFFF>(a, b); + case 22: + return blend<0x3FFFFF>(a, b); + case 23: + return blend<0x7FFFFF>(a, b); + case 24: + return blend<0xFFFFFF>(a, b); + case 25: + return blend<0x1FFFFFF>(a, b); + case 26: + return blend<0x3FFFFFF>(a, b); + case 27: + return blend<0x7FFFFFF>(a, b); + case 28: + return blend<0xFFFFFFF>(a, b); + case 29: + return blend<0x1FFFFFFF>(a, b); + case 30: + return blend<0x3FFFFFFF>(a, b); + case 31: + return blend<0x7FFFFFFF>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int8_t count) { + __at_align__ int8_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int8_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. 
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int8_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int8_t)); + } + } + const int8_t& operator[](int idx) const = delete; + int8_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm256_abs_epi8(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi8(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi8(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi8(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi8(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi8(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi8(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi8(other.values, values)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi64(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi32(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi16(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi8(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi64(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi32(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi16(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi8(a, b); +} + +// Negation. Defined here so we can utilize operator- +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +// Emulate operations with no native 64-bit support in avx, +// by extracting each element, performing the operation pointwise, +// then combining the results into a vector. 
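+// (AVX2 has _mm256_mullo_epi32/epi16 but no 64-bit counterparts such as
+// _mm256_mullo_epi64 or _mm256_min_epi64 -- those require AVX-512 -- so the
+// int64_t specializations below, e.g. operator*, route through this scalar
+// fallback with a per-lane lambda.)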
+template +Vectorized inline emulate(const Vectorized& a, const Vectorized& b, const op_t& op) { + int64_t a0 = _mm256_extract_epi64(a, 0); + int64_t a1 = _mm256_extract_epi64(a, 1); + int64_t a2 = _mm256_extract_epi64(a, 2); + int64_t a3 = _mm256_extract_epi64(a, 3); + + int64_t b0 = _mm256_extract_epi64(b, 0); + int64_t b1 = _mm256_extract_epi64(b, 1); + int64_t b2 = _mm256_extract_epi64(b, 2); + int64_t b3 = _mm256_extract_epi64(b, 3); + + int64_t c0 = op(a0, b0); + int64_t c1 = op(a1, b1); + int64_t c2 = op(a2, b2); + int64_t c3 = op(a3, b3); + + return _mm256_set_epi64x(c3, c2, c1, c0); +} + +template +Vectorized inline emulate(const Vectorized& a, const Vectorized& b, const Vectorized& c, const op_t& op) { + int64_t a0 = _mm256_extract_epi64(a, 0); + int64_t a1 = _mm256_extract_epi64(a, 1); + int64_t a2 = _mm256_extract_epi64(a, 2); + int64_t a3 = _mm256_extract_epi64(a, 3); + + int64_t b0 = _mm256_extract_epi64(b, 0); + int64_t b1 = _mm256_extract_epi64(b, 1); + int64_t b2 = _mm256_extract_epi64(b, 2); + int64_t b3 = _mm256_extract_epi64(b, 3); + + int64_t c0 = _mm256_extract_epi64(c, 0); + int64_t c1 = _mm256_extract_epi64(c, 1); + int64_t c2 = _mm256_extract_epi64(c, 2); + int64_t c3 = _mm256_extract_epi64(c, 3); + + int64_t d0 = op(a0, b0, c0); + int64_t d1 = op(a1, b1, c1); + int64_t d2 = op(a2, b2, c2); + int64_t d3 = op(a3, b3, c3); + + return _mm256_set_epi64x(d3, d2, d1, d0); +} + +// AVX2 has no intrinsic for int64_t multiply so it needs to be emulated +// This could be implemented more efficiently using epi32 instructions +// This is also technically avx compatible, but then we'll need AVX +// code for add as well. +// Note: intentionally ignores undefined behavior like (-lowest * -1). +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return emulate(a, b, [](int64_t a_point, int64_t b_point) __ubsan_ignore_undefined__ {return a_point * b_point;}); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mullo_epi16(a, b); +} + +template +Vectorized inline int_elementwise_binary_256(const Vectorized& a, const Vectorized& b, Op op) { + T values_a[Vectorized::size()]; + T values_b[Vectorized::size()]; + a.store(values_a); + b.store(values_b); + for (int i = 0; i != Vectorized::size(); i++) { + values_a[i] = op(values_a[i], values_b[i]); + } + return Vectorized::loadu(values_a); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + // We don't have an instruction for multiplying int8_t + return int_elementwise_binary_256(a, b, std::multiplies()); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::min(a_point, b_point);}); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi32(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi16(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi8(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::max(a_point, b_point);}); +} + +template <> +Vectorized inline maximum(const 
Vectorized& a, const Vectorized& b) { + return _mm256_max_epi32(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epi16(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epi8(a, b); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {return std::min(max_point, std::max(a_point, min_point));}); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return emulate(a, max_val, [](int64_t a_point, int64_t max_point) {return std::min(max_point, a_point);}); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi32(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi16(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi8(max_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return emulate(a, min_val, [](int64_t a_point, int64_t min_point) {return std::max(min_point, a_point);}); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi32(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi16(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi8(min_val, a); +} + +template +Vectorized inline convert_to_int32(const T* ptr) { + return Vectorized::loadu(ptr); +} + +template<> +Vectorized inline convert_to_int32(const int8_t* ptr) { + return _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast(ptr))); +} + +template<> +Vectorized inline convert_to_int32(const uint8_t* ptr) { + return _mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast(ptr))); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +template>::value, int> = 0> +inline Vectorized 
+
+template <class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
+  return _mm256_and_si256(a, b);
+}
+template <class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
+  return _mm256_or_si256(a, b);
+}
+template <class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
+  return _mm256_xor_si256(a, b);
+}
+template <class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator~(const Vectorized<T>& a) {
+  return _mm256_xor_si256(a, _mm256_set1_epi32(-1));
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
+  return (*this == other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
+  return (*this != other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
+  return (*this > other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
+  return (*this >= other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
+  return (*this < other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
+  return (*this <= other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
+  return (*this == other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
+  return (*this != other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
+  return (*this > other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
+  return (*this >= other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
+  return (*this < other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
+  return (*this <= other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
+  return (*this == other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
+  return (*this != other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
+  return (*this > other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
+  return (*this >= other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
+  return (*this < other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
+  return (*this <= other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
+  return (*this == other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
+  return (*this != other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
+  return (*this > other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
+  return (*this >= other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
+  return (*this < other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
+  return (*this <= other) & Vectorized<int8_t>(1);
+}
+
+#endif
+
+}}}
diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_qint.h b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_qint.h
new file mode 100644
index 0000000000000000000000000000000000000000..0ee43b53e63582f823a0a84e95bf0c93b5888000
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cpu/vec/vec256/vec256_qint.h
@@ -0,0 +1,1246 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +// This file defines Vectorized<> for the quantized types. +// +// +// Currently, we simply use these classes as efficient converters between +// the quantized types and Vectorized, usually in bandwidth-bound cases +// where doing the arithmetic in full-precision is acceptable (e.g. +// elementwise operators). +// +// +// Conversions are as follows: +// Vectorized -> 4x Vectorized +// Vectorized -> 4x Vectorized +// Vectorized -> 1x Vectorized +// +// The size of the returned float vector is specified by the special +// constexpr function float_num_vecs. The type of the value returned +// from dequantize (and expected as an argument to quantize) is +// specified by float_vec_return_type. +// +// When writing kernels with these vectors, it is expected that floating- +// point operations will be carried out in a loop over Vectorized::float_num_vecs +// iterations. + +namespace at { +namespace vec { +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +struct Vectorizedqi { + protected: + __m256i vals __attribute__((aligned(64))); + + public: + Vectorizedqi() {} + Vectorizedqi(__m256i v) : vals(v) {} + operator __m256i() const { + return vals; + } +}; + +template +__m256i pack_saturate_and_clamp( + __m256i first, + __m256i second, + T min_val, + T max_val); + +template <> +inline __m256i pack_saturate_and_clamp( + __m256i /*first*/, + __m256i /*second*/, + int32_t /*min_val*/, + int32_t /*max_val*/) { + // This function is for linkage only, will not be used + AT_ERROR("pack_saturate_and_clamp is not supported"); +} + +template <> +inline __m256i pack_saturate_and_clamp( + __m256i first, + __m256i second, + int8_t min_val, + int8_t max_val) { + __m256i packed_and_sat = _mm256_packs_epi16(first, second); + return _mm256_max_epi8( + _mm256_set1_epi8(min_val), + _mm256_min_epi8(packed_and_sat, _mm256_set1_epi8(max_val))); +} + +template <> +inline __m256i pack_saturate_and_clamp( + __m256i first, + __m256i second, + uint8_t min_val, + uint8_t max_val) { + __m256i packed_and_sat = _mm256_packus_epi16(first, second); + return _mm256_max_epu8( + _mm256_set1_epi8(min_val), + _mm256_min_epu8(packed_and_sat, _mm256_set1_epi8(max_val))); +} + +template +inline void __attribute__((always_inline)) QuantizeAvx2( + const float* src, + typename T::underlying* dst, + int len, + float inverse_scale, + int64_t zero_point) { + constexpr int VLEN = 8; + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + const __m256i min_v = _mm256_set1_epi32(min_val); + const __m256i max_v = _mm256_set1_epi32(max_val); + // This is the largest int32 value < int32_max exactly representable in float + constexpr int32_t int32_float_max_val = + std::numeric_limits::max() - 127; + int i = 0; + __m256 inverse_scale_v = _mm256_set1_ps(inverse_scale); + // clang-format off + static const __m256i shuffle_mask_v = _mm256_set_epi8( + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0c, 0x08, 0x04, 0x00, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0c, 0x08, 0x04, 0x00); + // clang-format on + __m256i permute_mask_v = + _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00); + __m256i permute_mask_l8_v = + _mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00); + int len_aligned = len / (VLEN * 4) * (VLEN * 
4);
+  for (; i < len_aligned; i += 4 * VLEN) {
+    // x
+    __m256 x_vals = _mm256_load_ps(src + i);
+    __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
+    // If the floating point value is greater than int32_max,
+    // _mm256_cvtps_epi32 converts them to -ve. Clip at int32_float_max_val to
+    // avoid this.
+    x_transformed_v =
+        _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // y
+    __m256 y_vals = _mm256_load_ps(src + i + VLEN);
+    __m256 y_transformed_v = _mm256_mul_ps(y_vals, inverse_scale_v);
+    y_transformed_v =
+        _mm256_min_ps(y_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // z
+    __m256 z_vals = _mm256_load_ps(src + i + 2 * VLEN);
+    __m256 z_transformed_v = _mm256_mul_ps(z_vals, inverse_scale_v);
+    z_transformed_v =
+        _mm256_min_ps(z_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // w
+    __m256 w_vals = _mm256_load_ps(src + i + 3 * VLEN);
+    __m256 w_transformed_v = _mm256_mul_ps(w_vals, inverse_scale_v);
+    w_transformed_v =
+        _mm256_min_ps(w_transformed_v, _mm256_set1_ps(int32_float_max_val));
+
+    __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
+    __m256i y_rounded_v = _mm256_cvtps_epi32(y_transformed_v);
+    __m256i z_rounded_v = _mm256_cvtps_epi32(z_transformed_v);
+    __m256i w_rounded_v = _mm256_cvtps_epi32(w_transformed_v);
+
+    // add zero point
+    x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
+    y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point));
+    z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point));
+    w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point));
+
+    __m256i xy_packed_v = _mm256_packs_epi32(x_rounded_v, y_rounded_v);
+    __m256i zw_packed_v = _mm256_packs_epi32(z_rounded_v, w_rounded_v);
+    __m256i xyzw_clamped_v = pack_saturate_and_clamp<typename T::underlying>(
+        xy_packed_v, zw_packed_v, min_val, max_val);
+
+    xyzw_clamped_v =
+        _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
+    _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst + i), xyzw_clamped_v);
+  }
+
+  // Additional 8-lane AVX2 version to take advantage when len is smaller
+  // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
+  for (; i < len / VLEN * VLEN; i += VLEN) {
+    __m256 x_vals = _mm256_load_ps(src + i);
+    __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
+    x_transformed_v =
+        _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
+    x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
+    __m256i x_clipped_v =
+        _mm256_max_epi32(min_v, _mm256_min_epi32(max_v, x_rounded_v));
+
+    x_clipped_v = _mm256_shuffle_epi8(x_clipped_v, shuffle_mask_v);
+    x_clipped_v = _mm256_permutevar8x32_epi32(x_clipped_v, permute_mask_l8_v);
+    _mm_storel_epi64(
+        reinterpret_cast<__m128i*>(dst + i),
+        _mm256_castsi256_si128(x_clipped_v));
+  }
+
+  for (; i < len; ++i) {
+    float transformed = src[i] * inverse_scale;
+
+    // Not exactly the same behavior as the vectorized code.
+    // The vectorized code above always rounds to even in halfway cases
+    // (https://software.intel.com/en-us/node/523819), but std::nearbyint
+    // does the same only when the current rounding mode is FE_TONEAREST.
+    // However, in practice, this should not be a problem because most cases
+    // use the default rounding mode FE_TONEAREST.
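+    // For example, under FE_TONEAREST, std::nearbyint(2.5f) == 2.0f and
+    // std::nearbyint(3.5f) == 4.0f: halfway cases go to the even neighbor.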
+ // Note that we cannot implement the same behavior as the vectorized code + // using std::round because it does rounding away from zero in halfway + // cases. + transformed = zero_point + nearbyint(transformed); + float clipped = + std::min(std::max(transformed, float(min_val)), float(max_val)); + dst[i] = clipped; + } +} + +template<> +struct Vectorized : public Vectorizedqi { + using size_type = int; + static constexpr size_type size() { + return 8; + } + + static constexpr int float_num_vecs() { + return 1; + } + + static constexpr int int_num_vecs() { + return 1; + } + + using float_vec_return_type = std::array, 1>; + using int_vec_return_type = std::array, 1>; + using value_type = c10::qint32::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint32& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi32(uw); + } + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_zp_premul) const { + __m256 float_vals = _mm256_cvtepi32_ps(vals); + return {vec::fmadd(scale, Vectorized(float_vals), scale_zp_premul)}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + Vectorized retval; + auto rhs_data = (__m256)rhs[0]; + at::native::quantize_vec( + scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 8); + return retval; + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epi32(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epi32(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epi32( + _mm256_max_epi32(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + return {_mm256_sub_epi32(vals, b)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + + __m256 scaled = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier_v); + __m256i rounded = _mm256_cvtps_epi32(scaled); + return _mm256_add_epi32(rounded, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const 
Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + return _mm256_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + return _mm256_add_epi32(a, b); +} + +/* + * Convert values from int32 back to int8/uint8 + */ +template +__m256i RequantizeAvx2( + const std::array, 4>& inp, + __m256 multiplier, + __m256i zp) { + static_assert( + std::is_same::value || std::is_same::value, + "Only int8_t/uint8_t are supported"); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + __m256i permute_mask_v = + _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00); + __m256 x_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier); + __m256 y_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[1]), multiplier); + __m256 z_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[2]), multiplier); + __m256 w_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[3]), multiplier); + + __m256i x_rounded_v = _mm256_cvtps_epi32(x_scaled_v); + __m256i y_rounded_v = _mm256_cvtps_epi32(y_scaled_v); + __m256i z_rounded_v = _mm256_cvtps_epi32(z_scaled_v); + __m256i w_rounded_v = _mm256_cvtps_epi32(w_scaled_v); + + /* Add zero point */ + __m256i x_v = _mm256_add_epi32(x_rounded_v, zp); + __m256i y_v = _mm256_add_epi32(y_rounded_v, zp); + __m256i z_v = _mm256_add_epi32(z_rounded_v, zp); + __m256i w_v = _mm256_add_epi32(w_rounded_v, zp); + + /* Pack to int16_t and saturate */ + __m256i xy_packed_v = _mm256_packs_epi32(x_v, y_v); + __m256i zw_packed_v = _mm256_packs_epi32(z_v, w_v); + + __m256i xyzw_clamped_v = + pack_saturate_and_clamp(xy_packed_v, zw_packed_v, min_val, max_val); + + /* + * xyzw_clamped_v has results in the following layout so we need to + * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 + */ + xyzw_clamped_v = _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v); + return xyzw_clamped_v; +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 32; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::qint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + + Vectorized() {} + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint8& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi8(uw); + } + + // This is needed because the compiler emits awful code for the default + // constructor for moving the enum + // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy) + C10_CLANG_DIAGNOSTIC_PUSH() + #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy") + C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy") + #endif + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + C10_CLANG_DIAGNOSTIC_POP() + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more 
details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + private: + __m256i cvtepi8_epi32(__m128i epi8_vals) const { + return _mm256_cvtepi8_epi32(epi8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_neg_zp_premul) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_neg_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_neg_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_neg_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_neg_zp_premul); + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float /*scale*/, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + int8_t quantized_values[32]; + QuantizeAvx2( + rhs_data, quantized_values, 32, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epi8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epi8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epi8( + _mm256_max_epi8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256i int32_val0 = cvtepi8_epi32(int_val0); + __m256i int32_val1 = cvtepi8_epi32(int_val1); + __m256i int32_val2 = cvtepi8_epi32(int_val2); + __m256i int32_val3 = cvtepi8_epi32(int_val3); + + __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0)); + __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1)); + __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2)); + __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3)); + + __m256i int32_b0 = cvtepi8_epi32(int_b0); + __m256i int32_b1 = cvtepi8_epi32(int_b1); + __m256i int32_b2 = cvtepi8_epi32(int_b2); + __m256i int32_b3 = cvtepi8_epi32(int_b3); + + __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0); + __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1); + __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2); + __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3); + + return {Vectorized(res_0), + Vectorized(res_1), + 
Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + return RequantizeAvx2(inp, multiplier_v, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 32; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::quint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::quint8& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi8(uw); + } + + // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy) + C10_CLANG_DIAGNOSTIC_PUSH() + #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy") + C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy") + #endif + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + C10_CLANG_DIAGNOSTIC_POP() + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
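+  // (The count < size() path is typically taken for the trailing remainder of
+  // a buffer whose length is not a multiple of the vector width; the zero-fill
+  // keeps those unused tail lanes deterministic.)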
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + private: + __m256i cvtepu8_epi32(__m128i epu8_vals) const { + return _mm256_cvtepu8_epi32(epu8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_zp_premul) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_zp_premul); + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float /*scale*/, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + uint8_t quantized_values[32]; + QuantizeAvx2( + rhs_data, quantized_values, 32, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epu8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epu8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epu8( + _mm256_max_epu8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256i int32_val0 = cvtepu8_epi32(int_val0); + __m256i int32_val1 = cvtepu8_epi32(int_val1); + __m256i int32_val2 = cvtepu8_epi32(int_val2); + __m256i int32_val3 = cvtepu8_epi32(int_val3); + + __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0)); + __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1)); + __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2)); + __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3)); + + __m256i int32_b0 = cvtepu8_epi32(int_b0); + __m256i int32_b1 = cvtepu8_epi32(int_b1); + __m256i int32_b2 = cvtepu8_epi32(int_b2); + __m256i int32_b3 = cvtepu8_epi32(int_b3); + + __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0); + __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1); + __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2); + __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3); + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = 
_mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + return RequantizeAvx2(inp, multiplier_v, zero_point_v); + } + + private: + + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#else + +// NOTE: These are low-performance implementations that we fall back on +// if we are not building with AVX2. This may not be an issue, because +// currently for quantization we assume the user has at least AVX512 +// installed, so these can simply act as a reference implementation. +// +// If in the future we relax this requirement (AVX2+), we should probably +// revisit these implementations + +template < + typename T, + typename float_vec_return_type_, + typename int_vec_return_type_, + int size_> +struct VectorizedQuantizedConverter { + static constexpr int size() { + return size_; + } + + static constexpr int float_num_vecs() { + return size() / 8; + } + + static constexpr int int_num_vecs() { + return size() / 8; + } + + using float_vec_return_type = float_vec_return_type_; + using int_vec_return_type = int_vec_return_type_; + + using value_type = typename T::underlying; + std::array vals; + + VectorizedQuantizedConverter(T val) { + for (const auto i : c10::irange(size())) { + vals[i] = val.val_; + } + } + + VectorizedQuantizedConverter(const void* ptr) { + memcpy(vals.data(), ptr, sizeof(value_type) * size()); + } + + void store(void* ptr, int count = size()) const { + memcpy(ptr, vals.data(), count * sizeof(value_type)); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized /*scale_zp_premul*/) const { + float_vec_return_type rv; + for (const auto i : c10::irange(float_num_vecs())) { + float tmp_vals[8]; + for (const auto j : c10::irange(8)) { + tmp_vals[j] = at::native::dequantize_val( + scale[j], zero_point[j], T(vals[8 * i + j])); + } + rv[i] = Vectorized(tmp_vals[0], + tmp_vals[1], + tmp_vals[2], + tmp_vals[3], + tmp_vals[4], + tmp_vals[5], + tmp_vals[6], + tmp_vals[7]); + } + return rv; + } + + protected: + VectorizedQuantizedConverter() {} +}; + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8>() {} + Vectorized(c10::qint32 val) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint32*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + for (const auto i : c10::irange(size())) { + retval[0].vals[i] = vals[i] - b.vals[i]; + } + return retval; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = + nearbyint(static_cast(inp[0].vals[i]) * multiplier) + + zero_point; + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] * b.vals[i]; + } + return retval; +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] + b.vals[i]; + } + return retval; +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32>() {} + Vectorized(c10::qint8 val) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint8*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32> { + Vectorized() + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>() {} + Vectorized(c10::quint8 val) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::quint8*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#endif // if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512.h new file mode 100644 index 0000000000000000000000000000000000000000..0c6f33fa08a06f021cae9326819a3789d2c71d3d --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512.h @@ -0,0 +1,195 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX]
+
+#include <ATen/cpu/vec/intrinsics.h>
+
+#include <ATen/cpu/vec/vec_base.h>
+#include <ATen/cpu/vec/vec512/vec512_float.h>
+#include <ATen/cpu/vec/vec512/vec512_bfloat16.h>
+#include <ATen/cpu/vec/vec512/vec512_double.h>
+#include <ATen/cpu/vec/vec512/vec512_int.h>
+#include <ATen/cpu/vec/vec512/vec512_qint.h>
+#include <ATen/cpu/vec/vec512/vec512_complex_float.h>
+#include <ATen/cpu/vec/vec512/vec512_complex_double.h>
+
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <ostream>
+
+namespace at {
+namespace vec {
+
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
+  stream << val.val_;
+  return stream;
+}
+inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
+  stream << static_cast<int>(val.val_);
+  return stream;
+}
+inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
+  stream << static_cast<unsigned int>(val.val_);
+  return stream;
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
+  T buf[Vectorized<T>::size()];
+  vec.store(buf);
+  stream << "vec[";
+  for (int i = 0; i != Vectorized<T>::size(); i++) {
+    if (i != 0) {
+      stream << ", ";
+    }
+    stream << buf[i];
+  }
+  stream << "]";
+  return stream;
+}
+
+
+#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template<>
+inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
+  return _mm512_castpd_ps(src);
+}
+
+template<>
+inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
+  return _mm512_castps_pd(src);
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
+inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
+  return _mm512_i64gather_pd(vindex, base_addr, scale);
+}
+
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
+inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
+  return _mm512_i32gather_ps(vindex, base_addr, scale);
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
+inline mask_gather(const Vectorized<double>& src, const double* base_addr,
+                   const Vectorized<int64_t>& vindex, const Vectorized<double>& mask) {
+  auto all_ones = _mm512_castsi512_pd(_mm512_set1_epi64(0xFFFFFFFFFFFFFFFF));
+  auto mask_ = _mm512_cmp_pd_mask(all_ones, mask.values, _CMP_EQ_OQ);
+  return _mm512_mask_i64gather_pd(src, mask_, vindex, base_addr, scale);
+}
+
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
+inline mask_gather(const Vectorized<float>& src, const float* base_addr,
+                   const Vectorized<int32_t>& vindex, const Vectorized<float>& mask) {
+  auto all_ones = _mm512_castsi512_ps(_mm512_set1_epi32(0xFFFFFFFF));
+  auto mask_ = _mm512_cmp_ps_mask(all_ones, mask.values, _CMP_EQ_OQ);
+  return _mm512_mask_i32gather_ps(src, mask_, vindex, base_addr, scale);
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template<>
+Vectorized<int64_t>
+inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
+  return _mm512_cvtpd_epi64(src);
+}
+
+template<>
+Vectorized<int32_t>
+inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
+  return _mm512_cvttps_epi32(src);
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template <>
+std::pair<Vectorized<double>, Vectorized<double>>
+inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
+  // inputs:
+  //   a = {a0, a1, a2, a3, a4, a5, a6, a7}
+  //   b = {b0, b1, b2, b3, b4, b5, b6, b7}
+  // group cols crossing lanes:
+  //   return {a0, b0, a1, b1, a2, b2, a3, b3}
+  //          {a4, b4, a5, b5, a6, b6, a7, b7}
+  __m512i idx1 = _mm512_set_epi64(11, 3, 10, 2, 9, 1, 8, 0);
+  __m512i idx2 = _mm512_set_epi64(15, 7, 14, 6, 13, 5, 12, 4);
+  return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
+                        _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
+}
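A hedged usage sketch of the layout transform documented above: interleave2 zips two "struct of arrays" registers (for instance, real and imaginary parts) into "array of structs" order, and deinterleave2 inverts it. The function name pack_complex and the umbrella include are our own assumptions, not part of this header.

#include <ATen/cpu/vec/vec.h>

// Zip {re0..re7} and {im0..im7} into {re0, im0, ..., re7, im7}.
void pack_complex(const double* re, const double* im, double* out) {
  using Vec = at::vec::Vectorized<double>;
  auto a = Vec::loadu(re);                // {re0, ..., re7}
  auto b = Vec::loadu(im);                // {im0, ..., im7}
  auto ab = at::vec::interleave2(a, b);
  ab.first.store(out);                    // {re0, im0, ..., re3, im3}
  ab.second.store(out + Vec::size());     // {re4, im4, ..., re7, im7}
}

Applying deinterleave2 to the two stored halves recovers the original a and b, which is the round-trip property the index vectors above are constructed for.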
+ +template <> +std::pair, Vectorized> +inline interleave2(const Vectorized& a, const Vectorized& b) { + // inputs: + // a = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} + // b = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} + // + // return: + // {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} + // {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} + __m512i idx1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4, + 19, 3, 18, 2, 17, 1, 16, 0); + __m512i idx2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12, + 27, 11, 26, 10, 25, 9, 24, 8); + return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), + _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template <> +std::pair, Vectorized> +inline deinterleave2(const Vectorized& a, const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1, a2, b2, a3, b3} + // b = {a4, b4, a5, b5, a6, b6, a7, b7} + // output: + // return {a0, a1, a2, a3, a4, a5, a6, a7} + // {b0, b1, b2, b3, b4, b5, b6, b7} + // The members of indices have been written in binary format for better understandability + __m512i idx1 = _mm512_set_epi64(14, 12, 10, 8, 6, 4, 2, 0); + __m512i idx2 = _mm512_set_epi64(15, 13, 11, 9, 7, 5, 3, 1); + + return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b), + _mm512_mask_permutex2var_pd(a, 0xff, idx2, b)); +} + +template <> +std::pair, Vectorized> +inline deinterleave2(const Vectorized& a, const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} + // b = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} + // output: + // return {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} + // {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} + __m512i idx1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, + 14, 12, 10, 8, 6, 4, 2, 0); + __m512i idx2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, + 15, 13, 11, 9, 7, 5, 3, 1); + + return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), + _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); +} + +#endif // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..65fca91542153fbfca2c03feea92e17ad9eab09e --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h @@ -0,0 +1,925 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +static inline void cvtbf16_fp32(const __m256i& a, __m512& o) { + o = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16)); +} + +static inline void cvtbf16_fp32(const __m512i& a, __m512& o1, __m512& o2) { + __m256i lo = _mm512_extracti32x8_epi32(a, 0); + __m256i hi = _mm512_extracti32x8_epi32(a, 1); + cvtbf16_fp32(lo, o1); + cvtbf16_fp32(hi, o2); +} + +static inline __m512i cvtfp32_bf16(const __m512& a, const __m512& b) { + __m512i lo = _mm512_castps_si512(a); + __m512i hi = _mm512_castps_si512(b); + __m512i nan = _mm512_set1_epi32(0xffff); + auto mask_lo = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q); + auto mask_hi = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q); + __m512i ones = _mm512_set1_epi32(0x1); + __m512i vec_bias = _mm512_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_lo = _mm512_and_si512(_mm512_srli_epi32(lo, 16), ones); + auto t_hi = _mm512_and_si512(_mm512_srli_epi32(hi, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_lo = _mm512_add_epi32(t_lo, vec_bias); + t_hi = _mm512_add_epi32(t_hi, vec_bias); + // input += rounding_bias; + t_lo = _mm512_add_epi32(t_lo, lo); + t_hi = _mm512_add_epi32(t_hi, hi); + // input = input >> 16; + t_lo = _mm512_srli_epi32(t_lo, 16); + t_hi = _mm512_srli_epi32(t_hi, 16); + // Check NaN before converting back to bf16 + t_lo = _mm512_mask_blend_epi32(mask_lo, nan, t_lo); + t_hi = _mm512_mask_blend_epi32(mask_hi, nan, t_hi); + + t_lo = _mm512_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4] + __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0); + return _mm512_permutexvar_epi64(idx, t_lo); +} + +static inline __m512i merge_compare_result(const __m512& a, const __m512& b) { + __m512i lo = _mm512_castps_si512(a); + __m512i hi = _mm512_castps_si512(b); + lo = _mm512_srli_epi32(lo, 16); + hi = _mm512_srli_epi32(hi, 16); + auto out = _mm512_packus_epi32(lo, hi); + __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0); + return _mm512_permutexvar_epi64(idx, out); +} + +template <> class Vectorized { +private: + __m512i values; +public: + using value_type = uint16_t; + using size_type = int; + static constexpr size_type size() { + return 32; + } + Vectorized() {} + Vectorized(__m512i v) : values(v) {} + Vectorized(BFloat16 val) { + value_type uw = val.x; + values = _mm512_set1_epi16(uw); + } + Vectorized(BFloat16 val1, BFloat16 val2, BFloat16 val3, BFloat16 val4, + BFloat16 val5, BFloat16 val6, BFloat16 val7, BFloat16 val8, + BFloat16 val9, BFloat16 val10, BFloat16 val11, BFloat16 val12, + BFloat16 val13, BFloat16 val14, BFloat16 val15, BFloat16 val16, + BFloat16 val17, BFloat16 val18, BFloat16 val19, BFloat16 val20, + BFloat16 val21, BFloat16 val22, BFloat16 val23, BFloat16 val24, + BFloat16 val25, BFloat16 val26, BFloat16 val27, BFloat16 val28, + BFloat16 val29, BFloat16 val30, BFloat16 val31, BFloat16 val32) { + values = _mm512_set_epi16( + val32.x, val31.x, val30.x, val29.x, val28.x, val27.x, val26.x, val25.x, + val24.x, val23.x, val22.x, val21.x, val20.x, val19.x, val18.x, val17.x, + val16.x, val15.x, val14.x, val13.x, val12.x, val11.x, val10.x, val9.x, + val8.x, val7.x, val6.x, val5.x, val4.x, val3.x, val2.x, val1.x); + } + operator __m512i() const { + return values; 
+ } + BFloat16& operator[](int idx) = delete; + const BFloat16& operator[](int idx) const = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + return _mm512_cmpeq_epi16_mask(values, _mm512_set1_epi16(0)); + } + static Vectorized loadu(const void* ptr) { + return _mm512_loadu_si512(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int16_t count) { + __at_align__ int16_t tmp_values[size()]; + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values); + } else if (count > 0) { + __at_align__ int16_t tmp_values[size()]; + _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = b.values[31]; + if (mask & 0x02) + tmp_values[1] = b.values[30]; + if (mask & 0x04) + tmp_values[2] = b.values[29]; + if (mask & 0x08) + tmp_values[3] = b.values[28]; + if (mask & 0x10) + tmp_values[4] = b.values[27]; + if (mask & 0x20) + tmp_values[5] = b.values[26]; + if (mask & 0x40) + tmp_values[6] = b.values[25]; + if (mask & 0x80) + tmp_values[7] = b.values[24]; + if (mask & 0x100) + tmp_values[8] = b.values[23]; + if (mask & 0x200) + tmp_values[9] = b.values[22]; + if (mask & 0x400) + tmp_values[10] = b.values[21]; + if (mask & 0x800) + tmp_values[11] = b.values[20]; + if (mask & 0x1000) + tmp_values[12] = b.values[19]; + if (mask & 0x2000) + tmp_values[13] = b.values[18]; + if (mask & 0x4000) + tmp_values[14] = b.values[17]; + if (mask & 0x8000) + tmp_values[15] = b.values[16]; + if (mask & 0x10000) + tmp_values[16] = b.values[15]; + if (mask & 0x20000) + tmp_values[17] = b.values[14]; + if (mask & 0x40000) + tmp_values[18] = b.values[13]; + if (mask & 0x80000) + tmp_values[19] = b.values[12]; + if (mask & 0x100000) + tmp_values[20] = b.values[11]; + if (mask & 0x200000) + tmp_values[21] = b.values[10]; + if (mask & 0x400000) + tmp_values[22] = b.values[9]; + if (mask & 0x800000) + tmp_values[23] = b.values[8]; + if (mask & 0x1000000) + tmp_values[24] = b.values[7]; + if (mask & 0x2000000) + tmp_values[25] = b.values[6]; + if (mask & 0x4000000) + tmp_values[26] = b.values[5]; + if (mask & 0x8000000) + tmp_values[27] = b.values[4]; + if (mask & 0x10000000) + tmp_values[28] = b.values[3]; + if (mask & 0x20000000) + tmp_values[29] = b.values[2]; + if (mask & 0x40000000) + tmp_values[30] = b.values[1]; + if (mask & 0x80000000) + tmp_values[31] = b.values[0]; + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, + const Vectorized& b, const Vectorized& mask) { + auto all_ones = _mm512_set1_epi16(0xFFFF); + auto mask_ = _mm512_cmp_epi16_mask(mask, all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_epi16(mask_, a.values, b.values); + } + template + static Vectorized arange(BFloat16 base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step, + base + 16 * step, base + 17 * step, base 
+ 18 * step, base + 19 * step, + base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step, + base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step, + base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step); + } + static Vectorized set(const Vectorized& a, + const Vectorized& b, int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + case 16: + return blend<65535>(a, b); + case 17: + return blend<131071>(a, b); + case 18: + return blend<262143>(a, b); + case 19: + return blend<524287>(a, b); + case 20: + return blend<1048575>(a, b); + case 21: + return blend<2097151>(a, b); + case 22: + return blend<4194303>(a, b); + case 23: + return blend<8388607>(a, b); + case 24: + return blend<16777215>(a, b); + case 25: + return blend<33554431>(a, b); + case 26: + return blend<67108863>(a, b); + case 27: + return blend<134217727>(a, b); + case 28: + return blend<268435455>(a, b); + case 29: + return blend<536870911>(a, b); + case 30: + return blend<1073741823>(a, b); + case 31: + return blend<2147483647>(a, b); + } + return b; + } + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wignored-qualifiers" + Vectorized map(const __m512 (*const vop)(__m512)) const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + const auto o1 = vop(lo); + const auto o2 = vop(hi); + return cvtfp32_bf16(o1, o2); + } + #pragma clang diagnostic pop + Vectorized abs() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + const auto mask = _mm512_set1_ps(-0.f); + const auto o1 = _mm512_andnot_ps(mask, lo); + const auto o2 = _mm512_andnot_ps(mask, hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized angle() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto angle_lambda = [](__m512 values) { + const auto zero_vec = _mm512_set1_ps(0.f); + const auto nan_vec = _mm512_set1_ps(NAN); + const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ); + const auto non_nan_mask_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec), + not_nan_mask, 0xFFFFFFFF); + const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(non_nan_mask_vec), + zero_vec, _CMP_EQ_OQ); + const auto pi = _mm512_set1_ps(c10::pi); + + const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi); + angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec); + return angle; + }; + auto o1 = angle_lambda(lo); + auto o2 = angle_lambda(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return map(Sleef_acosf16_u10); + } + Vectorized asin() const { + return map(Sleef_asinf16_u10); + } + Vectorized atan() const { + return map(Sleef_atanf16_u10); + } + Vectorized atan2(const Vectorized &b) const { + __m512 lo, hi; + __m512 b1, b2; + cvtbf16_fp32(values, lo, hi); + 
cvtbf16_fp32(b.values, b1, b2); + auto o1 = Sleef_atan2f16_u10(lo, b1); + auto o2 = Sleef_atan2f16_u10(hi, b2); + return cvtfp32_bf16(o1, o2); + } + Vectorized copysign(const Vectorized &sign) const { + // copy sign bit (0x8000) from sign and remaining bits from values + __m512i mask_value = _mm512_set1_epi32(~0x80008000); + __m512i mask_signbit = _mm512_set1_epi32(0x80008000); + return Vectorized( + _mm512_or_si512( + _mm512_and_si512(values, mask_value), + _mm512_and_si512(sign, mask_signbit))); + } + Vectorized erf() const { + return map(Sleef_erff16_u10); + } + Vectorized erfc() const { + return map(Sleef_erfcf16_u15); + } + Vectorized erfinv() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmp1), lo); + _mm512_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_erfinv(tmp1[i]); + tmp2[i] = calc_erfinv(tmp2[i]); + } + auto o1 = _mm512_loadu_ps(tmp1); + auto o2 = _mm512_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized exp() const { + return map(Sleef_expf16_u10); + } + Vectorized expm1() const { + return map(Sleef_expm1f16_u10); + } + Vectorized fmod(const Vectorized & q) const { + __m512 x_lo, x_hi; + cvtbf16_fp32(values, x_lo, x_hi); + __m512 q_lo, q_hi; + cvtbf16_fp32(q.values, q_lo, q_hi); + auto o1 = Sleef_fmodf16(x_lo, q_lo); + auto o2 = Sleef_fmodf16(x_hi, q_hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized hypot(const Vectorized &b) const { + __m512 lo, hi; + __m512 b1, b2; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(b.values, b1, b2); + auto o1 = Sleef_hypotf16_u05(lo, b1); + auto o2 = Sleef_hypotf16_u05(hi, b2); + return cvtfp32_bf16(o1, o2); + } + Vectorized i0() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmp1), lo); + _mm512_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_i0(tmp1[i]); + tmp2[i] = calc_i0(tmp2[i]); + } + auto o1 = _mm512_loadu_ps(tmp1); + auto o2 = _mm512_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized i0e() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + constexpr auto sz = size(); + __at_align__ float tmp1[sz / 2], tmp2[sz / 2]; + _mm512_storeu_ps(reinterpret_cast(tmp1), lo); + _mm512_storeu_ps(reinterpret_cast(tmp2), hi); + + for (auto i = decltype(sz){0}; i < sz / 2; i++) { + tmp1[i] = calc_i0e(tmp1[i]); + tmp2[i] = calc_i0e(tmp2[i]); + } + const auto o1 = _mm512_loadu_ps(tmp1); + const auto o2 = _mm512_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized igamma(const Vectorized &x) const { + __m512 lo, hi; + __m512 xlo, xhi; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmp1), lo); + _mm512_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm512_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]); + } + auto o1 = _mm512_loadu_ps(tmp1); + auto o2 = _mm512_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + + Vectorized igammac(const Vectorized &x) const { + __m512 lo, hi; + __m512 xlo, xhi; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(x.values, xlo, xhi); + 
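+  // There is no SIMD kernel for igammac, so the code below uses the same
+  // store/compute/loadu fallback as erfinv(), i0() and igamma() above: spill
+  // both fp32 halves to aligned stack arrays, apply the scalar calc_igammac
+  // lane by lane, then reload. Generic shape of the pattern (sketch only;
+  // scalar_fn is a placeholder name):
+  //   alignas(64) float tmp[16];
+  //   _mm512_store_ps(tmp, v);
+  //   for (int i = 0; i < 16; ++i) tmp[i] = scalar_fn(tmp[i]);
+  //   v = _mm512_load_ps(tmp);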
__at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmp1), lo); + _mm512_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm512_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]); + } + auto o1 = _mm512_loadu_ps(tmp1); + auto o2 = _mm512_loadu_ps(tmp2); + return cvtfp32_bf16(o1, o2); + } + Vectorized log() const { + return map(Sleef_logf16_u10); + } + Vectorized log2() const { + return map(Sleef_log2f16_u10); + } + Vectorized log10() const { + return map(Sleef_log10f16_u10); + } + Vectorized log1p() const { + return map(Sleef_log1pf16_u10); + } + Vectorized frac() const; + Vectorized sin() const { + return map(Sleef_sinf16_u10); + } + Vectorized sinh() const { + return map(Sleef_sinhf16_u10); + } + Vectorized cos() const { + return map(Sleef_cosf16_u10); + } + Vectorized cosh() const { + return map(Sleef_coshf16_u10); + } + Vectorized ceil() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm512_ceil_ps(lo); + auto o2 = _mm512_ceil_ps(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized floor() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm512_floor_ps(lo); + auto o2 = _mm512_floor_ps(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized neg() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto mask = _mm512_set1_ps(-0.f); + auto o1 = _mm512_xor_ps(mask, lo); + auto o2 = _mm512_xor_ps(mask, hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized round() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return cvtfp32_bf16(o1, o2); + } + Vectorized tan() const { + return map(Sleef_tanf16_u10); + } + Vectorized tanh() const { + return map(Sleef_tanhf16_u10); + } + Vectorized trunc() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + return cvtfp32_bf16(o1, o2); + } + Vectorized lgamma() const { + return map(Sleef_lgammaf16_u10); + } + Vectorized sqrt() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto o1 = _mm512_sqrt_ps(lo); + auto o2 = _mm512_sqrt_ps(hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized reciprocal() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto ones = _mm512_set1_ps(1); + auto o1 = _mm512_div_ps(ones, lo); + auto o2 = _mm512_div_ps(ones, hi); + return cvtfp32_bf16(o1, o2); + } + Vectorized rsqrt() const { + __m512 lo, hi; + cvtbf16_fp32(values, lo, hi); + auto ones = _mm512_set1_ps(1); + auto o1 = _mm512_div_ps(ones, _mm512_sqrt_ps(lo)); + auto o2 = _mm512_div_ps(ones, _mm512_sqrt_ps(hi)); + return cvtfp32_bf16(o1, o2); + } + Vectorized pow(const Vectorized &b) const { + __m512 lo, hi; + __m512 b1, b2; + cvtbf16_fp32(values, lo, hi); + cvtbf16_fp32(b.values, b1, b2); + auto o1 = Sleef_powf16_u10(lo, b1); + auto o2 = Sleef_powf16_u10(hi, b2); + return cvtfp32_bf16(o1, o2); + } + + Vectorized inline operator>(const Vectorized& other) const; + Vectorized inline operator<(const Vectorized& other) const; + Vectorized inline operator>=(const Vectorized& other) const; + Vectorized inline 
operator<=(const Vectorized& other) const; + Vectorized inline operator==(const Vectorized& other) const; + Vectorized inline operator!=(const Vectorized& other) const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template +Vectorized static inline bfloat16_binary_op_as_fp32(const Vectorized& a, + const Vectorized& b, Op op) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(b), b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvtfp32_bf16(o1, o2); +} + +template +Vectorized static inline bfloat16_compare_as_fp32(const Vectorized& a, + const Vectorized& b, Op op) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(b), b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return merge_compare_result(o1, o2); +} + +Vectorized inline Vectorized::operator>(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); +} +Vectorized inline Vectorized::operator<(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); +} +Vectorized inline Vectorized::operator>=(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); +} +Vectorized inline Vectorized::operator<=(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); +} +Vectorized inline Vectorized::operator==(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_EQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); +} +Vectorized inline Vectorized::operator!=(const Vectorized& other) const { + return bfloat16_compare_as_fp32(*this, other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_NEQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); +} + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return bfloat16_binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return bfloat16_binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) 
{
+  return bfloat16_binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
+}
+Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  return bfloat16_binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
+}
+
+Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  return _mm512_and_si512(a, b);
+}
+Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  return _mm512_or_si512(a, b);
+}
+Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  return _mm512_xor_si512(a, b);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
+  return (*this == other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
+  return (*this != other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
+  return (*this > other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
+  return (*this >= other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
+  return (*this < other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
+  return (*this <= other) & Vectorized<BFloat16>(1.0f);
+}
+
+// frac. Implement this here so we can use subtraction
+inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
+  return *this - this->trunc();
+}
+
+// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  __m512 a_lo, a_hi;
+  __m512 b_lo, b_hi;
+  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
+  cvtbf16_fp32(__m512i(b), b_lo, b_hi);
+  auto max_lo = _mm512_max_ps(a_lo, b_lo);
+  auto max_hi = _mm512_max_ps(a_hi, b_hi);
+  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
+  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
+  auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
+  auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
+  // Exploit the fact that all-ones is a NaN.
+  auto o1 = _mm512_or_ps(max_lo, nan_lo);
+  auto o2 = _mm512_or_ps(max_hi, nan_hi);
+  return cvtfp32_bf16(o1, o2);
+}
+
+// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  __m512 a_lo, a_hi;
+  __m512 b_lo, b_hi;
+  __m512i zero_vec = _mm512_set1_epi32(0);
+  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
+  cvtbf16_fp32(__m512i(b), b_lo, b_hi);
+  auto min_lo = _mm512_min_ps(a_lo, b_lo);
+  auto min_hi = _mm512_min_ps(a_hi, b_hi);
+  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
+  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
+  auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
+                                                           0xFFFFFFFF));
+  auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
+                                                           0xFFFFFFFF));
+  // Exploit the fact that all-ones is a NaN.
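+  // (0xFFFFFFFF has an all-ones exponent and a non-zero mantissa, i.e. it is
+  // a quiet-NaN bit pattern. OR-ing it into the lanes flagged by the
+  // _CMP_UNORD_Q compares forces exactly those lanes to NaN, which supplies
+  // the NaN propagation that _mm512_min_ps/_mm512_max_ps alone do not give.)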
+ auto o1 = _mm512_or_ps(min_lo, nan_lo); + auto o2 = _mm512_or_ps(min_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m512 a_lo, a_hi; + __m512 min_lo, min_hi; + __m512 max_lo, max_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(min), min_lo, min_hi); + cvtbf16_fp32(__m512i(max), max_lo, max_hi); + auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo)); + auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi)); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m512 a_lo, a_hi; + __m512 max_lo, max_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(max), max_lo, max_hi); + auto o1 = _mm512_min_ps(max_lo, a_lo); + auto o2 = _mm512_min_ps(max_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m512 a_lo, a_hi; + __m512 min_lo, min_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(min), min_lo, min_hi); + auto o1 = _mm512_max_ps(min_lo, a_lo); + auto o2 = _mm512_max_ps(min_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i))); + _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, BFloat16* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m512 a = _mm512_loadu_ps(&src[i]); + __m512 b = _mm512_loadu_ps(&src[i + 16]); + + __m512i bf = cvtfp32_bf16(a, b); + _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, BFloat16* dst, int64_t n) { + auto load_float = [](const double *src) -> __m512 { + // Load one float vector from an array of doubles + __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src)); + __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8)); + return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m512 a = load_float(&src[i]); + __m512 b = load_float(&src[i + 16]); + + __m512i bf = cvtfp32_bf16(a, b); + _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const Vectorized& c) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + __m512 c_lo, c_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(b), b_lo, b_hi); + cvtbf16_fp32(__m512i(c), c_lo, c_hi); + auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_bf16(o1, o2); +} + +inline std::tuple, Vectorized> convert_bfloat16_float(const Vectorized& a) { + __m512 o1, o2; + cvtbf16_fp32(__m512i(a), o1, o2); + return std::make_tuple(o1, o2); +} + +inline Vectorized convert_float_bfloat16(const Vectorized& a, const Vectorized& b) { + return cvtfp32_bf16(__m512(a), __m512(b)); +} + +#else //defined(CPU_CAPABILITY_AVX512) && 
!defined(_MSC_VER) + +inline std::tuple, Vectorized> convert_bfloat16_float(const Vectorized& a) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float arr[K]; + __at_align__ BFloat16 arr2[K]; + a.store(arr2); + for (const auto k : c10::irange(K)) { + arr[k] = c10::convert(arr2[k]); + } + return std::make_tuple( + Vectorized::loadu(arr), + Vectorized::loadu(arr + Vectorized::size())); +} + +inline Vectorized convert_float_bfloat16(const Vectorized& a, const Vectorized& b) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float arr[K]; + __at_align__ BFloat16 arr2[K]; + a.store(arr); + b.store(arr + Vectorized::size()); + for (const auto k : c10::irange(K)) { + arr2[k] = c10::convert(arr[k]); + } + return Vectorized::loadu(arr2); +} + +#endif // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out) { + auto values = _mm256_loadu_si256(reinterpret_cast(data)); + __m512 out_values; + cvtbf16_fp32(values, out_values); + out = out_values; +} + +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out1, Vectorized& out2) { + auto vec = Vectorized::loadu(data); + __m512 out1_values, out2_values; + cvtbf16_fp32(vec, out1_values, out2_values); + out1 = out1_values; + out2 = out2_values; +} +#else // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out) { + __at_align__ float values[Vectorized::size()]; + for (const auto k : c10::irange(Vectorized::size())) { + values[k] = data[k]; + } + out = Vectorized::loadu(values); +} + +inline void load_fp32_from_bf16(const c10::BFloat16 *data, Vectorized& out1, Vectorized& out2) { + load_fp32_from_bf16(data, out1); + data += Vectorized::size(); + load_fp32_from_bf16(data, out2); +} + +#endif + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h new file mode 100644 index 0000000000000000000000000000000000000000..9d862534a9d67980960a7aa7a32b1338d801dab2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h @@ -0,0 +1,527 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +template <> class Vectorized> { +private: + __m512d values; + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m512d v) : values(v) {} + Vectorized(c10::complex val) { + double real_value = val.real(); + double imag_value = val.imag(); + values = _mm512_setr_pd(real_value, imag_value, real_value, imag_value, + real_value, imag_value, real_value, imag_value); + } + Vectorized(c10::complex val1, c10::complex val2, + c10::complex val3, c10::complex val4) { + values = _mm512_setr_pd(val1.real(), val1.imag(), + val2.real(), val2.imag(), + val3.real(), val3.imag(), + val4.real(), val4.imag()); + } + operator __m512d() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, + const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + // NOLINTNEXTLINE(clang-diagnostic-warning) + switch (mask) { + case 0: + return a; + case 1: + return _mm512_mask_blend_pd(0x03, a.values, b.values); //b0000 0001 = b0000 0011 + case 2: + return _mm512_mask_blend_pd(0x0C, a.values, b.values); //b0000 0010 = b0000 1100 + case 3: + return _mm512_mask_blend_pd(0x0F, a.values, b.values); //b0000 0011 = b0000 1111 + case 4: + return _mm512_mask_blend_pd(0x30, a.values, b.values); //b0000 0100 = b0011 0000 + case 5: + return _mm512_mask_blend_pd(0x33, a.values, b.values); //b0000 0101 = b0011 0011 + case 6: + return _mm512_mask_blend_pd(0x3C, a.values, b.values); //b0000 0110 = b0011 1100 + case 7: + return _mm512_mask_blend_pd(0x3F, a.values, b.values); //b0000 0111 = b0011 1111 + case 8: + return _mm512_mask_blend_pd(0xC0, a.values, b.values); //b0000 1000 = b1100 0000 + case 9: + return _mm512_mask_blend_pd(0xC3, a.values, b.values); //b0000 1001 = b1100 0011 + case 10: + return _mm512_mask_blend_pd(0xCC, a.values, b.values); //b0000 1010 = b1100 1100 + case 11: + return _mm512_mask_blend_pd(0xCF, a.values, b.values); //b0000 1011 = b1100 1111 + case 12: + return _mm512_mask_blend_pd(0xF0, a.values, b.values); //b0000 1100 = b1111 0000 + case 13: + return _mm512_mask_blend_pd(0xF3, a.values, b.values); //b0000 1101 = b1111 0011 + case 14: + return _mm512_mask_blend_pd(0xFC, a.values, b.values); //b0000 1110 = b1111 1100 + case 15: + return _mm512_mask_blend_pd(0xFF, a.values, b.values); //b0000 1111 = b1111 1111 + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, + const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm512_unpacklo_pd(mask.values, mask.values); + auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF); + auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask_), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_pd(mmask, a.values, b.values); + } + template + static Vectorized> arange(c10::complex base = 0., + step_t step = static_cast(1)) { + return Vectorized>(base, + base + c10::complex(1)*step, + base + c10::complex(2)*step, + base + c10::complex(3)*step); + } + static Vectorized> set(const Vectorized>& a, + const Vectorized>& b, + 
int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_pd(reinterpret_cast(ptr)); + + __at_align__ double tmp_values[2*size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm512_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[2*size()]; + _mm512_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + // AVX512 doesn't have horizontal add & horizontal sub instructions. + // TODO: hadd_pd() & hsub_pd() may have scope for improvement. + static inline __m512d hadd_pd(__m512d a, __m512d b) { + __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0); + __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1); + return _mm512_add_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b), + _mm512_mask_permutex2var_pd(a, 0xff, idx2, b)); + } + static inline __m512d hsub_pd(__m512d a, __m512d b) { + __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0); + __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1); + return _mm512_sub_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b), + _mm512_mask_permutex2var_pd(a, 0xff, idx2, b)); + } + __m512d abs_2_() const { + auto val_2 = _mm512_mul_pd(values, values); // a*a b*b + return hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b + } + __m512d abs_() const { + return _mm512_sqrt_pd(abs_2_()); // abs abs + } + Vectorized> abs() const { + const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm512_and_pd(abs_(), real_mask); // abs 0 + } + __m512d angle_() const { + //angle = atan2(b/a) + auto b_a = _mm512_permute_pd(values, 0x55); // b a + return Sleef_atan2d8_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + auto angle = _mm512_permute_pd(angle_(), 0x55); // angle 90-angle + return _mm512_and_pd(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm512_setzero_pd(); + auto mask = _mm512_cmp_pd_mask(abs, zero, _CMP_EQ_OQ); + auto mask_vec = 
_mm512_mask_set1_epi64(_mm512_castpd_si512(zero), mask, + 0xFFFFFFFFFFFFFFFF); + auto abs_val = Vectorized(abs); + + auto div = values / abs_val.values; // x / abs(x) + + return blendv(div, zero, _mm512_castsi512_pd(mask_vec)); + } + __m512d real_() const { + const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm512_and_pd(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m512d imag_() const { + const __m512d imag_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF)); + return _mm512_and_pd(values, imag_mask); + } + Vectorized> imag() const { + return _mm512_permute_pd(imag_(), 0x55); //b a + } + __m512d conj_() const { + const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + return _mm512_xor_pd(values, sign_mask); // a -b + } + Vectorized> conj() const { + return conj_(); + } + Vectorized> log() const { + // Most trigonomic ops use the log() op to improve complex number performance. + return map(std::log); + } + Vectorized> log2() const { + const __m512d log2_ = _mm512_set1_pd(std::log(2)); + return _mm512_div_pd(log(), log2_); + } + Vectorized> log10() const { + const __m512d log10_ = _mm512_set1_pd(std::log(10)); + return _mm512_div_pd(log(), log10_); + } + Vectorized> log1p() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m512d one = _mm512_set1_pd(1); + + auto conj = conj_(); + auto b_a = _mm512_permute_pd(conj, 0x55); //-b a + auto ab = _mm512_mul_pd(conj, b_a); //-ab -ab + auto im = _mm512_add_pd(ab, ab); //-2ab -2ab + + auto val_2 = _mm512_mul_pd(values, values); // a*a b*b + auto re = hsub_pd(val_2, _mm512_permute_pd(val_2, 0x55)); // a*a-b*b b*b-a*a + re = _mm512_sub_pd(one, re); + + auto root = Vectorized(_mm512_mask_blend_pd(0xAA, re, im)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm512_add_pd(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm512_permute_pd(ln.values, 0x55)).conj(); //-i*ln() + } + Vectorized> acos() const { + // acos(x) = pi/2 - asin(x) + constexpr auto pi_2d = c10::pi / 2; + const __m512d pi_2 = _mm512_setr_pd(pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0); + return _mm512_sub_pd(pi_2, asin()); + } + Vectorized> atan() const; + Vectorized> atan2(const Vectorized> &b) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erf() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erfc() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expd8_u10(values); //exp(a) exp(b) + exp = _mm512_mask_blend_pd(0xAA, exp, _mm512_permute_pd(exp, 0x55)); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosd8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm512_mask_blend_pd(0xAA, _mm512_permute_pd(sin_cos.y, 0x55), + sin_cos.x); //cos(b) sin(b) + return _mm512_mul_pd(exp, cos_sin); + } + Vectorized> expm1() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> sin() 
const {
+    return map(std::sin);
+  }
+  Vectorized<c10::complex<double>> sinh() const {
+    return map(std::sinh);
+  }
+  Vectorized<c10::complex<double>> cos() const {
+    return map(std::cos);
+  }
+  Vectorized<c10::complex<double>> cosh() const {
+    return map(std::cosh);
+  }
+  Vectorized<c10::complex<double>> ceil() const {
+    return _mm512_ceil_pd(values);
+  }
+  Vectorized<c10::complex<double>> floor() const {
+    return _mm512_floor_pd(values);
+  }
+  Vectorized<c10::complex<double>> hypot(const Vectorized<c10::complex<double>> &b) const {
+    AT_ERROR("not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> igamma(const Vectorized<c10::complex<double>> &x) const {
+    AT_ERROR("not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> igammac(const Vectorized<c10::complex<double>> &x) const {
+    AT_ERROR("not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> neg() const {
+    auto zero = _mm512_setzero_pd();
+    return _mm512_sub_pd(zero, values);
+  }
+  Vectorized<c10::complex<double>> nextafter(const Vectorized<c10::complex<double>> &b) const {
+    AT_ERROR("not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> round() const {
+    return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
+  }
+  Vectorized<c10::complex<double>> tan() const {
+    return map(std::tan);
+  }
+  Vectorized<c10::complex<double>> tanh() const {
+    return map(std::tanh);
+  }
+  Vectorized<c10::complex<double>> trunc() const {
+    return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
+  }
+  Vectorized<c10::complex<double>> sqrt() const {
+    return map(std::sqrt);
+  }
+  Vectorized<c10::complex<double>> reciprocal() const;
+  Vectorized<c10::complex<double>> rsqrt() const {
+    return sqrt().reciprocal();
+  }
+  Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
+    __at_align__ c10::complex<double> x_tmp[size()];
+    __at_align__ c10::complex<double> y_tmp[size()];
+    store(x_tmp);
+    exp.store(y_tmp);
+    for (const auto i : c10::irange(size())) {
+      x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
+    }
+    return loadu(x_tmp);
+  }
+  // Comparison using the _CMP_**_OQ predicate.
+  //   `O`: get false if an operand is NaN
+  //   `Q`: do not raise if an operand is NaN
+  Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
+    auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ);
+    return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
+                                                      0xFFFFFFFFFFFFFFFF));
+  }
+  Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
+    auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_OQ);
+    return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
+                                                      0xFFFFFFFFFFFFFFFF));
+  }
+  Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+
+  Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
+  Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
+  Vectorized<c10::complex<double>> lt(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> le(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> gt(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> ge(const Vectorized<c10::complex<double>>& other) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+};
+
+template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a,
+                                                              const Vectorized<c10::complex<double>> &b) {
+  return _mm512_add_pd(a, b);
+}
+
+template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a,
+                                                              const Vectorized<c10::complex<double>> &b) {
+  return _mm512_sub_pd(a, b);
+}
+
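For reference, the interleaved [real, imag] lane layout used by these operators maps onto the usual scalar expansion (a + bi)(c + di) = (ac - bd) + (ad + bc)i. Below is a minimal scalar sketch of what the SIMD operator* that follows computes per complex slot; it is plain C++ for illustration only, not part of this header, and complex_mul_ref is a hypothetical name:

#include <complex>

// Scalar reference for one complex slot of the SIMD operator* below (sketch).
static inline std::complex<double> complex_mul_ref(std::complex<double> x,
                                                   std::complex<double> y) {
  const double a = x.real(), b = x.imag();
  const double c = y.real(), d = y.imag();
  // The vector code builds {a*c, b*d} and {a*d, -b*c}, then hsub_pd pairs
  // them into {a*c - b*d, a*d + b*c}: the real and imaginary parts below.
  return {a * c - b * d, a * d + b * c};
}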
+template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a,
+                                                              const Vectorized<c10::complex<double>> &b) {
+  //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
+  const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
+  auto ac_bd = _mm512_mul_pd(a, b);         //ac       bd
+
+  auto d_c = _mm512_permute_pd(b, 0x55);    //d        c
+  d_c = _mm512_xor_pd(sign_mask, d_c);      //d       -c
+  auto ad_bc = _mm512_mul_pd(a, d_c);       //ad      -bc
+
+  auto ret = Vectorized<c10::complex<double>>::hsub_pd(ac_bd, ad_bc);  //ac - bd  ad + bc
+  return ret;
+}
+
+template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a,
+                                                              const Vectorized<c10::complex<double>> &b) {
+  //re + im*i = (a + bi) / (c + di)
+  //re = (ac + bd)/abs_2()
+  //im = (bc - ad)/abs_2()
+  const __m512d sign_mask = _mm512_setr_pd(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
+  auto ac_bd = _mm512_mul_pd(a, b);         //ac       bd
+
+  auto d_c = _mm512_permute_pd(b, 0x55);    //d        c
+  d_c = _mm512_xor_pd(sign_mask, d_c);      //-d       c
+  auto ad_bc = _mm512_mul_pd(a, d_c);       //-ad      bc
+
+  auto re_im = Vectorized<c10::complex<double>>::hadd_pd(ac_bd, ad_bc);//ac + bd  bc - ad
+  return _mm512_div_pd(re_im, b.abs_2_());
+}
+
+// reciprocal. Implement this here so we can use multiplication.
+inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
+  //re + im*i = (a + bi) / (c + di)
+  //re = (ac + bd)/abs_2() = c/abs_2()
+  //im = (bc - ad)/abs_2() = -d/abs_2()
+  const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
+  auto c_d = _mm512_xor_pd(sign_mask, values);    //c       -d
+  return _mm512_div_pd(c_d, abs_2_());
+}
+
+inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
+  // atan(x) = i/2 * ln((i + z)/(i - z))
+  const __m512d i = _mm512_setr_pd(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
+  const Vectorized<c10::complex<double>> i_half = _mm512_setr_pd(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
+
+  auto sum = Vectorized<c10::complex<double>>(_mm512_add_pd(i, values));  // a        1+b
+  auto sub = Vectorized<c10::complex<double>>(_mm512_sub_pd(i, values));  // -a       1-b
+  auto ln = (sum/sub).log();        // ln((i + z)/(i - z))
+  return i_half*ln;                 // i/2*ln()
+}
+
+template <>
+Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a,
+                                                const Vectorized<c10::complex<double>>& b) {
+  auto zero_vec = _mm512_set1_epi64(0);
+  auto abs_a = a.abs_2_();
+  auto abs_b = b.abs_2_();
+  auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_LT_OQ);
+  auto max = _mm512_mask_blend_pd(mask, a, b);
+  // Exploit the fact that all-ones is a NaN.
+  auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
+  auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
+                                      0xFFFFFFFFFFFFFFFF);
+  return _mm512_or_pd(max, _mm512_castsi512_pd(isnan));
+}
+
+template <>
+Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a,
+                                                const Vectorized<c10::complex<double>>& b) {
+  auto zero_vec = _mm512_set1_epi64(0);
+  auto abs_a = a.abs_2_();
+  auto abs_b = b.abs_2_();
+  auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_GT_OQ);
+  auto min = _mm512_mask_blend_pd(mask, a, b);
+  // Exploit the fact that all-ones is a NaN.
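+  // maximum()/minimum() order complex values by squared modulus: abs_2_()
+  // yields re*re + im*im per slot, so no sqrt is needed for the comparison.
+  // Scalar model of this minimum (illustrative only, not part of ATen):
+  //   ax = std::norm(x); ay = std::norm(y);
+  //   result = (isnan(ax) || isnan(ay)) ? NaN : (ax > ay ? y : x);
+  // The UNORD compare below builds the all-ones NaN lanes that get OR-ed in.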
+ auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q); + auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF); + return _mm512_or_pd(min, _mm512_castsi512_pd(isnan)); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_and_pd(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_or_pd(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_xor_pd(a, b); +} + +inline Vectorized> Vectorized>::eq(const Vectorized>& other) const { + return (*this == other) & Vectorized>(_mm512_set1_pd(1.0)); +} + +inline Vectorized> Vectorized>::ne(const Vectorized>& other) const { + return (*this != other) & Vectorized>(_mm512_set1_pd(1.0)); +} + +#endif + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h new file mode 100644 index 0000000000000000000000000000000000000000..966f42a2534840362d465e8facb8f64cc90a61e7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h @@ -0,0 +1,1032 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +template <> class Vectorized> { +private: + __m512 values; + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 8; + } + Vectorized() {} + Vectorized(__m512 v) : values(v) {} + Vectorized(c10::complex val) { + float real_value = val.real(); + float imag_value = val.imag(); + values = _mm512_setr_ps(real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value); + } + Vectorized(c10::complex val1, c10::complex val2, + c10::complex val3, c10::complex val4, + c10::complex val5, c10::complex val6, + c10::complex val7, c10::complex val8) { + values = _mm512_setr_ps(val1.real(), val1.imag(), + val2.real(), val2.imag(), + val3.real(), val3.imag(), + val4.real(), val4.imag(), + val5.real(), val5.imag(), + val6.real(), val6.imag(), + val7.real(), val7.imag(), + val8.real(), val8.imag()); + } + operator __m512() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, + const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + static_assert(mask > -1 && mask < 256, "Unexpected mask value"); + // The compiler would hopefully convert this switch condition + // into a jump table + switch (mask) { + case 0: + return a; + case 1: + return _mm512_mask_blend_ps(0x03, a.values, b.values); + case 2: + return _mm512_mask_blend_ps(0x0C, a.values, b.values); + case 3: + return _mm512_mask_blend_ps(0x0F, a.values, b.values); + case 4: + return _mm512_mask_blend_ps(0x30, a.values, b.values); + case 5: + return _mm512_mask_blend_ps(0x33, a.values, b.values); + case 6: + return _mm512_mask_blend_ps(0x3C, a.values, b.values); + case 7: + return 
_mm512_mask_blend_ps(0x3F, a.values, b.values); + case 8: + return _mm512_mask_blend_ps(0xC0, a.values, b.values); + case 9: + return _mm512_mask_blend_ps(0xC3, a.values, b.values); + case 10: + return _mm512_mask_blend_ps(0xCC, a.values, b.values); + case 11: + return _mm512_mask_blend_ps(0xCF, a.values, b.values); + case 12: + return _mm512_mask_blend_ps(0xF0, a.values, b.values); + case 13: + return _mm512_mask_blend_ps(0xF3, a.values, b.values); + case 14: + return _mm512_mask_blend_ps(0xFC, a.values, b.values); + case 15: + return _mm512_mask_blend_ps(0xFF, a.values, b.values); + case 16: + return _mm512_mask_blend_ps(0x300, a.values, b.values); + case 17: + return _mm512_mask_blend_ps(0x303, a.values, b.values); + case 18: + return _mm512_mask_blend_ps(0x30C, a.values, b.values); + case 19: + return _mm512_mask_blend_ps(0x30F, a.values, b.values); + case 20: + return _mm512_mask_blend_ps(0x330, a.values, b.values); + case 21: + return _mm512_mask_blend_ps(0x333, a.values, b.values); + case 22: + return _mm512_mask_blend_ps(0x33C, a.values, b.values); + case 23: + return _mm512_mask_blend_ps(0x33F, a.values, b.values); + case 24: + return _mm512_mask_blend_ps(0x3C0, a.values, b.values); + case 25: + return _mm512_mask_blend_ps(0x3C3, a.values, b.values); + case 26: + return _mm512_mask_blend_ps(0x3CC, a.values, b.values); + case 27: + return _mm512_mask_blend_ps(0x3CF, a.values, b.values); + case 28: + return _mm512_mask_blend_ps(0x3F0, a.values, b.values); + case 29: + return _mm512_mask_blend_ps(0x3F3, a.values, b.values); + case 30: + return _mm512_mask_blend_ps(0x3FC, a.values, b.values); + case 31: + return _mm512_mask_blend_ps(0x3FF, a.values, b.values); + case 32: + return _mm512_mask_blend_ps(0xC00, a.values, b.values); + case 33: + return _mm512_mask_blend_ps(0xC03, a.values, b.values); + case 34: + return _mm512_mask_blend_ps(0xC0C, a.values, b.values); + case 35: + return _mm512_mask_blend_ps(0xC0F, a.values, b.values); + case 36: + return _mm512_mask_blend_ps(0xC30, a.values, b.values); + case 37: + return _mm512_mask_blend_ps(0xC33, a.values, b.values); + case 38: + return _mm512_mask_blend_ps(0xC3C, a.values, b.values); + case 39: + return _mm512_mask_blend_ps(0xC3F, a.values, b.values); + case 40: + return _mm512_mask_blend_ps(0xCC0, a.values, b.values); + case 41: + return _mm512_mask_blend_ps(0xCC3, a.values, b.values); + case 42: + return _mm512_mask_blend_ps(0xCCC, a.values, b.values); + case 43: + return _mm512_mask_blend_ps(0xCCF, a.values, b.values); + case 44: + return _mm512_mask_blend_ps(0xCF0, a.values, b.values); + case 45: + return _mm512_mask_blend_ps(0xCF3, a.values, b.values); + case 46: + return _mm512_mask_blend_ps(0xCFC, a.values, b.values); + case 47: + return _mm512_mask_blend_ps(0xCFF, a.values, b.values); + case 48: + return _mm512_mask_blend_ps(0xF00, a.values, b.values); + case 49: + return _mm512_mask_blend_ps(0xF03, a.values, b.values); + case 50: + return _mm512_mask_blend_ps(0xF0C, a.values, b.values); + case 51: + return _mm512_mask_blend_ps(0xF0F, a.values, b.values); + case 52: + return _mm512_mask_blend_ps(0xF30, a.values, b.values); + case 53: + return _mm512_mask_blend_ps(0xF33, a.values, b.values); + case 54: + return _mm512_mask_blend_ps(0xF3C, a.values, b.values); + case 55: + return _mm512_mask_blend_ps(0xF3F, a.values, b.values); + case 56: + return _mm512_mask_blend_ps(0xFC0, a.values, b.values); + case 57: + return _mm512_mask_blend_ps(0xFC3, a.values, b.values); + case 58: + return _mm512_mask_blend_ps(0xFCC, a.values, 
b.values); + case 59: + return _mm512_mask_blend_ps(0xFCF, a.values, b.values); + case 60: + return _mm512_mask_blend_ps(0xFF0, a.values, b.values); + case 61: + return _mm512_mask_blend_ps(0xFF3, a.values, b.values); + case 62: + return _mm512_mask_blend_ps(0xFFC, a.values, b.values); + case 63: + return _mm512_mask_blend_ps(0xFFF, a.values, b.values); + case 64: + return _mm512_mask_blend_ps(0x3000, a.values, b.values); + case 65: + return _mm512_mask_blend_ps(0x3003, a.values, b.values); + case 66: + return _mm512_mask_blend_ps(0x300C, a.values, b.values); + case 67: + return _mm512_mask_blend_ps(0x300F, a.values, b.values); + case 68: + return _mm512_mask_blend_ps(0x3030, a.values, b.values); + case 69: + return _mm512_mask_blend_ps(0x3033, a.values, b.values); + case 70: + return _mm512_mask_blend_ps(0x303C, a.values, b.values); + case 71: + return _mm512_mask_blend_ps(0x303F, a.values, b.values); + case 72: + return _mm512_mask_blend_ps(0x30C0, a.values, b.values); + case 73: + return _mm512_mask_blend_ps(0X30C3, a.values, b.values); + case 74: + return _mm512_mask_blend_ps(0x30CC, a.values, b.values); + case 75: + return _mm512_mask_blend_ps(0x30CF, a.values, b.values); + case 76: + return _mm512_mask_blend_ps(0x30F0, a.values, b.values); + case 77: + return _mm512_mask_blend_ps(0x30F3, a.values, b.values); + case 78: + return _mm512_mask_blend_ps(0x30FC, a.values, b.values); + case 79: + return _mm512_mask_blend_ps(0x30FF, a.values, b.values); + case 80: + return _mm512_mask_blend_ps(0x3300, a.values, b.values); + case 81: + return _mm512_mask_blend_ps(0X3303, a.values, b.values); + case 82: + return _mm512_mask_blend_ps(0x330C, a.values, b.values); + case 83: + return _mm512_mask_blend_ps(0x330F, a.values, b.values); + case 84: + return _mm512_mask_blend_ps(0x3330, a.values, b.values); + case 85: + return _mm512_mask_blend_ps(0x3333, a.values, b.values); + case 86: + return _mm512_mask_blend_ps(0x333C, a.values, b.values); + case 87: + return _mm512_mask_blend_ps(0X333F, a.values, b.values); + case 88: + return _mm512_mask_blend_ps(0x33C0, a.values, b.values); + case 89: + return _mm512_mask_blend_ps(0x33C3, a.values, b.values); + case 90: + return _mm512_mask_blend_ps(0x33CC, a.values, b.values); + case 91: + return _mm512_mask_blend_ps(0x33CF, a.values, b.values); + case 92: + return _mm512_mask_blend_ps(0x33F0, a.values, b.values); + case 93: + return _mm512_mask_blend_ps(0x33F3, a.values, b.values); + case 94: + return _mm512_mask_blend_ps(0x33FC, a.values, b.values); + case 95: + return _mm512_mask_blend_ps(0x33FF, a.values, b.values); + case 96: + return _mm512_mask_blend_ps(0X3C00, a.values, b.values); + case 97: + return _mm512_mask_blend_ps(0x3C03, a.values, b.values); + case 98: + return _mm512_mask_blend_ps(0x3C0C, a.values, b.values); + case 99: + return _mm512_mask_blend_ps(0x3C0F, a.values, b.values); + case 100: + return _mm512_mask_blend_ps(0x3C30, a.values, b.values); + case 101: + return _mm512_mask_blend_ps(0x3C33, a.values, b.values); + case 102: + return _mm512_mask_blend_ps(0x3C3C, a.values, b.values); + case 103: + return _mm512_mask_blend_ps(0x3C3F, a.values, b.values); + case 104: + return _mm512_mask_blend_ps(0x3CC0, a.values, b.values); + case 105: + return _mm512_mask_blend_ps(0x3CC3, a.values, b.values); + case 106: + return _mm512_mask_blend_ps(0x3CCC, a.values, b.values); + case 107: + return _mm512_mask_blend_ps(0x3CCF, a.values, b.values); + case 108: + return _mm512_mask_blend_ps(0x3CF0, a.values, b.values); + case 109: + return 
_mm512_mask_blend_ps(0x3CF3, a.values, b.values); + case 110: + return _mm512_mask_blend_ps(0x3CFC, a.values, b.values); + case 111: + return _mm512_mask_blend_ps(0x3CFF, a.values, b.values); + case 112: + return _mm512_mask_blend_ps(0x3F00, a.values, b.values); + case 113: + return _mm512_mask_blend_ps(0x3F03, a.values, b.values); + case 114: + return _mm512_mask_blend_ps(0x3F0C, a.values, b.values); + case 115: + return _mm512_mask_blend_ps(0x3F0F, a.values, b.values); + case 116: + return _mm512_mask_blend_ps(0x3F30, a.values, b.values); + case 117: + return _mm512_mask_blend_ps(0x3F33, a.values, b.values); + case 118: + return _mm512_mask_blend_ps(0x3F3C, a.values, b.values); + case 119: + return _mm512_mask_blend_ps(0x3F3F, a.values, b.values); + case 120: + return _mm512_mask_blend_ps(0x3FC0, a.values, b.values); + case 121: + return _mm512_mask_blend_ps(0x3FC3, a.values, b.values); + case 122: + return _mm512_mask_blend_ps(0x3FCC, a.values, b.values); + case 123: + return _mm512_mask_blend_ps(0x3FCF, a.values, b.values); + case 124: + return _mm512_mask_blend_ps(0x3FF0, a.values, b.values); + case 125: + return _mm512_mask_blend_ps(0x3FF3, a.values, b.values); + case 126: + return _mm512_mask_blend_ps(0x3FFC, a.values, b.values); + case 127: + return _mm512_mask_blend_ps(0x3FFF, a.values, b.values); + case 128: + return _mm512_mask_blend_ps(0xC000, a.values, b.values); + case 129: + return _mm512_mask_blend_ps(0xC003, a.values, b.values); + case 130: + return _mm512_mask_blend_ps(0xC00C, a.values, b.values); + case 131: + return _mm512_mask_blend_ps(0xC00F, a.values, b.values); + case 132: + return _mm512_mask_blend_ps(0xC030, a.values, b.values); + case 133: + return _mm512_mask_blend_ps(0xC033, a.values, b.values); + case 134: + return _mm512_mask_blend_ps(0xC03C, a.values, b.values); + case 135: + return _mm512_mask_blend_ps(0xC03F, a.values, b.values); + case 136: + return _mm512_mask_blend_ps(0xC0C0, a.values, b.values); + case 137: + return _mm512_mask_blend_ps(0xC0C3, a.values, b.values); + case 138: + return _mm512_mask_blend_ps(0xC0CC, a.values, b.values); + case 139: + return _mm512_mask_blend_ps(0xC0CF, a.values, b.values); + case 140: + return _mm512_mask_blend_ps(0xC0F0, a.values, b.values); + case 141: + return _mm512_mask_blend_ps(0xC0F3, a.values, b.values); + case 142: + return _mm512_mask_blend_ps(0xC0FC, a.values, b.values); + case 143: + return _mm512_mask_blend_ps(0xC0FF, a.values, b.values); + case 144: + return _mm512_mask_blend_ps(0xC300, a.values, b.values); + case 145: + return _mm512_mask_blend_ps(0xC303, a.values, b.values); + case 146: + return _mm512_mask_blend_ps(0xC30C, a.values, b.values); + case 147: + return _mm512_mask_blend_ps(0xC30F, a.values, b.values); + case 148: + return _mm512_mask_blend_ps(0xC330, a.values, b.values); + case 149: + return _mm512_mask_blend_ps(0xC333, a.values, b.values); + case 150: + return _mm512_mask_blend_ps(0xC33C, a.values, b.values); + case 151: + return _mm512_mask_blend_ps(0xC33F, a.values, b.values); + case 152: + return _mm512_mask_blend_ps(0xC3C0, a.values, b.values); + case 153: + return _mm512_mask_blend_ps(0xC3C3, a.values, b.values); + case 154: + return _mm512_mask_blend_ps(0xC3CC, a.values, b.values); + case 155: + return _mm512_mask_blend_ps(0xC3CF, a.values, b.values); + case 156: + return _mm512_mask_blend_ps(0xC3F0, a.values, b.values); + case 157: + return _mm512_mask_blend_ps(0xC3F3, a.values, b.values); + case 158: + return _mm512_mask_blend_ps(0xC3FC, a.values, b.values); + case 159: + return 
_mm512_mask_blend_ps(0xC3FF, a.values, b.values); + case 160: + return _mm512_mask_blend_ps(0xCC00, a.values, b.values); + case 161: + return _mm512_mask_blend_ps(0xCC03, a.values, b.values); + case 162: + return _mm512_mask_blend_ps(0xCC0C, a.values, b.values); + case 163: + return _mm512_mask_blend_ps(0xCC0F, a.values, b.values); + case 164: + return _mm512_mask_blend_ps(0xCC30, a.values, b.values); + case 165: + return _mm512_mask_blend_ps(0xCC33, a.values, b.values); + case 166: + return _mm512_mask_blend_ps(0xCC3C, a.values, b.values); + case 167: + return _mm512_mask_blend_ps(0xCC3F, a.values, b.values); + case 168: + return _mm512_mask_blend_ps(0xCCC0, a.values, b.values); + case 169: + return _mm512_mask_blend_ps(0xCCC3, a.values, b.values); + case 170: + return _mm512_mask_blend_ps(0xCCCC, a.values, b.values); + case 171: + return _mm512_mask_blend_ps(0xCCCF, a.values, b.values); + case 172: + return _mm512_mask_blend_ps(0xCCF0, a.values, b.values); + case 173: + return _mm512_mask_blend_ps(0xCCF3, a.values, b.values); + case 174: + return _mm512_mask_blend_ps(0xCCFC, a.values, b.values); + case 175: + return _mm512_mask_blend_ps(0xCCFF, a.values, b.values); + case 176: + return _mm512_mask_blend_ps(0xCF00, a.values, b.values); + case 177: + return _mm512_mask_blend_ps(0xCF03, a.values, b.values); + case 178: + return _mm512_mask_blend_ps(0xCF0C, a.values, b.values); + case 179: + return _mm512_mask_blend_ps(0xCF0F, a.values, b.values); + case 180: + return _mm512_mask_blend_ps(0xCF30, a.values, b.values); + case 181: + return _mm512_mask_blend_ps(0xCF33, a.values, b.values); + case 182: + return _mm512_mask_blend_ps(0xCF3C, a.values, b.values); + case 183: + return _mm512_mask_blend_ps(0xCF3F, a.values, b.values); + case 184: + return _mm512_mask_blend_ps(0xCFC0, a.values, b.values); + case 185: + return _mm512_mask_blend_ps(0xCFC3, a.values, b.values); + case 186: + return _mm512_mask_blend_ps(0xCFCC, a.values, b.values); + case 187: + return _mm512_mask_blend_ps(0xCFCF, a.values, b.values); + case 188: + return _mm512_mask_blend_ps(0xCFF0, a.values, b.values); + case 189: + return _mm512_mask_blend_ps(0xCFF3, a.values, b.values); + case 190: + return _mm512_mask_blend_ps(0xCFFC, a.values, b.values); + case 191: + return _mm512_mask_blend_ps(0xCFFF, a.values, b.values); + case 192: + return _mm512_mask_blend_ps(0xF000, a.values, b.values); + case 193: + return _mm512_mask_blend_ps(0xF003, a.values, b.values); + case 194: + return _mm512_mask_blend_ps(0xF00C, a.values, b.values); + case 195: + return _mm512_mask_blend_ps(0xF00F, a.values, b.values); + case 196: + return _mm512_mask_blend_ps(0xF030, a.values, b.values); + case 197: + return _mm512_mask_blend_ps(0xF033, a.values, b.values); + case 198: + return _mm512_mask_blend_ps(0xF03C, a.values, b.values); + case 199: + return _mm512_mask_blend_ps(0xF03F, a.values, b.values); + case 200: + return _mm512_mask_blend_ps(0XF0C0, a.values, b.values); + case 201: + return _mm512_mask_blend_ps(0xF0C3, a.values, b.values); + case 202: + return _mm512_mask_blend_ps(0xF0CC, a.values, b.values); + case 203: + return _mm512_mask_blend_ps(0xF0CF, a.values, b.values); + case 204: + return _mm512_mask_blend_ps(0xF0F0, a.values, b.values); + case 205: + return _mm512_mask_blend_ps(0xF0F3, a.values, b.values); + case 206: + return _mm512_mask_blend_ps(0xF0FC, a.values, b.values); + case 207: + return _mm512_mask_blend_ps(0xF0FF, a.values, b.values); + case 208: + return _mm512_mask_blend_ps(0XF300, a.values, b.values); + case 209: + return 
_mm512_mask_blend_ps(0xF303, a.values, b.values); + case 210: + return _mm512_mask_blend_ps(0xF30C, a.values, b.values); + case 211: + return _mm512_mask_blend_ps(0xF30F, a.values, b.values); + case 212: + return _mm512_mask_blend_ps(0xF330, a.values, b.values); + case 213: + return _mm512_mask_blend_ps(0xF333, a.values, b.values); + case 214: + return _mm512_mask_blend_ps(0XF33C, a.values, b.values); + case 215: + return _mm512_mask_blend_ps(0xF33F, a.values, b.values); + case 216: + return _mm512_mask_blend_ps(0xF3C0, a.values, b.values); + case 217: + return _mm512_mask_blend_ps(0xF3C3, a.values, b.values); + case 218: + return _mm512_mask_blend_ps(0xF3CC, a.values, b.values); + case 219: + return _mm512_mask_blend_ps(0xF3CF, a.values, b.values); + case 220: + return _mm512_mask_blend_ps(0xF3F0, a.values, b.values); + case 221: + return _mm512_mask_blend_ps(0xF3F3, a.values, b.values); + case 222: + return _mm512_mask_blend_ps(0xF3FC, a.values, b.values); + case 223: + return _mm512_mask_blend_ps(0XF3FF, a.values, b.values); + case 224: + return _mm512_mask_blend_ps(0xFC00, a.values, b.values); + case 225: + return _mm512_mask_blend_ps(0xFC03, a.values, b.values); + case 226: + return _mm512_mask_blend_ps(0xFC0C, a.values, b.values); + case 227: + return _mm512_mask_blend_ps(0xFC0F, a.values, b.values); + case 228: + return _mm512_mask_blend_ps(0xFC30, a.values, b.values); + case 229: + return _mm512_mask_blend_ps(0xFC33, a.values, b.values); + case 230: + return _mm512_mask_blend_ps(0xFC3C, a.values, b.values); + case 231: + return _mm512_mask_blend_ps(0xFC3F, a.values, b.values); + case 232: + return _mm512_mask_blend_ps(0xFCC0, a.values, b.values); + case 233: + return _mm512_mask_blend_ps(0xFCC3, a.values, b.values); + case 234: + return _mm512_mask_blend_ps(0xFCCC, a.values, b.values); + case 235: + return _mm512_mask_blend_ps(0xFCCF, a.values, b.values); + case 236: + return _mm512_mask_blend_ps(0xFCF0, a.values, b.values); + case 237: + return _mm512_mask_blend_ps(0xFCF3, a.values, b.values); + case 238: + return _mm512_mask_blend_ps(0xFCFC, a.values, b.values); + case 239: + return _mm512_mask_blend_ps(0xFCFF, a.values, b.values); + case 240: + return _mm512_mask_blend_ps(0xFF00, a.values, b.values); + case 241: + return _mm512_mask_blend_ps(0xFF03, a.values, b.values); + case 242: + return _mm512_mask_blend_ps(0xFF0C, a.values, b.values); + case 243: + return _mm512_mask_blend_ps(0xFF0F, a.values, b.values); + case 244: + return _mm512_mask_blend_ps(0xFF30, a.values, b.values); + case 245: + return _mm512_mask_blend_ps(0xFF33, a.values, b.values); + case 246: + return _mm512_mask_blend_ps(0xFF3C, a.values, b.values); + case 247: + return _mm512_mask_blend_ps(0xFF3F, a.values, b.values); + case 248: + return _mm512_mask_blend_ps(0xFFC0, a.values, b.values); + case 249: + return _mm512_mask_blend_ps(0xFFC3, a.values, b.values); + case 250: + return _mm512_mask_blend_ps(0xFFCC, a.values, b.values); + case 251: + return _mm512_mask_blend_ps(0xFFCF, a.values, b.values); + case 252: + return _mm512_mask_blend_ps(0xFFF0, a.values, b.values); + case 253: + return _mm512_mask_blend_ps(0xFFF3, a.values, b.values); + case 254: + return _mm512_mask_blend_ps(0xFFFC, a.values, b.values); + default: break; + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, + const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm512_unpacklo_ps(mask.values, mask.values); + auto all_ones = 
_mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask_), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_ps(mmask, a.values, b.values); + } + template + static Vectorized> arange(c10::complex base = 0., + step_t step = static_cast(1)) { + return Vectorized>(base, + base + step, + base + c10::complex(2)*step, + base + c10::complex(3)*step, + base + c10::complex(4)*step, + base + c10::complex(5)*step, + base + c10::complex(6)*step, + base + c10::complex(7)*step); + } + static Vectorized> set(const Vectorized>& a, + const Vectorized>& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_ps(reinterpret_cast(ptr)); + + __at_align__ float tmp_values[2*size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm512_load_ps(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + float tmp_values[2*size()]; + _mm512_storeu_ps(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + // AVX512 doesn't have horizontal add & horizontal sub instructions. + // TODO: hadd_ps() & hsub_ps() may have scope for improvement.
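// Worked example of the emulation below: with a = {a0, ..., a15} and
// b = {b0, ..., b15}, idx1 gathers the even lanes {a0, b0, a2, b2, ..., a14, b14}
// and idx2 the odd lanes {a1, b1, a3, b3, ..., a15, b15}, so the add (or sub)
// yields {a0+a1, b0+b1, a2+a3, b2+b3, ...}: per-complex horizontal sums of a and
// b interleaved lane by lane. When both arguments are the same vector, as in
// abs_2_() further down, every lane pair ends up holding the same re*re+im*im value.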
+ static inline __m512 hadd_ps(__m512 a, __m512 b) { + __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0); + __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1); + return _mm512_add_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), + _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); + } + static inline __m512 hsub_ps(__m512 a, __m512 b) { + __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0); + __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1); + return _mm512_sub_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), + _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + __m512 abs_2_() const { + auto val_2 = _mm512_mul_ps(values, values); // a*a b*b + auto ret = hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b + return ret; + } + __m512 abs_() const { + return _mm512_sqrt_ps(abs_2_()); // abs abs + } + Vectorized> abs() const { + const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm512_and_ps(abs_(), real_mask); // abs 0 + } + __m512 angle_() const { + // angle = atan2(b, a) + auto b_a = _mm512_permute_ps(values, 0xB1); // b a + return Sleef_atan2f16_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + auto angle = _mm512_permute_ps(angle_(), 0xB1); // angle 90-angle + return _mm512_and_ps(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm512_setzero_ps(); + auto mask = _mm512_cmp_ps_mask(abs, zero, _CMP_EQ_OQ); + auto abs_val = Vectorized(abs); + + auto div = values / abs_val.values; // x / abs(x) + + return _mm512_mask_blend_ps(mask, div, zero); + } + __m512 real_() const { + const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm512_and_ps(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m512 imag_() const { + const __m512 imag_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF)); + return _mm512_and_ps(values, imag_mask); + } + Vectorized> imag() const { + return _mm512_permute_ps(imag_(), 0xB1); //b a + } + __m512 conj_() const { + const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, + 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + return _mm512_xor_ps(values, sign_mask); // a -b + } + Vectorized> conj()
const { + return conj_(); + } + Vectorized> log() const { + // Most trigonometric ops use the log() op to improve complex number performance. + return map(std::log); + } + Vectorized> log2() const { + const __m512 log2_ = _mm512_set1_ps(std::log(2)); + return _mm512_div_ps(log(), log2_); + } + Vectorized> log10() const { + const __m512 log10_ = _mm512_set1_ps(std::log(10)); + return _mm512_div_ps(log(), log10_); + } + Vectorized> log1p() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 - z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m512 one = _mm512_set1_ps(1); + + auto conj = conj_(); + auto b_a = _mm512_permute_ps(conj, 0xB1); //-b a + auto ab = _mm512_mul_ps(conj, b_a); //-ab -ab + auto im = _mm512_add_ps(ab, ab); //-2ab -2ab + + auto val_2 = _mm512_mul_ps(values, values); // a*a b*b + auto re = hsub_ps(val_2, _mm512_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a + re = _mm512_sub_ps(one, re); + + auto root = Vectorized(_mm512_mask_blend_ps(0xAAAA, re, im)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm512_add_ps(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm512_permute_ps(ln.values, 0xB1)).conj(); //-i*ln() + } + Vectorized> acos() const { + return map(std::acos); + } + Vectorized> atan() const; + Vectorized> atan2(const Vectorized> &b) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erf() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> erfc() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expf16_u10(values); //exp(a) exp(b) + exp = _mm512_mask_blend_ps(0xAAAA, exp, _mm512_permute_ps(exp, 0xB1)); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosf16_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm512_mask_blend_ps(0xAAAA, _mm512_permute_ps(sin_cos.y, 0xB1), + sin_cos.x); //cos(b) sin(b) + return _mm512_mul_ps(exp, cos_sin); + } + Vectorized> expm1() const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm512_ceil_ps(values); + } + Vectorized> floor() const { + return _mm512_floor_ps(values); + } + Vectorized> hypot(const Vectorized> &b) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> igamma(const Vectorized> &x) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> igammac(const Vectorized> &x) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> neg() const { + auto zero = _mm512_setzero_ps(); + return _mm512_sub_ps(zero, values); + } + Vectorized> nextafter(const Vectorized> &b) const { + AT_ERROR("not supported for complex numbers"); + } + Vectorized> round() const { + return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt()
const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF)); + } + Vectorized> operator!=(const Vectorized>& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF)); + } + Vectorized> operator<(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; + Vectorized> lt(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> le(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> gt(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> ge(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, + const Vectorized> &b) { + return _mm512_add_ps(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, + const Vectorized> &b) { + return _mm512_sub_ps(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, + const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, + 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm512_mul_ps(a, b); //ac bd + + auto d_c = _mm512_permute_ps(b, 0xB1); //d c + d_c = _mm512_xor_ps(sign_mask, d_c); //d -c + auto ad_bc = _mm512_mul_ps(a, d_c); //ad -bc + + auto ret = Vectorized>::hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, + const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() + //im = (bc - ad)/abs_2() + const __m512 sign_mask = _mm512_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, + -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0); + auto ac_bd = _mm512_mul_ps(a, b); //ac bd + + auto d_c = _mm512_permute_ps(b, 0xB1); //d c + d_c = _mm512_xor_ps(sign_mask, d_c); //-d c + auto ad_bc = _mm512_mul_ps(a, d_c); //-ad bc + + auto re_im = Vectorized>::hadd_ps(ac_bd, ad_bc);//ac + bd bc - ad + return _mm512_div_ps(re_im, b.abs_2_()); +} + +// reciprocal. Implement this here so we can use multiplication. 
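// Numeric check of the lane arithmetic in operator* above: for (1+2i)*(3+4i),
// ac_bd = {3, 8}, d_c = {4, -3}, ad_bc = {4, -6}, and hsub_ps pairs them into
// {3-8, 4-(-6)} = {-5, 10}, i.e. -5+10i, the expected product. For the
// reciprocal, setting a = 1 and b = 0 in the division formulas gives
// re = c/abs_2() and im = -d/abs_2(), so 1/(c+di) needs only a sign flip on
// the imaginary lane followed by a single divide:
// 1/(3+4i) = (3-4i)/25 = {0.12, -0.16}.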
+inline Vectorized> Vectorized>::reciprocal() const { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = -d/abs_2() + const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, + 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto c_d = _mm512_xor_ps(sign_mask, values); //c -d + return _mm512_div_ps(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m512 i = _mm512_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm512_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm512_add_ps(i, values)); // a 1+b + auto sub = Vectorized(_mm512_sub_ps(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, + const Vectorized>& b) { + auto zero_vector = _mm512_set1_epi32(0); + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm512_mask_blend_ps(mask, a, b); + // Exploit the fact that all-ones is a NaN. + auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q); + auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF); + return _mm512_or_ps(max, _mm512_castsi512_ps(isnan)); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, + const Vectorized>& b) { + auto zero_vector = _mm512_set1_epi32(0); + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm512_mask_blend_ps(mask, a, b); + // Exploit the fact that all-ones is a NaN. + auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q); + auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF); + return _mm512_or_ps(min, _mm512_castsi512_ps(isnan)); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_and_ps(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_or_ps(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_xor_ps(a, b); +} + +inline Vectorized> Vectorized>::eq( + const Vectorized>& other) const { + return (*this == other) & Vectorized>(_mm512_set1_ps(1.0f)); +} + +inline Vectorized> Vectorized>::ne( + const Vectorized>& other) const { + return (*this != other) & Vectorized>(_mm512_set1_ps(1.0f)); +} + +#endif + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_double.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_double.h new file mode 100644 index 0000000000000000000000000000000000000000..077ce2381cdcd944f303a0dc97b3389a2bba16a5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_double.h @@ -0,0 +1,455 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER!
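The vec512_double.h header added here wraps __m512d, eight double lanes, in the same Vectorized interface. As a minimal self-contained sketch of the raw intrinsics it layers over (assuming an AVX-512F machine and a flag such as -mavx512f; the sketch is illustrative only and not part of the patched header):

#include <immintrin.h>
#include <cstdio>

int main() {
  alignas(64) double a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  alignas(64) double b[8] = {10, 10, 10, 10, 10, 10, 10, 10};
  alignas(64) double sum[8], blended[8];
  // Vectorized<double>::loadu/store are thin wrappers (plus tail handling)
  // around the unaligned load/store intrinsics.
  __m512d va = _mm512_loadu_pd(a);
  __m512d vb = _mm512_loadu_pd(b);
  _mm512_storeu_pd(sum, _mm512_add_pd(va, vb));  // lane-wise a + b
  // An 8-bit k-mask selects per lane, as the blend<mask>() template does:
  // bit i set takes lane i from the second operand.
  _mm512_storeu_pd(blended, _mm512_mask_blend_pd(0x0F, va, vb));
  for (int i = 0; i < 8; ++i) std::printf("%g ", sum[i]);      // 10 11 ... 17
  std::printf("\n");
  for (int i = 0; i < 8; ++i) std::printf("%g ", blended[i]);  // 10 10 10 10 4 5 6 7
  std::printf("\n");
  return 0;
}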
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if (defined(CPU_CAPABILITY_AVX512)) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +template <> class Vectorized { +private: + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; +public: + // values needs to be public for compilation with clang + // as vec512.h uses it + __m512d values; + using value_type = double; + using size_type = int; + static constexpr size_type size() { + return 8; + } + Vectorized() {} + Vectorized(__m512d v) : values(v) {} + Vectorized(double val) { + values = _mm512_set1_pd(val); + } + Vectorized(double val1, double val2, double val3, double val4, + double val5, double val6, double val7, double val8) { + values = _mm512_setr_pd(val1, val2, val3, val4, val5, val6, val7, val8); + } + operator __m512d() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm512_mask_blend_pd(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF); + auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask.values), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_pd(mmask, a.values, b.values); + } + template + static Vectorized arange(double base = 0., step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, + base + 7 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_pd(reinterpret_cast(ptr)); + + + __at_align__ double tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
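// Example: loadu(src, 3) copies src[0..2] into tmp_values and leaves lanes
// 3..7 holding the zeros written by the loop below, so the returned vector is
// {src[0], src[1], src[2], 0, 0, 0, 0, 0} and no uninitialized bytes can leak
// into the result.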
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(double)); + return _mm512_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[size()]; + _mm512_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(double)); + } + } + const double& operator[](int idx) const = delete; + double& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __mmask8 cmp = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_EQ_OQ); + return static_cast(cmp); + } + Vectorized isnan() const { + auto cmp_mask = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_UNORD_Q); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + Vectorized map(double (*const f)(double)) const { + __at_align__ double tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm512_set1_pd(-0.f); + return _mm512_andnot_pd(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm512_castsi512_pd(zero_vector); + const auto nan_vec = _mm512_set1_pd(NAN); + const auto not_nan_mask = _mm512_cmp_pd_mask(values, values, _CMP_EQ_OQ); + const auto not_nan = _mm512_mask_set1_epi64(zero_vector, not_nan_mask, + 0xFFFFFFFFFFFFFFFF); + const auto nan_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(not_nan), + zero_vec, _CMP_EQ_OQ); + const auto pi = _mm512_set1_pd(c10::pi); + + const auto neg_mask = _mm512_cmp_pd_mask(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm512_mask_blend_pd(neg_mask, zero_vec, pi); + angle = _mm512_mask_blend_pd(nan_mask, angle, nan_vec); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_pd(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosd8_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asind8_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atand8_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2d8_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignd8(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erfd8_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcd8_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expd8_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1d8_u10(values)); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodd8(values, q)); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotd8_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + 
return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized log() const { + return Vectorized(Sleef_logd8_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2d8_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10d8_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pd8_u10(values)); + } + Vectorized sin() const { + return Vectorized(Sleef_sind8_u10(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhd8_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosd8_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshd8_u10(values)); + } + Vectorized ceil() const { + return _mm512_ceil_pd(values); + } + Vectorized floor() const { + return _mm512_floor_pd(values); + } + Vectorized frac() const; + Vectorized neg() const { + return _mm512_xor_pd(_mm512_set1_pd(-0.), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterd8(values, b)); + } + Vectorized round() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tand8_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhd8_u10(values)); + } + Vectorized trunc() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammad8_u10(values)); + } + Vectorized sqrt() const { + return _mm512_sqrt_pd(values); + } + Vectorized reciprocal() const { + return _mm512_div_pd(_mm512_set1_pd(1), values); + } + Vectorized rsqrt() const { + return _mm512_div_pd(_mm512_set1_pd(1), _mm512_sqrt_pd(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powd8_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. 
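// Scalar C++ exhibits the same two properties: (NAN < 1.0), (1.0 < NAN) and
// (NAN == NAN) all evaluate to false, and std::isless(1.0, NAN) is the
// explicitly quiet comparison that never raises FE_INVALID; _CMP_LT_OQ is its
// vector counterpart.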
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator!=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator<(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LT_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator<=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LE_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator>(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GT_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator>=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GE_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_pd(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_pd(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm512_mul_pd(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm512_div_pd(a, b); +} + +// frac. Implement this here so we can use subtraction. +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi64(0); + Vectorized max = _mm512_max_pd(a, b); + auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. + return _mm512_or_pd(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi64(0); + Vectorized min = _mm512_min_pd(a, b); + auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. 
+ return _mm512_or_pd(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm512_min_pd(max, _mm512_max_pd(min, a)); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm512_max_pd(min, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm512_min_pd(max, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm512_and_pd(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm512_or_pd(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm512_xor_pd(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0); +} + +template <> +inline void convert(const double* src, double* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm512_storeu_pd(dst + i, _mm512_loadu_pd(src + i)); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmadd_pd(a, b, c); +} + +#endif + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_float.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_float.h new file mode 100644 index 0000000000000000000000000000000000000000..e0c93a834118748c4e4dd86ea7f33e06db629276 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_float.h @@ -0,0 +1,470 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
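Both this float header and the double header above implement the IEEE 754-201x maximum/minimum semantics by OR-ing the native max/min result with an all-ones (NaN) pattern in every lane where the operands compare unordered. A standalone scalar sketch of that contract (the helper name ieee_maximum is illustrative, not from the header):

#include <cmath>
#include <cstdio>

// NaN-propagating maximum: unlike std::fmax, which ignores a NaN operand,
// this returns NaN whenever either input is NaN, matching the vector code.
double ieee_maximum(double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return std::nan("");
  return a > b ? a : b;
}

int main() {
  std::printf("%f\n", ieee_maximum(1.0, 2.0));           // 2.000000
  std::printf("%f\n", ieee_maximum(1.0, std::nan("")));  // nan
  std::printf("%f\n", std::fmax(1.0, std::nan("")));     // 1.000000: fmax drops the NaN
  return 0;
}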
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +template <> class Vectorized { +private: + static constexpr __m512i zero_vec {0, 0, 0, 0, 0, 0, 0, 0}; +public: + __m512 values; + using value_type = float; + using size_type = int; + static constexpr size_type size() { + return 16; + } + Vectorized() {} + Vectorized(__m512 v) : values(v) {} + Vectorized(float val) { + values = _mm512_set1_ps(val); + } + Vectorized(float val1, float val2, float val3, float val4, + float val5, float val6, float val7, float val8, + float val9, float val10, float val11, float val12, + float val13, float val14, float val15, float val16) { + values = _mm512_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16); + } + operator __m512() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm512_mask_blend_ps(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask.values), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_ps(mmask, a.values, b.values); + } + template + static Vectorized arange(float base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_ps(reinterpret_cast(ptr)); + __at_align__ float tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(float)); + return _mm512_loadu_ps(tmp_values); + } + void store(void* ptr, int64_t count = size()) const { + if (count == size()) { + _mm512_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + float tmp_values[size()]; + _mm512_storeu_ps(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(float)); + } + } + const float& operator[](int idx) const = delete; + float& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __mmask16 cmp = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_EQ_OQ); + return static_cast(cmp); + } + Vectorized isnan() const { + auto mask = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_UNORD_Q); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + Vectorized map(float (*const f)(float)) const { + __at_align__ float tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm512_set1_ps(-0.f); + return _mm512_andnot_ps(mask, values); + } + Vectorized angle() const { + __m512 zero_vec = _mm512_set1_ps(0.f); + const auto nan_vec = _mm512_set1_ps(NAN); + const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ); + const auto not_nan_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec), + not_nan_mask, 0xFFFFFFFF); + const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(not_nan_vec), + zero_vec, _CMP_EQ_OQ); + const auto pi = _mm512_set1_ps(c10::pi); + + const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi); + angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_ps(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosf16_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asinf16_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atanf16_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2f16_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignf16(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erff16_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcf16_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expf16_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1f16_u10(values)); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodf16(values, q)); + } + Vectorized log() const { + return Vectorized(Sleef_logf16_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2f16_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10f16_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pf16_u10(values)); + } + Vectorized frac() const; + Vectorized sin() const { + return Vectorized(Sleef_sinf16_u10(values)); + } + Vectorized sinh() const { + return 
Vectorized(Sleef_sinhf16_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosf16_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshf16_u10(values)); + } + Vectorized ceil() const { + return _mm512_ceil_ps(values); + } + Vectorized floor() const { + return _mm512_floor_ps(values); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotf16_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized neg() const { + return _mm512_xor_ps(_mm512_set1_ps(-0.f), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterf16(values, b)); + } + Vectorized round() const { + return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tanf16_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhf16_u10(values)); + } + Vectorized trunc() const { + return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammaf16_u10(values)); + } + Vectorized sqrt() const { + return _mm512_sqrt_ps(values); + } + Vectorized reciprocal() const { + return _mm512_div_ps(_mm512_set1_ps(1), values); + } + Vectorized rsqrt() const { + return _mm512_div_ps(_mm512_set1_ps(1), _mm512_sqrt_ps(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powf16_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. 
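// Each comparison operator below returns a full-lane mask rather than 0/1
// values: lanes that compare true hold the bit pattern 0xFFFFFFFF (which
// reads back as a NaN when reinterpreted as float) and lanes that compare
// false hold 0; the eq()/ne()/gt()/... helpers further down AND this mask
// with 1.0f to turn it into a numeric 0-or-1 result.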
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator!=(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator<(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator<=(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator>(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator>=(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_ps(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_ps(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm512_mul_ps(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm512_div_ps(a, b); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi32(0); + auto max = _mm512_max_ps(a, b); + auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask, + 0xFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. + return _mm512_or_ps(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi32(0); + auto min = _mm512_min_ps(a, b); + auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask, + 0xFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. 
+ return _mm512_or_ps(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm512_min_ps(max, _mm512_max_ps(min, a)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm512_min_ps(max, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm512_max_ps(min, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm512_and_ps(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm512_or_ps(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm512_xor_ps(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +template <> +inline void convert(const float* src, float* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i)); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmadd_ps(a, b, c); +} + +#endif + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_int.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_int.h new file mode 100644 index 0000000000000000000000000000000000000000..c2cbc0b1d7f944d699b872f2c9cd4cc0271b6af7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_int.h @@ -0,0 +1,1168 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
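The integer specializations that follow encode boolean lanes as all-ones values, so blendv first converts that vector mask into an AVX-512 k-register by comparing against all-ones, then feeds the k-register to a masked blend. A minimal sketch of the two-step pattern with raw intrinsics (assuming AVX-512F hardware; the lane values are made up for illustration):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m512i a = _mm512_set1_epi32(1);
  __m512i b = _mm512_set1_epi32(2);
  // A "vector bool": lanes 0-7 all-ones (true), lanes 8-15 zero (false),
  // mimicking what the integer comparison operators in this header produce.
  __m512i vmask = _mm512_mask_set1_epi32(_mm512_setzero_si512(), 0x00FF, 0xFFFFFFFF);
  // Step 1: vector mask -> k-mask by testing for the all-ones pattern.
  __mmask16 k = _mm512_cmp_epi32_mask(vmask, _mm512_set1_epi32(0xFFFFFFFF),
                                      _MM_CMPINT_EQ);
  // Step 2: masked blend; a set k-bit picks the lane from b.
  __m512i r = _mm512_mask_blend_epi32(k, a, b);
  alignas(64) int out[16];
  _mm512_store_si512(reinterpret_cast<__m512i*>(out), r);
  for (int i = 0; i < 16; ++i) std::printf("%d ", out[i]);  // eight 2s, then eight 1s
  std::printf("\n");
  return 0;
}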
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include + +namespace at { +namespace vec { +inline namespace CPU_CAPABILITY { + +#ifdef CPU_CAPABILITY_AVX512 + +struct Vectorizedi { +protected: + __m512i values; + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; + static inline __m512i invert(const __m512i& v) { + const auto ones = _mm512_set1_epi64(-1); + return _mm512_xor_si512(ones, v); + } +public: + Vectorizedi() {} + Vectorizedi(__m512i v) : values(v) {} + operator __m512i() const { + return values; + } +}; + +#else + +struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined + +#endif // CPU_CAPABILITY_AVX512 + +#ifdef CPU_CAPABILITY_AVX512 + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int64_t; + using size_type = int; + static constexpr size_type size() { + return 8; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int64_t v) { values = _mm512_set1_epi64(v); } + Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4, + int64_t val5, int64_t val6, int64_t val7, int64_t val8) { + values = _mm512_setr_epi64(val1, val2, val3, val4, + val5, val6, val7, val8); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + return _mm512_mask_blend_epi64(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto msb_one = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF); + auto mask_ = _mm512_cmp_epi64_mask(mask, msb_one, _MM_CMPINT_EQ); + return _mm512_mask_blend_epi64(mask_, a.values, b.values); + } + template + static Vectorized arange(int64_t base = 0, step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm512_loadu_si512(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ int64_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int64_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. 
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html + _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values); + } else if (count > 0) { + __at_align__ int64_t tmp_values[size()]; + _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int64_t)); + } + } + const int64_t& operator[](int idx) const = delete; + int64_t& operator[](int idx) = delete; + Vectorized abs() const { + auto is_larger_mask = _mm512_cmpgt_epi64_mask(zero_vector, values); + auto is_larger = _mm512_mask_set1_epi64(zero_vector, is_larger_mask, 0xFFFFFFFFFFFFFFFF); + auto inverse = _mm512_xor_si512(values, is_larger); + return _mm512_sub_epi64(inverse, is_larger); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_epi64(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + auto mask = _mm512_cmpeq_epi64_mask(values, other.values); + return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF); + } + Vectorized operator!=(const Vectorized& other) const { + auto mask = _mm512_cmpneq_epi64_mask(values, other.values); + return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF); + } + Vectorized operator<(const Vectorized& other) const { + auto mask = _mm512_cmplt_epi64_mask(values, other.values); + return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF); + } + Vectorized operator<=(const Vectorized& other) const { + auto mask = _mm512_cmple_epi64_mask(values, other.values); + return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF); + } + Vectorized operator>(const Vectorized& other) const { + auto mask = _mm512_cmpgt_epi64_mask(values, other.values); + return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF); + } + Vectorized operator>=(const Vectorized& other) const { + auto mask = _mm512_cmpge_epi64_mask(values, other.values); + return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +class Vectorized : public Vectorizedi { +private: + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; + static const Vectorized ones; +public: + using value_type = int32_t; + static constexpr int size() { + return 16; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int32_t v) { values = _mm512_set1_epi32(v); } + Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4, + int32_t val5, int32_t val6, int32_t val7, int32_t val8, + int32_t val9, int32_t val10, int32_t val11, int32_t val12, + int32_t val13, int32_t val14, int32_t val15, int32_t val16) { + values = _mm512_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + return _mm512_mask_blend_epi32(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const 
Vectorized& mask) { + auto msb_one = _mm512_set1_epi32(0xFFFFFFFF); + auto mask_ = _mm512_cmp_epi32_mask(mask, msb_one, _MM_CMPINT_EQ); + return _mm512_mask_blend_epi32(mask_, a.values, b.values); + } + template + static Vectorized arange(int32_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int32_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm512_loadu_si512(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int32_t count) { + __at_align__ int32_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int32_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. 
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html + _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values); + } else if (count > 0) { + __at_align__ int32_t tmp_values[size()]; + _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int32_t)); + } + } + const int32_t& operator[](int idx) const = delete; + int32_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm512_abs_epi32(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_epi32(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + auto mask = _mm512_cmpeq_epi32_mask(values, other.values); + return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF); + } + Vectorized operator!=(const Vectorized& other) const { + auto mask = _mm512_cmpneq_epi32_mask(values, other.values); + return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF); + } + Vectorized operator<(const Vectorized& other) const { + auto mask = _mm512_cmplt_epi32_mask(values, other.values); + return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF); + } + Vectorized operator<=(const Vectorized& other) const { + auto mask = _mm512_cmple_epi32_mask(values, other.values); + return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF); + } + Vectorized operator>(const Vectorized& other) const { + auto mask = _mm512_cmpgt_epi32_mask(values, other.values); + return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF); + } + Vectorized operator>=(const Vectorized& other) const { + auto mask = _mm512_cmpge_epi32_mask(values, other.values); + return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF); + } + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +inline void convert(const int32_t *src, float *dst, int64_t n) { + int64_t i; + // int32_t and float have same size +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_vec = _mm512_loadu_si512(reinterpret_cast(src + i)); + auto output_vec = _mm512_cvtepi32_ps(input_vec); + _mm512_storeu_ps(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int32_t *src, double *dst, int64_t n) { + int64_t i; + // int32_t has half the size of double +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_256_vec = _mm256_loadu_si256(reinterpret_cast(src + i)); + auto output_vec = _mm512_cvtepi32_pd(input_256_vec); + _mm512_storeu_pd(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; 
+public: + using value_type = int16_t; + static constexpr int size() { + return 32; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int16_t v) { values = _mm512_set1_epi16(v); } + Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4, + int16_t val5, int16_t val6, int16_t val7, int16_t val8, + int16_t val9, int16_t val10, int16_t val11, int16_t val12, + int16_t val13, int16_t val14, int16_t val15, int16_t val16, + int16_t val17, int16_t val18, int16_t val19, int16_t val20, + int16_t val21, int16_t val22, int16_t val23, int16_t val24, + int16_t val25, int16_t val26, int16_t val27, int16_t val28, + int16_t val29, int16_t val30, int16_t val31, int16_t val32) { + values = _mm512_set_epi16(val32, val31, val30, val29, val28, val27, val26, val25, + val24, val23, val22, val21, val20, val19, val18, val17, + val16, val15, val14, val13, val12, val11, val10, val9, + val8, val7, val6, val5, val4, val3, val2, val1); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + return _mm512_mask_blend_epi16(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto msb_one = _mm512_set1_epi16(0xFFFF); + auto mask_ = _mm512_cmp_epi16_mask(mask, msb_one, _MM_CMPINT_EQ); + return _mm512_mask_blend_epi16(mask_, a.values, b.values); + } + template + static Vectorized arange(int16_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step, + base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step, + base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step, + base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step, + base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step + ); + } + static Vectorized + set(Vectorized a, Vectorized b, int16_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<0x1>(a, b); + case 2: + return blend<0x3>(a, b); + case 3: + return blend<0x7>(a, b); + case 4: + return blend<0xF>(a, b); + case 5: + return blend<0x1F>(a, b); + case 6: + return blend<0x3F>(a, b); + case 7: + return blend<0x7F>(a, b); + case 8: + return blend<0xFF>(a, b); + case 9: + return blend<0x1FF>(a, b); + case 10: + return blend<0x3FF>(a, b); + case 11: + return blend<0x7FF>(a, b); + case 12: + return blend<0xFFF>(a, b); + case 13: + return blend<0x1FFF>(a, b); + case 14: + return blend<0x3FFF>(a, b); + case 15: + return blend<0x7FFF>(a, b); + case 16: + return blend<0xFFFF>(a, b); + case 17: + return blend<0x1FFFF>(a, b); + case 18: + return blend<0x3FFFF>(a, b); + case 19: + return blend<0x7FFFF>(a, b); + case 20: + return blend<0xFFFFF>(a, b); + case 21: + return blend<0x1FFFFF>(a, b); + case 22: + return blend<0x3FFFFF>(a, b); + case 23: + return blend<0x7FFFFF>(a, b); + case 24: + return blend<0xFFFFFF>(a, b); + case 25: + return blend<0x1FFFFFF>(a, b); + case 26: + return blend<0x3FFFFFF>(a, b); + case 27: + return blend<0x7FFFFFF>(a, b); + case 28: + return blend<0xFFFFFFF>(a, b); + case 29: + return blend<0x1FFFFFFF>(a, b); + case 30: + return blend<0x3FFFFFFF>(a, b); + case 31: + return blend<0x7FFFFFFF>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm512_loadu_si512(reinterpret_cast(ptr)); 
+ } + static Vectorized loadu(const void* ptr, int16_t count) { + __at_align__ int16_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html + _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values); + } else if (count > 0) { + __at_align__ int16_t tmp_values[size()]; + _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + const int16_t& operator[](int idx) const = delete; + int16_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm512_abs_epi16(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + auto mask = _mm512_cmpeq_epi16_mask(values, other.values); + return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF); + } + Vectorized operator!=(const Vectorized& other) const { + auto mask = _mm512_cmpneq_epi16_mask(values, other.values); + return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF); + } + Vectorized operator<(const Vectorized& other) const { + auto mask = _mm512_cmplt_epi16_mask(values, other.values); + return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF); + } + Vectorized operator<=(const Vectorized& other) const { + auto mask = _mm512_cmple_epi16_mask(values, other.values); + return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF); + } + Vectorized operator>(const Vectorized& other) const { + auto mask = _mm512_cmpgt_epi16_mask(values, other.values); + return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF); + } + Vectorized operator>=(const Vectorized& other) const { + auto mask = _mm512_cmpge_epi16_mask(values, other.values); + return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +class Vectorized : public Vectorizedi { +private: + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; + static const Vectorized ones; +public: + using value_type = int8_t; + static constexpr int size() { + return 64; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int8_t v) { values = _mm512_set1_epi8(v); } + Vectorized(int8_t val1, int8_t val2, int8_t val3, int8_t val4, + int8_t val5, int8_t val6, int8_t val7, int8_t val8, + int8_t val9, int8_t val10, int8_t val11, int8_t val12, + int8_t val13, int8_t val14, int8_t val15, 
int8_t val16, + int8_t val17, int8_t val18, int8_t val19, int8_t val20, + int8_t val21, int8_t val22, int8_t val23, int8_t val24, + int8_t val25, int8_t val26, int8_t val27, int8_t val28, + int8_t val29, int8_t val30, int8_t val31, int8_t val32, + int8_t val33, int8_t val34, int8_t val35, int8_t val36, + int8_t val37, int8_t val38, int8_t val39, int8_t val40, + int8_t val41, int8_t val42, int8_t val43, int8_t val44, + int8_t val45, int8_t val46, int8_t val47, int8_t val48, + int8_t val49, int8_t val50, int8_t val51, int8_t val52, + int8_t val53, int8_t val54, int8_t val55, int8_t val56, + int8_t val57, int8_t val58, int8_t val59, int8_t val60, + int8_t val61, int8_t val62, int8_t val63, int8_t val64){ + values = _mm512_set_epi8(val64, val63, val62, val61, val60, val59, val58, val57, + val56, val55, val54, val53,val52, val51, val50, val49, + val48, val47, val46, val45, val44, val43, val42, val41, + val40, val39, val38, val37, val36, val35, val34, val33, + val32, val31, val30, val29, val28, val27, val26, val25, + val24, val23, val22, val21, val20, val19, val18, val17, + val16, val15, val14, val13, val12, val11, val10, val9, + val8, val7, val6, val5, val4, val3, val2, val1); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + return _mm512_mask_blend_epi8(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto msb_one = _mm512_set1_epi8(0xFF); + auto mask_ = _mm512_cmp_epi8_mask(mask, msb_one, _MM_CMPINT_EQ); + return _mm512_mask_blend_epi8(mask_, a.values, b.values); + } + template + static Vectorized arange(int8_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step, + base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step, + base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step, + base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step, + base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step, + base + 32 * step, base + 33 * step, base + 34 * step, base + 35 * step, + base + 36 * step, base + 37 * step, base + 38 * step, base + 39 * step, + base + 40 * step, base + 41 * step, base + 42 * step, base + 43 * step, + base + 44 * step, base + 45 * step, base + 46 * step, base + 47 * step, + base + 48 * step, base + 49 * step, base + 50 * step, base + 51 * step, + base + 52 * step, base + 53 * step, base + 54 * step, base + 55 * step, + base + 56 * step, base + 57 * step, base + 58 * step, base + 59 * step, + base + 60 * step, base + 61 * step, base + 62 * step, base + 63 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int8_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<0x1>(a, b); + case 2: + return blend<0x3>(a, b); + case 3: + return blend<0x7>(a, b); + case 4: + return blend<0xF>(a, b); + case 5: + return blend<0x1F>(a, b); + case 6: + return blend<0x3F>(a, b); + case 7: + return blend<0x7F>(a, b); + case 8: + return blend<0xFF>(a, b); + case 9: + return blend<0x1FF>(a, b); + case 10: + return blend<0x3FF>(a, b); + case 11: + return blend<0x7FF>(a, b); + case 12: + return blend<0xFFF>(a, b); + case 13: + return blend<0x1FFF>(a, b); + case 14: + return blend<0x3FFF>(a, b); + case 15: + return 
blend<0x7FFF>(a, b); + case 16: + return blend<0xFFFF>(a, b); + case 17: + return blend<0x1FFFF>(a, b); + case 18: + return blend<0x3FFFF>(a, b); + case 19: + return blend<0x7FFFF>(a, b); + case 20: + return blend<0xFFFFF>(a, b); + case 21: + return blend<0x1FFFFF>(a, b); + case 22: + return blend<0x3FFFFF>(a, b); + case 23: + return blend<0x7FFFFF>(a, b); + case 24: + return blend<0xFFFFFF>(a, b); + case 25: + return blend<0x1FFFFFF>(a, b); + case 26: + return blend<0x3FFFFFF>(a, b); + case 27: + return blend<0x7FFFFFF>(a, b); + case 28: + return blend<0xFFFFFFF>(a, b); + case 29: + return blend<0x1FFFFFFF>(a, b); + case 30: + return blend<0x3FFFFFFF>(a, b); + case 31: + return blend<0x7FFFFFFF>(a, b); + case 32: + return blend<0xFFFFFFFF>(a, b); + case 33: + return blend<0x1FFFFFFFF>(a, b); + case 34: + return blend<0x3FFFFFFFF>(a, b); + case 35: + return blend<0x7FFFFFFFF>(a, b); + case 36: + return blend<0xFFFFFFFFF>(a, b); + case 37: + return blend<0x1FFFFFFFFF>(a, b); + case 38: + return blend<0x3FFFFFFFFF>(a, b); + case 39: + return blend<0x7FFFFFFFFF>(a, b); + case 40: + return blend<0xFFFFFFFFFF>(a, b); + case 41: + return blend<0x1FFFFFFFFFF>(a, b); + case 42: + return blend<0x3FFFFFFFFFF>(a, b); + case 43: + return blend<0x7FFFFFFFFFF>(a, b); + case 44: + return blend<0xFFFFFFFFFFF>(a, b); + case 45: + return blend<0x1FFFFFFFFFFF>(a, b); + case 46: + return blend<0x3FFFFFFFFFFF>(a, b); + case 47: + return blend<0x7FFFFFFFFFFF>(a, b); + case 48: + return blend<0xFFFFFFFFFFFF>(a, b); + case 49: + return blend<0x1FFFFFFFFFFFF>(a, b); + case 50: + return blend<0x3FFFFFFFFFFFF>(a, b); + case 51: + return blend<0x7FFFFFFFFFFFF>(a, b); + case 52: + return blend<0xFFFFFFFFFFFFF>(a, b); + case 53: + return blend<0x1FFFFFFFFFFFFF>(a, b); + case 54: + return blend<0x3FFFFFFFFFFFFF>(a, b); + case 55: + return blend<0x7FFFFFFFFFFFFF>(a, b); + case 56: + return blend<0xFFFFFFFFFFFFFF>(a, b); + case 57: + return blend<0x1FFFFFFFFFFFFFF>(a, b); + case 58: + return blend<0x3FFFFFFFFFFFFFF>(a, b); + case 59: + return blend<0x7FFFFFFFFFFFFFF>(a, b); + case 60: + return blend<0xFFFFFFFFFFFFFFF>(a, b); + case 61: + return blend<0x1FFFFFFFFFFFFFFF>(a, b); + case 62: + return blend<0x3FFFFFFFFFFFFFFF>(a, b); + case 63: + return blend<0x7FFFFFFFFFFFFFFF>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm512_loadu_si512(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int8_t count) { + __at_align__ int8_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int8_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. 
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html + _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values); + } else if (count > 0) { + __at_align__ int8_t tmp_values[size()]; + _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int8_t)); + } + } + const int8_t& operator[](int idx) const = delete; + int8_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm512_abs_epi8(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_epi8(0); + } + Vectorized conj() const { + return *this; + } + Vectorized frac() const; + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + auto mask = _mm512_cmpeq_epi8_mask(values, other.values); + return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF); + } + Vectorized operator!=(const Vectorized& other) const { + auto mask = _mm512_cmpneq_epi8_mask(values, other.values); + return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF); + } + Vectorized operator<(const Vectorized& other) const { + auto mask = _mm512_cmplt_epi8_mask(values, other.values); + return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF); + } + Vectorized operator<=(const Vectorized& other) const { + auto mask = _mm512_cmple_epi8_mask(values, other.values); + return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF); + } + Vectorized operator>(const Vectorized& other) const { + auto mask = _mm512_cmpgt_epi8_mask(values, other.values); + return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF); + } + Vectorized operator>=(const Vectorized& other) const { + auto mask = _mm512_cmpge_epi8_mask(values, other.values); + return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_epi64(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_epi32(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_epi16(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_epi8(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_epi64(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_epi32(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_epi16(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_epi8(a, b); +} + +// Negation. 
Defined here so we can utilize operator- +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm512_mullo_epi64(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm512_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm512_mullo_epi16(a, b); +} + +template +Vectorized inline int_elementwise_binary_512(const Vectorized& a, const Vectorized& b, Op op) { + T values_a[Vectorized::size()]; + T values_b[Vectorized::size()]; + a.store(values_a); + b.store(values_b); + for (int i = 0; i != Vectorized::size(); i++) { + values_a[i] = op(values_a[i], values_b[i]); + } + return Vectorized::loadu(values_a); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + // We don't have an instruction for multiplying int8_t + return int_elementwise_binary_512(a, b, std::multiplies()); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm512_min_epi64(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm512_min_epi32(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm512_min_epi16(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm512_min_epi8(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm512_max_epi64(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm512_max_epi32(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm512_max_epi16(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm512_max_epi8(a, b); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm512_min_epi64(max_val, _mm512_max_epi64(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm512_min_epi32(max_val, _mm512_max_epi32(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm512_min_epi16(max_val, _mm512_max_epi16(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm512_min_epi8(max_val, _mm512_max_epi8(a, min_val)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm512_min_epi64(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm512_min_epi32(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm512_min_epi16(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return 
_mm512_min_epi8(max_val, a);
+}
+
+template <>
+Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
+  return _mm512_max_epi64(min_val, a);
+}
+
+template <>
+Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
+  return _mm512_max_epi32(min_val, a);
+}
+
+template <>
+Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
+  return _mm512_max_epi16(min_val, a);
+}
+
+template <>
+Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
+  return _mm512_max_epi8(min_val, a);
+}
+
+template <typename T>
+Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
+  return Vectorized<int32_t>::loadu(ptr);
+}
+
+template<>
+Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
+  return _mm512_cvtepi8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
+}
+
+template<>
+Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
+  return _mm512_cvtepu8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
+}
+
+template <>
+Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return int_elementwise_binary_512(a, b, std::divides<int64_t>());
+}
+template <>
+Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
+  return int_elementwise_binary_512(a, b, std::divides<int32_t>());
+}
+template <>
+Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
+  return int_elementwise_binary_512(a, b, std::divides<int16_t>());
+}
+template <>
+Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
+  return int_elementwise_binary_512(a, b, std::divides<int8_t>());
+}
+
+template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
+  return _mm512_and_si512(a, b);
+}
+template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
+  return _mm512_or_si512(a, b);
+}
+template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
+  return _mm512_xor_si512(a, b);
+}
+template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
+inline Vectorized<T> operator~(const Vectorized<T>& a) {
+  return _mm512_xor_si512(a, _mm512_set1_epi32(-1));
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
+  return (*this == other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
+  return (*this != other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
+  return (*this > other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
+  return (*this >= other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
+  return (*this < other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
+  return (*this <= other) & Vectorized<int64_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
+  return (*this == other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
+  return (*this != other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
+  return (*this > other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
+  return (*this >= other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
+  return (*this < other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
+  return (*this <= other) & Vectorized<int32_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
+  return (*this == other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
+  return (*this != other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
+  return (*this > other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
+  return (*this >= other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
+  return (*this < other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
+  return (*this <= other) & Vectorized<int16_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
+  return (*this == other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
+  return (*this != other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
+  return (*this > other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
+  return (*this >= other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
+  return (*this < other) & Vectorized<int8_t>(1);
+}
+
+inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
+  return (*this <= other) & Vectorized<int8_t>(1);
+}
+
+#endif
+
+}}}
diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_qint.h b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_qint.h
new file mode 100644
index 0000000000000000000000000000000000000000..87cf44283c0be17fbfb25b6eb0395a23ad0ac87f
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cpu/vec/vec512/vec512_qint.h
@@ -0,0 +1,1254 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX]
+
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+#include <ATen/native/quantized/AffineQuantizerBase.h>
+
+#include <c10/util/irange.h>
+#include <c10/util/qint32.h>
+#include <c10/util/qint8.h>
+#include <c10/util/quint8.h>
+
+#include <array>
+
+// This file defines Vectorized<> for the quantized types.
+//
+//
+// Currently, we simply use these classes as efficient converters between
+// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
+// where doing the arithmetic in full-precision is acceptable (e.g.
+// elementwise operators).
+//
+//
+// Conversions are as follows:
+//  Vectorized<qint8> -> 4x Vectorized<float>
+//  Vectorized<quint8> -> 4x Vectorized<float>
+//  Vectorized<qint32> -> 1x Vectorized<float>
+//
+// The size of the returned float vector is specified by the special
+// constexpr function float_num_vecs. The type of the value returned
+// from dequantize (and expected as an argument to quantize) is
+// specified by float_vec_return_type.
+//
+// When writing kernels with these vectors, it is expected that floating-
+// point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
+// iterations.
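+//
+// A minimal usage sketch of that pattern (illustrative only; `src`, `dst`,
+// `scale_v`, `zp_v`, `scale_zp_premul_v`, `scale`, `zero_point` and
+// `inverse_scale` are assumed to be prepared by the caller):
+//
+//   using qvec = Vectorized<c10::quint8>;
+//   auto qx = qvec::loadu(src);                                // 64 quint8 lanes
+//   auto fx = qx.dequantize(scale_v, zp_v, scale_zp_premul_v); // 4x Vectorized<float>
+//   for (int n = 0; n < qvec::float_num_vecs(); ++n) {
+//     fx[n] = fx[n] * fx[n];  // arbitrary full-precision elementwise op
+//   }
+//   qvec::quantize(fx, scale, zero_point, inverse_scale).store(dst);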
+
+namespace at {
+namespace vec {
+inline namespace CPU_CAPABILITY {
+
+#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
+
+struct Vectorizedqi {
+ protected:
+  __m512i vals __attribute__((aligned(64)));
+
+ public:
+  Vectorizedqi() {}
+  Vectorizedqi(__m512i v) : vals(v) {}
+  operator __m512i() const {
+    return vals;
+  }
+};
+
+
+template <typename T>
+__m512i pack_saturate_and_clamp(
+    __m512i first,
+    __m512i second,
+    T min_val,
+    T max_val);
+
+template <>
+inline __m512i pack_saturate_and_clamp<int32_t>(
+    __m512i first,
+    __m512i second,
+    int32_t min_val,
+    int32_t max_val) {
+  // This function is for linkage only, will not be used
+  AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
+}
+
+template <>
+inline __m512i pack_saturate_and_clamp<int8_t>(
+    __m512i first,
+    __m512i second,
+    int8_t min_val,
+    int8_t max_val) {
+  __m512i packed_and_sat = _mm512_packs_epi16(first, second);
+  return _mm512_max_epi8(
+      _mm512_set1_epi8(min_val),
+      _mm512_min_epi8(packed_and_sat, _mm512_set1_epi8(max_val)));
+}
+
+template <>
+inline __m512i pack_saturate_and_clamp<uint8_t>(
+    __m512i first,
+    __m512i second,
+    uint8_t min_val,
+    uint8_t max_val) {
+  __m512i packed_and_sat = _mm512_packus_epi16(first, second);
+  return _mm512_max_epu8(
+      _mm512_set1_epi8(min_val),
+      _mm512_min_epu8(packed_and_sat, _mm512_set1_epi8(max_val)));
+}
+
+
+template <typename T>
+inline void __attribute__((always_inline)) QuantizeAvx512(
+    const float* src,
+    typename T::underlying* dst,
+    int len,
+    float inverse_scale,
+    int64_t zero_point) {
+  constexpr int VLEN = 16;
+  constexpr auto min_val = std::numeric_limits<typename T::underlying>::min();
+  constexpr auto max_val = std::numeric_limits<typename T::underlying>::max();
+  const __m512i min_v = _mm512_set1_epi32(min_val);
+  const __m512i max_v = _mm512_set1_epi32(max_val);
+  // This is the largest int32 value < int32_max exactly representable in float
+  constexpr int32_t int32_float_max_val =
+      std::numeric_limits<int32_t>::max() - 127;
+  int i = 0;
+  __m512 inverse_scale_v = _mm512_set1_ps(inverse_scale);
+  // clang-format off
+  static const __m512i shuffle_mask_v = _mm512_set_epi8(
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00);
+  // clang-format on
+  __m512i permute_mask_v =
+      _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
+                       0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
+  __m512i permute_mask_l8_v =
+      _mm512_set_epi32(0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0c, 0x08,
+                       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
+  int len_aligned = len / (VLEN * 4) * (VLEN * 4);
+  for (; i < len_aligned; i += 4 * VLEN) {
+    // x
+    __m512 x_vals = _mm512_load_ps(src + i);
+    __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
+    // If the floating point value is greater than int32_max,
+    // _mm512_cvtps_epi32 converts them to -ve. Clip at int32_float_max_val to
+    // avoid this.
+    x_transformed_v =
+        _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
+    // y
+    __m512 y_vals = _mm512_load_ps(src + i + VLEN);
+    __m512 y_transformed_v = _mm512_mul_ps(y_vals, inverse_scale_v);
+    y_transformed_v =
+        _mm512_min_ps(y_transformed_v, _mm512_set1_ps(int32_float_max_val));
+    // z
+    __m512 z_vals = _mm512_load_ps(src + i + 2 * VLEN);
+    __m512 z_transformed_v = _mm512_mul_ps(z_vals, inverse_scale_v);
+    z_transformed_v =
+        _mm512_min_ps(z_transformed_v, _mm512_set1_ps(int32_float_max_val));
+    // w
+    __m512 w_vals = _mm512_load_ps(src + i + 3 * VLEN);
+    __m512 w_transformed_v = _mm512_mul_ps(w_vals, inverse_scale_v);
+    w_transformed_v =
+        _mm512_min_ps(w_transformed_v, _mm512_set1_ps(int32_float_max_val));
+
+    __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
+    __m512i y_rounded_v = _mm512_cvtps_epi32(y_transformed_v);
+    __m512i z_rounded_v = _mm512_cvtps_epi32(z_transformed_v);
+    __m512i w_rounded_v = _mm512_cvtps_epi32(w_transformed_v);
+
+    // add zero point
+    x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point));
+    y_rounded_v = _mm512_add_epi32(y_rounded_v, _mm512_set1_epi32(zero_point));
+    z_rounded_v = _mm512_add_epi32(z_rounded_v, _mm512_set1_epi32(zero_point));
+    w_rounded_v = _mm512_add_epi32(w_rounded_v, _mm512_set1_epi32(zero_point));
+
+    __m512i xy_packed_v = _mm512_packs_epi32(x_rounded_v, y_rounded_v);
+    __m512i zw_packed_v = _mm512_packs_epi32(z_rounded_v, w_rounded_v);
+    __m512i xyzw_clamped_v = pack_saturate_and_clamp<typename T::underlying>(
+        xy_packed_v, zw_packed_v, min_val, max_val);
+
+    xyzw_clamped_v =
+        _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
+    _mm512_storeu_si512(reinterpret_cast<__m512i*>(dst + i), xyzw_clamped_v);
+  }
+
+  // Additional 8-lane AVX512 version to take advantage when len is smaller
+  // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
+  for (; i < len / VLEN * VLEN; i += VLEN) {
+    __m512 x_vals = _mm512_load_ps(src + i);
+    __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
+    x_transformed_v =
+        _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
+    __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
+    x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point));
+    __m512i x_clipped_v =
+        _mm512_max_epi32(min_v, _mm512_min_epi32(max_v, x_rounded_v));
+
+    x_clipped_v = _mm512_shuffle_epi8(x_clipped_v, shuffle_mask_v);
+    x_clipped_v = _mm512_permutexvar_epi32(permute_mask_l8_v, x_clipped_v);
+    _mm_storeu_si128(
+        reinterpret_cast<__m128i*>(dst + i),
+        _mm512_castsi512_si128(x_clipped_v));
+  }
+
+  for (; i < len; ++i) {
+    float transformed = src[i] * inverse_scale;
+
+    // Not exactly the same behavior as the vectorized code.
+    // The vectorized code above always rounds to even in halfway cases
+    // (https://software.intel.com/en-us/node/523819), but std::nearbyint
+    // does the same only when the current rounding mode is FE_TONEAREST.
+    // However, in practice, this should not be a problem because most cases
+    // use the default rounding mode FE_TONEAREST.
+    // Note that we cannot implement the same behavior as the vectorized code
+    // using std::round because it does rounding away from zero in halfway
+    // cases.
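+    // Worked halfway-case example for the note above: under FE_TONEAREST,
+    // std::nearbyint(2.5f) == 2.0f and std::nearbyint(3.5f) == 4.0f (ties go
+    // to even), matching _mm512_cvtps_epi32, whereas std::round(2.5f) == 3.0f
+    // (ties away from zero).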
+ transformed = zero_point + nearbyint(transformed); + float clipped = + std::min(std::max(transformed, float(min_val)), float(max_val)); + dst[i] = clipped; + } +} + +template<> +struct Vectorized : public Vectorizedqi { + using size_type = int; + static constexpr size_type size() { + return 16; + } + + static constexpr int float_num_vecs() { + return 1; + } + + static constexpr int int_num_vecs() { + return 1; + } + + using float_vec_return_type = std::array, 1>; + using int_vec_return_type = std::array, 1>; + using value_type = c10::qint32::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m512i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint32& val) { + value_type uw = val.val_; + vals = _mm512_set1_epi32(uw); + } + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm512_storeu_si512((__m512i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + __m512 float_vals = _mm512_cvtepi32_ps(vals); + return {vec::fmadd(scale, Vectorized(float_vals), scale_zp_premul)}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + Vectorized retval; + auto rhs_data = (__m512)rhs[0]; + at::native::quantize_vec( + scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 16); + return retval; + } + + Vectorized maximum(Vectorized b) const { + return _mm512_max_epi32(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm512_min_epi32(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm512_min_epi32( + _mm512_max_epi32(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + return {_mm512_sub_epi32(vals, b)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m512 multiplier_v = _mm512_set1_ps(multiplier); + __m512i zero_point_v = _mm512_set1_epi32(zero_point); + + __m512 scaled = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier_v); + __m512i rounded = _mm512_cvtps_epi32(scaled); + return _mm512_add_epi32(rounded, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm512_loadu_si512((const __m512i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + return _mm512_mullo_epi32(a, b); +} + +template <> +Vectorized 
inline operator+( + const Vectorized& a, + const Vectorized& b) { + return _mm512_add_epi32(a, b); +} + +/* + * Convert values from int32 back to int8/uint8 + */ +template +__m512i RequantizeAvx512( + const std::array, 4>& inp, + __m512 multiplier, + __m512i zp) { + static_assert( + std::is_same::value || std::is_same::value, + "Only int8_t/uint8_t are supported"); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + __m512i permute_mask_v = + _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02, + 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00); + __m512 x_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier); + __m512 y_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[1]), multiplier); + __m512 z_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[2]), multiplier); + __m512 w_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[3]), multiplier); + + __m512i x_rounded_v = _mm512_cvtps_epi32(x_scaled_v); + __m512i y_rounded_v = _mm512_cvtps_epi32(y_scaled_v); + __m512i z_rounded_v = _mm512_cvtps_epi32(z_scaled_v); + __m512i w_rounded_v = _mm512_cvtps_epi32(w_scaled_v); + + /* Add zero point */ + __m512i x_v = _mm512_add_epi32(x_rounded_v, zp); + __m512i y_v = _mm512_add_epi32(y_rounded_v, zp); + __m512i z_v = _mm512_add_epi32(z_rounded_v, zp); + __m512i w_v = _mm512_add_epi32(w_rounded_v, zp); + + /* Pack to int16_t and saturate */ + __m512i xy_packed_v = _mm512_packs_epi32(x_v, y_v); + __m512i zw_packed_v = _mm512_packs_epi32(z_v, w_v); + + __m512i xyzw_clamped_v = + pack_saturate_and_clamp(xy_packed_v, zw_packed_v, min_val, max_val); + + /* + * xyzw_clamped_v has results in the following layout so we need to + * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 x8-11 y8-11 z8-11 w8-11 x12-15 y12-15 z12-15 w12-15 + */ + xyzw_clamped_v = _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v); + return xyzw_clamped_v; +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 64; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::qint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + + Vectorized() {} + Vectorized(__m512i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint8& val) { + value_type uw = val.val_; + vals = _mm512_set1_epi8(uw); + } + + // This is needed because the compiler emits awful code for the default + // constructor for moving the enum + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + + // This is added to avoid error: definition of implicit copy assignment operator + // for 'Vectorized' is deprecated because it has a user-declared + // copy constructor [-Werror,-Wdeprecated-copy] + Vectorized& operator=(const Vectorized&) = default; + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm512_storeu_si512((__m512i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. 
We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + private: + __m512i cvtepi8_epi32(__m128i epi8_vals) const { + return _mm512_cvtepi8_epi32(epi8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_neg_zp_premul) const { + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + + __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_neg_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_neg_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_neg_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_neg_zp_premul); + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + int8_t quantized_values[64]; + QuantizeAvx512( + rhs_data, quantized_values, 64, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm512_max_epi8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm512_min_epi8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm512_min_epi8( + _mm512_max_epi8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + + __m512i int32_val0 = cvtepi8_epi32(int_val0); + __m512i int32_val1 = cvtepi8_epi32(int_val1); + __m512i int32_val2 = cvtepi8_epi32(int_val2); + __m512i int32_val3 = cvtepi8_epi32(int_val3); + + __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]); + __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]); + __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]); + __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]); + + __m512i int32_b0 = cvtepi8_epi32(int_b0); + __m512i int32_b1 = cvtepi8_epi32(int_b1); + __m512i int32_b2 = cvtepi8_epi32(int_b2); + __m512i int32_b3 = cvtepi8_epi32(int_b3); + + __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0); + __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1); + __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2); + __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3); + + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m512 multiplier_v = 
_mm512_set1_ps(multiplier); + __m512i zero_point_v = _mm512_set1_epi32(zero_point); + return RequantizeAvx512(inp, multiplier_v, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm512_loadu_si512((const __m512i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 64; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::quint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m512i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::quint8& val) { + value_type uw = val.val_; + vals = _mm512_set1_epi8(uw); + } + + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + + // This is added to avoid error: definition of implicit copy assignment operator + // for 'Vectorized' is deprecated because it has a user-declared + // copy constructor [-Werror,-Wdeprecated-copy] + Vectorized& operator=(const Vectorized&) = default; + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm512_storeu_si512((__m512i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
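+  // Illustrative partial-load use of this path (`tail_ptr` and the count 13
+  // are hypothetical): after
+  //   auto v = Vectorized<c10::quint8>::loadu(tail_ptr, 13);
+  // lanes 13..63 come from the zero-filled temporaries below, so a later
+  // v.store(out, 13) never depends on uninitialized memory.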
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + private: + __m512i cvtepu8_epi32(__m128i epu8_vals) const { + return _mm512_cvtepu8_epi32(epu8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + + __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_zp_premul); + + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + uint8_t quantized_values[64]; + QuantizeAvx512( + rhs_data, quantized_values, 64, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm512_max_epu8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm512_min_epu8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm512_min_epu8( + _mm512_max_epu8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + + __m512i int32_val0 = cvtepu8_epi32(int_val0); + __m512i int32_val1 = cvtepu8_epi32(int_val1); + __m512i int32_val2 = cvtepu8_epi32(int_val2); + __m512i int32_val3 = cvtepu8_epi32(int_val3); + + __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]); + __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]); + __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]); + __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]); + + __m512i int32_b0 = cvtepu8_epi32(int_b0); + __m512i int32_b1 = cvtepu8_epi32(int_b1); + __m512i int32_b2 = cvtepu8_epi32(int_b2); + __m512i int32_b3 = cvtepu8_epi32(int_b3); + + __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0); + __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1); + __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2); + __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3); + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m512 multiplier_v = _mm512_set1_ps(multiplier); + __m512i zero_point_v = _mm512_set1_epi32(zero_point); + return RequantizeAvx512(inp, multiplier_v, zero_point_v); + } + + private: + + // Load from memory 
constructor + Vectorized(const void* ptr) { + vals = _mm512_loadu_si512((const __m512i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#else + +// NOTE: These are low-performance implementations that we fall back on. + +template < + typename T, + typename float_vec_return_type_, + typename int_vec_return_type_, + int size_> +struct VectorizedQuantizedConverter { + static constexpr int size() { + return size_; + } + + static constexpr int float_num_vecs() { + return size() / 8; + } + + static constexpr int int_num_vecs() { + return size() / 8; + } + + using float_vec_return_type = float_vec_return_type_; + using int_vec_return_type = int_vec_return_type_; + + using value_type = typename T::underlying; + std::array vals; + + VectorizedQuantizedConverter(T val) { + for (const auto i : c10::irange(size())) { + vals[i] = val.val_; + } + } + + VectorizedQuantizedConverter(const void* ptr) { + memcpy(vals.data(), ptr, sizeof(value_type) * size()); + } + + void store(void* ptr, int count = size()) const { + memcpy(ptr, vals.data(), count * sizeof(value_type)); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + float_vec_return_type rv; + for (const auto i : c10::irange(float_num_vecs())) { + float tmp_vals[16]; + for (const auto j : c10::irange(16)) { + tmp_vals[j] = at::native::dequantize_val( + scale[j], zero_point[j], T(vals[16 * i + j])); + } + rv[i] = Vectorized(tmp_vals[0], + tmp_vals[1], + tmp_vals[2], + tmp_vals[3], + tmp_vals[4], + tmp_vals[5], + tmp_vals[6], + tmp_vals[7], + tmp_vals[8], + tmp_vals[9], + tmp_vals[10], + tmp_vals[11], + tmp_vals[12], + tmp_vals[13], + tmp_vals[14], + tmp_vals[15]); + } + return rv; + } + + protected: + VectorizedQuantizedConverter() {} +}; + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16>() {} + Vectorized(c10::qint32 val) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 16], 16); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint32*)qvals.data(), + 16 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + for (const auto i : c10::irange(size())) { + retval[0].vals[i] = vals[i] - b.vals[i]; + } + return retval; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = + nearbyint(static_cast(inp[0].vals[i]) * multiplier) + + zero_point; + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] * b.vals[i]; + } + return retval; +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] + b.vals[i]; + } + return retval; +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64>() {} + Vectorized(c10::qint8 val) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 16], 16); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint8*)qvals.data(), + 16 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64> { + Vectorized() + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64>() {} + Vectorized(c10::quint8 val) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 16], 16); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::quint8*)qvals.data(), + 16 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#endif // defined(CPU_CAPABILITY_AVX512) && !defined(MSVC) + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vec/vec_base.h b/voice_bridge/torch/include/ATen/cpu/vec/vec_base.h new file mode 100644 index 0000000000000000000000000000000000000000..1974d55943b30b26cce0e49ad4a8ddea8534e17e --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vec/vec_base.h @@ -0,0 +1,986 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] +// +// Note [Do not compile initializers with AVX] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// If you define a static initializer in this file, the initialization will use +// AVX instructions because these object files are compiled with AVX enabled. +// We need to avoid non-trivial global data in these architecture specific files +// because there's no way to guard the global initializers with CPU capability +// detection. +// +// See https://github.com/pytorch/pytorch/issues/37577 for an instance +// of this bug in the past. 
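+// For instance (illustrative only), a namespace-scope definition such as
+//   static const Vectorized<float> kOnes(1.0f);
+// would run its initializer in AVX/AVX512-compiled code at program startup,
+// before any runtime CPU-capability dispatch can intervene, and can crash on
+// CPUs without those instruction sets. Construct such values inside functions
+// instead.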
+ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// These macros helped us unify vec_base.h +#ifdef CPU_CAPABILITY_AVX512 +#if defined(__GNUC__) +#define __at_align__ __attribute__((aligned(64))) +#elif defined(_WIN32) +#define __at_align__ __declspec(align(64)) +#else +#define __at_align__ +#endif +#define VECTOR_WIDTH 64 +#define int_vector __m512i +#else // CPU_CAPABILITY_AVX512 +#if defined(__GNUC__) +#define __at_align__ __attribute__((aligned(32))) +#elif defined(_WIN32) +#define __at_align__ __declspec(align(32)) +#else +#define __at_align__ +#endif +#define VECTOR_WIDTH 32 +#define int_vector __m256i +#endif // CPU_CAPABILITY_AVX512 + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { +// at::Half and at::BFloat16 should be treated as floating point +template +struct is_floating_point: + std::integral_constant::value || + std::is_same::value || + std::is_same::value> { +}; + +template struct int_of_size; + +#define DEFINE_INT_OF_SIZE(int_t) \ +template<> struct int_of_size { using type = int_t; } + +DEFINE_INT_OF_SIZE(int64_t); +DEFINE_INT_OF_SIZE(int32_t); +DEFINE_INT_OF_SIZE(int16_t); +DEFINE_INT_OF_SIZE(int8_t); + +#undef DEFINE_INT_OF_SIZE + +template +using int_same_size_t = typename int_of_size::type; + +// NOTE: If you specialize on a type, you must define all operations! + +// emulates Vectorized types +#if defined(__s390x__) +template +#else +template +#endif +struct Vectorized { +private: + __at_align__ T values[VECTOR_WIDTH / sizeof(T)]; +public: + using value_type = T; + using size_type = int; + // Note [constexpr static function to avoid odr-usage compiler bug] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Why, you might ask, is size defined to be a static constexpr function, + // rather than a more ordinary 'static constexpr int size;' variable? + // The problem lies within ODR rules for static constexpr members versus + // static constexpr functions. First, recall that this class (along with all + // of its derivations) live in an anonymous namespace: they are intended to be + // *completely* inlined at their use-sites, because we need to compile it + // multiple times for different instruction sets. + // + // Because of this constraint, we CANNOT provide a single definition for + // any static members in this class; since we want to compile the class + // multiple times, there wouldn't actually be any good place to put the + // definition. Now here is the problem: if we ODR-use a static constexpr + // member, we are *obligated* to provide a definition. Without the + // definition, you get a compile error like: + // + // relocation R_X86_64_PC32 against undefined symbol + // `_ZN2at6vec25612_GLOBAL__N_16VectorizedIdE4sizeE' can not be used when making + // a shared object; recompile with -fPIC + // + // If this were C++17, we could replace a static constexpr variable with + // an inline variable which doesn't require one definition. But we are not + // C++17. So the next best thing is to replace the member with a static + // constexpr (and therefore inline) function, which does not require ODR + // either. + // + // Also, technically according to the C++ standard, we don't have to define + // a constexpr variable if we never odr-use it. 
But it seems that some + // versions GCC/Clang have buggy determinations on whether or not an + // identifier is odr-used or not, and in any case it's hard to tell if + // a variable is odr-used or not. So best to just cut the problem at the root. + static constexpr size_type size_T = sizeof(T); // Workaround to compile with VS2022. + static constexpr size_type size() { + return VECTOR_WIDTH / size_T; + } + Vectorized() : values{static_cast(0)} {} + Vectorized(T val) { + for (int i = 0; i != size(); i++) { + values[i] = val; + } + } + template> + Vectorized(Args... vals) : values{vals...}{ + } + // This also implies const T& operator[](int idx) const + inline operator const T*() const { + return values; + } + // This also implies T& operator[](int idx) + inline operator T*() { + return values; + } + // Return the values as char* for type punning + auto as_bytes() const -> const char* { + return reinterpret_cast(values); + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + int64_t mask = mask_; + Vectorized vector; + for (const auto i : c10::irange(size())) { + if (mask & 0x01) { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + mask = mask >> 1; + } + return vector; + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + Vectorized vector; + int_same_size_t buffer[size()]; + mask.store(buffer); + for (const auto i : c10::irange(size())) { + if (buffer[i] & 0x01) + { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + } + return vector; + } + template // step sometimes requires a higher precision type (e.g., T=int, step_t=double) + static Vectorized arange(T base = static_cast(0), step_t step = static_cast(1)) { + Vectorized vector; + for (const auto i : c10::irange(size())) { + vector.values[i] = base + i * step; + } + return vector; + } + static Vectorized set(const Vectorized& a, const Vectorized& b, int64_t count = size()) { + Vectorized vector; + for (const auto i : c10::irange(size())) { + if (i < count) { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + } + return vector; + } + static Vectorized loadu(const void* ptr) { + Vectorized vector; + std::memcpy(vector.values, ptr, VECTOR_WIDTH); + return vector; + } + static Vectorized loadu(const void* ptr, int64_t count) { + Vectorized vector; + std::memcpy(vector.values, ptr, count * sizeof(T)); + return vector; + } + void store(void* ptr, int count = size()) const { + std::memcpy(ptr, values, count * sizeof(T)); + } + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + int mask = 0; + for (int i = 0; i < size(); ++ i) { + if (values[i] == static_cast(0)) { + mask |= (1 << i); + } + } + return mask; + } + Vectorized isnan() const { + Vectorized vector; + for (int64_t i = 0; i != size(); i++) { + if (_isnan(values[i])) { + std::memset(static_cast(vector.values + i), 0xFF, sizeof(T)); + } else { + std::memset(static_cast(vector.values + i), 0, sizeof(T)); + } + } + return vector; + } + Vectorized map(T (*const f)(T)) const { + Vectorized ret; + for (int64_t i = 0; i != size(); i++) { + ret[i] = f(values[i]); + } + return ret; + } + Vectorized map(T (*const f)(const T &)) const { + Vectorized ret; + for (int64_t i = 0; i != size(); i++) { + ret[i] = f(values[i]); + } + return ret; + } + template ::value && !c10::is_complex::value, int>::type = 0> + Vectorized abs() const { + // other_t_abs is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same::value, "other_t_abs must be T"); + return map([](T x) -> T { return x < static_cast(0) ? -x : x; }); + } + template ::value, int>::type = 0> + Vectorized abs() const { + // float_t_abs is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "float_t_abs must be T"); + // Specifically deal with floating-point because the generic code above won't handle -0.0 (which should result in + // 0.0) properly. + return map([](T x) -> T { return std::abs(x); }); + } + template ::value, int>::type = 0> + Vectorized abs() const { + // complex_t_abs is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "complex_t_abs must be T"); + // Specifically map() does not perform the type conversion needed by abs. + return map([](T x) { return static_cast(std::abs(x)); }); + } + + template ::value, int>::type = 0> + Vectorized sgn() const { + return map(at::native::sgn_impl); + } + + template ::value, int>::type = 0> + Vectorized angle() const { + // other_t_angle is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "other_t_angle must be T"); + return map(at::native::angle_impl); // compiler is unable to resolve the overload without + } + template ::value, int>::type = 0> + Vectorized angle() const { + // complex_t_angle is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "complex_t_angle must be T"); + return map([](T x) { return static_cast(std::arg(x)); }); + } + template ::value, int>::type = 0> + Vectorized real() const { + // other_t_real is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "other_t_real must be T"); + return *this; + } + template ::value, int>::type = 0> + Vectorized real() const { + // complex_t_real is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "complex_t_real must be T"); + return map([](T x) { return static_cast(x.real()); }); + } + template ::value, int>::type = 0> + Vectorized imag() const { + // other_t_imag is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "other_t_imag must be T"); + return Vectorized(0); + } + template ::value, int>::type = 0> + Vectorized imag() const { + // complex_t_imag is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "complex_t_imag must be T"); + return map([](T x) { return static_cast(x.imag()); }); + } + template ::value, int>::type = 0> + Vectorized conj() const { + // other_t_conj is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "other_t_conj must be T"); + return *this; + } + template ::value, int>::type = 0> + Vectorized conj() const { + // complex_t_conj is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same::value, "complex_t_conj must be T"); + return map([](T x) { return static_cast(std::conj(x)); }); + } + Vectorized acos() const { + return map(std::acos); + } + Vectorized asin() const { + return map(std::asin); + } + Vectorized atan() const { + return map(std::atan); + } + Vectorized atan2(const Vectorized &exp) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::atan2(values[i], exp[i]); + } + return ret; + } + template < + typename U = T, + typename std::enable_if_t::value, int> = 0> + Vectorized copysign(const Vectorized &sign) const { + Vectorized ret; + for (size_type i = 0; i < size(); i++) { + ret[i] = c10::copysign(values[i], sign[i]); + } + return ret; + } + Vectorized erf() const { + return map(std::erf); + } + Vectorized erfc() const { + return map(std::erfc); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return map(std::exp); + } + Vectorized expm1() const { + return map(std::expm1); + } + Vectorized frac() const { + return *this - this->trunc(); + } + template < + typename U = T, + typename std::enable_if_t::value, int> = 0> + Vectorized fmod(const Vectorized& q) const { + // U is for SFINAE purposes only. Make sure it is not changed. + static_assert(std::is_same::value, "U must be T"); + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::fmod(values[i], q[i]); + } + return ret; + } + Vectorized log() const { + return map(std::log); + } + Vectorized log10() const { + return map(std::log10); + } + Vectorized log1p() const { + return map(std::log1p); + } + template ::value, int>::type = 0> + Vectorized log2() const { + // other_t_log2 is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same::value, "other_t_log2 must be T"); + return map(std::log2); + } + template ::value, int>::type = 0> + Vectorized log2() const { + // complex_t_log2 is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same::value, "complex_t_log2 must be T"); + const T log_2 = T(std::log(2.0)); + return Vectorized(map(std::log))/Vectorized(log_2); + } + Vectorized ceil() const { + return map(at::native::ceil_impl); + } + Vectorized cos() const { + return map(std::cos); + } + Vectorized cosh() const { + return map(std::cosh); + } + Vectorized floor() const { + return map(at::native::floor_impl); + } + Vectorized hypot(const Vectorized &b) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::hypot(values[i], b[i]); + } + return ret; + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized igamma(const Vectorized &x) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = calc_igamma(values[i], x[i]); + } + return ret; + } + Vectorized igammac(const Vectorized &x) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = calc_igammac(values[i], x[i]); + } + return ret; + } + Vectorized neg() const { + // NB: the trailing return type is needed because we need to coerce the + // return value back to T in the case of unary operator- incuring a + // promotion + return map([](T x) -> T { return -x; }); + } + Vectorized nextafter(const Vectorized &b) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::nextafter(values[i], b[i]); + } + return ret; + } + Vectorized round() const { + // We do not use std::round because we would like to round midway numbers to the nearest even integer. + return map(at::native::round_impl); + } + Vectorized sin() const { + return map(std::sin); + } + Vectorized sinh() const { + return map(std::sinh); + } + Vectorized tan() const { + return map(std::tan); + } + Vectorized tanh() const { + return map(std::tanh); + } + Vectorized trunc() const { + return map(at::native::trunc_impl); + } + Vectorized lgamma() const { + return map(std::lgamma); + } + Vectorized sqrt() const { + return map(std::sqrt); + } + Vectorized reciprocal() const { + return map([](T x) { return (T)(1) / x; }); + } + Vectorized rsqrt() const { + return map([](T x) { return (T)1 / std::sqrt(x); }); + } + Vectorized pow(const Vectorized &exp) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::pow(values[i], exp[i]); + } + return ret; + } +private: + template + inline Vectorized binary_pred(const Vectorized& other, Op op) const { + // All bits are set to 1 if the pred is true, otherwise 0. 
+ Vectorized vector; + for (int64_t i = 0; i != size(); i++) { + if (op(values[i], other.values[i])) { + std::memset(static_cast(vector.values + i), 0xFF, sizeof(T)); + } else { + std::memset(static_cast(vector.values + i), 0, sizeof(T)); + } + } + return vector; + } + +public: + Vectorized operator==(const Vectorized& other) const { return binary_pred(other, std::equal_to()); } + Vectorized operator!=(const Vectorized& other) const { return binary_pred(other, std::not_equal_to()); } + Vectorized operator>=(const Vectorized& other) const { return binary_pred(other, std::greater_equal()); } + Vectorized operator<=(const Vectorized& other) const { return binary_pred(other, std::less_equal()); } + Vectorized operator>(const Vectorized& other) const { return binary_pred(other, std::greater()); } + Vectorized operator<(const Vectorized& other) const { return binary_pred(other, std::less()); } + +private: + template + inline Vectorized binary_pred_bool(const Vectorized& other, Op op) const { + // 1 if the pred is true, otherwise 0. + Vectorized vector; + for (int i = 0; i != size(); ++ i) { + vector[i] = static_cast(op(values[i], other.values[i])); + } + return vector; + } + +public: + Vectorized eq(const Vectorized& other) const { return binary_pred_bool(other, std::equal_to()); } + Vectorized ne(const Vectorized& other) const { return binary_pred_bool(other, std::not_equal_to()); } + Vectorized gt(const Vectorized& other) const { return binary_pred_bool(other, std::greater()); } + Vectorized ge(const Vectorized& other) const { return binary_pred_bool(other, std::greater_equal()); } + Vectorized lt(const Vectorized& other) const { return binary_pred_bool(other, std::less()); } + Vectorized le(const Vectorized& other) const { return binary_pred_bool(other, std::less_equal()); } +}; + +template Vectorized inline operator+(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] + b[i]; + } + return c; +} + +template Vectorized inline operator-(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] - b[i]; + } + return c; +} + +template Vectorized inline operator*(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] * b[i]; + } + return c; +} + +template Vectorized inline operator/(const Vectorized &a, const Vectorized &b) __ubsan_ignore_float_divide_by_zero__ { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] / b[i]; + } + return c; +} + +template Vectorized inline operator||( + const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] || b[i]; + } + return c; +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template ::value, int>::type = 0> +Vectorized inline maximum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (a[i] > b[i]) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. + c[i] = a[i]; + } + } + return c; +} + +template ::value, int>::type = 0> +Vectorized inline maximum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (std::abs(a[i]) > std::abs(b[i])) ? 
a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. + c[i] = a[i]; + } + } + return c; +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template ::value, int>::type = 0> +Vectorized inline minimum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (a[i] < b[i]) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. + c[i] = a[i]; + } + } + return c; +} + +template ::value, int>::type = 0> +Vectorized inline minimum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (std::abs(a[i]) < std::abs(b[i])) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. + c[i] = a[i]; + } + } + return c; +} + +template ::value, int>::type = 0> +Vectorized inline clamp(const Vectorized &a, const Vectorized &min_vec, const Vectorized &max_vec) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = std::min(std::max(a[i], min_vec[i]), max_vec[i]); + } + return c; +} + +template ::value, int>::type = 0> +Vectorized inline clamp_max(const Vectorized &a, const Vectorized &max_vec) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] > max_vec[i] ? max_vec[i] : a[i]; + } + return c; +} + +template ::value, int>::type = 0> +Vectorized inline clamp_min(const Vectorized &a, const Vectorized &min_vec) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] < min_vec[i] ? 
min_vec[i] : a[i]; + } + return c; +} + +struct Vectorizedi; + +#if defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512) +template +static inline Vectorized bitwise_binary_op(const Vectorized &a, const Vectorized &b, Op op) { + int_vector buffer; +#if defined(CPU_CAPABILITY_AVX2) + int_vector a_buffer = _mm256_load_si256(reinterpret_cast((const T*)a)); + int_vector b_buffer = _mm256_load_si256(reinterpret_cast((const T*)b)); +#elif defined(CPU_CAPABILITY_AVX512) + int_vector a_buffer = _mm512_load_si512(reinterpret_cast((const T*)a)); + int_vector b_buffer = _mm512_load_si512(reinterpret_cast((const T*)b)); +#endif + buffer = op(a_buffer, b_buffer); + __at_align__ T results[Vectorized::size()]; + +#if defined(CPU_CAPABILITY_AVX2) + _mm256_store_si256(reinterpret_cast(results), buffer); +#elif defined(CPU_CAPABILITY_AVX512) + _mm512_store_si512(reinterpret_cast(results), buffer); +#endif + return Vectorized::loadu(results); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_and_si512 or _mm256_and_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_and_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_and_si512(a, b); }); +#endif +} +template>::value, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_or_si512 or _mm256_or_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_or_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_or_si512(a, b); }); +#endif +} +template>::value, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_xor_si512 or _mm256_xor_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_xor_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_xor_si512(a, b); }); +#endif +} + +#else + +template +auto load(char const* data) -> T { + T ret; + std::memcpy(&ret, data, sizeof(ret)); + return ret; +} + +template +static inline Vectorized bitwise_binary_op(const Vectorized &a, const Vectorized &b, Op op) { + static constexpr uint32_t element_no = VECTOR_WIDTH / sizeof(intmax_t); + __at_align__ intmax_t buffer[element_no]; + static_assert(VECTOR_WIDTH % sizeof(intmax_t) == 0, "VECTOR_WIDTH not a multiple of sizeof(intmax_t)"); + static_assert(sizeof(buffer) == sizeof(Vectorized), "sizeof(buffer) must match sizeof(Vectorized)"); + // We should be using memcpy in order to respect the strict aliasing rule + // see: https://github.com/pytorch/pytorch/issues/66119 + // Using char* is defined in the C11 standard 6.5 Expression paragraph 7 + // (http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf) + const auto* a_data = a.as_bytes(); + const auto* b_data = b.as_bytes(); + // load each intmax_t chunk and process; increase pointers by sizeof(intmax_t) + for (auto& out : buffer) { + out = op(load(a_data), load(b_data)); + a_data += sizeof(intmax_t); + b_data += sizeof(intmax_t); + } + assert(a_data == a.as_bytes() + sizeof(a)); + 
assert(b_data == b.as_bytes() + sizeof(b)); + return Vectorized::loadu(buffer); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_and()); +} +template>::value, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_or()); +} +template>::value, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_xor()); +} + +#endif // defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512) + +template>::value, int> = 0> +inline Vectorized operator~(const Vectorized& a) { + Vectorized ones; // All bits are 1 + memset((T*) ones, 0xFF, VECTOR_WIDTH); + return a ^ ones; +} + + +template +inline Vectorized& operator += (Vectorized& a, const Vectorized& b) { + a = a + b; + return a; +} +template +inline Vectorized& operator -= (Vectorized& a, const Vectorized& b) { + a = a - b; + return a; +} +template +inline Vectorized& operator /= (Vectorized& a, const Vectorized& b) { + a = a / b; + return a; +} +template +inline Vectorized& operator %= (Vectorized& a, const Vectorized& b) { + a = a % b; + return a; +} +template +inline Vectorized& operator *= (Vectorized& a, const Vectorized& b) { + a = a * b; + return a; +} + +template +inline Vectorized fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return a * b + c; +} + +template +std::enable_if_t> +inline gather(T const* base_addr, const Vectorized>& vindex) { + static constexpr int size = Vectorized::size(); + int_same_size_t index_arr[size]; + vindex.store(static_cast(index_arr)); + T buffer[size]; + for (const auto i : c10::irange(size)) { + buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)]; + } + return Vectorized::loadu(static_cast(buffer)); +} + +template +std::enable_if_t> +inline mask_gather(const Vectorized& src, T const* base_addr, + const Vectorized>& vindex, Vectorized& mask) { + static constexpr int size = Vectorized::size(); + T src_arr[size]; + int_same_size_t mask_arr[size]; // use int type so we can logical and + int_same_size_t index_arr[size]; + src.store(static_cast(src_arr)); + mask.store(static_cast(mask_arr)); + vindex.store(static_cast(index_arr)); + T buffer[size]; + for (const auto i : c10::irange(size)) { + if (mask_arr[i] & 0x01) { // check highest bit + buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)]; + } else { + buffer[i] = src_arr[i]; + } + } + mask = Vectorized(); // "zero out" mask + return Vectorized::loadu(static_cast(buffer)); +} + +// Cast a given vector to another type without changing the bits representation. +// So a Vectorized of 512 bits containing all ones can be cast to a +// Vectorized of 512 bits containing all ones (i.e., eight negative 1s). +// A Vec of 256 bits containing all ones can be cast to a +// Vec of 256 bits containing all ones (i.e., four negative 1s). +// There is a struct here because we don't have static_if and I can't +// partially specialize a templated function. 
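Before the CastImpl definition that follows, a minimal standalone illustration of what such a bits-preserving cast means, shown on scalars via std::memcpy rather than on Vectorized (a hedged sketch; bit_cast_sketch is an assumed helper name, not part of this header):

// Hedged sketch: "cast without changing the bit representation" on scalars.
// memcpy between equally sized objects is the portable way to reinterpret
// bits, for the same strict-aliasing reasons noted earlier in this header.
#include <cstdint>
#include <cstring>

template <typename To, typename From>
To bit_cast_sketch(const From& src) {
  static_assert(sizeof(To) == sizeof(From), "sizes must match");
  To dst;
  std::memcpy(&dst, &src, sizeof(To));  // copies bits, never reinterprets pointers
  return dst;
}

// Mirroring the all-ones example in the comment above: a double whose bits
// are all 1 reinterprets as the int64_t value -1.
// int64_t x = bit_cast_sketch<int64_t>(all_ones_double);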
+template +struct CastImpl { + static inline Vectorized apply(const Vectorized& src) { + src_t src_arr[Vectorized::size()]; + src.store(static_cast(src_arr)); + return Vectorized::loadu(static_cast(src_arr)); + } +}; + +template +struct CastImpl { + static inline Vectorized apply(const Vectorized& src) { + return src; + } +}; + +template +inline Vectorized cast(const Vectorized& src) { + return CastImpl::apply(src); +} + +template +inline Vectorized> convert_to_int_of_same_size(const Vectorized& src) { + static constexpr int size = Vectorized::size(); + T src_arr[size]; + src.store(static_cast(src_arr)); + int_same_size_t buffer[size]; + for (const auto i : c10::irange(size)) { + buffer[i] = static_cast>(src_arr[i]); + } + return Vectorized>::loadu(static_cast(buffer)); +} + +// Example inputs for AVX512: +// a Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} +// b Vectorized = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} +// returns: +// Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} +// Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} +// Example inputs for AVX2: a Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3} +// b Vectorized = {a4, b4, a5, b5, a6, b6, a7, b7} +// returns: Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7} +// Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7} +template +inline std::enable_if_t::size() % 2 == 0, std::pair, Vectorized>> +deinterleave2(const Vectorized& a, const Vectorized& b) { + static constexpr int size = Vectorized::size(); + static constexpr int half_size = size / 2; + T a_arr[size]; + T b_arr[size]; + T buffer1[size]; + T buffer2[size]; + a.store(static_cast(a_arr)); + b.store(static_cast(b_arr)); + for (const auto i : c10::irange(half_size)) { + buffer1[i] = a_arr[i * 2]; + buffer1[half_size + i] = b_arr[i * 2]; + buffer2[i] = a_arr[i * 2 + 1]; + buffer2[half_size + i] = b_arr[i * 2 + 1]; + } + return std::make_pair(Vectorized::loadu(static_cast(buffer1)), + Vectorized::loadu(static_cast(buffer2))); +} + +// inverse operation of deinterleave2 +// Example inputs for AVX512: +// a Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} +// b Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} +// returns, for AVX512: +// Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} +// Vectorized = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} +// Example inputs for AVX2 : a Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7} +// b Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7} +// returns: Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3} +// Vectorized = {a4, b4, a5, b5, a6, b6, a7, b7} +template +inline std::enable_if_t::size() % 2 == 0, std::pair, Vectorized>> +interleave2(const Vectorized& a, const Vectorized& b) { + static constexpr int size = Vectorized::size(); + static constexpr int half_size = size / 2; + T a_arr[size]; + T b_arr[size]; + T buffer1[size]; + T buffer2[size]; + a.store(static_cast(a_arr)); + b.store(static_cast(b_arr)); + for (const auto i : c10::irange(half_size)) { + buffer1[i * 2] = a_arr[i]; + buffer1[i * 2 + 1] = b_arr[i]; + buffer2[i * 2] = a_arr[half_size + i]; + buffer2[i * 2 + 1] = b_arr[half_size + i]; + } + return std::make_pair(Vectorized::loadu(static_cast(buffer1)), + Vectorized::loadu(static_cast(buffer2))); +} + +template +inline void convert(const src_T *src, dst_T *dst, int64_t n) { +#ifndef 
_MSC_VER +# pragma unroll +#endif + for (const auto i : c10::irange(n)) { + (void)i; // Suppress unused variable warning + *dst = c10::convert(c10::load(src)); + src++; + dst++; + } +} + +}}} diff --git a/voice_bridge/torch/include/ATen/cpu/vml.h b/voice_bridge/torch/include/ATen/cpu/vml.h new file mode 100644 index 0000000000000000000000000000000000000000..d8d0a1544ccdb10c5815170239d832190d42261b --- /dev/null +++ b/voice_bridge/torch/include/ATen/cpu/vml.h @@ -0,0 +1,207 @@ +#pragma once + +#include +#include +#include +#include +#include + +// This header implements various unary operations using an MKL VML style +// interface. + +// It implements various functions with a simple interface. +// For example, it enables the user to call vsin(float* out, const float* in, +// size). This function takes a pointer to a contiguous output array of floats and +// a constant input array. It will then apply sin to each value in the input +// array and write the result into the output array. out and in may point to the +// same memory, i.e. this fully supports in-place operations. These functions +// also implement their own parallelization, so take precautions when calling +// these from threaded functions. + +// When MKL is available, it will call into MKL's VML library, similar to NumPy. +// If MKL is not available, it will use SLEEF. + +// This file might be compiled under AVX or AVX2 when called from e.g. +// UnaryOpsKernel.cpp + +#include +#include +#include +#include +#include + +#if AT_MKL_ENABLED() && !defined(__APPLE__) +#include +#endif + +#define DL_RUNTIME_BUG(op, type_) +#define DL_RUNTIME_BUG_BFLOAT16() + +namespace at { +namespace vml { +inline namespace CPU_CAPABILITY { + +using namespace vec; + +template +inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) { + parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) { + map( + [](const Vectorized& x) { + return Vectorized((scalar_t)(1)) / x.sqrt(); + }, + out + begin, + in + begin, + end - begin); + }); +} + +// NB: We ignore numerical errors by convention and leave them to the user + +// We unfortunately need to duplicate code here to deal with the SSE-AVX +// transition bug (see [Note SSE-AVX transitions]). As soon as we can expect +// users to use a version of glibc newer than 2.23 we will be able to ditch +// this. This duplication is also necessary since not all functions (e.g. rsqrt) +// might be part of cmath.
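A minimal sketch of the vml-style contract described above: an elementwise unary op over contiguous arrays that is safe when out == in. This serial loop only models the interface; the real header chunks the range with parallel_for and maps over Vectorized lanes, and vop_sketch is an assumed illustrative name.

// Hedged sketch of the vml-style interface: apply op to each element of a
// contiguous input array, writing a contiguous output array of equal size.
// out may alias in (in-place use), since in[i] is read before out[i] is written.
#include <cmath>
#include <cstdint>

template <typename scalar_t, typename Op>
void vop_sketch(scalar_t* out, const scalar_t* in, int64_t size, Op op) {
  for (int64_t i = 0; i < size; ++i) {
    out[i] = op(in[i]);
  }
}

// e.g. a vsin-alike:
// vop_sketch(out, in, n, [](float x) { return std::sin(x); });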
+ +// for BFloat16, we need specialize it, the reason is that avx/avx2 and glic=2.23, +// we can't give DL_RUNTIME_BUG volatile type in x = std::op(x); + +#define IMPLEMENT_VML_BUG(op) \ + template \ + inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \ + DL_RUNTIME_BUG(op, scalar_t) \ + parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) { \ + map([](const Vectorized& x) { return x.op(); }, \ + out + begin, \ + in + begin, \ + end - begin); \ + }); \ + } \ + template <> \ + inline void v##op( \ + c10::BFloat16* out, const c10::BFloat16* in, int64_t size) { \ + parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) { \ + DL_RUNTIME_BUG_BFLOAT16() \ + using vecscalar_t = vec_scalar_t; \ + map([](const Vectorized& x) { return x.op(); }, \ + out + begin, \ + in + begin, \ + end - begin); \ + }); \ + } + +#define IMPLEMENT_VML(op) \ + template \ + inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \ + parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) { \ + using vecscalar_t = vec_scalar_t; \ + map([](const Vectorized& x) { return x.op(); }, \ + out + begin, \ + in + begin, \ + end - begin); \ + }); \ + } + +IMPLEMENT_VML(abs) +IMPLEMENT_VML(acos) +IMPLEMENT_VML(asin) +IMPLEMENT_VML(atan) +IMPLEMENT_VML(ceil) +IMPLEMENT_VML(cos) +// IMPLEMENT_VML_BUG(cosh) +IMPLEMENT_VML(erf) +IMPLEMENT_VML(erfc) +IMPLEMENT_VML(erfinv) +IMPLEMENT_VML(exp) +IMPLEMENT_VML(expm1) +IMPLEMENT_VML(floor) +IMPLEMENT_VML(i0) +IMPLEMENT_VML(i0e) +IMPLEMENT_VML(reciprocal) +IMPLEMENT_VML(log) +IMPLEMENT_VML(log10) +IMPLEMENT_VML(log1p) +IMPLEMENT_VML(log2) +IMPLEMENT_VML(neg) +IMPLEMENT_VML(sin) +// IMPLEMENT_VML_BUG(sinh) +IMPLEMENT_VML(sqrt) +IMPLEMENT_VML(round) +IMPLEMENT_VML(rsqrt) +IMPLEMENT_VML(tan) +IMPLEMENT_VML(tanh) +IMPLEMENT_VML(trunc) +IMPLEMENT_VML(lgamma) + + +#if AT_MKL_ENABLED() && !defined(__APPLE__) + +// NB: LP64 MKL is the most commonly used and thus we assume it here. That means +// we need to expect MKL_INT to be of type int, which implies int32_t in most +// cases. 
+static_assert( + std::is_same::value, + "MKL_INT is assumed to be int32_t"); +#define IMPLEMENT_VML_MKL_STUB(op, mklop, type, mkltype) \ + template <> \ + inline void v##op(type * out, const type * in, int64_t size) { \ + int64_t max_mkl_ind = std::numeric_limits::max(); \ + if (size <= static_cast(max_mkl_ind)) { \ + vm##mkltype##mklop( \ + size, in, out, VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } else { \ + MKL_INT ind = 0; \ + int64_t chunks = size / max_mkl_ind; \ + int64_t rest = size % max_mkl_ind; \ + for (; ind < chunks; ind++) { \ + vm##mkltype##mklop( \ + max_mkl_ind, \ + in + ind * max_mkl_ind, \ + out + ind * max_mkl_ind, \ + VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } \ + vm##mkltype##mklop( \ + rest, \ + in + ind * max_mkl_ind, \ + out + ind * max_mkl_ind, \ + VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } \ + } + +#define IMPLEMENT_VML_MKL(op, mklop) \ + IMPLEMENT_VML_MKL_STUB(op, mklop, float, s) \ + IMPLEMENT_VML_MKL_STUB(op, mklop, double, d) + +// NB: abs, cosh and sinh were temporarily disabled due to issues with Apple +// NB: expm1 is disabled because on some configs it produces expm1(nan)=-1 +IMPLEMENT_VML_MKL(abs, Abs) +IMPLEMENT_VML_MKL(acos, Acos) +IMPLEMENT_VML_MKL(asin, Asin) +IMPLEMENT_VML_MKL(atan, Atan) +IMPLEMENT_VML_MKL(cos, Cos) +// IMPLEMENT_VML_MKL(cosh, Cosh) +IMPLEMENT_VML_MKL(erf, Erf) +IMPLEMENT_VML_MKL(erfc, Erfc) +IMPLEMENT_VML_MKL(erfinv, ErfInv) +IMPLEMENT_VML_MKL(exp, Exp) +// IMPLEMENT_VML_MKL(expm1, Expm1) +IMPLEMENT_VML_MKL(log, Ln) +IMPLEMENT_VML_MKL(log10, Log10) +IMPLEMENT_VML_MKL(log1p, Log1p) +IMPLEMENT_VML_MKL(sin, Sin) +// IMPLEMENT_VML_MKL(sinh, Sinh) +IMPLEMENT_VML_MKL(sqrt, Sqrt) +IMPLEMENT_VML_MKL(tan, Tan) +IMPLEMENT_VML_MKL(tanh, Tanh) +IMPLEMENT_VML_MKL(trunc, Trunc) + +#if INTEL_MKL_VERSION >= 20180406 +IMPLEMENT_VML_MKL(log2, Log2) +#endif + +#endif + +} // namespace +} // namespace vml +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/ATenCUDAGeneral.h b/voice_bridge/torch/include/ATen/cuda/ATenCUDAGeneral.h new file mode 100644 index 0000000000000000000000000000000000000000..c64643546a2c1097a7a323dafc6cf5079d1b2fd9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/ATenCUDAGeneral.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include +#include + +#include + +// Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder diff --git a/voice_bridge/torch/include/ATen/cuda/ApplyGridUtils.cuh b/voice_bridge/torch/include/ATen/cuda/ApplyGridUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..cbedcdfb21653a982c42f28ef6483a09b0bbd22d --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/ApplyGridUtils.cuh @@ -0,0 +1,47 @@ +#include + +#include + +namespace at { namespace cuda { + +/** + Computes ceil(a / b) +*/ +template +__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) { + return (a + b - 1) / b; +} + +namespace { + +// Threads per block for our apply kernel +// FIXME: use occupancy calculator instead +constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512; +constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4; + +template +inline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + if (curDevice == -1) return false; + uint64_t numel_per_thread = static_cast(max_threads_per_block) * static_cast(step); + uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread); + uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0]; + if (numBlocks 
> maxGridX) + numBlocks = maxGridX; + grid = dim3(numBlocks); + return true; +} + +constexpr int getApplyBlocksPerSM() { + return AT_APPLY_BLOCKS_PER_SM; +} + +constexpr int getApplyBlockSize() { + return AT_APPLY_THREADS_PER_BLOCK; +} + +inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + return dim3(max_threads_per_block); +} + +} +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/AsmUtils.cuh b/voice_bridge/torch/include/ATen/cuda/AsmUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7c6ee2be734553de51c99fe0fd6b8b9060f23138 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/AsmUtils.cuh @@ -0,0 +1,150 @@ +#pragma once +#include + +// Collection of direct PTX functions + +namespace at { +namespace cuda { + +template +struct Bitfield {}; + +template <> +struct Bitfield { + static __device__ __host__ __forceinline__ + unsigned int getBitfield(unsigned int val, int pos, int len) { +#if !defined(__CUDA_ARCH__) + pos &= 0xff; + len &= 0xff; + + unsigned int m = (1u << len) - 1u; + return (val >> pos) & m; +#else + unsigned int ret; + asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len)); + return ret; +#endif + } + + static __device__ __host__ __forceinline__ + unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) { +#if !defined(__CUDA_ARCH__) + pos &= 0xff; + len &= 0xff; + + unsigned int m = (1u << len) - 1u; + toInsert &= m; + toInsert <<= pos; + m <<= pos; + + return (val & ~m) | toInsert; +#else + unsigned int ret; + asm("bfi.b32 %0, %1, %2, %3, %4;" : + "=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len)); + return ret; +#endif + } +}; + +template <> +struct Bitfield { + static __device__ __host__ __forceinline__ + uint64_t getBitfield(uint64_t val, int pos, int len) { +#if !defined(__CUDA_ARCH__) + pos &= 0xff; + len &= 0xff; + + uint64_t m = (1u << len) - 1u; + return (val >> pos) & m; +#else + uint64_t ret; + asm("bfe.u64 %0, %1, %2, %3;" : "=l"(ret) : "l"(val), "r"(pos), "r"(len)); + return ret; +#endif + } + + static __device__ __host__ __forceinline__ + uint64_t setBitfield(uint64_t val, uint64_t toInsert, int pos, int len) { +#if !defined(__CUDA_ARCH__) + pos &= 0xff; + len &= 0xff; + + uint64_t m = (1u << len) - 1u; + toInsert &= m; + toInsert <<= pos; + m <<= pos; + + return (val & ~m) | toInsert; +#else + uint64_t ret; + asm("bfi.b64 %0, %1, %2, %3, %4;" : + "=l"(ret) : "l"(toInsert), "l"(val), "r"(pos), "r"(len)); + return ret; +#endif + } +}; + +__device__ __forceinline__ int getLaneId() { +#if defined(USE_ROCM) + return __lane_id(); +#else + int laneId; + asm("mov.s32 %0, %%laneid;" : "=r"(laneId) ); + return laneId; +#endif +} + +#if defined(USE_ROCM) +__device__ __forceinline__ unsigned long long int getLaneMaskLt() { + const std::uint64_t m = (1ull << getLaneId()) - 1ull; + return m; +} +#else +__device__ __forceinline__ unsigned getLaneMaskLt() { + unsigned mask; + asm("mov.u32 %0, %%lanemask_lt;" : "=r"(mask)); + return mask; +} +#endif + +#if defined (USE_ROCM) +__device__ __forceinline__ unsigned long long int getLaneMaskLe() { + std::uint64_t m = UINT64_MAX >> (sizeof(std::uint64_t) * CHAR_BIT - (getLaneId() + 1)); + return m; +} +#else +__device__ __forceinline__ unsigned getLaneMaskLe() { + unsigned mask; + asm("mov.u32 %0, %%lanemask_le;" : "=r"(mask)); + return mask; +} +#endif + +#if defined(USE_ROCM) +__device__ __forceinline__ unsigned long long int getLaneMaskGt() { + const std::uint64_t m = getLaneMaskLe(); + 
return m ? ~m : m; +} +#else +__device__ __forceinline__ unsigned getLaneMaskGt() { + unsigned mask; + asm("mov.u32 %0, %%lanemask_gt;" : "=r"(mask)); + return mask; +} +#endif + +#if defined(USE_ROCM) +__device__ __forceinline__ unsigned long long int getLaneMaskGe() { + const std::uint64_t m = getLaneMaskLt(); + return ~m; +} +#else +__device__ __forceinline__ unsigned getLaneMaskGe() { + unsigned mask; + asm("mov.u32 %0, %%lanemask_ge;" : "=r"(mask)); + return mask; +} +#endif + +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/Atomic.cuh b/voice_bridge/torch/include/ATen/cuda/Atomic.cuh new file mode 100644 index 0000000000000000000000000000000000000000..42975411e841e1c3de1c2818dd212adf9c98aa3e --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/Atomic.cuh @@ -0,0 +1,502 @@ +#pragma once + +#include +#include +#include + +#include + +template +struct AtomicFPOp; + +template <> +struct AtomicFPOp { + template + inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::Half hsum; + do { + assumed = old; + hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + hsum = func(hsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + return hsum; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::BFloat16 bsum; + do { + assumed = old; + bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + bsum = func(bsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + bsum.x = (size_t)address & 2 ? 
(old >> 16) : (old & 0xffff); + return bsum.x; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ double operator() (double * address, double val, const func_t& func) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull; + unsigned long long int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, func(val, assumed)); + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __longlong_as_double(old); + } +}; + +#define ATOMIC_INTEGER_IMPL(NAME) \ +template \ +struct Atomic##NAME##IntegerImpl; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 3; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + uint32_t old = *address_as_ui; \ + uint32_t shift = offset * 8; \ + uint32_t old_byte; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_byte = (old >> shift) & 0xff; \ + newval = static_cast(func(val, static_cast(old_byte))); \ + newval = (old & ~(0x000000ff << shift)) | (newval << shift); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 2; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + bool is_32_align = offset; \ + uint32_t old = *address_as_ui; \ + uint32_t old_bytes; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_bytes = is_32_align ? old >> 16 : old & 0xffff; \ + newval = static_cast(func(val, static_cast(old_bytes))); \ + newval = is_32_align ? 
(old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + uint32_t * address_as_ui = (uint32_t *) (address); \ + uint32_t old = *address_as_ui; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + unsigned long long * address_as_ui = (unsigned long long *) (address); \ + unsigned long long old = *address_as_ui; \ + unsigned long long newval; \ + unsigned long long assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; + + +# define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \ +static inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \ +Atomic##NAME##IntegerImpl()(address, \ + val, \ + [](DTYPE a, DTYPE b) { \ + return OP; \ + }); \ +} \ + +ATOMIC_INTEGER_IMPL(Add) +GPU_ATOMIC_INTEGER(Add, a || b, bool) + +// Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32, int64) +static inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](uint8_t a, uint8_t b) { + return a + b; + }); +} + +static inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int8_t a, int8_t b) { + return a + b; + }); +} + +static inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int16_t a, int16_t b) { + return a + b; + }); +} + +static inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) { + return atomicAdd(address, val); +} + +static inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) { +#if defined(USE_ROCM) + __atomic_fetch_add(address, val, __ATOMIC_RELAXED); +#else + AtomicAddIntegerImpl()(address, + val, + [](int64_t a, int64_t b) { + return a + b; + }); +#endif +} + +static inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) { +#if defined(USE_ROCM) || ((defined(CUDA_VERSION) && CUDA_VERSION < 10000) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700))) + return AtomicFPOp()(address, val, + [](at::Half hsum, at::Half val) { + return hsum + val; + }); +#else + return atomicAdd(reinterpret_cast<__half*>(address), val); +#endif +} + +static inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum + val; + }); +} + +#if defined(CUDA_VERSION) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600 || CUDA_VERSION < 8000) +// from CUDA C Programmic Guide +static inline __device__ double atomicAdd(double* address, double val) +#if defined(__clang__) && defined(__CUDA__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wgcc-compat" + __attribute__((enable_if(true, ""))) +#pragma GCC diagnostic pop +#endif +{ + + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return 
__double_as_longlong(val + __longlong_as_double(assumed)); + }); +} +#elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__) && (defined(CUDA_VERSION) && CUDA_VERSION < 8000)) + +/* Note [hip-clang differences to hcc] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The upcoming hip-clang compiler for ROCm differs from hcc in a few details. + * It exports the __HIP__ macro, we can hence differentiate between hcc and + * hip-clang. In the below, hcc only received support for atomicAdd with double + * typing after work week 18312. hip-clang had support from the first version. + * In general, the code-visible differences between hip-clang and hcc will be + * minimal. + */ + +#if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__ + // This needs to be defined for the host side pass + static inline __device__ double atomicAdd(double *address, double val) { } +#endif +#endif + +static inline __device__ double gpuAtomicAdd(double *address, double val) { + return atomicAdd(address, val); +} + +static inline __device__ float gpuAtomicAdd(float *address, float val) { + return atomicAdd(address, val); +} + +template +static inline __device__ void gpuAtomicAdd(c10::complex *address, c10::complex val) { + gpuAtomicAdd(&address->real_, val.real_); + gpuAtomicAdd(&address->imag_, val.imag_); +} + +/* Note [gpuAtomicAdd vs atomicAdd] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Some extensions such as torchvision call atomicAdd() + * directly and require non-library provided data type support. Only for these, we + * continue to provide atomicAdd overloads. + */ +static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) { + return gpuAtomicAdd(address, val); +} + +static inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) { + return gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(uint8_t *address, uint8_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int8_t *address, int8_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int16_t *address, int16_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(int64_t *address, int64_t val) { + gpuAtomicAdd(address, val); +} + +static inline __device__ void atomicAdd(bool *address, bool val) { + gpuAtomicAdd(address, val); +} + +/* Note [explicitly non-returning atomics] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet(). + * Due to compiler limitations, callers must opt-in to guarantee the optimized instruction. + * This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd, + * therefore we need a new API 'gpuAtomicAddNoReturn'. 
+ */ +template +static inline __device__ void gpuAtomicAddNoReturn(c10::complex *address, c10::complex val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); } +static inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); } + +/* Special case fp32 atomic. */ +#if defined(USE_ROCM) +static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { atomicAddNoRet(address, val); } +#else +static inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); } +#endif + +// Atomic multiplication implementation. + +ATOMIC_INTEGER_IMPL(Mul) +GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int16_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int32_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int64_t) + +inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return bsum * val; + }); +} + +inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum * val; + }); +} + +inline __device__ double gpuAtomicMul(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(val * __longlong_as_double(assumed)); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMul (float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(val * + __int_as_float(assumed))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic maximum implementation. + +template +__host__ __device__ T safe_max(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b)); + #else + T max = at::_isnan(b) ? 
b : std::max(a, b); + #endif + + return max; +} + +ATOMIC_INTEGER_IMPL(Max) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_max(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_max(bsum, val); + }); +} + +inline __device__ double gpuAtomicMax(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_max(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMax(float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(safe_max(val, __int_as_float(assumed)))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic minimum implementation. + +template +__host__ __device__ T safe_min(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b)); + #else + T min = at::_isnan(b) ? b : std::min(a, b); + #endif + + return min; +} + +ATOMIC_INTEGER_IMPL(Min) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_min(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_min(bsum, val); + }); +} + +inline __device__ double gpuAtomicMin(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_min(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. 
+
+// Don't use a templated function for this since the addition function defaults to the CUDA built-in.
+inline __device__ float gpuAtomicMin(float * address, float val) {
+  unsigned int* address_as_ull = (unsigned int*)address;
+  unsigned int old = *address_as_ull;
+  unsigned int assumed;
+
+  do {
+    assumed = old;
+    old = atomicCAS(address_as_ull, assumed,
+                    __float_as_int(safe_min(val, __int_as_float(assumed))));
+
+    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
+  } while (assumed != old);
+
+  return __int_as_float(old);
+}
diff --git a/voice_bridge/torch/include/ATen/cuda/CUDAApplyUtils.cuh b/voice_bridge/torch/include/ATen/cuda/CUDAApplyUtils.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..6a8ca194397da3681b19a9b393372fae606e3c24
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/CUDAApplyUtils.cuh
@@ -0,0 +1,539 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+//
+// This file contains pointwise operation functions and kernels that
+// work on both contiguous and non-contiguous tensor arguments of
+// arbitrary (up to MAX_CUTORCH_DIMS) dimensioned arguments without
+// copying or temporary storage.
+//
+
+/*
+  NOTE [ CUDA_tensor_applyN helpers ]
+
+  The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4)
+  functions apply a pointwise operator to N tensor(s).
+
+  The calling convention is
+
+  1. The template arguments should be, sequentially,
+    - First N typename args specify the scalar types of each of the N tensors.
+    - (Optional) `int step` arg specifies the number of elements processed
+      together at the same time.
+      Default is 1.
+    - A usually omitted (i.e., inferred) typename arg specifies the type of the
+      function/functor applied on `N * step` values in each iteration of each
+      CUDA thread.
+  2. The arguments should be, sequentially,
+    - N tensors
+    - op: a function/functor that processes `N * step` values at the same time.
+      - If `step == 1`, it must have signature
+        `void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where
+        `scalar*_t`s are the first N typename template args, and the inputs
+        are the `N` values from the `N` tensors retrieved at a common index.
+      - Otherwise, it must have signature
+          void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&,  // repeat `step` times
+                         scalar2_t&, scalar2_t&, ..., scalar2_t&,  // repeat `step` times
+                         ...,
+                         scalarN_t&, scalarN_t&, ..., scalarN_t&)  // repeat `step` times
+        Different from the `step == 1` case, it processes `N * step` values taken
+        from `step` common indices. Moreover, the first input `n` represents the
+        number of valid indices (it will always have `0 < n <= step`). It will
+        almost always be `step`, but at the boundary we may not have full `step`
+        elements and `n` can be a lesser value.
+
+        E.g., if `step == 4` and `N == 2`, `op` could be
+
+          [](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4,
+                    scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) {
+            // Only process u1, ..., un and v1, ..., vn.
+            // So if `n == 3`, `u4` and `v4` need not be considered.
+          }
+
+      In both cases, the references can actually be const, but at least one of
+      them should be non-const in order to write the output.
+    - (Optional, but recommended) N TensorArgType args that specify for each
+      tensor whether `op` reads AND writes (i.e., TensorArgType::ReadWrite),
+      or only reads (i.e., TensorArgType::ReadOnly).
+      Default is TensorArgType::ReadWrite for the first Tensor, and
+      TensorArgType::ReadOnly for the rest.
+
+  E.g.,
+
+  to compute a = b^2 for a and b of same dtype, we can call
+
+  CUDA_tensor_apply2<scalar, scalar>(
+    a, b,
+    [] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; }
+  );
+
+  to work on 2 values at the same time, we can call
+
+  CUDA_tensor_apply2<scalar1, scalar2, 2>(
+    a, b,
+    [] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2,
+                          const scalar2 &b_val1, const scalar2 &b_val2) {
+      // call special vectorized op here, or just do elementwise and enjoy unrolling...
+      // if n == 1, only process a_val1 and b_val1
+    }
+  );
+*/
+
+namespace at {
+namespace cuda {
+
+// TODO: combine with TensorArg?  So far that's been for debugging, and this is functional...
+enum class TensorArgType { ReadWrite, ReadOnly };
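Putting the convention above together, a complete caller might look like this (hypothetical sketch: `a` and `b` are same-shaped float CUDA tensors, step == 1, and the default ReadWrite/ReadOnly argument types apply):

// Clamp `a` against `b` in place; the two template args are the scalar
// types of `a` and `b`, and the lambda receives one value from each tensor.
at::cuda::CUDA_tensor_apply2<float, float>(
    a, b,
    [] __device__ (float& a_val, const float& b_val) {
      a_val = a_val < b_val ? a_val : b_val;
    });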
+
+namespace {
+
+// Rearrange dimensions for pointwise operations so that strides are in
+// decreasing order as much as possible, so that kernels have better memory
+// access patterns.
+//
+// For example, consider a binary operation on two "transposed" 2-dim tensors:
+//    sizes:          256 512
+//    aInfo->strides:   1 256
+//    bInfo->strides:   1 256
+//
+// Given this, each concurrent memory access inside kernelPointwiseApply2() is
+// exactly 256 elements apart, resulting in poor performance.
+//
+// This function exchanges dimensions so that memory access is contiguous:
+//    sizes:          512 256
+//    aInfo->strides: 256   1
+//    bInfo->strides: 256   1
+//
+// (Actually, it becomes even better because now collapseDims() can turn each
+// input into one contiguous array.)
+//
+// In general, given M (<=4) TensorInfo's with N dimensions, we can view each
+// strides[i] (0 <= i < N) as an M-tuple.  Given each pair i < j, we exchange
+// strides[i] and [j] if
+//    (1) strides[i][k] < strides[j][k] for some k (0 <= k < M)
+//        (exchanging them will benefit input #k), and
+//    (2) strides[i][k] <= strides[j][k] for all k
+//        (exchanging them will not make any input worse).
+template <typename T1, typename IndexType,
+          typename T2 = void, typename T3 = void, typename T4 = void>
+inline void rearrangeDims(detail::TensorInfo<T1, IndexType>* aInfo,
+                          detail::TensorInfo<T2, IndexType>* bInfo = nullptr,
+                          detail::TensorInfo<T3, IndexType>* cInfo = nullptr,
+                          detail::TensorInfo<T4, IndexType>* dInfo = nullptr) {
+  int numInfos = 1;
+  int dims = aInfo->dims;
+  IndexType *sizes[4] = { aInfo->sizes, };
+  IndexType *strides[4] = { aInfo->strides, };
+
+  if (bInfo != nullptr) {
+    ++numInfos;
+    if (bInfo->dims != dims) return;
+    sizes[1] = bInfo->sizes;
+    strides[1] = bInfo->strides;
+  }
+
+  if (cInfo != nullptr) {
+    ++numInfos;
+    if (cInfo->dims != dims) return;
+    sizes[2] = cInfo->sizes;
+    strides[2] = cInfo->strides;
+  }
+
+  if (dInfo != nullptr) {
+    ++numInfos;
+    if (dInfo->dims != dims) return;
+    sizes[3] = dInfo->sizes;
+    strides[3] = dInfo->strides;
+  }
+
+  // Bail out if sizes do not match: we are using "deprecated pointwise
+  // behavior" among tensors of different shapes but same number of elements.
+  for (int i = 1; i < numInfos; ++i) {
+    for (int j = 0; j < dims; ++j) {
+      if (sizes[i][j] != sizes[0][j]) return;
+    }
+  }
+
+  for (int i = 0; i < dims - 1; ++i) {
+    // No need to consider dimensions of size 1.
+    if (sizes[0][i] == 1) continue;
+
+    for (int j = i + 1; j < dims; ++j) {
+      if (sizes[0][j] == 1) continue;
+
+      // Compare the relative sizes of strides between dim #i and dim #j.
+      bool hasIncreasingStrides = false;
+      bool hasDecreasingStrides = false;
+
+      for (int k = 0; k < numInfos; k++) {
+        IndexType stride_i = strides[k][i];
+        IndexType stride_j = strides[k][j];
+        if (stride_i < stride_j) {
+          hasIncreasingStrides = true;
+        } else if (stride_i > stride_j) {
+          hasDecreasingStrides = true;
+        }
+      }
+
+      if (hasIncreasingStrides && !hasDecreasingStrides) {
+        for (int k = 0; k < numInfos; k++) {
+          IndexType size = sizes[k][i];
+          sizes[k][i] = sizes[k][j];
+          sizes[k][j] = size;
+
+          IndexType stride = strides[k][i];
+          strides[k][i] = strides[k][j];
+          strides[k][j] = stride;
+        }
+      }
+    }
+  }
+}
+
+// The `remaining_steps` argument is used to support Op that operates on
+// multiple elements at the same time. Generally, the strategy of ApplyOpN is to
+//  1. Initialize `remaining_steps = step`, where `step` is the template arg of
+//     CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the
+//     number of elements in bound for this call. It will almost always equal
+//     `step` except at boundaries.
+//  2. If `remaining_steps > 0` convert the current linearIndex to offset (if in
+//     bound), and recursively call `ApplyOpN` with `remaining_steps - 1`.
+//  3. At `remaining_steps = 0`,
+//       if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`;
+//       if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tensor1_valstep,
+//                                   tensor2_val1, tensor2_val2, ..., tensor2_valstep,
+//                                       ...
+//                                   tensorN_val1, tensorN_val2, ..., tensorN_valstep);`
+//
+// See NOTE [ CUDA_tensor_applyN helpers ] above for how Op may look like.
+
+template <typename Op,
+          typename scalar,
+          typename IndexType,
+          int ADims,
+          int remaining_steps,
+          typename... Offsets>
+struct ApplyOp1 {
+__device__ __forceinline__
+static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
+                  IndexType linearIndex, Offsets... aOffsets) {
+  // Convert `linearIndex` into an offset of `a`
+  const IndexType aOffset = sizeof...(Offsets) < n ?
+    detail::IndexToOffset<scalar, IndexType, ADims>::get(linearIndex, a) : 0;
+
+  ApplyOp1<Op, scalar, IndexType, ADims, remaining_steps - 1, const IndexType, Offsets...>::apply(
+    a, op, n, linearIndex + 1, aOffsets..., aOffset
+  );
+}
+};
+
+// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`).
+// We don't need to pass in how many elements need to be processed in this case.
+template <typename Op,
+          typename scalar,
+          typename IndexType,
+          int ADims,
+          typename Offset>
+struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offset> {
+__device__ __forceinline__
+static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op,
+                  int n, IndexType linearIndex, Offset offset) {
+  op(a.data[offset]);
+}
+};
+
+template <typename Op,
+          typename scalar,
+          typename IndexType,
+          int ADims,
+          typename... Offsets>
+struct ApplyOp1<Op, scalar, IndexType, ADims, 0, Offsets...> {
+__device__ __forceinline__
+static void apply(detail::TensorInfo<scalar, IndexType> &a, const Op &op, int n,
+                  IndexType linearIndex, Offsets... offsets) {
+  op(n, a.data[offsets]...);
+}
+};
+
+template <typename Op,
+          typename scalar,
+          typename IndexType,
+          int ADims,
+          int step>
+#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
+C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
+#endif
+__global__ void kernelPointwiseApply1(detail::TensorInfo<scalar, IndexType> a,
+                                      IndexType totalElements, const Op op) {
+  for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step;
+       linearIndex < totalElements;
+       linearIndex += gridDim.x * blockDim.x * step) {
+    ApplyOp1<Op, scalar, IndexType, ADims, step>::apply(
+      a, op, ::min(step, static_cast<int>(totalElements - linearIndex)), linearIndex);
+  }
+}
+
+
+template <typename Op,
+          typename scalar1,
+          typename scalar2,
+          typename IndexType,
+          int ADims,
+          int BDims,
+          int remaining_steps,
+          typename... Offsets>
+struct ApplyOp2 {
+__device__ __forceinline__
+static void apply(detail::TensorInfo<scalar1, IndexType> &a,
+                  detail::TensorInfo<scalar2, IndexType> &b,
+                  const Op &op, int64_t n, IndexType linearIndex,
+                  Offsets... aOffsets, Offsets... bOffsets) {
+  // Convert `linearIndex` into an offset of `a`
+  const IndexType aOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
+    detail::IndexToOffset<scalar1, IndexType, ADims>::get(linearIndex, a) : 0;
+
+  // Convert `linearIndex` into an offset of `b`
+  const IndexType bOffset = static_cast<int64_t>(sizeof...(Offsets)) < n ?
+ detail::IndexToOffset::get(linearIndex, b) : 0; + + ApplyOp2::apply( + a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset + ); +} +}; + +// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`). +// We don't need to pass in how many elements need to processed in this case. +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int /*n*/, IndexType /*linearIndex*/, + Offset aOffset, Offset bOffset) { + op(a.data[aOffset], b.data[bOffset]); +} +}; + +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int n, IndexType linearIndex, + Offsets... aOffsets, Offsets... bOffsets) { + op(n, a.data[aOffsets]..., b.data[bOffsets]...); +} +}; + +template +#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) +C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) +#endif +__global__ void +kernelPointwiseApply2(detail::TensorInfo a, + detail::TensorInfo b, + IndexType totalElements, + const Op op) { + for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step; + linearIndex < totalElements; + linearIndex += gridDim.x * blockDim.x * step) { + ApplyOp2::apply( + a, b, op, ::min(step, static_cast(totalElements - linearIndex)), + linearIndex); + } +} + +} // namespace + +template +inline bool CUDA_tensor_apply2(at::TensorBase a, + at::TensorBase b, + const Op op, + TensorArgType aType = TensorArgType::ReadWrite, + TensorArgType bType = TensorArgType::ReadOnly) { + TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(), + "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got " + "tensors with type ", a.device().type(), " and ", b.device().type()); + int64_t totalElements = a.numel(); + + if (totalElements != b.numel()) { + return false; + } + + if (a.dim() > MAX_TENSORINFO_DIMS || + b.dim() > MAX_TENSORINFO_DIMS) { + return false; + } + + if (a.numel() == 0) { + // Empty tensor; do nothing + return true; + } + const dim3 block = getApplyBlock(max_threads_per_block); + + dim3 grid; + int64_t curDevice = current_device(); + if (curDevice == -1) return false; + if (!getApplyGrid(totalElements, grid, curDevice, max_threads_per_block)) { + return false; + } + + /* + Expands readable/writable tensors whose indices may be "overlapped." + This ensures that each element of the tensor is operated on once and only + once. + */ + TensorBase oldA; + TensorBase oldB; + + if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) { + // Must perform in contiguous space + oldA = std::exchange(a, a.contiguous()); + } + if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) { + // Must perform in contiguous space + oldB = std::exchange(b, b.contiguous()); + } + + // It is possible that the tensor dimensions are able to be collapsed, + // and thus we can reduce the actual code complexity of the copy by + // exploiting this knowledge statically, since the div/mod is the + // most expensive part of the operation, more so than memory accesses. + // For instance, when copying a non-contiguous to a contiguous tensor + // (or vice versa), the contiguous tensor can be collapsed to one + // dimension, and the loop to translate the linear index to the array + // index can be similarly collapsed. That is what this unrolling is for. 
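  // Illustration of the collapse (hypothetical shapes, not from this file):
  //   contiguous (8, 4, 2) tensor: sizes {8, 4, 2}, strides {8, 2, 1}
  //   after collapseDims():        sizes {64},      strides {1}
  // so its linear-index-to-offset translation degenerates to
  // `offset = linearIndex`, and only the non-contiguous operand pays the
  // div/mod cost per remaining dimension.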
+ +#define HANDLE_CASE(TYPE, A, B) \ + kernelPointwiseApply2 \ + <<>>( \ + aInfo, bInfo, static_cast(totalElements), op); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); + +#define HANDLE_B_CASE(TYPE, A, B) { \ + switch (B) { \ + case 1: \ + HANDLE_CASE(TYPE, A, 1); \ + break; \ + case 2: \ + HANDLE_CASE(TYPE, A, 2); \ + break; \ + default: \ + HANDLE_CASE(TYPE, A, -1); \ + break; \ + } \ +} + +#define HANDLE_A_CASE(TYPE, A, B) { \ + switch (A) { \ + case 1: \ + HANDLE_B_CASE(TYPE, 1, B); \ + break; \ + case 2: \ + HANDLE_B_CASE(TYPE, 2, B); \ + break; \ + default: \ + HANDLE_B_CASE(TYPE, -1, B); \ + break; \ + } \ +} + + if (detail::canUse32BitIndexMath(a) && + detail::canUse32BitIndexMath(b)) { + detail::TensorInfo aInfo = + detail::getTensorInfo(a); + + detail::TensorInfo bInfo = + detail::getTensorInfo(b); + rearrangeDims(&aInfo, &bInfo); + aInfo.collapseDims(); + bInfo.collapseDims(); + + HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims); + } else { + detail::TensorInfo aInfo = + detail::getTensorInfo(a); + + detail::TensorInfo bInfo = + detail::getTensorInfo(b); + rearrangeDims(&aInfo, &bInfo); + aInfo.collapseDims(); + bInfo.collapseDims(); + + /* + Only instantiates the all 1D special case and the fallback all nD case for + large (64-bit indexed) tensors to reduce compilation time. + */ + if (aInfo.dims == 1 && bInfo.dims == 1) { + HANDLE_CASE(uint64_t, 1, 1); + } else { + HANDLE_CASE(uint64_t, -1, -1); + } + } +#undef HANDLE_CASE +#undef HANDLE_B_CASE +#undef HANDLE_A_CASE + + if (oldA.defined()) { + at::native::copy_ignoring_overlaps(oldA, a); + } + + if (oldB.defined()) { + at::native::copy_ignoring_overlaps(oldB, b); + } + + return true; +} + +/* Provides default step = 1 to CUDA_tensor_apply2. */ +template +inline bool CUDA_tensor_apply2(const at::TensorBase &a, + const at::TensorBase &b, + const Op op, + TensorArgType aType = TensorArgType::ReadWrite, + TensorArgType bType = TensorArgType::ReadOnly) { + return CUDA_tensor_apply2(a, b, op, aType, bType); +} + +} // cuda +} // at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDABlas.h b/voice_bridge/torch/include/ATen/cuda/CUDABlas.h new file mode 100644 index 0000000000000000000000000000000000000000..96c7fc81842282482d831561a862f226fd78b3a4 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDABlas.h @@ -0,0 +1,311 @@ +#pragma once +/* + Provides a subset of CUDA BLAS functions as templates: + + gemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, + ldc) + + gemv(transa, m, n, alpha, a, lda, x, incx, beta, y, incy) + + dot(n, x, incx, y, incy, result) + + where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot). + The functions are available in at::cuda::blas namespace. 
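+
+  For example (hypothetical call, not from this header): a single-precision
+  C = alpha*A*B + beta*C with column-major data and device pointers d_A, d_B, d_C:
+
+    at::cuda::blas::gemm<float>(
+        'n', 'n', m, n, k,
+        1.0f, d_A, m,   // A is m x k, leading dimension m
+        d_B, k,         // B is k x n, leading dimension k
+        0.0f, d_C, m);  // C is m x n, leading dimension m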
+ */ + +#include +#include + +namespace at { +namespace cuda { +namespace blas { + +// RAII guard that sets the CuBLAS pointer mode and restores it to +// its previous value when the guard is destroyed +class PointerModeGuard { +public: + PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) : + handle(handle) { + TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode)); + TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode)); + } + + ~PointerModeGuard() { + cublasSetPointerMode(handle, previous_mode); + } + +private: + cublasHandle_t handle; + cublasPointerMode_t previous_mode; +}; + +/* LEVEL 3 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type beta,\ + Dtype *c, int64_t ldc + +template +inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemm: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(double)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(float)); +#if !defined(USE_ROCM) || (defined(USE_ROCM) && ROCM_VERSION >= 21000) + template <> + void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +#endif +#if !defined(USE_ROCM) || (defined(USE_ROCM) && ROCM_VERSION >= 21000) + template <> + void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +#endif +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::Half)); +#if defined(USE_ROCM) || defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)); +#endif + +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 && !defined(_MSC_VER) +enum GEMMAndBiasActivationEpilogue { + None, + RELU, + GELU, +}; + +// NOTE: GELU activation is not supported prior to CUDA 11.4 and will +// do nothing if passed in that case. 
+template +void gemm_and_bias( + bool transpose_mat1, + bool transpose_mat2, + int64_t m, + int64_t n, + int64_t k, + at::opmath_type alpha_val, + const Dtype* mat1_ptr, + int64_t mat1_ld, + const Dtype* mat2_ptr, + int64_t mat2_ld, + const Dtype* bias, + Dtype* result_ptr, + int64_t result_ld, + GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None); +#endif + +#define CUDABLAS_BGEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, int64_t stridea, \ + const Dtype *b, int64_t ldb, int64_t strideb, \ + at::opmath_type beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches + +template +inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::bgemm: not implemented for ", typeid(Dtype).name()); +} + +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(double)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(float)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::Half)); +#if defined(USE_ROCM) || defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)); +#endif + +#define CUDABLAS_TRSM_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \ + const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb + +template +inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::trsm: not implemented for ", typeid(Dtype).name()); +} + +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); + +#define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \ + const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb, \ + int batchCount + +template +inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::blas::trsmBatched: not implemented for ", + typeid(Dtype).name()); +} + +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); + +/* LEVEL 2 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMV_ARGTYPES(Dtype) \ + char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \ + const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy + +template +inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemv: not implemented for ", typeid(Dtype).name()); +} + +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(double)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(float)); +#if !defined(USE_ROCM) || (defined(USE_ROCM) && ROCM_VERSION >= 21000) +template <> +void gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +template <> +void 
gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +#endif +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::Half)); +#if defined(USE_ROCM) || defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::BFloat16)); +#endif + +/* LEVEL 1 BLAS FUNCTIONS */ + +#define CUDABLAS_DOT_ARGTYPES(Dtype) \ + cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \ + int incy, Dtype *result + +template +inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::dot: not implemented for ", typeid(Dtype).name()); +} + +template <> +void dot(CUDABLAS_DOT_ARGTYPES(double)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(float)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::Half)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::BFloat16)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +template +inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::vdot: not implemented for ", typeid(Dtype).name()); +} + +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +// This guards blocks use of getrsBatched, geqrfBatched, getrfBatched on platforms other than cuda +#ifdef CUDART_VERSION + +#define CUDABLAS_GETRS_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasOperation_t trans, \ + int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \ + Dtype** dB_array, int ldb, int* info_array, int batchsize + +template +void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::getrsBatched: not implemented for ", + typeid(Dtype).name()); +} +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); + +#define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \ + Dtype **tau_array, int *info, int batchsize + +template +void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::blas::geqrfBatched: not implemented for ", + typeid(Dtype).name()); +} +template <> +TORCH_CUDA_CU_API void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); + +#define CUDABLAS_GETRF_ARGTYPES(Dtype) \ + int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize + +template +void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) { + TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented for ", typeid(Dtype).name()); +} +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); + +#define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasOperation_t trans, 
int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize + +template +void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) { + TORCH_INTERNAL_ASSERT(false, "at::cuda::blas::gelsBatched: not implemented for ", typeid(Dtype).name()); +} + +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); + +#endif // CUDART_VERSION + +} // namespace blas +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDAContext.h b/voice_bridge/torch/include/ATen/cuda/CUDAContext.h new file mode 100644 index 0000000000000000000000000000000000000000..0167cd585eaa28467d816cc953e15d5945633b0c --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDAContext.h @@ -0,0 +1,80 @@ +#pragma once + +#include + +#include +#include +#include + +#ifdef CUDART_VERSION +#include +#endif + +#include +#include +#include +#include +#include + +namespace at { +namespace cuda { + +/* +A common CUDA interface for ATen. + +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. + +Note that CUDAContext simply defines an interface with no associated class. +It is expected that the modules whose functions compose this interface will +manage their own state. There is only a single CUDA context/state. +*/ + +/** + * DEPRECATED: use device_count() instead + */ +inline int64_t getNumGPUs() { + return c10::cuda::device_count(); +} + +/** + * CUDA is available if we compiled with CUDA, and there are one or more + * devices. If we compiled with CUDA but there is a driver problem, etc., + * this function will report CUDA is not available (rather than raise an error.) 
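 *
 * A typical guard over optional GPU work (hypothetical sketch):
 *
 *   auto t = at::ones({2, 2});
 *   if (at::cuda::is_available()) {
 *     t = t.to(at::kCUDA);  // safe: at least one device is visible
 *   }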
+ */ +inline bool is_available() { + return c10::cuda::device_count() > 0; +} + +TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties(); + +TORCH_CUDA_CPP_API int warp_size(); + +TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(int64_t device); + +TORCH_CUDA_CPP_API bool canDeviceAccessPeer( + int64_t device, + int64_t peer_device); + +TORCH_CUDA_CPP_API Allocator* getCUDADeviceAllocator(); + +/* Handles */ +TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle(); +TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle(); + +#ifdef CUDART_VERSION +TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle(); +#endif + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDADataType.h b/voice_bridge/torch/include/ATen/cuda/CUDADataType.h new file mode 100644 index 0000000000000000000000000000000000000000..d25722c080ec3bcb21d86a3b5fca078ca4577a57 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDADataType.h @@ -0,0 +1,97 @@ +#pragma once + +#include + +#include +#include + +namespace at { +namespace cuda { + +template +cudaDataType getCudaDataType() { + TORCH_INTERNAL_ASSERT(false, "Cannot convert type ", typeid(scalar_t).name(), " to cudaDataType.") +} + +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_16F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_32F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_64F; +} + +// HIP doesn't define integral types +#ifndef USE_ROCM +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8U; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32I; +} +#endif + +#if !defined(USE_ROCM) && defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16BF; +} +#endif + +inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) { + switch (scalar_type) { +// HIP doesn't define integral types +#ifndef USE_ROCM + case c10::ScalarType::Byte: + return CUDA_R_8U; + case c10::ScalarType::Char: + return CUDA_R_8I; + case c10::ScalarType::Int: + return CUDA_R_32I; +#endif + case c10::ScalarType::Half: + return CUDA_R_16F; + case c10::ScalarType::Float: + return CUDA_R_32F; + case c10::ScalarType::Double: + return CUDA_R_64F; + case c10::ScalarType::ComplexHalf: + return CUDA_C_16F; + case c10::ScalarType::ComplexFloat: + return CUDA_C_32F; + case c10::ScalarType::ComplexDouble: + return CUDA_C_64F; +#if !defined(USE_ROCM) && defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + case c10::ScalarType::Short: + return CUDA_R_16I; + case c10::ScalarType::Long: + return CUDA_R_64I; + case c10::ScalarType::BFloat16: + return CUDA_R_16BF; +#endif + default: + TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.") + } +} + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDADevice.h b/voice_bridge/torch/include/ATen/cuda/CUDADevice.h new file mode 100644 index 
0000000000000000000000000000000000000000..929639c738d7bca09a67a1b7558911ca7ad0aaa2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDADevice.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +#include +#include + +namespace at { +namespace cuda { + +inline Device getDeviceFromPtr(void* ptr) { + cudaPointerAttributes attr{}; + + AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr)); + +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered, + "The specified pointer resides on host memory and is not registered with any CUDA device."); +#endif + + return {DeviceType::CUDA, static_cast(attr.device)}; +} + +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/CUDAEvent.h b/voice_bridge/torch/include/ATen/cuda/CUDAEvent.h new file mode 100644 index 0000000000000000000000000000000000000000..1c3c67949e5897293dbffc909b78c6f10b889839 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDAEvent.h @@ -0,0 +1,207 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace at { namespace cuda { + +/* +* CUDAEvents are movable not copyable wrappers around CUDA's events. +* +* CUDAEvents are constructed lazily when first recorded unless it is +* reconstructed from a cudaIpcEventHandle_t. The event has a device, and this +* device is acquired from the first recording stream. However, if reconstructed +* from a handle, the device should be explicitly specified; or if ipc_handle() is +* called before the event is ever recorded, it will use the current device. +* Later streams that record the event must match this device. +*/ +struct TORCH_CUDA_CPP_API CUDAEvent { + // Constructors + // Default value for `flags` is specified below - it's cudaEventDisableTiming + CUDAEvent() {} + CUDAEvent(unsigned int flags) : flags_{flags} {} + + CUDAEvent( + DeviceIndex device_index, const cudaIpcEventHandle_t* handle) { + device_index_ = device_index; + CUDAGuard guard(device_index_); + + AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle)); + is_created_ = true; + } + + // Note: event destruction done on creating device to avoid creating a + // CUDA context on other devices. + ~CUDAEvent() { + try { + if (is_created_) { + CUDAGuard guard(device_index_); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_deletion(reinterpret_cast(event_)); + } + cudaEventDestroy(event_); + } + } catch (...) 
{ /* No throw */ } + } + + CUDAEvent(const CUDAEvent&) = delete; + CUDAEvent& operator=(const CUDAEvent&) = delete; + + CUDAEvent(CUDAEvent&& other) { moveHelper(std::move(other)); } + CUDAEvent& operator=(CUDAEvent&& other) { + moveHelper(std::move(other)); + return *this; + } + + operator cudaEvent_t() const { return event(); } + + // Less than operator (to allow use in sets) + friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) { + return left.event_ < right.event_; + } + + optional device() const { + if (is_created_) { + return at::Device(at::kCUDA, device_index_); + } else { + return {}; + } + } + + bool isCreated() const { return is_created_; } + DeviceIndex device_index() const {return device_index_;} + cudaEvent_t event() const { return event_; } + + // Note: cudaEventQuery can be safely called from any device + bool query() const { + if (!is_created_) { + return true; + } + + cudaError_t err = cudaEventQuery(event_); + if (err == cudaSuccess) { + return true; + } else if (err != cudaErrorNotReady) { + C10_CUDA_CHECK(err); + } else { + // ignore and clear the error if not ready + cudaGetLastError(); + } + + return false; + } + + void record() { record(getCurrentCUDAStream()); } + + void recordOnce(const CUDAStream& stream) { + if (!was_recorded_) record(stream); + } + + // Note: cudaEventRecord must be called on the same device as the event. + void record(const CUDAStream& stream) { + if (!is_created_) { + createEvent(stream.device_index()); + } + + TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_, + " does not match recording stream's device ", stream.device_index(), "."); + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaEventRecord(event_, stream)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_record( + reinterpret_cast(event_), + reinterpret_cast(stream.stream()) + ); + } + was_recorded_ = true; + } + + // Note: cudaStreamWaitEvent must be called on the same device as the stream. + // The event has no actual GPU resources associated with it. 
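  // Usage sketch (hypothetical, not part of this header): ordering two
  // streams through an event without blocking the host.
  //
  //   at::cuda::CUDAEvent ev;
  //   ev.record(producer);   // lazily creates the event on producer's device
  //   ev.block(consumer);    // consumer's later work waits on the event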
+ void block(const CUDAStream& stream) { + if (is_created_) { + CUDAGuard guard(stream.device_index()); + AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_wait( + reinterpret_cast(event_), + reinterpret_cast(stream.stream()) + ); + } + } + } + + // Note: cudaEventElapsedTime can be safely called from any device + float elapsed_time(const CUDAEvent& other) const { + TORCH_CHECK(is_created_ && other.isCreated(), + "Both events must be recorded before calculating elapsed time."); + float time_ms = 0; + // raise cudaErrorNotReady if either event is recorded but not yet completed + AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_)); + return time_ms; + } + + // Note: cudaEventSynchronize can be safely called from any device + void synchronize() const { + if (is_created_) { + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_synchronization(reinterpret_cast(event_)); + } + AT_CUDA_CHECK(cudaEventSynchronize(event_)); + } + } + + // Note: cudaIpcGetEventHandle must be called on the same device as the event + void ipc_handle(cudaIpcEventHandle_t * handle) { + if (!is_created_) { + // this CUDAEvent object was initially constructed from flags but event_ + // is not created yet. + createEvent(getCurrentCUDAStream().device_index()); + } + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_)); + } + +private: + unsigned int flags_ = cudaEventDisableTiming; + bool is_created_ = false; + bool was_recorded_ = false; + DeviceIndex device_index_ = -1; + cudaEvent_t event_{}; + + void createEvent(DeviceIndex device_index) { + device_index_ = device_index; + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_creation(reinterpret_cast(event_)); + } + is_created_ = true; + } + + void moveHelper(CUDAEvent&& other) { + std::swap(flags_, other.flags_); + std::swap(is_created_, other.is_created_); + std::swap(was_recorded_, other.was_recorded_); + std::swap(device_index_, other.device_index_); + std::swap(event_, other.event_); + } +}; + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDAGeneratorImpl.h b/voice_bridge/torch/include/ATen/cuda/CUDAGeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..60130b884719f4dec5612a32f3fac30bbbfc76ae --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDAGeneratorImpl.h @@ -0,0 +1,132 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { +/** + * Note [CUDA Graph-safe RNG states] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Strategy: + * ~~~~~~~~~ + * (It helps to look at + * cuda/detail/PhiloxCudaStateRaw.cuh and + * cuda/detail/UnpackRaw.cuh + * while you read this.) + * + * A CUDA graph containing multiple RNG ops behaves like a + * single giant kernel from the perspective of ops external + * to the graph. During graph capture, logic below records + * the total of all offset increments that occur in the graphed + * region, and records the final total as the offset for the + * entire graph. + * + * When the graph reruns, the logic that reruns it + * increments this device's CUDA generator's offset + * by that total. 
+ * + * Meanwhile, within the graph, at capture time, instead of + * populating PhiloxCudaStates with the uint64_t offset pulled + * directly from the global state, PhiloxCudaState instead + * holds a pointer to one-element stream-local int64_t device tensor + * holding an initial offset value, and a uint64_t holding an + * intra-graph offset. (The intra-graph offset starts from zero + * when capture begins.) In each consumer kernel, + * at::cuda::philox::unpack computes the offset to use for this kernel + * as intra-graph offset + *initial offset. + * + * When the graph reruns, the logic that reruns it first + * fill_s the initial offset tensor with this device's + * CUDA generator's current offset. + * + * The control flow above ensures graphed execution is bitwise + * identical to eager execution as long as RNG ops are enqueued + * from a single thread, even if RNG ops and graphs containing + * RNG ops are enqueued and run simultaneously on multiple streams. + * + * Usage: + * ~~~~~~ + * PhiloxCudaState in this file, and unpack() in + * cuda/CUDAGraphsUtils.cuh allow non-divergent use of + * CUDAGeneratorImpl whether graph capture is underway or not. + * + * Each PhiloxCudaState instance should be used for one and only one + * consumer kernel. + * + * Example (see e.g. native/cuda/Dropout.cu): + * + * #include + * #include + * + * __global__ void kernel(..., PhiloxCudaState philox_args) { + * auto seeds = at::cuda::philox::unpack(philox_args); + * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; + * curandStatePhilox4_32_10_t state; + * curand_init(std::get<0>(seeds), // seed + * idx, // per-thread subsequence + * std::get<1>(seeds), // offset in subsequence + * &state); + * ... + * } + * + * host_caller(...) { + * PhiloxCudaState rng_engine_inputs; + * { + * // See Note [Acquire lock when using random generators] + * std::lock_guard lock(gen->mutex_); + * + * // gen could be HostState or DevState here! No divergent code needed! + * rng_engine_inputs = gen->philox_cuda_state(offset_increment); + * } + * kernel<<<...>>>(..., rng_engine_inputs); + * } + * + */ + +struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl { + // Constructors + CUDAGeneratorImpl(DeviceIndex device_index = -1); + ~CUDAGeneratorImpl() override = default; + + // CUDAGeneratorImpl methods + std::shared_ptr clone() const; + void set_current_seed(uint64_t seed) override; + uint64_t current_seed() const override; + uint64_t seed() override; + void set_state(const c10::TensorImpl& new_state) override; + c10::intrusive_ptr get_state() const override; + void set_philox_offset_per_thread(uint64_t offset); + uint64_t philox_offset_per_thread() const; + void capture_prologue(int64_t* seed_extragraph, int64_t* offset_extragraph); + uint64_t capture_epilogue(); + PhiloxCudaState philox_cuda_state(uint64_t increment); + + // Temporarily accommodates call sites that use philox_engine_inputs. + // Allows incremental refactor of call sites to use philox_cuda_state. 
+ std::pair philox_engine_inputs(uint64_t increment); + + static DeviceType device_type(); + +private: + CUDAGeneratorImpl* clone_impl() const override; + uint64_t seed_ = default_rng_seed_val; + uint64_t philox_offset_per_thread_ = 0; + int64_t* seed_extragraph_{}; + int64_t* offset_extragraph_{}; + uint32_t offset_intragraph_ = 0; + bool graph_expects_this_gen_ = false; +}; + +namespace cuda { +namespace detail { + +TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator( + DeviceIndex device_index = -1); +TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1); + +} // namespace detail +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDAGraph.h b/voice_bridge/torch/include/ATen/cuda/CUDAGraph.h new file mode 100644 index 0000000000000000000000000000000000000000..bacad79102a3ec7e8f79acfdd5bfce6ed5dd4627 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDAGraph.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +struct CUDAGeneratorImpl; + +namespace cuda { + +// Standalone way to get a unique mempool id usable as a pool=... argument +// to CUDAGraph::capture_begin +TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle(); + +struct TORCH_CUDA_CPP_API CUDAGraph { + CUDAGraph(); + ~CUDAGraph(); + + void capture_begin(MempoolId_t pool={0, 0}); + void capture_end(); + void replay(); + void reset(); + MempoolId_t pool(); + + protected: +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + cudaGraph_t graph_ = NULL; + cudaGraphExec_t graph_exec_ = NULL; +#endif + + // internal states so reset() can do its best cleaning up + // Set to true in capture_end if cudaStreamEndCapture succeeded + // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate + // to create graph_exec_, then graph_ is deleted + bool has_graph_ = false; + // Set to true in capture_end if cudaGraphInstantiate succeeded + bool has_graph_exec_ = false; + + // uuid of this instance's current capture, retrieved from Cuda + CaptureId_t id_; + + // uuid used to request a particular private mempool from CUDACachingAllocator. + // By default, this will be set to {id_, 0}. + // + // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_ + // will be set to the other graph's mempool_id_, and therefore share a mempool with the + // other graph. + // + // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(), + // it will share a mempool with any other captures that used "pool=handle". + // + // Sharing a mempool across graphs saves memory, and it's safe if you + // know you'll replay those graphs in the same order you captured them. + MempoolId_t mempool_id_; + + // Stream on which capture began + at::cuda::CUDAStream capture_stream_; + + // Default generator on device where capture began + at::CUDAGeneratorImpl* capture_gen_; + + // Device where capture occurred. Right now, for simplicity, we require all ops + // in a capture to run on the same device, but this is a limitation of CUDAGraph, + // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device + // captures if needed. 
+ int capture_dev_; + + // RNG state trackers + at::Tensor seed_extragraph_; + at::Tensor offset_extragraph_; + uint64_t wholegraph_increment_; +}; + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDAGraphsUtils.cuh b/voice_bridge/torch/include/ATen/cuda/CUDAGraphsUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..8925e456924b46a10987465ffcd69e71b04ae597 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDAGraphsUtils.cuh @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +// c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten. +// This file adds utils used by aten only. + +namespace at { +namespace cuda { + +using CaptureId_t = c10::cuda::CaptureId_t; +using CaptureStatus = c10::cuda::CaptureStatus; + +// Use this version where you don't want to create a CUDA context if none exists. +inline CaptureStatus currentStreamCaptureStatus() { +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + // don't create a context if we don't have to + if (at::cuda::detail::hasPrimaryContext(c10::cuda::current_device())) { + return c10::cuda::currentStreamCaptureStatusMayInitCtx(); + } else { + return CaptureStatus::None; + } +#else + return CaptureStatus::None; +#endif +} + +inline void assertNotCapturing(std::string attempt) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + attempt, + " during CUDA graph capture. If you need this call to be captured, " + "please file an issue. " + "Current cudaStreamCaptureStatus: ", + status); +} + +inline void errorIfCapturingCudnnBenchmark(std::string version_specific) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + "Current cudaStreamCaptureStatus: ", + status, + "\nCapturing ", + version_specific, + "is prohibited. Possible causes of this error:\n" + "1. No warmup iterations occurred before capture.\n" + "2. 
The convolutions you're trying to capture use dynamic shapes, " + "in which case capturing them is generally prohibited."); +} + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDASparse.h b/voice_bridge/torch/include/ATen/cuda/CUDASparse.h new file mode 100644 index 0000000000000000000000000000000000000000..d309cd5d8e31127a77fd1adbcd3cb2e0917c4e9f --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDASparse.h @@ -0,0 +1,54 @@ +#pragma once + +#include + +// cuSparse Generic API added in CUDA 10.1 +// Windows support added in CUDA 11.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32))) +#define AT_USE_CUSPARSE_GENERIC_API() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_API() 0 +#endif + +// hipSparse Generic API ROCm 5.2 +#if defined(USE_ROCM) && ROCM_VERSION >= 50200 +#define AT_USE_HIPSPARSE_GENERIC_52_API() 1 +#else +#define AT_USE_HIPSPARSE_GENERIC_52_API() 0 +#endif + +// hipSparse Generic API ROCm 5.1 +#if defined(USE_ROCM) && ROCM_VERSION >= 50100 +#define AT_USE_HIPSPARSE_GENERIC_API() 1 +#else +#define AT_USE_HIPSPARSE_GENERIC_API() 0 +#endif + +// cuSparse Generic API spsv function was added in CUDA 11.3.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500) +#define AT_USE_CUSPARSE_GENERIC_SPSV() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SPSV() 0 +#endif + +// cuSparse Generic API spsm function was added in CUDA 11.3.1 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600) +#define AT_USE_CUSPARSE_GENERIC_SPSM() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SPSM() 0 +#endif + +// cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400) +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400) +#define AT_USE_CUSPARSE_GENERIC_SDDMM() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SDDMM() 0 +#endif + +// BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0) +#if defined(CUDART_VERSION) || \ + (defined(USE_ROCM) && ROCM_VERSION >= 40500 ) +#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1 +#else +#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0 +#endif diff --git a/voice_bridge/torch/include/ATen/cuda/CUDASparseBlas.h b/voice_bridge/torch/include/ATen/cuda/CUDASparseBlas.h new file mode 100644 index 0000000000000000000000000000000000000000..eba84682c986dca1ed5dd4dffbc84edd300a8c3d --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDASparseBlas.h @@ -0,0 +1,322 @@ +#pragma once + +/* + Provides a subset of cuSPARSE functions as templates: + + csrgeam2(...) + + where scalar_t is double, float, c10::complex or c10::complex. + The functions are available in at::cuda::sparse namespace. 
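
  For example (hypothetical two-phase call; the handle, matrix descriptors and
  device pointers are assumed to be set up by the caller):

    size_t buffer_size = 0;
    at::cuda::sparse::csrgeam2_bufferSizeExt<float>(
        handle, m, n, &alpha, descrA, nnzA, valA, rowPtrA, colIndA,
        &beta, descrB, nnzB, valB, rowPtrB, colIndB,
        descrC, valC, rowPtrC, colIndC, &buffer_size);
    void* buffer = allocate_device_bytes(buffer_size);  // hypothetical allocator
    at::cuda::sparse::csrgeam2<float>(
        handle, m, n, &alpha, descrA, nnzA, valA, rowPtrA, colIndA,
        &beta, descrB, nnzB, valB, rowPtrB, colIndB,
        descrC, valC, rowPtrC, colIndC, buffer);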
+*/ + +#include +#include + +namespace at { +namespace cuda { +namespace sparse { + +#define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, int nnzA, \ + const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ + const int *csrSortedColIndA, const scalar_t *beta, \ + const cusparseMatDescr_t descrB, int nnzB, \ + const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \ + const int *csrSortedColIndC, size_t *pBufferSizeInBytes + +template +inline void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float)); +template <> +void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double)); +template <> +void csrgeam2_bufferSizeExt>( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex)); +template <> +void csrgeam2_bufferSizeExt>( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex)); + +#define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \ + cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \ + int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \ + const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace + +template +inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) { + TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz( + handle, + m, + n, + descrA, + nnzA, + csrSortedRowPtrA, + csrSortedColIndA, + descrB, + nnzB, + csrSortedRowPtrB, + csrSortedColIndB, + descrC, + csrSortedRowPtrC, + nnzTotalDevHostPtr, + workspace)); +} + +#define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, int nnzA, \ + const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ + const int *csrSortedColIndA, const scalar_t *beta, \ + const cusparseMatDescr_t descrB, int nnzB, \ + const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \ + void *pBuffer + +template +inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::csrgeam2: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(float)); +template <> +void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(double)); +template <> +void csrgeam2>( + CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex)); +template <> +void csrgeam2>( + CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \ + int kb, int nnzb, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc + +template +inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) { + 
TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrmm: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrmm(CUSPARSE_BSRMM_ARGTYPES(float)); +template <> +void bsrmm(CUSPARSE_BSRMM_ARGTYPES(double)); +template <> +void bsrmm>(CUSPARSE_BSRMM_ARGTYPES(c10::complex)); +template <> +void bsrmm>(CUSPARSE_BSRMM_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nb, int nnzb, \ + const scalar_t *alpha, const cusparseMatDescr_t descrA, \ + const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ + int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y + +template +inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrmv: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrmv(CUSPARSE_BSRMV_ARGTYPES(float)); +template <> +void bsrmv(CUSPARSE_BSRMV_ARGTYPES(double)); +template <> +void bsrmv>(CUSPARSE_BSRMV_ARGTYPES(c10::complex)); +template <> +void bsrmv>(CUSPARSE_BSRMV_ARGTYPES(c10::complex)); + +#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() + +#define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, \ + const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, int *pBufferSizeInBytes + +template +inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float)); +template <> +void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double)); +template <> +void bsrsv2_bufferSize>( + CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex)); +template <> +void bsrsv2_bufferSize>( + CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_analysis: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float)); +template <> +void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double)); +template <> +void bsrsv2_analysis>( + CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex)); +template <> +void bsrsv2_analysis>( + CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \ + cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_solve: not 
implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float)); +template <> +void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double)); +template <> +void bsrsv2_solve>( + CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex)); +template <> +void bsrsv2_solve>( + CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsm2Info_t info, int *pBufferSizeInBytes + +template +inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float)); +template <> +void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double)); +template <> +void bsrsm2_bufferSize>( + CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex)); +template <> +void bsrsm2_bufferSize>( + CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_analysis: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float)); +template <> +void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double)); +template <> +void bsrsm2_analysis>( + CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex)); +template <> +void bsrsm2_analysis>( + CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \ + const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ + int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \ + scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_solve: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float)); +template <> +void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double)); +template <> +void bsrsm2_solve>( + CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex)); +template <> +void bsrsm2_solve>( + CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex)); + +#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE + +} // namespace sparse +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDASparseDescriptors.h b/voice_bridge/torch/include/ATen/cuda/CUDASparseDescriptors.h new file mode 100644 index 0000000000000000000000000000000000000000..60c9ff0ffa88a250fcf196ca86aaed3fda0b187a 
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/CUDASparseDescriptors.h
@@ -0,0 +1,216 @@
+#pragma once
+
+#include <ATen/Tensor.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <ATen/cuda/CUDASparse.h>
+
+#include <c10/core/ScalarType.h>
+
+#if defined(USE_ROCM)
+#include <type_traits>
+#endif
+
+namespace at {
+namespace cuda {
+namespace sparse {
+
+template <typename T, cusparseStatus_t (*destructor)(T*)>
+struct CuSparseDescriptorDeleter {
+  void operator()(T* x) {
+    if (x != nullptr) {
+      TORCH_CUDASPARSE_CHECK(destructor(x));
+    }
+  }
+};
+
+template <typename T, cusparseStatus_t (*destructor)(T*)>
+class CuSparseDescriptor {
+ public:
+  T* descriptor() const {
+    return descriptor_.get();
+  }
+  T* descriptor() {
+    return descriptor_.get();
+  }
+
+ protected:
+  std::unique_ptr<T, CuSparseDescriptorDeleter<T, destructor>> descriptor_;
+};
+
+#if defined(USE_ROCM)
+// hipSPARSE doesn't define this
+using cusparseMatDescr = std::remove_pointer<cusparseMatDescr_t>::type;
+using cusparseDnMatDescr = std::remove_pointer<cusparseDnMatDescr_t>::type;
+using cusparseDnVecDescr = std::remove_pointer<cusparseDnVecDescr_t>::type;
+using cusparseSpMatDescr = std::remove_pointer<cusparseSpMatDescr_t>::type;
+using cusparseSpGEMMDescr = std::remove_pointer<cusparseSpGEMMDescr_t>::type;
+#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
+using bsrsv2Info = std::remove_pointer<bsrsv2Info_t>::type;
+using bsrsm2Info = std::remove_pointer<bsrsm2Info_t>::type;
+#endif
+#endif
+
+class TORCH_CUDA_CPP_API CuSparseMatDescriptor
+    : public CuSparseDescriptor<cusparseMatDescr, &cusparseDestroyMatDescr> {
+ public:
+  CuSparseMatDescriptor() {
+    cusparseMatDescr_t raw_descriptor;
+    TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
+    descriptor_.reset(raw_descriptor);
+  }
+
+  CuSparseMatDescriptor(bool upper, bool unit) {
+    cusparseFillMode_t fill_mode =
+        upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER;
+    cusparseDiagType_t diag_type =
+        unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT;
+    cusparseMatDescr_t raw_descriptor;
+    TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor));
+    TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode));
+    TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type));
+    descriptor_.reset(raw_descriptor);
+  }
+};
+
+#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
+
+class TORCH_CUDA_CPP_API CuSparseBsrsv2Info
+    : public CuSparseDescriptor<bsrsv2Info, &cusparseDestroyBsrsv2Info> {
+ public:
+  CuSparseBsrsv2Info() {
+    bsrsv2Info_t raw_descriptor;
+    TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor));
+    descriptor_.reset(raw_descriptor);
+  }
+};
+
+class TORCH_CUDA_CPP_API CuSparseBsrsm2Info
+    : public CuSparseDescriptor<bsrsm2Info, &cusparseDestroyBsrsm2Info> {
+ public:
+  CuSparseBsrsm2Info() {
+    bsrsm2Info_t raw_descriptor;
+    TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor));
+    descriptor_.reset(raw_descriptor);
+  }
+};
+
+#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
+
+#if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
+
+cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type);
+
+#if AT_USE_HIPSPARSE_GENERIC_52_API() || AT_USE_CUSPARSE_GENERIC_API()
+class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor
+    : public CuSparseDescriptor<cusparseDnMatDescr, &cusparseDestroyDnMat> {
+ public:
+  explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1);
+};
+#endif //AT_USE_HIPSPARSE_GENERIC_52_API() || AT_USE_CUSPARSE_GENERIC_API()
+
+class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor
+    : public CuSparseDescriptor<cusparseDnVecDescr, &cusparseDestroyDnVec> {
+ public:
+  explicit CuSparseDnVecDescriptor(const Tensor& input);
+};
+
+class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor
+    : public CuSparseDescriptor<cusparseSpMatDescr, &cusparseDestroySpMat> {};
+
+class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor
+    : public CuSparseSpMatDescriptor {
+ public:
+  explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1);
+
+#if defined(USE_ROCM) || (defined(CUDA_VERSION) && CUDA_VERSION
>= 11000) + std::tuple get_size() { + int64_t rows, cols, nnz; + TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize( + this->descriptor(), + &rows, + &cols, + &nnz)); + return std::make_tuple(rows, cols, nnz); + } + + void set_tensor(const Tensor& input) { + auto crow_indices = input.crow_indices(); + auto col_indices = input.col_indices(); + auto values = input.values(); + + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous()); + TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers( + this->descriptor(), + crow_indices.data_ptr(), + col_indices.data_ptr(), + values.data_ptr())); + } +#endif + +#if AT_USE_CUSPARSE_GENERIC_SPSV() + void set_mat_fill_mode(bool upper) { + cusparseFillMode_t fill_mode = + upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER; + TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( + this->descriptor(), + CUSPARSE_SPMAT_FILL_MODE, + &fill_mode, + sizeof(fill_mode))); + } + + void set_mat_diag_type(bool unit) { + cusparseDiagType_t diag_type = + unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT; + TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( + this->descriptor(), + CUSPARSE_SPMAT_DIAG_TYPE, + &diag_type, + sizeof(diag_type))); + } +#endif +}; + +#if AT_USE_CUSPARSE_GENERIC_SPSV() +class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpSVDescriptor() { + cusparseSpSVDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +#if AT_USE_CUSPARSE_GENERIC_SPSM() +class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpSMDescriptor() { + cusparseSpSMDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +#if (defined(USE_ROCM) && ROCM_VERSION >= 50200) || (defined(CUDA_VERSION) && CUDA_VERSION >= 11000) +class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpGEMMDescriptor() { + cusparseSpGEMMDescr_t raw_descriptor; + TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +#endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() + +} // namespace sparse +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDATensorMethods.cuh b/voice_bridge/torch/include/ATen/cuda/CUDATensorMethods.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e4e89ea1cdb77da1d7866ffe99c64dabfd735d27 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDATensorMethods.cuh @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace at { +template <> +inline __half* Tensor::data() const { + return reinterpret_cast<__half*>(data()); +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/CUDAUtils.h b/voice_bridge/torch/include/ATen/cuda/CUDAUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..15b5e026643068aac70658449adceed8b4159475 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CUDAUtils.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace at { namespace cuda { + +// Check if every tensor in a list of tensors matches the current +// device. 
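// A hedged usage sketch (tensor names are illustrative, not part of this
// header): callers typically gate a kernel launch on this check, e.g.
//   TORCH_CHECK(at::cuda::check_device({self, other, result}),
//               "expected all tensors to be on the current CUDA device");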
+inline bool check_device(ArrayRef ts) { + if (ts.empty()) { + return true; + } + Device curDevice = Device(kCUDA, current_device()); + for (const Tensor& t : ts) { + if (t.device() != curDevice) return false; + } + return true; +} + +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/CachingHostAllocator.h b/voice_bridge/torch/include/ATen/cuda/CachingHostAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..22a1265294e027fd7eefa6e7d505fc4fc6b0b457 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/CachingHostAllocator.h @@ -0,0 +1,39 @@ +#pragma once + +#include +#include + +namespace at { +namespace cuda { + +// +// A caching allocator for CUDA host allocations (pinned memory). +// +// This provides a drop-in replacement for THCudaHostAllocator, which re-uses +// freed pinned (page-locked) memory allocations. This avoids device +// synchronizations due to cudaFreeHost calls. +// +// To ensure correct behavior, THCCachingHostAllocator_recordEvent must be +// called anytime a pointer from this allocator is used in a cudaMemcpyAsync +// call between host and device, and passed the corresponding context from the +// allocation. This is currently invoked by at::native::copy_kernel_cuda. +// +// Note that this allocator does not split larger allocations into smaller +// blocks, unlike the caching device allocator. +// +TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator(); + +// Records an event in the specified stream. The allocation corresponding to the +// input `ptr`/`ctx` will not be re-used until the event has occurred. +TORCH_CUDA_CPP_API bool +CachingHostAllocator_recordEvent(void* ptr, void* ctx, c10::cuda::CUDAStream stream); + +// Releases cached pinned memory allocations via cudaHostFree +TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache(); + +inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) { + return getCachingHostAllocator()->allocate(size); +} + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/DeviceUtils.cuh b/voice_bridge/torch/include/ATen/cuda/DeviceUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..dc17aa80ca84b14759f0fdde2a6a455551bc748e --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/DeviceUtils.cuh @@ -0,0 +1,115 @@ +#pragma once + +#include +#include +#include + +__device__ __forceinline__ unsigned int ACTIVE_MASK() +{ +#if !defined(USE_ROCM) + return __activemask(); +#else +// will be ignored anyway + return 0xffffffff; +#endif +} + +#if defined(USE_ROCM) +__device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate) +{ +return __ballot(predicate); +} +#else +__device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __ballot_sync(mask, predicate); +#else + return __ballot(predicate); +#endif +} +#endif + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_sync(mask, value, srcLane, width); +#else + return __shfl(value, srcLane, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL_UP(T value, unsigned int delta, 
int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_up_sync(mask, value, delta, width); +#else + return __shfl_up(value, delta, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_down_sync(mask, value, delta, width); +#else + return __shfl_down(value, delta, width); +#endif +} + +#if defined(USE_ROCM) +template<> +__device__ __forceinline__ int64_t WARP_SHFL_DOWN(int64_t value, unsigned int delta, int width , unsigned int mask) +{ + //(HIP doesn't support int64_t). Trick from https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ + int2 a = *reinterpret_cast(&value); + a.x = __shfl_down(a.x, delta); + a.y = __shfl_down(a.y, delta); + return *reinterpret_cast(&a); +} +#endif + +template<> +__device__ __forceinline__ c10::Half WARP_SHFL_DOWN(c10::Half value, unsigned int delta, int width, unsigned int mask) +{ + return c10::Half(WARP_SHFL_DOWN(value.x, delta, width, mask), c10::Half::from_bits_t{}); +} + +template +__device__ __forceinline__ c10::complex WARP_SHFL_DOWN(c10::complex value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return c10::complex( + __shfl_down_sync(mask, value.real_, delta, width), + __shfl_down_sync(mask, value.imag_, delta, width)); +#else + return c10::complex( + __shfl_down(value.real_, delta, width), + __shfl_down(value.imag_, delta, width)); +#endif +} + +/** + * For CC 3.5+, perform a load using __ldg + */ +template +__device__ __forceinline__ T doLdg(const T* p) { +#if __CUDA_ARCH__ >= 350 && !defined(USE_ROCM) + return __ldg(p); +#else + return *p; +#endif +} diff --git a/voice_bridge/torch/include/ATen/cuda/EmptyTensor.h b/voice_bridge/torch/include/ATen/cuda/EmptyTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..bcccceeae16c509ac11b45d62af35265cde9611f --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/EmptyTensor.h @@ -0,0 +1,45 @@ +#pragma once +#include + +namespace at { +namespace detail { + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + ScalarType dtype, + c10::optional device_opt, + c10::optional memory_format_opt); + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt, + c10::optional memory_format_opt); + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + const TensorOptions &options); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + ScalarType dtype, + c10::optional device_opt); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions &options); + + +}} // namespace at::detail diff --git a/voice_bridge/torch/include/ATen/cuda/Exceptions.h b/voice_bridge/torch/include/ATen/cuda/Exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..94afbf09201d1ad3c76fd70ebc8f471d221ff996 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/Exceptions.h @@ -0,0 +1,156 @@ +#pragma once + +#include +#include +#include + +#ifdef CUDART_VERSION +#include +#endif + +#include 
+#include +#include + + +namespace c10 { + +class CuDNNError : public c10::Error { + using Error::Error; +}; + +} // namespace c10 + +#define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__) + +// See Note [CHECK macro] +#define AT_CUDNN_CHECK(EXPR, ...) \ + do { \ + cudnnStatus_t status = EXPR; \ + if (status != CUDNN_STATUS_SUCCESS) { \ + if (status == CUDNN_STATUS_NOT_SUPPORTED) { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN error: ", \ + cudnnGetErrorString(status), \ + ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \ + } else { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \ + } \ + } \ + } while (0) + +namespace at { namespace cuda { namespace blas { +C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error); +}}} // namespace at::cuda::blas + +#define TORCH_CUDABLAS_CHECK(EXPR) \ + do { \ + cublasStatus_t __err = EXPR; \ + TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \ + "CUDA error: ", \ + at::cuda::blas::_cublasGetErrorEnum(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +const char *cusparseGetErrorString(cusparseStatus_t status); + +#define TORCH_CUDASPARSE_CHECK(EXPR) \ + do { \ + cusparseStatus_t __err = EXPR; \ + TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \ + "CUDA error: ", \ + cusparseGetErrorString(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +// cusolver related headers are only supported on cuda now +#ifdef CUDART_VERSION + +namespace at { namespace cuda { namespace solver { +C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status); +}}} // namespace at::cuda::solver + +// When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan. +// When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue. +#define TORCH_CUSOLVER_CHECK(EXPR) \ + do { \ + cusolverStatus_t __err = EXPR; \ + if ((CUDA_VERSION < 11500 && \ + __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \ + (CUDA_VERSION >= 11500 && \ + __err == CUSOLVER_STATUS_INVALID_VALUE)) { \ + TORCH_CHECK_LINALG( \ + false, \ + "cusolver error: ", \ + at::cuda::solver::cusolverGetErrorMessage(__err), \ + ", when calling `" #EXPR "`", \ + ". This error may appear if the input matrix contains NaN."); \ + } else { \ + TORCH_CHECK( \ + __err == CUSOLVER_STATUS_SUCCESS, \ + "cusolver error: ", \ + at::cuda::solver::cusolverGetErrorMessage(__err), \ + ", when calling `" #EXPR "`"); \ + } \ + } while (0) + +#else +#define TORCH_CUSOLVER_CHECK(EXPR) EXPR +#endif + +#define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR) + +// For CUDA Driver API +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. 
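// A hedged sketch of the intended call pattern (the `module` and `image`
// variables are illustrative): driver-API calls made through the lazily
// loaded stub are wrapped as
//   AT_CUDA_DRIVER_CHECK(
//       at::globalContext().getNVRTC().cuModuleLoadData(&module, image));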
+#if !defined(USE_ROCM) + +#define AT_CUDA_DRIVER_CHECK(EXPR) \ + do { \ + CUresult __err = EXPR; \ + if (__err != CUDA_SUCCESS) { \ + const char* err_str; \ + CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \ + if (get_error_str_err != CUDA_SUCCESS) { \ + AT_ERROR("CUDA driver error: unknown error"); \ + } else { \ + AT_ERROR("CUDA driver error: ", err_str); \ + } \ + } \ + } while (0) + +#else + +#define AT_CUDA_DRIVER_CHECK(EXPR) \ + do { \ + CUresult __err = EXPR; \ + if (__err != CUDA_SUCCESS) { \ + AT_ERROR("CUDA driver error: ", static_cast(__err)); \ + } \ + } while (0) + +#endif + +// For CUDA NVRTC +// +// Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE, +// incorrectly produces the error string "NVRTC unknown error." +// The following maps it correctly. +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. +#define AT_CUDA_NVRTC_CHECK(EXPR) \ + do { \ + nvrtcResult __err = EXPR; \ + if (__err != NVRTC_SUCCESS) { \ + if (static_cast(__err) != 7) { \ + AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \ + } else { \ + AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \ + } \ + } \ + } while (0) diff --git a/voice_bridge/torch/include/ATen/cuda/NumericLimits.cuh b/voice_bridge/torch/include/ATen/cuda/NumericLimits.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7081e94837caa7d5050128e0bfe19aa67f93cd39 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/NumericLimits.cuh @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include +#include + +// NumericLimits.cuh is a holder for numeric limits definitions of commonly used +// types. This header is very specific to ROCm HIP and may be removed in the future. +// This header is derived from the legacy THCNumerics.cuh. + +// The lower_bound and upper_bound constants are same as lowest and max for +// integral types, but are -inf and +inf for floating point types. They are +// useful in implementing min, max, etc. + +namespace at { + +template +struct numeric_limits { +}; + +// WARNING: the following at::numeric_limits definitions are there only to support +// HIP compilation for the moment. Use std::numeric_limits if you are not +// compiling for ROCm. +// from @colesbury: "The functions on numeric_limits aren't marked with +// __device__ which is why they don't work with ROCm. CUDA allows them +// because they're constexpr." + +namespace { + // ROCm doesn't like INFINITY too. 
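// A concrete reading of the specializations below (a sketch, not an
// exhaustive contract): for floating point types lowest()/max() are the
// finite extremes while lower_bound()/upper_bound() are the infinities,
// e.g.
//   at::numeric_limits<float>::lowest()      == -FLT_MAX
//   at::numeric_limits<float>::lower_bound() == -inf
// which is what makes lower_bound()/upper_bound() usable as identity
// elements for min/max-style reductions.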
+ constexpr double inf = INFINITY; +} + +template <> +struct numeric_limits { + static inline __host__ __device__ bool lowest() { return false; } + static inline __host__ __device__ bool max() { return true; } + static inline __host__ __device__ bool lower_bound() { return false; } + static inline __host__ __device__ bool upper_bound() { return true; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ uint8_t lowest() { return 0; } + static inline __host__ __device__ uint8_t max() { return UINT8_MAX; } + static inline __host__ __device__ uint8_t lower_bound() { return 0; } + static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int8_t lowest() { return INT8_MIN; } + static inline __host__ __device__ int8_t max() { return INT8_MAX; } + static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; } + static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int16_t lowest() { return INT16_MIN; } + static inline __host__ __device__ int16_t max() { return INT16_MAX; } + static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; } + static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int32_t lowest() { return INT32_MIN; } + static inline __host__ __device__ int32_t max() { return INT32_MAX; } + static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; } + static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; } +}; + +template <> +struct numeric_limits { +#ifdef _MSC_VER + static inline __host__ __device__ int64_t lowest() { return _I64_MIN; } + static inline __host__ __device__ int64_t max() { return _I64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; } +#else + static inline __host__ __device__ int64_t lowest() { return INT64_MIN; } + static inline __host__ __device__ int64_t max() { return INT64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; } +#endif +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); } + static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 lower_bound() { return at::BFloat16(0xFF80, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ float lowest() { return -FLT_MAX; } + static inline 
__host__ __device__ float max() { return FLT_MAX; } + static inline __host__ __device__ float lower_bound() { return -static_cast(inf); } + static inline __host__ __device__ float upper_bound() { return static_cast(inf); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ double lowest() { return -DBL_MAX; } + static inline __host__ __device__ double max() { return DBL_MAX; } + static inline __host__ __device__ double lower_bound() { return -inf; } + static inline __host__ __device__ double upper_bound() { return inf; } +}; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/cuda/PeerToPeerAccess.h b/voice_bridge/torch/include/ATen/cuda/PeerToPeerAccess.h new file mode 100644 index 0000000000000000000000000000000000000000..a5a4f85cdef41ec0d3ae8027a02f06fce7155f35 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/PeerToPeerAccess.h @@ -0,0 +1,12 @@ +#include +#include + +namespace at { +namespace cuda { +namespace detail { +void init_p2p_access_cache(int64_t num_devices); +} + +TORCH_CUDA_CPP_API bool get_p2p_access(int source_dev, int dest_dev); + +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/PinnedMemoryAllocator.h b/voice_bridge/torch/include/ATen/cuda/PinnedMemoryAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..cb9ffc1a7dfc459696d483e0e306543a1cdba40d --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/PinnedMemoryAllocator.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace at { namespace cuda { + +inline TORCH_CUDA_CPP_API at::Allocator* getPinnedMemoryAllocator() { + return getCachingHostAllocator(); +} +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/ScanUtils.cuh b/voice_bridge/torch/include/ATen/cuda/ScanUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..8b3ef2df76de7dd350c196b1304d38b5ea2b580e --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/ScanUtils.cuh @@ -0,0 +1,79 @@ +#pragma once + +#include +#include +#include +#include + +// Collection of in-kernel scan / prefix sum utilities + +namespace at { +namespace cuda { + +// Inclusive prefix sum for binary vars using intra-warp voting + +// shared memory +template +__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) { + // Within-warp, we use warp voting. +#if defined (USE_ROCM) + unsigned long long int vote = WARP_BALLOT(in); + T index = __popcll(getLaneMaskLe() & vote); + T carry = __popcll(vote); +#else + T vote = WARP_BALLOT(in); + T index = __popc(getLaneMaskLe() & vote); + T carry = __popc(vote); +#endif + + int warp = threadIdx.x / C10_WARP_SIZE; + + // Per each warp, write out a value + if (getLaneId() == 0) { + smem[warp] = carry; + } + + __syncthreads(); + + // Sum across warps in one thread. 
This appears to be faster than a + // warp shuffle scan for CC 3.0+ + if (threadIdx.x == 0) { + int current = 0; + for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) { + T v = smem[i]; + smem[i] = binop(smem[i], current); + current = binop(current, v); + } + } + + __syncthreads(); + + // load the carry from the preceding warp + if (warp >= 1) { + index = binop(index, smem[warp - 1]); + } + + *out = index; + + if (KillWARDependency) { + __syncthreads(); + } +} + +// Exclusive prefix sum for binary vars using intra-warp voting + +// shared memory +template +__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) { + inclusiveBinaryPrefixScan(smem, in, out, binop); + + // Inclusive to exclusive + *out -= (T) in; + + // The outgoing carry for all threads is the last warp's sum + *carry = smem[at::ceil_div(blockDim.x, C10_WARP_SIZE) - 1]; + + if (KillWARDependency) { + __syncthreads(); + } +} + +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/Sleep.h b/voice_bridge/torch/include/ATen/cuda/Sleep.h new file mode 100644 index 0000000000000000000000000000000000000000..a5f37efe8cbf42974baf1b02bc8795d66420ee63 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/Sleep.h @@ -0,0 +1,11 @@ +#pragma once +#include +#include + +namespace at { +namespace cuda { + +// enqueues a kernel that spins for the specified number of cycles +TORCH_CUDA_CU_API void sleep(int64_t cycles); + +}} // namespace at::cuda diff --git a/voice_bridge/torch/include/ATen/cuda/ThrustAllocator.h b/voice_bridge/torch/include/ATen/cuda/ThrustAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..92fc5cc13ba08ed67ef046269e907ff62fe117e2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/ThrustAllocator.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +namespace at { +namespace cuda { + +/// Allocator for Thrust to re-route its internal device allocations +/// to the THC allocator +class ThrustAllocator { +public: + typedef char value_type; + + char* allocate(std::ptrdiff_t size) { + return static_cast(c10::cuda::CUDACachingAllocator::raw_alloc(size)); + } + + void deallocate(char* p, size_t size) { + c10::cuda::CUDACachingAllocator::raw_delete(p); + } +}; + +} +} diff --git a/voice_bridge/torch/include/ATen/cuda/cub.cuh b/voice_bridge/torch/include/ATen/cuda/cub.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7ac10378b0bcde74f097a82de40986980f664364 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/cub.cuh @@ -0,0 +1,420 @@ +#pragma once +#include + +#include +#include +#include +#include + +#include + +#include + +#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() + +#include + +#else + +// include cub in a safe manner, see: +// https://github.com/pytorch/pytorch/pull/55292 +#undef CUB_NS_POSTFIX //undef to avoid redefinition warnings +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER +#define CUB_NS_PREFIX namespace at_cuda_detail { +#define CUB_NS_POSTFIX } +#define CUB_NS_QUALIFIER ::at_cuda_detail::cub +#include +#undef CUB_NS_POSTFIX +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER + +#endif + +#include +#include +#include + +// handle the temporary storage and 'twice' calls for cub API +#define CUB_WRAPPER(func, ...) 
do { \ + size_t temp_storage_bytes = 0; \ + func(nullptr, temp_storage_bytes, __VA_ARGS__); \ + auto& caching_allocator = *::c10::cuda::CUDACachingAllocator::get(); \ + auto temp_storage = caching_allocator.allocate(temp_storage_bytes); \ + func(temp_storage.get(), temp_storage_bytes, __VA_ARGS__); \ + AT_CUDA_CHECK(cudaGetLastError()); \ +} while (false) + +#ifdef USE_ROCM +#define NO_ROCM(x) +#define ROCM_HIPCUB(x) ::hipcub +#else +#define NO_ROCM(x) x +#define ROCM_HIPCUB(x) x +#endif + +#if (!defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16()) || \ + (defined(USE_ROCM) && ROCM_VERSION >= 40500) + +#if !defined(USE_ROCM) +namespace at_cuda_detail { +#endif + +// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 + +template <> +struct ROCM_HIPCUB(cub)::FpLimits +{ + static __host__ __device__ __forceinline__ c10::BFloat16 Max() { + unsigned short max_word = 0x7F7F; + return reinterpret_cast(max_word); + } + + static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { + unsigned short lowest_word = 0xFF7F; + return reinterpret_cast(lowest_word); + } +}; + +template <> +struct ROCM_HIPCUB(cub)::NumericTraits: + ROCM_HIPCUB(cub)::BaseTraits {}; + +#if !defined(USE_ROCM) +} // namespace at_cuda_detail +#endif + +#endif + +#if !defined(USE_ROCM) +namespace at { namespace native { +namespace cub = ::at_cuda_detail::cub; +}} +#endif + +namespace at { +namespace cuda { +namespace cub { + +namespace detail { + +template +struct cuda_type { + using type = T; +}; +template<> +struct cuda_type { + using type = __half; +}; + +#if !defined(USE_ROCM) && CUB_SUPPORTS_NV_BFLOAT16() + +template<> +struct cuda_type { + using type = __nv_bfloat16; +}; + +#elif (defined(USE_ROCM) && ROCM_VERSION >= 40500) + +template<> +struct cuda_type { + using type = hip_bfloat16; +}; + +#endif + +} // namespace detail + +template +inline void segmented_sort_pairs( + const key_t *keys_in, key_t *keys_out, + const value_t *values_in, value_t *values_out, + int64_t num_elements, int64_t num_segments, + OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, + bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8 +) { + TORCH_CHECK(num_elements <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + TORCH_CHECK(num_segments <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + using key_t_ = typename detail::cuda_type::type; + + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr keys_out_owner; + + if (keys_out == nullptr) { + keys_out_owner = allocator->allocate(num_elements * sizeof(key_t)); + keys_out = reinterpret_cast(keys_out_owner.get()); + } + + const key_t_ *keys_in_ = reinterpret_cast(keys_in); + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +} + +#if CUB_SUPPORTS_UNIQUE_BY_KEY() +template +inline void unique_by_key( + KeysInputIteratorT keys_in, ValuesInputIteratorT values_in, + KeysOutputIteratorT keys_out, ValuesOutputIteratorT 
values_out, + NumSelectedIteratorT num_selected, int64_t num_input_items) +{ + // TODO: use thrust::discard_iterator to handle null keys_out when https://github.com/NVIDIA/cub/issues/406 is fixed. + constexpr bool null_keys_out = std::is_same::value; + using KeyT = typename std::iterator_traits::value_type; + using RealKeysOutputIteratorT = typename std::conditional::type; + RealKeysOutputIteratorT keys_out_; + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr keys_out_owner; + c10::guts::if_constexpr( + [&](auto _) { + keys_out_owner = allocator->allocate(num_input_items * sizeof(KeyT)); + keys_out_ = static_cast(keys_out_owner.get()); + }, + [&](auto _) { + keys_out_ = keys_out; + } + ); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey, + keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream()); +} +#endif + +namespace impl { + +template +C10_LAUNCH_BOUNDS_1(1) +__global__ void transform_vals(InputIteratorT1 a, InputIteratorT2 b, OutputIteratorT out, ScanOpT scan_op){ + // NOTE: out here not the final scan output, but an intermediate of the accumulation type. + using acc_t = typename std::iterator_traits::value_type; + *out = scan_op(static_cast(*a), static_cast(*b)); +} + +#if !CUB_SUPPORTS_FUTURE_VALUE() +template +struct chained_iterator { + using iterator_category = std::random_access_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = ValueT; + using pointer = ValueT*; + using reference = ValueT&; + + InputIteratorT iter; + ValueT *first; + difference_type offset = 0; + + __device__ ValueT operator[](difference_type i) { + i += offset; + if (i == 0) { + return *first; + } else { + return ValueT(iter[i - 1]); + } + } + __device__ chained_iterator operator+(difference_type i) { + return chained_iterator{iter, first, i}; + } + __device__ ValueT operator*() { + return (*this)[0]; + } +}; +#endif + +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 +} + +// non synchronizing cub call +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +template +inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, int64_t num_items) { +#if defined(USE_ROCM) && (ROCM_VERSION >= 50000) + //For ROCm, use hipCUB chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + using input_t = typename std::iterator_traits::value_type; + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(input_t)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + 
i - 1, + input + i, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; + using tuple = typename ArgIndexInputIterator::value_type; + auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { + if (x.key == 0) { + return *first_elem_ptr; + } else { + return x.value; + } + }; + auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( + ArgIndexInputIterator(input + i), input_iter_transform); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i + 1, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +template +inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, InitValueT init_value, int64_t num_items) { +#if defined(USE_ROCM) && (ROCM_VERSION >= 50000) + //For ROCm, use hipCUB chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(InitValueT)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + i - 1, + input + i - 1, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + auto input_ = impl::chained_iterator{ + input + i, first_elem_ptr}; + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +#if CUB_SUPPORTS_SCAN_BY_KEY() + +template +inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX elements"); + CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveSumByKey, + keys, input, output, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +template +inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, ScanOpT scan_op, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX 
elements"); + CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveScanByKey, + keys, input, output, scan_op, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +#endif + +template +void unique(InputIteratorT input, OutputIteratorT output, + NumSelectedIteratorT num_selected_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub unique does not support more than INT_MAX elements"); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, + input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); +} + +template +void run_length_encode(InputIteratorT input, OutputIteratorT output, CountsOutputIteratorT counts_out, + LengthOutputIteratorT length_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub run_length_encode does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode, + input, output, counts_out, length_out, num_items, + at::cuda::getCurrentCUDAStream()); +} + +template +void reduce(InputIteratorT input, OutputIteratorT output, int64_t num_items, ReductionOpT op, T init) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub reduce does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceReduce::Reduce, + input, output, num_items, op, init, + at::cuda::getCurrentCUDAStream()); + +} + +}}} // namespace at::cuda::cub diff --git a/voice_bridge/torch/include/ATen/cuda/cub.h b/voice_bridge/torch/include/ATen/cuda/cub.h new file mode 100644 index 0000000000000000000000000000000000000000..2e6a808d6f510182fc34daaff5bac9130ce34790 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/cub.h @@ -0,0 +1,89 @@ +#pragma once +#include +#include +#include + +// NOTE: These templates are intentionally not defined in this header, +// which aviods re-compiling them for each translation unit. If you get +// a link error, you need to add an explicit instantiation for your +// types in cub.cu + +namespace at { +namespace cuda { +namespace cub { + +inline int get_num_bits(uint64_t max_key) { + int num_bits = 1; + while (max_key > 1) { + max_key >>= 1; + num_bits++; + } + return num_bits; +} + +namespace detail { + +// radix_sort_pairs doesn't interact with value_t other than to copy +// the data, so we can save template instantiations by reinterpreting +// it as an opaque type. +template struct alignas(N) OpaqueType { char data[N]; }; + +template +void radix_sort_pairs_impl( + const key_t *keys_in, key_t *keys_out, + const OpaqueType *values_in, OpaqueType *values_out, + int64_t n, bool descending, int64_t begin_bit, int64_t end_bit); + +} // namespace detail + +template +void radix_sort_pairs( + const key_t *keys_in, key_t *keys_out, + const value_t *values_in, value_t *values_out, + int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8) { + static_assert(std::is_trivially_copyable::value || + AT_ROCM_ENABLED(), // ROCm incorrectly fails this check for vector types + "radix_sort_pairs value type must be trivially copyable"); + // Make value type opaque, so all inputs of a certain size use the same template instantiation + using opaque_t = detail::OpaqueType; + static_assert(sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0, + "This size of value_t is not instantiated. 
Please instantiate it in cub.cu" + " and modify this check."); + static_assert(sizeof(value_t) == alignof(value_t), "Expected value_t to be size-aligned"); + detail::radix_sort_pairs_impl( + keys_in, keys_out, + reinterpret_cast(values_in), + reinterpret_cast(values_out), + n, descending, begin_bit, end_bit); +} + +template +void radix_sort_keys( + const key_t *keys_in, key_t *keys_out, + int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8); + +// NOTE: Intermediate sums will be truncated to input_t precision +template +void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t n); + +template +void inclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) { + return inclusive_sum_truncating(input, output, n); +} + +// NOTE: Sums are done is common_type +template +void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t n); + +template +void exclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) { + return exclusive_sum_in_common_type(input, output, n); +} + +void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n); +inline void mask_exclusive_sum(const bool *mask, int64_t *output_idx, int64_t n) { + return mask_exclusive_sum( + reinterpret_cast(mask), output_idx, n); +} + +}}} // namespace at::cuda::cub diff --git a/voice_bridge/torch/include/ATen/cuda/cub_definitions.cuh b/voice_bridge/torch/include/ATen/cuda/cub_definitions.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a3d551673558f723372985f7b366bae82a580029 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/cub_definitions.cuh @@ -0,0 +1,53 @@ +#pragma once + +#if !defined(USE_ROCM) +#include // for CUDA_VERSION +#endif + +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +#include +#else +#define CUB_VERSION 0 +#endif + +// cub sort support for __nv_bfloat16 is added to cub 1.13 in: +// https://github.com/NVIDIA/cub/pull/306 +#if CUB_VERSION >= 101300 +#define CUB_SUPPORTS_NV_BFLOAT16() true +#else +#define CUB_SUPPORTS_NV_BFLOAT16() false +#endif + +// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: +// https://github.com/NVIDIA/cub/pull/326 +// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake +// starting from CUDA 11.5 +#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) +#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true +#else +#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false +#endif + +// cub support for UniqueByKey is added to cub 1.16 in: +// https://github.com/NVIDIA/cub/pull/405 +#if CUB_VERSION >= 101600 +#define CUB_SUPPORTS_UNIQUE_BY_KEY() true +#else +#define CUB_SUPPORTS_UNIQUE_BY_KEY() false +#endif + +// cub support for scan by key is added to cub 1.15 +// in https://github.com/NVIDIA/cub/pull/376 +#if CUB_VERSION >= 101500 +#define CUB_SUPPORTS_SCAN_BY_KEY() 1 +#else +#define CUB_SUPPORTS_SCAN_BY_KEY() 0 +#endif + +// cub support for cub::FutureValue is added to cub 1.15 in: +// https://github.com/NVIDIA/cub/pull/305 +#if CUB_VERSION >= 101500 +#define CUB_SUPPORTS_FUTURE_VALUE() true +#else +#define CUB_SUPPORTS_FUTURE_VALUE() false +#endif diff --git a/voice_bridge/torch/include/ATen/cuda/detail/CUDAHooks.h b/voice_bridge/torch/include/ATen/cuda/detail/CUDAHooks.h new file mode 100644 index 0000000000000000000000000000000000000000..d53276ab3bbac56d85b9b2c380743ca92c4d8d18 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/detail/CUDAHooks.h @@ -0,0 +1,56 @@ +#pragma once + +#include + +#include 
+#include <c10/util/Optional.h>
+
+// TODO: No need to have this whole header, we can just put it all in
+// the cpp file
+
+namespace at { namespace cuda { namespace detail {
+
+// Set the callback to initialize Magma, which is set by
+// torch_cuda_cu. This indirection is required so magma_init is called
+// in the same library where Magma will be used.
+TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)());
+
+TORCH_CUDA_CPP_API bool hasPrimaryContext(int64_t device_index);
+TORCH_CUDA_CPP_API c10::optional<int64_t> getDeviceIndexWithPrimaryContext();
+
+// The real implementation of CUDAHooksInterface
+struct CUDAHooks : public at::CUDAHooksInterface {
+  CUDAHooks(at::CUDAHooksArgs) {}
+  void initCUDA() const override;
+  Device getDeviceFromPtr(void* data) const override;
+  bool isPinnedPtr(void* data) const override;
+  const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const override;
+  bool hasCUDA() const override;
+  bool hasMAGMA() const override;
+  bool hasCuDNN() const override;
+  bool hasCuSOLVER() const override;
+  bool hasROCM() const override;
+  const at::cuda::NVRTC& nvrtc() const override;
+  int64_t current_device() const override;
+  bool hasPrimaryContext(int64_t device_index) const override;
+  Allocator* getCUDADeviceAllocator() const override;
+  Allocator* getPinnedMemoryAllocator() const override;
+  bool compiledWithCuDNN() const override;
+  bool compiledWithMIOpen() const override;
+  bool supportsDilatedConvolutionWithCuDNN() const override;
+  bool supportsDepthwiseConvolutionWithCuDNN() const override;
+  bool supportsBFloat16ConvolutionWithCuDNNv8() const override;
+  bool hasCUDART() const override;
+  long versionCUDART() const override;
+  long versionCuDNN() const override;
+  std::string showConfig() const override;
+  double batchnormMinEpsilonCuDNN() const override;
+  int64_t cuFFTGetPlanCacheMaxSize(int64_t device_index) const override;
+  void cuFFTSetPlanCacheMaxSize(int64_t device_index, int64_t max_size) const override;
+  int64_t cuFFTGetPlanCacheSize(int64_t device_index) const override;
+  void cuFFTClearPlanCache(int64_t device_index) const override;
+  int getNumGPUs() const override;
+  void deviceSynchronize(int64_t device_index) const override;
+};
+
+}}}  // at::cuda::detail
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/DeviceThreadHandles.h b/voice_bridge/torch/include/ATen/cuda/detail/DeviceThreadHandles.h
new file mode 100644
index 0000000000000000000000000000000000000000..0bd03c6d3ac3d774d769c9862cfecbd586ebafaf
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/DeviceThreadHandles.h
@@ -0,0 +1,151 @@
+// Some stateful GPU libraries, such as cuDNN and cuBLAS, use handles to store state.
+// These handles are tied to a device, and these libraries require/recommend not
+// sharing handles across host threads.
+//
+// These libraries recommend using one handle per host thread. We may not want to do
+// this because threads are relatively light-weight, but creating and destroying
+// handles is expensive (destroying a handle causes synchronizations). DataParallel,
+// for example, creates new threads for each forward pass.
+//
+// This file implements a handle pool mechanism. The handle pool returns handles on
+// demand as threads request them. If all existing handles in the pool are in use,
+// it creates a new one. As threads terminate, they release handles back into the pool.
+// In this way, the handle pool never creates more handles than the high-water mark of
+// active threads, so it's efficient with DataParallel.
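//
// A minimal sketch of the intended instantiation (cuBLAS serves purely as an
// example of a Create/Destroy pair; the wrapper functions and alias below are
// illustrative, not declared in this header):
//
//   void createCublasHandle(cublasHandle_t* handle) {
//     TORCH_CUDABLAS_CHECK(cublasCreate(handle));
//   }
//   void destroyCublasHandle(cublasHandle_t handle) {
//     cublasDestroy(handle);
//   }
//   using CuBlasPoolType = DeviceThreadHandlePool<
//       cublasHandle_t, createCublasHandle, destroyCublasHandle>;
//
//   auto pool = std::make_shared<CuBlasPoolType>();
//   // one PoolWindow per thread; its destructor returns handles to the pool
//   thread_local std::unique_ptr<CuBlasPoolType::PoolWindow> window(
//       pool->newPoolWindow());
//   cublasHandle_t handle = window->reserve(device_index);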
+ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace at { namespace cuda { namespace { + +template +struct DeviceThreadHandlePool : public std::enable_shared_from_this> { + + struct Handle { + Handle_t handle; + Handle(bool create = false) : handle(nullptr) + { + if(create) Create(&handle); + } + // std::vector.emplace() and push_back() may route through temporaries and call + // copy/move constructors along the way. If this is the case, we don't want + // the destructors of temporaries to call cudnnDestroy on the handle. + // We can achieve safety (for the narrow case of stashing within std::vectors) + // by making Handle moveable but not copyable, and transferring handle ownership + // to the latest constructed object. This is not a substitute for full-blown + // reference counting, but reference counting may be overkill here. + // Another alternative is to wrap the saved Handles in unique_ptrs, i.e., + // unordered_map>> created_handles; + Handle(const Handle& rhs) = delete; + // Following https://stackoverflow.com/questions/3279543/what-is-the-copy-and-swap-idiom + Handle(Handle&& rhs) : Handle() { std::swap(handle, rhs.handle); } + // operator= takes argument by value + Handle& operator=(Handle rhs) { std::swap(handle, rhs.handle); return *this; } + ~Handle() { + if(handle) Destroy(handle); + } + }; + + std::mutex mutex; + + // Handles are lazily created as different threads request them, + // but are never destroyed until the end of the process. + // The maximum number of handles this process will create for each device is equal + // to the high-water mark of the number of concurrently active threads that request + // handles for that device. + // When threads terminate, they release their handles back into the pool for reuse. + // Otherwise, new handles would be created every time new threads were spawned, + // resulting in poor performance for Python modules that repeatedly or frequently + // spawned new sets of threads (like DataParallel, which creates a new set of threads + // for each forward pass). + // + // To prevent potential deadlocks, we explicitly choose not to cap the number + // of handles that are created per device. + // Example of danger: If we cap the max handles at 4, and 5 threads are sharing a device, + // only 4 can make forward progress at any time. The other 4 will not release their + // handles until they exit, so the fifth cannot make progress until then. This is + // not a problem...UNLESS all 5 threads attempt some sort of synchronization at an + // intermediate point (ie, before any of them have exited). We have no way to anticipate + // or enforce that user threads will not attempt such intermediate synchronization. + // The only way to ensure safety is to avoid imposing a cap on the number of handles. + std::unordered_map> created_handles; + std::unordered_map> available_handles; + + // PoolWindow lazily creates and caches the handles that a particular thread is using, + // so in the common case handle access doesn't incur either handle creation or a mutex lock. + class PoolWindow + { + public: + PoolWindow(std::shared_ptr parent): weak_parent(std::move(parent)) {} + ~PoolWindow(){ release(); } + + Handle_t reserve(int device) + { + // If this thread already has a handle for this device, return it + if(my_handles.find(device) != my_handles.end()) + return my_handles[device]; + + // otherwise, either grab a handle from the pool if one is available, + // or if not, create a new one. 
+      auto parent = weak_parent.lock();
+      TORCH_CHECK(parent, "Cannot create handle during program termination");
+      std::lock_guard<std::mutex> guard(parent->mutex);
+
+      if(parent->available_handles[device].size() > 0)
+      {
+        my_handles[device] = parent->available_handles[device].back();
+        parent->available_handles[device].pop_back();
+      }
+      else
+      {
+        // In local testing, I do observe that emplace_back sometimes routes through temporaries
+        // that incur move-constructor and destructor calls. See comments in Handle above.
+        parent->created_handles[device].emplace_back(true /*create*/);
+        my_handles[device] = parent->created_handles[device].back().handle;
+      }
+
+      return my_handles[device];
+    }
+
+  private:
+    // Stores the per-device handles currently owned by this thread
+    std::unordered_map<int, Handle_t> my_handles;
+
+    std::weak_ptr<DeviceThreadHandlePool> weak_parent;
+
+    // Called by the destructor. Releases this thread's handles back into the pool.
+    void release() {
+      if(my_handles.size() > 0) {
+        auto parent = weak_parent.lock();
+        if (!parent) {
+          // If this thread exits after atexit handlers have completed, the
+          // cuda context itself may be invalid, so we must leak the handles.
+          return;
+        }
+
+        std::lock_guard<std::mutex> guard(parent->mutex);
+        for(auto d_h : my_handles)
+          parent->available_handles[d_h.first].push_back(d_h.second);
+      }
+    }
+  };
+
+  // Warning:
+  // If you want to change this function, be aware that this function will be called
+  // by multiple threads and there is no mutex guarding the call of this function, so
+  // make sure your implementation is thread-safe.
+  PoolWindow *newPoolWindow() {
+    // The returned pointer will be owned by a thread local variable
+    // so that different threads do not share the same PoolWindow.
+    return new PoolWindow(this->shared_from_this());
+  }
+};
+
+}}} // namespace at::cuda::detail::
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/IndexUtils.cuh b/voice_bridge/torch/include/ATen/cuda/detail/IndexUtils.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..74345673c287f0f3e9a961bc24b383d8126aa483
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/IndexUtils.cuh
@@ -0,0 +1,32 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace at {
+namespace cuda {
+namespace detail {
+
+TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
+using at::native::canUse32BitIndexMath;
+
+template <typename scalar, typename IndexType>
+TensorInfo<scalar, IndexType>
+getTensorInfo(const at::TensorBase &t) {
+  IndexType sz[MAX_TENSORINFO_DIMS];
+  IndexType st[MAX_TENSORINFO_DIMS];
+
+  int dims = t.dim();
+  for (int i = 0; i < dims; ++i) {
+    sz[i] = t.size(i);
+    st[i] = t.stride(i);
+  }
+
+  return TensorInfo<scalar, IndexType>(
+    t.data_ptr<scalar>(), dims, sz, st);
+}
+
+} // detail
+} // cuda
+} // at
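A typical host-side dispatch built on the two helpers above (a sketch; the float dtype and the `kernelImpl` launcher are assumptions for illustration, not part of this header): choose the narrowest index type that can address the tensor, since 32-bit index arithmetic is considerably cheaper on GPUs than 64-bit.

#include <cstdint>

// Sketch only: assumes a device-side implementation templated on the index type.
template <typename IndexType>
void kernelImpl(const at::cuda::detail::TensorInfo<float, IndexType>& info);

void dispatch_by_index_type(const at::TensorBase& t) {
  if (at::cuda::detail::canUse32BitIndexMath(t)) {
    auto info = at::cuda::detail::getTensorInfo<float, unsigned int>(t);
    info.collapseDims();              // fewer dims means less per-element index math
    kernelImpl<unsigned int>(info);
  } else {
    auto info = at::cuda::detail::getTensorInfo<float, uint64_t>(t);
    info.collapseDims();
    kernelImpl<uint64_t>(info);
  }
}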
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/IntegerDivider.cuh b/voice_bridge/torch/include/ATen/cuda/detail/IntegerDivider.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..761e16aea3c23897dca02a16883137f54ff7cc8d
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/IntegerDivider.cuh
@@ -0,0 +1,126 @@
+#pragma once
+
+#include <assert.h>
+#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
+#include <cuda_runtime.h>
+#endif
+
+namespace at {
+namespace cuda {
+namespace detail {
+
+// A utility class to implement integer division by multiplication, given a fixed
+// divisor.
+//
+// WARNING: The fast divider algorithm is only implemented for unsigned int;
+//          otherwise we default to plain integer division. For unsigned int,
+//          we further assume that the dividend is at most INT32_MAX. Thus,
+//          IntDivider must NOT be used for general integer division.
+//
+//          This reduced range is enough for our purpose, and it allows us to
+//          slightly simplify the computation.
+//
+// (NOTE: Below, "2^k" denotes exponentiation, i.e., 1<<k.)
+//
+// For any N-bit unsigned integer d (> 0), we can find a "magic number" m (2^N
+// <= m < 2^(N+1)) and shift s such that:
+//
+//    \floor(n / d) = \floor((m * n) / 2^(N+s)).
+//
+// Given such m and s, the integer division can be then implemented as:
+//
+//    let m' = m - 2^N  // 0 <= m' < 2^N
+//
+//    fast_integer_division(n):
+//      // Multiply two N-bit unsigned integers: the result is a 2N-bit unsigned
+//      // integer. Then take the higher N bits.
+//      t = (m' * n) >> N
+//
+//      // Here we use the fact that n is less than 2^(N-1): otherwise the value
+//      // of (t + n) may not fit in an N-bit integer.
+//      return (t + n) >> s
+//
+// Finding such a magic number is surprisingly easy:
+//
+//    s = \ceil(\log_2 d)
+//    m' = \floor(2^N * (2^s - d) / d) + 1  // Need 2N-bit integer arithmetic.
+//
+// See also:
+//  - Division by Invariant Integers Using Multiplication,
+//    TorbjΓΆrn Granlund and Peter L. Montgomery, 1994.
+//
+//  - http://www.hackersdelight.org/magic.htm
+//
+//  - http://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html
+
+// Result of div/mod operation stored together.
+template <typename Value>
+struct DivMod {
+  Value div, mod;
+
+  C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }
+};
+
+// Base case: we only have an implementation for uint32_t for now. For
+// everything else, we use plain division.
+template <typename Value>
+struct IntDivider {
+  IntDivider() { }  // Dummy constructor for arrays.
+  IntDivider(Value d) : divisor(d) { }
+
+  C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }
+  C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }
+  C10_HOST_DEVICE inline DivMod<Value> divmod(Value n) const {
+    return DivMod<Value>(n / divisor, n % divisor);
+  }
+
+  Value divisor;
+};
+
+// Implement fast integer division.
+template <>
+struct IntDivider<unsigned int> {
+  static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
+
+  IntDivider() { }  // Dummy constructor for arrays.
+
+  IntDivider(unsigned int d) : divisor(d) {
+    assert(divisor >= 1 && divisor <= INT32_MAX);
+
+    // TODO: gcc/clang has __builtin_clz() but it's not portable.
+    for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
+
+    uint64_t one = 1;
+    uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
+    m1 = magic;
+    assert(m1 > 0 && m1 == magic);  // m1 must fit in 32 bits.
+  }
+
+  C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {
+#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
+    // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and
+    // 'm1'.
+    unsigned int t = __umulhi(n, m1);
+    return (t + n) >> shift;
+#else
+    // Using uint64_t so that the addition does not overflow.
+    uint64_t t = ((uint64_t) n * m1) >> 32;
+    return (t + n) >> shift;
+#endif
+  }
+
+  C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {
+    return n - div(n) * divisor;
+  }
+
+  C10_HOST_DEVICE inline DivMod<unsigned int> divmod(unsigned int n) const {
+    unsigned int q = div(n);
+    return DivMod<unsigned int>(q, n - q * divisor);
+  }
+
+  unsigned int divisor;  // d above.
+  unsigned int m1;  // Magic number: m' above.
+  unsigned int shift;  // Shift amount.
+};
+
+}}} // namespace at::cuda::detail
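As a concrete, standalone check of the scheme above (hand-worked numbers, not taken from the header): for d = 7 and N = 32 the constructor logic yields shift = 3 and m1 = floor(2^32 * (8 - 7) / 7) + 1 = 613566757, and the multiply-then-shift sequence reproduces plain division across the permitted dividend range.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t d = 7;
  // Same math as the IntDivider<unsigned int> constructor above.
  uint32_t shift = 0;
  while ((1ULL << shift) < d) ++shift;                              // shift = 3
  const uint64_t one = 1;
  const uint32_t m1 = ((one << 32) * ((one << shift) - d)) / d + 1; // 613566757

  for (uint32_t n = 0; n <= 100000; ++n) {
    const uint32_t t = (uint64_t(n) * m1) >> 32;  // high 32 bits of n * m1
    assert(((t + n) >> shift) == n / d);          // matches plain division
  }
  return 0;
}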
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/KernelUtils.h b/voice_bridge/torch/include/ATen/cuda/detail/KernelUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..b36e78c9b9a69d687eda85865f980c8e6cb478f7
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/KernelUtils.h
@@ -0,0 +1,36 @@
+#pragma once
+
+#include <limits>
+
+namespace at { namespace cuda { namespace detail {
+
+// CUDA: grid stride looping
+//
+// int64_t _i_n_d_e_x specifically prevents overflow in the loop increment.
+// If input.numel() < INT_MAX, _i_n_d_e_x < INT_MAX, except after the final
+// iteration of the loop where _i_n_d_e_x += blockDim.x * gridDim.x can be
+// greater than INT_MAX. But in that case _i_n_d_e_x >= n, so there are no
+// further iterations and the overflowed value in i=_i_n_d_e_x is not used.
+#define CUDA_KERNEL_LOOP_TYPE(i, n, index_type) \
+  int64_t _i_n_d_e_x = blockIdx.x * blockDim.x + threadIdx.x; \
+  for (index_type i=_i_n_d_e_x; _i_n_d_e_x < (n); _i_n_d_e_x+=blockDim.x * gridDim.x, i=_i_n_d_e_x)
+
+#define CUDA_KERNEL_LOOP(i, n) CUDA_KERNEL_LOOP_TYPE(i, n, int)
+
+
+// Use 1024 threads per block, which requires cuda sm_2x or above
+constexpr int CUDA_NUM_THREADS = 1024;
+
+// CUDA: number of blocks for threads.
+inline int GET_BLOCKS(const int64_t N, const int64_t max_threads_per_block=CUDA_NUM_THREADS) {
+  TORCH_INTERNAL_ASSERT(N > 0, "CUDA kernel launch blocks must be positive, but got N=", N);
+  constexpr int64_t max_int = std::numeric_limits<int>::max();
+
+  // Round up division for positive number that cannot cause integer overflow
+  auto block_num = (N - 1) / max_threads_per_block + 1;
+  TORCH_INTERNAL_ASSERT(block_num <= max_int, "Can't schedule too many blocks on CUDA device");
+
+  return static_cast<int>(block_num);
+}
+
+}}} // namespace at::cuda::detail
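A minimal sketch of how the loop macro and the launch helpers above are conventionally combined (the kernel and launcher are illustrative, not part of this header):

// Hypothetical elementwise kernel built on the grid-stride loop.
__global__ void scale_kernel(float* out, const float* in, float alpha, int64_t n) {
  CUDA_KERNEL_LOOP_TYPE(i, n, int64_t) {
    out[i] = alpha * in[i];
  }
}

void launch_scale(float* out, const float* in, float alpha, int64_t n) {
  if (n == 0) return;  // GET_BLOCKS asserts N > 0
  using namespace at::cuda::detail;
  scale_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(out, in, alpha, n);
}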
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/LazyNVRTC.h b/voice_bridge/torch/include/ATen/cuda/detail/LazyNVRTC.h
new file mode 100644
index 0000000000000000000000000000000000000000..810e1c322dbd8c5c30f776d8689d621448578cc7
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/LazyNVRTC.h
@@ -0,0 +1,11 @@
+#pragma once
+#include
+namespace at { namespace cuda {
+// Forward-declares at::cuda::NVRTC
+struct NVRTC;
+
+namespace detail {
+extern NVRTC lazyNVRTC;
+}
+
+}} // at::cuda::detail
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/OffsetCalculator.cuh b/voice_bridge/torch/include/ATen/cuda/detail/OffsetCalculator.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..2959fe8e65b87e4113f02225079bf1ff7ef8cbc7
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/OffsetCalculator.cuh
@@ -0,0 +1,119 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// OffsetCalculator computes the offsets of a linear index for operands that
+// share the same shape, but may have different strides. If element_sizes is
+// nullptr, then the strides will be in bytes, otherwise the strides will be
+// in # of elements.
+// OffsetCalculator iterates the tensor in a column-major order
+
+#if defined(USE_ROCM)
+constexpr int MAX_DIMS = 16;
+#else
+constexpr int MAX_DIMS = 25;
+#endif
+
+template <int NARGS, typename index_t = uint32_t, bool signed_strides = false>
+struct OffsetCalculator {
+  // We allow having negative strides to implement some operations like torch.flip
+  using stride_t = std::conditional_t<signed_strides,
+                                      std::make_signed_t<index_t>,
+                                      index_t>;
+  // The offset for each argument. Wrapper around fixed-size array.
+  // On CUDA, zero sized array is not allowed, so when we are handling nullary
+  // operators, we need to create a size 1 offset to avoid compiler failure.
+  // This size 1 offset is just a placeholder, and we will not use it.
+  using offset_type = at::detail::Array<index_t, std::max<int>(NARGS, 1)>;
+
+  // if element_sizes is nullptr, then the strides will be in bytes, otherwise
+  // the strides will be in # of elements.
+  OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides, const int64_t* element_sizes=nullptr) : dims(dims) {
+    TORCH_CHECK(dims <= MAX_DIMS, "tensor has too many (>", MAX_DIMS, ") dims");
+    for (int i=0; i < dims; i++){
+      sizes_[i] = at::cuda::detail::IntDivider<index_t>(sizes[i]);
+      for (int arg = 0; arg < NARGS; arg++) {
+        int64_t element_size = (element_sizes == nullptr ? 1LL : element_sizes[arg]);
+        strides_[i][arg] = strides[arg][i] / element_size;
+      }
+    }
+  }
+
+  C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
+    offset_type offsets;
+    #pragma unroll
+    for (int arg = 0; arg < NARGS; arg++) {
+      offsets[arg] = 0;
+    }
+
+    #pragma unroll
+    for (int dim = 0; dim < MAX_DIMS; ++dim) {
+      if (dim == dims) {
+        break;
+      }
+      auto divmod = sizes_[dim].divmod(linear_idx);
+      linear_idx = divmod.div;
+
+      #pragma unroll
+      for (int arg = 0; arg < NARGS; arg++) {
+        offsets[arg] += divmod.mod * strides_[dim][arg];
+      }
+
+    }
+    return offsets;
+  }
+
+  int dims;
+  at::cuda::detail::IntDivider<index_t> sizes_[MAX_DIMS];
+  stride_t strides_[MAX_DIMS][std::max<int>(NARGS, 1)];
+};
+
+template <int NARGS, typename index_t = uint32_t>
+struct TrivialOffsetCalculator {
+  // The offset for each argument. Wrapper around fixed-size array.
+  // The offsets are in # of elements, not in bytes.
+  // On CUDA, zero sized array is not allowed, so when we are handling nullary
+  // operators, we need to create a size 1 offset to avoid compiler failure.
+  // This size 1 offset is just a placeholder, and we will not use it.
+  using offset_type = at::detail::Array<index_t, std::max<int>(NARGS, 1)>;
+
+  C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
+    offset_type offsets;
+    #pragma unroll
+    for (int arg = 0; arg < NARGS; arg++) {
+      offsets[arg] = linear_idx;
+    }
+    return offsets;
+  }
+};
+
+// Make an OffsetCalculator with byte offsets
+template <int N>
+static OffsetCalculator<N> make_offset_calculator(const at::TensorIteratorBase& iter) {
+  TORCH_INTERNAL_ASSERT(N <= iter.ntensors());
+  std::array<const int64_t*, N> strides;
+  for (int i = 0; i < N; i++) {
+    strides[i] = iter.strides(i).data();
+  }
+  return OffsetCalculator<N>(iter.ndim(), iter.shape().data(), strides.data());
+}
+
+// Make an OffsetCalculator with element offsets
+template <int N>
+static OffsetCalculator<N> make_element_offset_calculator(
+    const at::TensorIteratorBase& iter) {
+  TORCH_INTERNAL_ASSERT(N <= iter.ntensors());
+  std::array<const int64_t*, N> strides;
+  std::array<int64_t, N> element_sizes;
+  for (int i = 0; i < N; i++) {
+    strides[i] = iter.strides(i).data();
+    element_sizes[i] = iter.element_size(i);
+  }
+  return OffsetCalculator<N>(
+      iter.ndim(), iter.shape().data(), strides.data(), element_sizes.data());
+}
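How callers typically consume this (a sketch; the copy kernel is illustrative, not part of the header): build the calculator on the host from a TensorIterator, pass it to the kernel by value, and decode each linear index into per-operand byte offsets.

// Sketch: elementwise float copy driven by byte offsets; NARGS = 2 (output, input).
template <typename offset_calc_t>
__global__ void copy_float_kernel(char* out, const char* in, int n, offset_calc_t calc) {
  CUDA_KERNEL_LOOP(i, n) {
    auto offs = calc.get(i);  // offs[0]: output byte offset, offs[1]: input byte offset
    *reinterpret_cast<float*>(out + offs[0]) =
        *reinterpret_cast<const float*>(in + offs[1]);
  }
}

// Host side (sketch, iter has one output followed by one input):
//   auto calc = make_offset_calculator<2>(iter);
//   copy_float_kernel<<<at::cuda::detail::GET_BLOCKS(iter.numel()),
//                       at::cuda::detail::CUDA_NUM_THREADS>>>(
//       static_cast<char*>(iter.data_ptr(0)),
//       static_cast<const char*>(iter.data_ptr(1)),
//       iter.numel(), calc);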
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh b/voice_bridge/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..a9b67b41ac45d73ec2c4477300f695b2d4ff89b4
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh
@@ -0,0 +1,43 @@
+// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
+// Eager mode clients should not include this file directly, instead,
+// they should #include <ATen/cuda/CUDAGeneratorImpl.h>, which has a #pragma once.
+
+// Stores RNG state values. Passed as a kernel argument.
+// See Note [CUDA Graph-safe RNG states].
+//
+// The raw definition lives in its own file so jit codegen can easily copy it.
+namespace at {
+
+struct PhiloxCudaState {
+  PhiloxCudaState() = default;
+  // Called if graph capture is not underway
+  PhiloxCudaState(uint64_t seed,
+                  uint64_t offset) {
+    seed_.val = seed;
+    offset_.val = offset;
+  }
+  // Called if graph capture is underway
+  PhiloxCudaState(int64_t* seed,
+                  int64_t* offset_extragraph,
+                  uint32_t offset_intragraph) {
+    seed_.ptr = seed;
+    offset_.ptr = offset_extragraph;
+    offset_intragraph_ = offset_intragraph;
+    captured_ = true;
+  }
+
+  // Public members, directly accessible by at::cuda::philox::unpack.
+  // If we made them private with getters/setters, the getters/setters
+  // would have to be __device__, and we can't declare __device__ in ATen.
+  union Payload {
+    uint64_t val;
+    int64_t* ptr;
+  };
+
+  Payload seed_;
+  Payload offset_;
+  uint32_t offset_intragraph_ = 0;
+  bool captured_ = false;
+};
+
+} // namespace at
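The two constructors correspond to the two execution modes (a sketch with made-up values; `d_seed` and `d_offset` stand for device buffers owned by the CUDA generator, and the wrapper function is illustrative): eager mode bakes the values into the kernel argument, while capture mode passes pointers so the values are read at graph replay time, plus a per-kernel intra-graph offset.

#include <cstdint>

void make_philox_args(int64_t* d_seed, int64_t* d_offset) {
  // Eager mode: seed/offset are concrete host-side values.
  at::PhiloxCudaState eager_arg(/*seed=*/42, /*offset=*/0);

  // Capture mode: values live behind device pointers; this kernel starts 64
  // numbers into the sequence within the captured graph.
  at::PhiloxCudaState captured_arg(d_seed, d_offset, /*offset_intragraph=*/64);

  (void)eager_arg;
  (void)captured_arg;
  // Device code recovers the concrete (seed, offset) pair in either mode via
  // at::cuda::philox::unpack(arg); see UnpackRaw.cuh further down.
}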
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/TensorInfo.cuh b/voice_bridge/torch/include/ATen/cuda/detail/TensorInfo.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..cfa17a056c6da4373357a9840cfb9109e881bc20
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/TensorInfo.cuh
@@ -0,0 +1,120 @@
+#pragma once
+
+#include <ATen/CollapseDims.h>
+
+namespace at {
+namespace cuda {
+namespace detail {
+
+#define MAX_TENSORINFO_DIMS 25
+
+// CUDA kernel argument that defines tensor layout
+template <typename T, typename IndexType>
+struct TensorInfo {
+  TensorInfo();
+  TensorInfo(T* p,
+             int dim,
+             IndexType sz[MAX_TENSORINFO_DIMS],
+             IndexType st[MAX_TENSORINFO_DIMS]);
+
+  // Set the size of the given dimension to 1, as if it were a
+  // reduction dim (allows you to calculate offsets of the reduction
+  // slice)
+  void reduceDim(int dim);
+
+  // See note on [collapse dims].
+  int collapseDims(const int excludeDim = -1);
+
+  // Contiguous tensors of more than one dimension are collapsed down
+  // to one dimension
+  __host__ __device__ inline bool isContiguous() const {
+    return (dims == 1 && strides[0] == 1);
+  }
+
+  T* data;
+  IndexType sizes[MAX_TENSORINFO_DIMS];
+  IndexType strides[MAX_TENSORINFO_DIMS];
+  int dims;
+};
+
+template <typename T, typename IndexType>
+TensorInfo<T, IndexType>::TensorInfo() {
+  data = nullptr;
+  dims = 0;
+}
+
+template <typename T, typename IndexType>
+TensorInfo<T, IndexType>::TensorInfo(T* p,
+                                     int dim,
+                                     IndexType sz[MAX_TENSORINFO_DIMS],
+                                     IndexType st[MAX_TENSORINFO_DIMS]) {
+  data = p;
+  dims = dim;
+  AT_ASSERT(dims < MAX_TENSORINFO_DIMS);
+
+  for (int i = 0; i < dim; ++i) {
+    sizes[i] = sz[i];
+    strides[i] = st[i];
+  }
+}
+
+template <typename T, typename IndexType>
+void
+TensorInfo<T, IndexType>::reduceDim(int dim) {
+  TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1");
+  sizes[dim] = 1;
+}
+
+template <typename T, typename IndexType>
+int
+TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
+  auto result = at::collapse_dims(sizes, strides, dims, excludeDim);
+  dims = std::get<1>(result);
+  return std::get<0>(result);
+}
+
+// Translate a linear index for the apply to a T* offset;
+// specialized on `Dims` to reduce nvcc compilation time
+template <typename T, typename IndexType, int Dims>
+struct IndexToOffset {
+  static __host__ __device__ IndexType get(
+    IndexType linearId,
+    const TensorInfo<T, IndexType>& info) {
+
+    IndexType offset = 0;
+
+    // Uses static dims
+    for (int i = Dims - 1; i > 0; --i) {
+      IndexType curDimIndex = linearId % info.sizes[i];
+      IndexType curDimOffset = curDimIndex * info.strides[i];
+      offset += curDimOffset;
+      linearId /= info.sizes[i];
+    }
+
+    return offset + linearId * info.strides[0];
+  }
+};
+
+// Uses dynamic (run-time) instead of static (compile-time) dims
+template <typename T, typename IndexType>
+struct IndexToOffset<T, IndexType, -1> {
+  static inline __host__ __device__ IndexType get(
+    IndexType linearId,
+    const TensorInfo<T, IndexType>& info) {
+
+    IndexType offset = 0;
+
+    for (int i = info.dims - 1; i > 0; --i) {
+      IndexType curDimIndex = linearId % info.sizes[i];
+      IndexType curDimOffset = curDimIndex * info.strides[i];
+      offset += curDimOffset;
+      linearId /= info.sizes[i];
+    }
+
+    return offset + linearId * info.strides[0];
+  }
+};
+
+} // detail
+} // cuda
+} // at
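A hand-worked, host-side check of the offset translation above (a standalone sketch assuming this header is included; the shapes and numbers are chosen purely for illustration):

#include <cassert>
#include <cstdint>

int main() {
  using at::cuda::detail::TensorInfo;
  using at::cuda::detail::IndexToOffset;

  // A 2x3 view that is the transpose of a contiguous 3x2 tensor:
  // sizes {2, 3}, strides {1, 2}.
  uint32_t sz[MAX_TENSORINFO_DIMS] = {2, 3};
  uint32_t st[MAX_TENSORINFO_DIMS] = {1, 2};
  float storage[6];
  TensorInfo<float, uint32_t> info(storage, 2, sz, st);

  // Linear index 4 is (row 1, col 1) of the view: 4 % 3 = 1 (col), 4 / 3 = 1
  // (row), so the storage offset is 1*strides[1] + 1*strides[0] = 2 + 1 = 3.
  const uint32_t off = IndexToOffset<float, uint32_t, -1>::get(4, info);
  assert(off == 3);
  return 0;
}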
diff --git a/voice_bridge/torch/include/ATen/cuda/detail/UnpackRaw.cuh b/voice_bridge/torch/include/ATen/cuda/detail/UnpackRaw.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..f8fa4ebbf160ae22429a729b2860756e374314b7
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/detail/UnpackRaw.cuh
@@ -0,0 +1,32 @@
+// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
+// Eager mode clients should not include this file directly, instead,
+// they should #include <ATen/cuda/CUDAGeneratorImpl.h>, which has a #pragma once.
+
+namespace at {
+namespace cuda {
+namespace philox {
+
+// In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether
+// that instance was created with graph capture underway or not.
+// See Note [CUDA Graph-safe RNG states].
+//
+// We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen.
+// Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable.
+// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.
+//
+// The raw definition lives in its own file so jit codegen can easily copy it.
+__device__ __forceinline__ std::tuple<uint64_t, uint64_t>
+unpack(at::PhiloxCudaState arg) {
+  if (arg.captured_) {
+    // static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long".
+    // *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.
+    // For most threads' reads it will hit in cache, so it shouldn't hurt performance.
+    return std::make_tuple(static_cast<uint64_t>(*arg.seed_.ptr), static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_));
+  } else {
+    return std::make_tuple(arg.seed_.val, arg.offset_.val);
+  }
+}
+
+} // namespace philox
+} // namespace cuda
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/cuda/jiterator.h b/voice_bridge/torch/include/ATen/cuda/jiterator.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac2c4d7cecf3f717a8a2de2a200d024e82a20e9a
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/jiterator.h
@@ -0,0 +1,40 @@
+#pragma once
+#include
+
+#if AT_USE_JITERATOR()
+
+#include
+#include
+#include
+
+#include
+#include
+
+namespace at {
+namespace cuda {
+
+TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
+    const std::string& code_string,
+    const std::string& kernel_name,
+    const int num_outputs,
+    const c10::SmallVector<at::Tensor>& tensors,
+    const c10::SmallVector<at::Scalar>& extra_args,
+    bool return_by_ref);
+
+}} // namespace at::cuda
+
+#else
+
+namespace at { namespace cuda {
+TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel(
+    const std::string& code_string,
+    const std::string& kernel_name,
+    const int num_outputs,
+    const c10::SmallVector<at::Tensor>& tensors,
+    const c10::SmallVector<at::Scalar>& extra_args,
+    bool return_by_ref) {
+  TORCH_CHECK(false, "Jiterator is not supported");
+  }
+}} // namespace at::cuda
+
+#endif // AT_USE_JITERATOR()
diff --git a/voice_bridge/torch/include/ATen/cuda/jiterator_impl.h b/voice_bridge/torch/include/ATen/cuda/jiterator_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..5ba251055ad2a8d423db99b0eae4c88a7d05f7dc
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/cuda/jiterator_impl.h
@@ -0,0 +1,250 @@
+#pragma once
+#include
+
+#if AT_USE_JITERATOR()
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace at {
+namespace native {
+
+
+#define AT_FOR_8_CASES(_)  \
+  _(1)                     \
+  _(2)                     \
+  _(3)                     \
+  _(4)                     \
+  _(5)                     \
+  _(6)                     \
+  _(7)                     \
+  _(8)
+
+#define AT_FOR_8_CASES_WITH_COMMA(_)  \
+  _(1) ,                              \
+  _(2) ,                              \
+  _(3) ,                              \
+  _(4) ,                              \
+  _(5) ,                              \
+  _(6) ,                              \
+  _(7) ,                              \
+  _(8)
+
+c10::SmallVector<std::string> get_extra_args_typenames(const c10::SmallVector<at::Scalar>& extra_args) {
+  c10::SmallVector<std::string> args_typenames(extra_args.size());
+  for (auto i = 0; i < extra_args.size(); ++i) {
+    args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type());
+  }
+  return args_typenames;
+}
+
+int can_vectorize_up_to(at::ScalarType type, char* pointer) {
+  switch(type) {
+#define DEFINE_CASE(ctype, scalartype) \
+    case ScalarType::scalartype : return memory::can_vectorize_up_to<ctype>(pointer);
+
+    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
+#undef DEFINE_CASE
+
+    default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
+  }
+}
+
+// jitted version of the above
+// See Note [Jiterator], this relies on the assumptions enumerated there
+int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) {
+  const at::ScalarType common_dtype = iter.common_dtype();
+  const at::ScalarType result_dtype = common_dtype;
+
+  // Deals with output
+  int result = can_vectorize_up_to(result_dtype, static_cast<char*>(iter.data_ptr(0)));
+
+  // Incorporates input(s)
+  for (auto i = 1; i < iter.ntensors(); ++i) {
+    result = std::min(result, can_vectorize_up_to(common_dtype, static_cast<char*>(iter.data_ptr(i))));
+  }
+
+  return result;
+}
+
+template
+static
std::unique_ptr> make_unique_offset_calculator( + const TensorIteratorBase& iter) { + // array size can not be 0, this happens when N == 0 + constexpr int array_size = std::max(N, 1); + TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs())); + + std::array strides; + int64_t element_sizes[array_size]; + for (int i = 0; i < N; i++) { + int index = IS_INPUT ? i + iter.noutputs() : i; + strides[i] = iter.strides(index).data(); + element_sizes[i] = iter.element_size(index); + } + return std::make_unique>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes); +} + +template +struct OffsetCalculatorVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using OffsetCalculatorTypes = c10::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + OffsetCalculatorVariant(const TensorIteratorBase& iter) { + int num = IS_INPUT ? iter.ninputs() : iter.noutputs(); + + switch(num) { +#define DEFINE_CASE(index) \ + case index : v = make_unique_offset_calculator(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + default: + TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num); + } + } + + void* data_ptr() { + return c10::visit([](auto & v){ return static_cast(v.get()); }, v); + } + + private: + OffsetCalculatorTypes v; +}; + +struct ArrayVariant { +// works for up to 8 input + 8 outputs +#define DEFINE_CASE(index) at::detail::Array, at::detail::Array + using ArrayTypes = c10::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + ArrayVariant(const TensorIteratorBase& iter) { + int ntensors = iter.ntensors(); + switch(ntensors) { +#define DEFINE_CASE(index) \ + case index: array = at::detail::Array{}; break; \ + case index+8: array = at::detail::Array{}; break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors); + } + + c10::visit([&](auto& a) { + for (auto i = 0; i < ntensors; ++i) { + a[i] = (char*)iter.data_ptr(i); + } + }, array); + } + + void* data_ptr() { + return c10::visit([](auto & a){ return static_cast(&a); }, array); + } + +private: + ArrayTypes array; +}; + +struct TrivialOffsetCalculatorVariant { +#define DEFINE_CASE(index) TrivialOffsetCalculator + using TrivialOffsetCalculatorTypes = c10::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + TrivialOffsetCalculatorVariant(int num) { + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = TrivialOffsetCalculator(); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num); + } + } + + void* data_ptr() { + return c10::visit([](auto & v){ return static_cast(&v); }, v); + } + +private: + TrivialOffsetCalculatorTypes v; +}; + +struct LoadWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using LoadWithCastPtr = c10::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + LoadWithCastVariant(const TensorIteratorBase& iter) { + int arity = iter.ninputs(); + switch(arity) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity); + } + } + + void* data_ptr() { + return c10::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + LoadWithCastPtr v; +}; + +struct 
StoreWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using StoreWithCastPtr = c10::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + StoreWithCastVariant(const TensorIteratorBase& iter) { + int num = iter.noutputs(); + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num); + } + } + + void* data_ptr() { + return c10::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + StoreWithCastPtr v; +}; + +}} // namespace at::native + + +#endif // AT_USE_JITERATOR() diff --git a/voice_bridge/torch/include/ATen/cuda/llvm_jit_strings.h b/voice_bridge/torch/include/ATen/cuda/llvm_jit_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..237bcdbb4ccb8ff5cb4e4acfbddc0fbda606064b --- /dev/null +++ b/voice_bridge/torch/include/ATen/cuda/llvm_jit_strings.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace at { +namespace cuda { + +TORCH_CUDA_CPP_API const std::string &get_traits_string(); +TORCH_CUDA_CPP_API const std::string &get_cmath_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_half_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_math_string(); + +}} // namespace at diff --git a/voice_bridge/torch/include/ATen/cudnn/Descriptors.h b/voice_bridge/torch/include/ATen/cudnn/Descriptors.h new file mode 100644 index 0000000000000000000000000000000000000000..a7bcb5eb72eac9eab15ac5b8cfd68dc7618aba4d --- /dev/null +++ b/voice_bridge/torch/include/ATen/cudnn/Descriptors.h @@ -0,0 +1,342 @@ +#pragma once + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +std::string cudnnTypeToString(cudnnDataType_t dtype); + +// TODO: Add constructors for all of the descriptors + +inline int dataSize(cudnnDataType_t dataType) +{ + switch (dataType) { +#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8200 + case CUDNN_DATA_BFLOAT16: +#endif + case CUDNN_DATA_HALF: return 2; + case CUDNN_DATA_FLOAT: return 4; + default: return 8; + } +} + +// The stride for a size-1 dimensions is not uniquely determined; in +// fact, it can be anything you want, because the fact that the +// tensor is size 1 at this dimension means that you will never actually +// try advancing your pointer by this stride. +// +// However, CuDNN has a much more stringent requirement on strides: +// if you are passing a contiguous input, it better be the case +// that the stride for dim i is the product of the sizes of dims +// i+1 to the end. This stride is indeed uniquely determined. This +// function modifies 'stride' in place so this invariant holds. +static inline void fixSizeOneDimStride(int dim, const int *size, int *stride, bool nhwc) { + int64_t z = 1; + int index = 0; + std::vector permutation(dim); + + if (nhwc) { + permutation[index++] = 1; + } + for (int d = dim-1; d > 1; d--) { + permutation[index++] = d; + } + if (!nhwc) { + permutation[index++] = 1; + } + permutation[index++] = 0; + for (int d : permutation) { + if (size[d] == 1) { + stride[d] = z; + } else { + z *= size[d]; + } + } +} + +template +struct DescriptorDeleter { + void operator()(T* x) { + if (x != nullptr) { + AT_CUDNN_CHECK(dtor(x)); + } + } +}; + +// A generic class for wrapping cuDNN descriptor types. 
All you need
+// is to give the underlying type the Descriptor_t points to (usually,
+// if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct),
+// the constructor and the destructor. Subclasses are responsible
+// for defining a set() function to actually set the descriptor.
+//
+// Descriptors default construct to a nullptr, and have a descriptor
+// initialized the first time you call set() or any other initializing
+// function.
+template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)>
+class TORCH_CUDA_CPP_API Descriptor {
+ public:
+  // TODO: Figure out why const-correctness doesn't work here
+
+  // Use desc() to access the underlying descriptor pointer in
+  // a read-only fashion. Most client code should use this.
+  // If the descriptor was never initialized, this will return
+  // nullptr.
+  T* desc() const { return desc_.get(); }
+  T* desc() { return desc_.get(); }
+
+  // Use mut_desc() to access the underlying descriptor pointer
+  // if you intend to modify what it points to (e.g., using
+  // cudnnSetFooDescriptor). This will ensure that the descriptor
+  // is initialized. Code in this file will use this function.
+  T* mut_desc() { init(); return desc_.get(); }
+protected:
+  void init() {
+    if (desc_ == nullptr) {
+      T* raw_desc;
+      AT_CUDNN_CHECK(ctor(&raw_desc));
+      desc_.reset(raw_desc);
+    }
+  }
+private:
+  std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
+};
+
+class TORCH_CUDA_CPP_API TensorDescriptor : public Descriptor<
+                                                cudnnTensorStruct,
+                                                &cudnnCreateTensorDescriptor,
+                                                &cudnnDestroyTensorDescriptor> {
+ public:
+  TensorDescriptor() {}
+  explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
+    set(t, pad);
+  }
+
+  // Note [CuDNN broadcast padding]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // pad specifies the minimum dimensionality of the tensor descriptor
+  // we produce (it doesn't have anything to do with, e.g., convolution
+  // padding). If 't' is lower-dimensional than 'pad', the remaining
+  // dimensions (on the right) are padded with ones. This doesn't
+  // affect the underlying data layout. This is particularly useful for
+  // dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is
+  // done in two steps: first, the client code is expected to pad out
+  // (the dimensions) input tensors to be the same dimension as the
+  // target broadcast, and then second, CuDNN takes care of actually
+  // broadcasting size 1 dimensions.
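+  //
+  // For example (illustrative only; any float tensor would do, and the pad of
+  // 4 matches what cuDNN convolutions expect):
+  //
+  //   TensorDescriptor desc;
+  //   desc.set(t, /*pad=*/4);                    // t: a 3-d tensor, padded to 4-d
+  //   cudnnTensorDescriptor_t raw = desc.desc(); // non-owning, read-only access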
+ + void set(const at::Tensor &t, size_t pad = 0); + void set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad = 0); + void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0); + + void print(); + +private: + void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad, bool nhwc); + + void set(cudnnDataType_t dataType, int dim, int* size, int* stride, bool nhwc) { + fixSizeOneDimStride(dim, size, stride, nhwc); + AT_CUDNN_CHECK(cudnnSetTensorNdDescriptor(mut_desc(), dataType, dim, size, stride)); + } +}; + +std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d); + +class TORCH_CUDA_CPP_API FilterDescriptor : public Descriptor< + cudnnFilterStruct, + &cudnnCreateFilterDescriptor, + &cudnnDestroyFilterDescriptor> { + public: + void set(const at::Tensor &t, int64_t pad = 0) { + set(t, at::MemoryFormat::Contiguous, pad); + } + + void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0); + + void print(); +private: + void set(cudnnDataType_t dataType, int dim, int* size, cudnnTensorFormat_t filter_format) { + AT_CUDNN_CHECK(cudnnSetFilterNdDescriptor(mut_desc(), dataType, filter_format, dim, size)); + } +}; + +std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d); + +struct TORCH_CUDA_CPP_API ConvolutionDescriptor + : public Descriptor< + cudnnConvolutionStruct, + &cudnnCreateConvolutionDescriptor, + &cudnnDestroyConvolutionDescriptor> { + void set(cudnnDataType_t dataType, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool allow_tf32) { + cudnnDataType_t mathType = dataType; + if (dataType == CUDNN_DATA_HALF) mathType = CUDNN_DATA_FLOAT; + AT_CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale, + CUDNN_CROSS_CORRELATION, mathType)); + AT_CUDNN_CHECK(cudnnSetConvolutionGroupCount(mut_desc(), groups)); + // See Note [behavior of cudnnFind and cudnnGet] + AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_DEFAULT_MATH)); + if(dataType == CUDNN_DATA_HALF) { + AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_TENSOR_OP_MATH)); + } else if (dataType == CUDNN_DATA_FLOAT && !allow_tf32) { +#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8000 + AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_FMA_MATH)); +#endif + } + } +}; + +struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor + : public Descriptor< + cudnnSpatialTransformerStruct, + &cudnnCreateSpatialTransformerDescriptor, + &cudnnDestroySpatialTransformerDescriptor> { + void set(cudnnDataType_t dataType, int dim, int* size) { + AT_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(mut_desc(), CUDNN_SAMPLER_BILINEAR, dataType, dim, size)); + } +}; + +struct TORCH_CUDA_CPP_API DropoutDescriptor + : public Descriptor< + cudnnDropoutStruct, + &cudnnCreateDropoutDescriptor, + &cudnnDestroyDropoutDescriptor> { + at::Tensor state; + + // Initialize a dropout descriptor's RNG state. + // WARNING: This function is very expensive, avoid calling this function! 
+  void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& options) {
+    TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
+    size_t state_size;
+    AT_CUDNN_CHECK(cudnnDropoutGetStatesSize(handle, &state_size));
+    AT_ASSERT(options.device().type() == kCUDA);
+    AT_ASSERT(options.dtype() == kByte);
+    state = at::empty({static_cast<int64_t>(state_size)}, options);
+    AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size, seed));
+  }
+
+  // Restore a dropout descriptor given a dropout probability and existing RNG state.
+  void set(cudnnHandle_t handle, float dropout, at::Tensor state_) {
+    TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout");
+    state = state_;
+    void *state_ptr = state.data_ptr();
+    size_t state_size = state.size(0);
+    // NB: The seed doesn't actually matter, so we give a dummy value
+    AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0 /* seed */));
+  }
+
+  // Restore a dropout descriptor corresponding to no dropout
+  void set_no_dropout(cudnnHandle_t handle) {
+    // NB: seed doesn't matter when dropout = 0, because no random number
+    // initialization actually takes place when there is no dropout.
+    // NB: Empirically, cudnnSetDropoutDescriptor is cheap when
+    // dropout == 0
+    AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */));
+  }
+};
+
+struct TORCH_CUDA_CPP_API RNNDescriptor : public Descriptor<
+                                              cudnnRNNStruct,
+                                              &cudnnCreateRNNDescriptor,
+                                              &cudnnDestroyRNNDescriptor> {
+  DropoutDescriptor dropout_desc_;
+  void set(cudnnHandle_t handle, int hidden_size, int proj_size, int num_layers, DropoutDescriptor&& dropout_desc,
+           cudnnRNNInputMode_t input_mode, cudnnDirectionMode_t bidirectional,
+           cudnnRNNMode_t mode, cudnnDataType_t datatype, cudnnDataType_t input_type, cudnnRNNAlgo_t algo, bool allow_tf32) {
+    dropout_desc_ = std::move(dropout_desc);
+
+    AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v6(
+          handle,
+          mut_desc(),
+          hidden_size,
+          num_layers,
+          dropout_desc_.desc(),
+          input_mode,
+          bidirectional,
+          mode,
+          algo,
+          datatype));
+    if (proj_size != 0) {
+      AT_CUDNN_CHECK(cudnnSetRNNProjectionLayers(
+            handle,
+            /*rnnDesc=*/mut_desc(),
+            /*recProjSize=*/proj_size,
+            /*outProjSize=*/0));
+    }
+    cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
+    if (prop->major >= 7) {
+      if (input_type == CUDNN_DATA_HALF) {
+        cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_TENSOR_OP_MATH);
+      }
+#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8000
+      else if (input_type == CUDNN_DATA_FLOAT && !allow_tf32) {
+        cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_FMA_MATH);
+      }
+#endif
+      else {
+        // Technically, as the default it's not necessary to explicitly
+        // set this.
+ cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_DEFAULT_MATH); + } + } + } +}; + +struct TORCH_CUDA_CPP_API CTCLossDescriptor + : public Descriptor< + cudnnCTCLossStruct, + &cudnnCreateCTCLossDescriptor, + &cudnnDestroyCTCLossDescriptor> { + void set(cudnnDataType_t datatype) { + AT_CUDNN_CHECK(cudnnSetCTCLossDescriptor(mut_desc(), datatype)); + } +#if CUDNN_VERSION >= 7600 + void setEx( + cudnnDataType_t datatype, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode) { + AT_CUDNN_CHECK( + cudnnSetCTCLossDescriptorEx(mut_desc(), datatype, normMode, gradMode)); + } +#endif +}; + +struct TORCH_CUDA_CPP_API ActivationDescriptor + : public Descriptor< + cudnnActivationStruct, + &cudnnCreateActivationDescriptor, + &cudnnDestroyActivationDescriptor> { + void set(cudnnActivationMode_t mode) { + AT_ASSERT( + mode == CUDNN_ACTIVATION_RELU, + "TODO: support more cuDNN activation modes"); + AT_CUDNN_CHECK(cudnnSetActivationDescriptor( + mut_desc(), + mode, + cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN, + std::numeric_limits::max())); + } +}; + +union Constant +{ + float f; + double d; + Constant(cudnnDataType_t dataType, double value) { + if (dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT) { + f = static_cast(value); + } else { + d = value; + } + } +}; + +}} // namespace diff --git a/voice_bridge/torch/include/ATen/cudnn/Exceptions.h b/voice_bridge/torch/include/ATen/cudnn/Exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/voice_bridge/torch/include/ATen/cudnn/Handle.h b/voice_bridge/torch/include/ATen/cudnn/Handle.h new file mode 100644 index 0000000000000000000000000000000000000000..f74638455817a894c774d9064cb981fc1c707dd5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cudnn/Handle.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include + +namespace at { namespace native { + +TORCH_CUDA_CPP_API cudnnHandle_t getCudnnHandle(); +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/cudnn/Handles.h b/voice_bridge/torch/include/ATen/cudnn/Handles.h new file mode 100644 index 0000000000000000000000000000000000000000..5b9a081f0c11b57b093891e0dd2adbd969a79f96 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cudnn/Handles.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/voice_bridge/torch/include/ATen/cudnn/Types.h b/voice_bridge/torch/include/ATen/cudnn/Types.h new file mode 100644 index 0000000000000000000000000000000000000000..fb3d97f4c20527a88bfc96548b00ce154973cc84 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cudnn/Types.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace at { namespace native { + +TORCH_CUDA_CPP_API cudnnDataType_t +getCudnnDataTypeFromScalarType(const at::ScalarType dtype); +cudnnDataType_t getCudnnDataType(const at::Tensor& tensor); + +int64_t cudnn_version(); + +}} // namespace at::cudnn diff --git a/voice_bridge/torch/include/ATen/cudnn/Utils.h b/voice_bridge/torch/include/ATen/cudnn/Utils.h new file mode 100644 index 0000000000000000000000000000000000000000..9552953e88ee0511d540e4dc70437d5a822a6ef7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/cudnn/Utils.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { namespace native { + +// cuDNN has a buggy check for tensor being contiguous (that is, it does +// not ignore stride for dimension that is equal to 0). 
This function +// makes tensors which have zero stride contiguous, by setting the +// strides to 1 as cuDNN likes. +inline Tensor contiguousIfZeroInStrides(const Tensor& t) { + for (auto s : t.strides()) { + if (s == 0) return t.contiguous(); + } + return t; +} + +}} diff --git a/voice_bridge/torch/include/ATen/cudnn/cudnn-wrapper.h b/voice_bridge/torch/include/ATen/cudnn/cudnn-wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..0358b15c117f7d5f08d3fbbe55aa45c7e02801cc --- /dev/null +++ b/voice_bridge/torch/include/ATen/cudnn/cudnn-wrapper.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +#define STRINGIFY(x) #x +#define STRING(x) STRINGIFY(x) + +#if CUDNN_MAJOR < 6 +#pragma message ("CuDNN v" STRING(CUDNN_MAJOR) " found, but need at least CuDNN v6. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0") +#pragma message "We strongly encourage you to move to 6.0 and above." +#pragma message "This message is intended to annoy you enough to update." +#endif + +#undef STRINGIFY +#undef STRING diff --git a/voice_bridge/torch/include/ATen/detail/CUDAHooksInterface.h b/voice_bridge/torch/include/ATen/detail/CUDAHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..7ba8f68d94b208122f10fc713b95bbc5cbd06be0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/detail/CUDAHooksInterface.h @@ -0,0 +1,207 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +// Forward-declares at::cuda::NVRTC +namespace at { namespace cuda { +struct NVRTC; +}} // at::cuda + +namespace at { +class Context; +} + +// NB: Class must live in `at` due to limitations of Registry.h. +namespace at { + +#ifdef _MSC_VER +constexpr const char* CUDA_HELP = + "PyTorch splits its backend into two shared libraries: a CPU library " + "and a CUDA library; this error has occurred because you are trying " + "to use some CUDA functionality, but the CUDA library has not been " + "loaded by the dynamic linker for some reason. The CUDA library MUST " + "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! " + "One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ " + "in your link arguments; many dynamic linkers will delete dynamic library " + "dependencies if you don't depend on any of their symbols. You can check " + "if this has occurred by using link on your binary to see if there is a " + "dependency on *_cuda.dll library."; +#else +constexpr const char* CUDA_HELP = + "PyTorch splits its backend into two shared libraries: a CPU library " + "and a CUDA library; this error has occurred because you are trying " + "to use some CUDA functionality, but the CUDA library has not been " + "loaded by the dynamic linker for some reason. The CUDA library MUST " + "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! " + "One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many " + "dynamic linkers will delete dynamic library dependencies if you don't " + "depend on any of their symbols. You can check if this has occurred by " + "using ldd on your binary to see if there is a dependency on *_cuda.so " + "library."; +#endif + +// The CUDAHooksInterface is an omnibus interface for any CUDA functionality +// which we may want to call into from CPU code (and thus must be dynamically +// dispatched, to allow for separate compilation of CUDA code). How do I +// decide if a function should live in this class? 
There are two tests: +// +// 1. Does the *implementation* of this function require linking against +// CUDA libraries? +// +// 2. Is this function *called* from non-CUDA ATen code? +// +// (2) should filter out many ostensible use-cases, since many times a CUDA +// function provided by ATen is only really ever used by actual CUDA code. +// +// TODO: Consider putting the stub definitions in another class, so that one +// never forgets to implement each virtual function in the real implementation +// in CUDAHooks. This probably doesn't buy us much though. +struct TORCH_API CUDAHooksInterface { + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + virtual ~CUDAHooksInterface() {} + + // Initialize THCState and, transitively, the CUDA state + virtual void initCUDA() const { + TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP); + } + + virtual const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const { + (void)device_index; // Suppress unused variable warning + TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP); + } + + virtual Device getDeviceFromPtr(void* /*data*/) const { + TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP); + } + + virtual bool isPinnedPtr(void* /*data*/) const { + return false; + } + + virtual bool hasCUDA() const { + return false; + } + + virtual bool hasCUDART() const { + return false; + } + + virtual bool hasMAGMA() const { + return false; + } + + virtual bool hasCuDNN() const { + return false; + } + + virtual bool hasCuSOLVER() const { + return false; + } + + virtual bool hasROCM() const { + return false; + } + + virtual const at::cuda::NVRTC& nvrtc() const { + TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP); + } + + virtual bool hasPrimaryContext(int64_t device_index) const { + TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP); + } + + virtual int64_t current_device() const { + return -1; + } + + virtual Allocator* getPinnedMemoryAllocator() const { + TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP); + } + + virtual Allocator* getCUDADeviceAllocator() const { + TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP); + } + + virtual bool compiledWithCuDNN() const { + return false; + } + + virtual bool compiledWithMIOpen() const { + return false; + } + + virtual bool supportsDilatedConvolutionWithCuDNN() const { + return false; + } + + virtual bool supportsDepthwiseConvolutionWithCuDNN() const { + return false; + } + + virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const { + return false; + } + + virtual long versionCuDNN() const { + TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP); + } + + virtual long versionCUDART() const { + TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP); + } + + virtual std::string showConfig() const { + TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP); + } + + virtual double batchnormMinEpsilonCuDNN() const { + TORCH_CHECK(false, + "Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP); + } + + virtual int64_t cuFFTGetPlanCacheMaxSize(int64_t /*device_index*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. 
", CUDA_HELP); + } + + virtual void cuFFTSetPlanCacheMaxSize(int64_t /*device_index*/, int64_t /*max_size*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); + } + + virtual int64_t cuFFTGetPlanCacheSize(int64_t /*device_index*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); + } + + virtual void cuFFTClearPlanCache(int64_t /*device_index*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); + } + + virtual int getNumGPUs() const { + return 0; + } + + virtual void deviceSynchronize(int64_t /*device_index*/) const { + TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP); + } +}; + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +struct TORCH_API CUDAHooksArgs {}; + +C10_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs); +#define REGISTER_CUDA_HOOKS(clsname) \ + C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const CUDAHooksInterface& getCUDAHooks(); +} // namespace detail +} // namespace at diff --git a/voice_bridge/torch/include/ATen/detail/FunctionTraits.h b/voice_bridge/torch/include/ATen/detail/FunctionTraits.h new file mode 100644 index 0000000000000000000000000000000000000000..aab7300b585feaf5de07c2b56accd625d168be7b --- /dev/null +++ b/voice_bridge/torch/include/ATen/detail/FunctionTraits.h @@ -0,0 +1,78 @@ +#pragma once + +#include + +// Modified from https://stackoverflow.com/questions/7943525/is-it-possible-to-figure-out-the-parameter-type-and-return-type-of-a-lambda + +// Fallback, anything with an operator() +template +struct function_traits : public function_traits { +}; + +// Pointers to class members that are themselves functors. +// For example, in the following code: +// template +// struct S { +// func_t f; +// }; +// template +// S make_s(func_t f) { +// return S { .f = f }; +// } +// +// auto s = make_s([] (int, float) -> double { /* ... */ }); +// +// function_traits traits; +template +struct function_traits : public function_traits { +}; + +// Const class member functions +template +struct function_traits : public function_traits { +}; + +// Reference types +template +struct function_traits : public function_traits {}; +template +struct function_traits : public function_traits {}; + +// Free functions +template +struct function_traits { + // arity is the number of arguments. + enum { arity = sizeof...(Args) }; + + typedef std::tuple ArgsTuple; + typedef ReturnType result_type; + + template + struct arg + { + typedef typename std::tuple_element>::type type; + // the i-th argument is equivalent to the i-th tuple element of a tuple + // composed of those arguments. 
+ }; +}; + +template +struct nullary_function_traits { + using traits = function_traits; + using result_type = typename traits::result_type; +}; + +template +struct unary_function_traits { + using traits = function_traits; + using result_type = typename traits::result_type; + using arg1_t = typename traits::template arg<0>::type; +}; + +template +struct binary_function_traits { + using traits = function_traits; + using result_type = typename traits::result_type; + using arg1_t = typename traits::template arg<0>::type; + using arg2_t = typename traits::template arg<1>::type; +}; diff --git a/voice_bridge/torch/include/ATen/detail/HIPHooksInterface.h b/voice_bridge/torch/include/ATen/detail/HIPHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..ed902e6172ee709c785ba6787e60aa519b9596b2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/detail/HIPHooksInterface.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include + +#include + +#include +#include +#include + +namespace at { +class Context; +} + +// NB: Class must live in `at` due to limitations of Registry.h. +namespace at { + +// The HIPHooksInterface is an omnibus interface for any HIP functionality +// which we may want to call into from CPU code (and thus must be dynamically +// dispatched, to allow for separate compilation of HIP code). See +// CUDAHooksInterface for more detailed motivation. +struct TORCH_API HIPHooksInterface { + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + virtual ~HIPHooksInterface() {} + + // Initialize the HIP library state + virtual void initHIP() const { + AT_ERROR("Cannot initialize HIP without ATen_hip library."); + } + + virtual std::unique_ptr initHIPGenerator(Context*) const { + AT_ERROR("Cannot initialize HIP generator without ATen_hip library."); + } + + virtual bool hasHIP() const { + return false; + } + + virtual int64_t current_device() const { + return -1; + } + + virtual Allocator* getPinnedMemoryAllocator() const { + AT_ERROR("Pinned memory requires HIP."); + } + + virtual void registerHIPTypes(Context*) const { + AT_ERROR("Cannot registerHIPTypes() without ATen_hip library."); + } + + virtual int getNumGPUs() const { + return 0; + } +}; + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +struct TORCH_API HIPHooksArgs {}; + +C10_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs); +#define REGISTER_HIP_HOOKS(clsname) \ + C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const HIPHooksInterface& getHIPHooks(); + +} // namespace detail +} // namespace at diff --git a/voice_bridge/torch/include/ATen/detail/ORTHooksInterface.h b/voice_bridge/torch/include/ATen/detail/ORTHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..caee55cdfaf9935c01a756609ffbdd8ade7f040d --- /dev/null +++ b/voice_bridge/torch/include/ATen/detail/ORTHooksInterface.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +constexpr const char* ORT_HELP = + " You need to 'import torch_ort' to use the 'ort' device in PyTorch. " + "The 'torch_ort' module is provided by the ONNX Runtime itself " + "(https://onnxruntime.ai)."; + +// NB: Class must live in `at` due to limitations of Registry.h. 
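+//
+// The registration pattern mirrors CUDAHooksInterface and HIPHooksInterface
+// above: a backend defines a subclass and registers it, roughly as follows
+// (the subclass name and body here are illustrative, not part of this header):
+//
+//   struct TorchORTHooks : public at::ORTHooksInterface {
+//     TorchORTHooks(at::ORTHooksArgs) {}
+//     std::string showConfig() const override { return "ORT build info"; }
+//   };
+//   REGISTER_ORT_HOOKS(TorchORTHooks);
+//
+// at::detail::getORTHooks() then returns the registered implementation, or
+// falls back to this error-raising base class when no backend is loaded.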
+namespace at { + +struct TORCH_API ORTHooksInterface { + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + virtual ~ORTHooksInterface() {} + + virtual std::string showConfig() const { + TORCH_CHECK(false, "Cannot query detailed ORT version information.", ORT_HELP); + } +}; + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +struct TORCH_API ORTHooksArgs {}; + +C10_DECLARE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs); +#define REGISTER_ORT_HOOKS(clsname) \ + C10_REGISTER_CLASS(ORTHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const ORTHooksInterface& getORTHooks(); +} // namespace detail + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/div_rtn.h b/voice_bridge/torch/include/ATen/div_rtn.h new file mode 100644 index 0000000000000000000000000000000000000000..4935f49ae2726389441e4012cc15bcf3981f2e84 --- /dev/null +++ b/voice_bridge/torch/include/ATen/div_rtn.h @@ -0,0 +1,11 @@ +#pragma once + +// Integer division rounding to -Infinity +template +static inline T div_rtn(T x, T y) { + int q = x / y; + int r = x % y; + if ((r != 0) && ((r < 0) != (y < 0))) + --q; + return q; +} diff --git a/voice_bridge/torch/include/ATen/dlpack.h b/voice_bridge/torch/include/ATen/dlpack.h new file mode 100644 index 0000000000000000000000000000000000000000..0ad1e119bcb0407a1f01afee51e7a1afa904e58e --- /dev/null +++ b/voice_bridge/torch/include/ATen/dlpack.h @@ -0,0 +1,200 @@ +/*! + * Copyright (c) 2017 by Contributors + * \file dlpack.h + * \brief The common header of DLPack. + */ +#ifndef DLPACK_DLPACK_H_ +#define DLPACK_DLPACK_H_ + +#ifdef __cplusplus +#define DLPACK_EXTERN_C extern "C" +#else +#define DLPACK_EXTERN_C +#endif + +/*! \brief The current version of dlpack */ +#define DLPACK_VERSION 60 + +/*! \brief DLPACK_DLL prefix for windows */ +#ifdef _WIN32 +#ifdef DLPACK_EXPORTS +#define DLPACK_DLL __declspec(dllexport) +#else +#define DLPACK_DLL __declspec(dllimport) +#endif +#else +#define DLPACK_DLL +#endif + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +/*! + * \brief The device type in DLDevice. + */ +typedef enum { + /*! \brief CPU device */ + kDLCPU = 1, + /*! \brief CUDA GPU device */ + kDLCUDA = 2, + /*! + * \brief Pinned CUDA CPU memory by cudaMallocHost + */ + kDLCUDAHost = 3, + /*! \brief OpenCL devices. */ + kDLOpenCL = 4, + /*! \brief Vulkan buffer for next generation graphics. */ + kDLVulkan = 7, + /*! \brief Metal for Apple GPU. */ + kDLMetal = 8, + /*! \brief Verilog simulator buffer */ + kDLVPI = 9, + /*! \brief ROCm GPUs for AMD GPUs */ + kDLROCM = 10, + /*! + * \brief Pinned ROCm CPU memory allocated by hipMallocHost + */ + kDLROCMHost = 11, + /*! + * \brief Reserved extension device type, + * used for quickly test extension device + * The semantics can differ depending on the implementation. + */ + kDLExtDev = 12, + /*! + * \brief CUDA managed/unified memory allocated by cudaMallocManaged + */ + kDLCUDAManaged = 13, +} DLDeviceType; + +/*! + * \brief A Device for Tensor and operator. + */ +typedef struct { + /*! \brief The device type used in the device. */ + DLDeviceType device_type; + /*! + * \brief The device index. + * For vanilla CPU memory, pinned memory, or managed memory, this is set to 0. + */ + int device_id; +} DLDevice; + +/*! + * \brief The type code options DLDataType. + */ +typedef enum { + /*! \brief signed integer */ + kDLInt = 0U, + /*! 
\brief unsigned integer */ + kDLUInt = 1U, + /*! \brief IEEE floating point */ + kDLFloat = 2U, + /*! + * \brief Opaque handle type, reserved for testing purposes. + * Frameworks need to agree on the handle data type for the exchange to be + * well-defined. + */ + kDLOpaqueHandle = 3U, + /*! \brief bfloat16 */ + kDLBfloat = 4U, + /*! + * \brief complex number + * (C/C++/Python layout: compact struct per complex number) + */ + kDLComplex = 5U, +} DLDataTypeCode; + +/*! + * \brief The data type the tensor can hold. + * + * Examples + * - float: type_code = 2, bits = 32, lanes=1 + * - float4(vectorized 4 float): type_code = 2, bits = 32, lanes=4 + * - int8: type_code = 0, bits = 8, lanes=1 + * - std::complex: type_code = 5, bits = 64, lanes = 1 + */ +typedef struct { + /*! + * \brief Type code of base types. + * We keep it uint8_t instead of DLDataTypeCode for minimal memory + * footprint, but the value should be one of DLDataTypeCode enum values. + * */ + uint8_t code; + /*! + * \brief Number of bits, common choices are 8, 16, 32. + */ + uint8_t bits; + /*! \brief Number of lanes in the type, used for vector types. */ + uint16_t lanes; +} DLDataType; + +/*! + * \brief Plain C Tensor object, does not manage memory. + */ +typedef struct { + /*! + * \brief The opaque data pointer points to the allocated data. This will be + * CUDA device pointer or cl_mem handle in OpenCL. This pointer is always + * aligned to 256 bytes as in CUDA. + * + * For given DLTensor, the size of memory required to store the contents of + * data is calculated as follows: + * + * \code{.c} + * static inline size_t GetDataSize(const DLTensor* t) { + * size_t size = 1; + * for (tvm_index_t i = 0; i < t->ndim; ++i) { + * size *= t->shape[i]; + * } + * size *= (t->dtype.bits * t->dtype.lanes + 7) / 8; + * return size; + * } + * \endcode + */ + void* data; + /*! \brief The device of the tensor */ + DLDevice device; + /*! \brief Number of dimensions */ + int ndim; + /*! \brief The data type of the pointer*/ + DLDataType dtype; + /*! \brief The shape of the tensor */ + int64_t* shape; + /*! + * \brief strides of the tensor (in number of elements, not bytes) + * can be NULL, indicating tensor is compact and row-majored. + */ + int64_t* strides; + /*! \brief The offset in bytes to the beginning pointer to data */ + uint64_t byte_offset; +} DLTensor; + +/*! + * \brief C Tensor object, manage memory of DLTensor. This data structure is + * intended to facilitate the borrowing of DLTensor by another framework. It is + * not meant to transfer the tensor. When the borrowing framework doesn't need + * the tensor, it should call the deleter to notify the host that the resource + * is no longer needed. + */ +typedef struct DLManagedTensor { + /*! \brief DLTensor which is being memory managed */ + DLTensor dl_tensor; + /*! \brief the context of the original host framework of DLManagedTensor in + * which DLManagedTensor is used in the framework. It can also be NULL. + */ + void* manager_ctx; + /*! \brief Destructor signature void (*)(void*) - this should be called + * to destruct manager_ctx which holds the DLManagedTensor. It can be NULL + * if there is no way for the caller to provide a reasonable destructor. + * The destructors deletes the argument self as well. 
+ */ + void (*deleter)(struct DLManagedTensor* self); +} DLManagedTensor; +#ifdef __cplusplus +} // DLPACK_EXTERN_C +#endif +#endif // DLPACK_DLPACK_H_ diff --git a/voice_bridge/torch/include/ATen/functorch/ADInterpreters.h b/voice_bridge/torch/include/ATen/functorch/ADInterpreters.h new file mode 100644 index 0000000000000000000000000000000000000000..b8ad638c5aee4b27b3db77c5160be5b9ee67edcc --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/ADInterpreters.h @@ -0,0 +1,36 @@ +#pragma once +#include + +namespace at { namespace functorch { + +// These are the interpreters for our AD transforms +// (grad, vjp and jvp). +// See NOTE: [functorch interpreter stack] for more details. + +struct GradInterpreterPtr { + explicit GradInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Grad); } + TransformType key() const { return base_->key(); } + int64_t level() const { return base_->level(); } + void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack); + void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case); + bool prevGradMode() const { + return c10::get(base_->meta()).prevGradMode_; + } + private: + const Interpreter* base_; +}; + +struct JvpInterpreterPtr { + explicit JvpInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Jvp); } + TransformType key() const { return base_->key(); } + int64_t level() const { return base_->level(); } + void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack); + void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case); + bool prevFwdGradMode() const { + return c10::get(base_->meta()).prevFwdGradMode_; + } + private: + const Interpreter* base_; +}; + +}} // namespace at::functorch diff --git a/voice_bridge/torch/include/ATen/functorch/BatchRulesHelper.h b/voice_bridge/torch/include/ATen/functorch/BatchRulesHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..219c01c89c56e4d90bca2e78d7ad7c25634a5ad1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/BatchRulesHelper.h @@ -0,0 +1,471 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +// This file contains helper functions for batching rules. 
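For orientation before the helpers and macros: a batch rule pairs every Tensor argument with an optional batch-dim index and returns the output plus its batch dim. A minimal hand-written sketch for a pointwise op follows (illustrative only; in-tree, rules like this are produced by the `BASIC_UNARY_BATCH_RULE` machinery defined below):

```cpp
#include <ATen/ATen.h>
#include <c10/util/Optional.h>
#include <tuple>

// For a pointwise op such as sin, the batch dim passes through unchanged:
// the op does not move or reduce any dimensions.
std::tuple<at::Tensor, c10::optional<int64_t>> sin_batch_rule(
    const at::Tensor& self, c10::optional<int64_t> self_bdim) {
  return std::make_tuple(self.sin(), self_bdim);
}
```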
+ +namespace at { namespace functorch { + +TORCH_API Tensor reshape_dim_into(int64_t src, int64_t dst, const Tensor& x); +TORCH_API Tensor reshape_dim_outof(int64_t src, int64_t size1, const Tensor& x); + +TORCH_API Tensor reshape_dim_outof_symint(int64_t src, c10::SymInt size1, const Tensor& x); + +Tensor moveBatchDimToFront(const Tensor& tensor, optional maybe_batch_dim); +int64_t rankWithoutBatchDim(const Tensor& tensor, optional maybe_batch_dim); +int64_t numelWithoutBatchDim(const Tensor& tensor, optional maybe_batch_dim); +optional valIfNonempty(optional maybe_empty, int64_t new_val); +int64_t getPhysicalDim(const Tensor& tensor, bool has_batch_dim, int64_t logical_dim); +VmapDimVector getPhysicalDims(const Tensor& tensor, bool has_batch_dim, IntArrayRef logical_dims); + +void vmapIncompatibleInplaceError(const char* schema_name); + +Tensor maybePadToLogicalRank(const Tensor& tensor, optional has_bdim, int64_t logical_rank); + +void check_randomness(RandomnessType randomness); +void check_randomness(RandomnessType randomness, bool any_tensor_bdim); + +inline Tensor ensure_has_bdim(const Tensor& tensor, bool has_bdim, int64_t batch_size) { + if (has_bdim) { + return tensor; + } + const auto sizes = tensor.sizes(); + DimVector expanded_shape; + expanded_shape.reserve(sizes.size()); + expanded_shape.emplace_back(batch_size); + expanded_shape.insert(expanded_shape.end(), sizes.begin(), sizes.end()); + return tensor.expand(expanded_shape); +} + +#define VMAP_SUPPORT(op, batch_rule) \ + m.impl(#op, op ## _generated_plumbing); + +#define VMAP_SUPPORT2(op, overload, batch_rule) \ + m.impl(#op "." #overload, op ## _ ## overload ## _generated_plumbing); + +#define OP_DECOMPOSE(op) m.impl(#op, static_cast(native::op)); +#define OP_DECOMPOSE2(op, overload) m.impl(#op"."#overload, static_cast(native::op)); + +// DO NOT USE ME DIRECTLY! Use BASIC_UNARY_BATCH_RULE to save yourself some pain +template +struct BasicUnaryBatchRuleHelper; + +template +struct BasicUnaryBatchRuleHelper> { + static std::tuple> apply( + const Tensor& tensor, + optional batch_dim, + T... extra_args) { + return std::make_tuple(Func(tensor, std::forward(extra_args)...), batch_dim); + } +}; + +// USAGE: BASIC_UNARY_BATCH_RULE(at::sin) +// INCORRECT USAGE: BASIC_UNARY_BATCH_RULE(&at::sin) +// It is important that this macro is not passed a function pointer!! +#define BASIC_UNARY_BATCH_RULE(fn) SINGLE_ARG(\ + BasicUnaryBatchRuleHelper<\ + decltype(&fn),\ + &fn,\ + c10::guts::function_traits::parameter_types>::apply) + +#define UNARY_POINTWISE(op) \ + VMAP_SUPPORT(op, BASIC_UNARY_BATCH_RULE(ATEN_FN(op))); + +template +struct VariadicBdimsBatchRuleHelper; + +template +struct VariadicBdimsBatchRuleHelper> { + static std::tuple> apply( + const Tensor& tensor, + optional batch_dim, + T... extra_args) { + auto tensor_ = moveBatchDimToFront(tensor, batch_dim); + return std::make_tuple(Func(tensor_, std::forward(extra_args)...), 0); + } +}; + +// USAGE: VARIADIC_BDIMS_BATCH_RULE(at::cholesky_inverse) +// INCORRECT USAGE: VARIADIC_BDIMS_BATCH_RULE(&at::cholesky_inverse) +// It is important that this macro is not passed a function pointer!! 
+#define VARIADIC_BDIMS_BATCH_RULE(fn) SINGLE_ARG(\ + VariadicBdimsBatchRuleHelper<\ + decltype(&fn),\ + &fn,\ + c10::guts::function_traits::parameter_types>::apply) + +#define VARIADIC_BDIMS(op) \ + VMAP_SUPPORT(op, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN(op))); + +#define VARIADIC_BDIMS2(op, overload) \ + VMAP_SUPPORT2(op, overload, VARIADIC_BDIMS_BATCH_RULE(ATEN_FN2(op, overload))); + +template +void boxed_tensor_inputs_batch_rule(const c10::OperatorHandle& op, torch::jit::Stack* stack) { + const auto& schema = op.schema(); + const auto num_returns = schema.returns().size(); + const auto num_arguments = schema.arguments().size(); + + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + TORCH_INTERNAL_ASSERT(maybe_layer.has_value()); + int64_t cur_level = maybe_layer->layerId(); + + auto orig_arguments = torch::jit::last(*stack, num_arguments); + if (std::none_of(orig_arguments.begin(), orig_arguments.end(), ivalueParticipatesInCurrentLevel)) { + op.callBoxed(stack); + return; + } + + auto arguments = torch::jit::pop(*stack, num_arguments); + std::vector>> tensor_inputs; + std::vector tensor_pos; + for (const auto idx : c10::irange(0, num_arguments)) { + const auto& ivalue = arguments[idx]; + if (ivalue.isTensor()) { + Tensor tensor_value; + optional tensor_bdim; + std::tie(tensor_value, tensor_bdim) = unwrapTensorAtLevel(ivalue.toTensor(), cur_level); + tensor_inputs.emplace_back(tensor_value, tensor_bdim); + tensor_pos.push_back(idx); + } + } + Func(tensor_inputs); + + size_t tensor_idx = 0; + TORCH_INTERNAL_ASSERT(tensor_pos.size() > 0); + for (const auto arg_idx : c10::irange(0, num_arguments)) { + if (tensor_idx >= tensor_pos.size() || (int64_t)arg_idx != tensor_pos[tensor_idx]) { + torch::jit::push(stack, arguments[arg_idx]); + } else { + TORCH_INTERNAL_ASSERT(tensor_idx < tensor_inputs.size()); + torch::jit::push(stack, tensor_inputs[tensor_idx].first); + tensor_idx++; + } + } + + op.callBoxed(stack); + const auto returns = torch::jit::pop(*stack, num_returns); + for (const auto& ret : returns) { + if (ret.isTensor()) { + torch::jit::push(stack, makeBatched(ret.toTensor(), 0, cur_level)); + } else { + TORCH_INTERNAL_ASSERT(false, "This boxed batching rule does not currently support ops that return non-tensor values"); + } + } +} + +inline void handle_pointwise_ops(std::vector>> &tensor_inputs) { + int64_t out_logical_rank = 0; + for (auto& tensor_input : tensor_inputs) { + int64_t cur_logical_rank = rankWithoutBatchDim(tensor_input.first, tensor_input.second); + out_logical_rank = std::max(out_logical_rank, cur_logical_rank); + } + for (auto& tensor_input: tensor_inputs) { + tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second); + tensor_input.first = maybePadToLogicalRank(tensor_input.first, tensor_input.second, out_logical_rank); + } +} + +#define POINTWISE_BOXED(op) \ + m.impl(#op, torch::CppFunction::makeFromBoxedFunction>()); + +#define POINTWISE_BOXED2(op, overload) \ + m.impl(#op "." 
#overload, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_pointwise_ops), &handle_pointwise_ops>>());
+
+inline void handle_variadic_bdims(std::vector<std::pair<Tensor, optional<int64_t>>> &tensor_inputs) {
+  for (auto & tensor_input : tensor_inputs) {
+    tensor_input.first = moveBatchDimToFront(tensor_input.first, tensor_input.second);
+  }
+}
+
+#define VARIADIC_BDIMS_BOXED(op) \
+  m.impl(#op, torch::CppFunction::makeFromBoxedFunction<boxed_tensor_inputs_batch_rule<decltype(&handle_variadic_bdims), &handle_variadic_bdims>>());
+
+using UnpackedBatchedTensor = std::tuple<Tensor, optional<int64_t>>;
+
+inline void find_and_unpack_tensors(
+    const torch::jit::Stack* stack,
+    int64_t num_args,
+    int64_t cur_level,
+    SmallVector<UnpackedBatchedTensor, 5>* tensors,
+    SmallVector<int64_t, 5>* tensors_pos,
+    int64_t* batch_size) {
+
+  int64_t computed_batch_size = -1;
+  int64_t args_begin = stack->size() - num_args;
+
+  for (const auto idx : c10::irange(0, num_args)) {
+    const auto& ivalue = (*stack)[args_begin + idx];
+    if (!ivalue.isTensor()) {
+      continue;
+    }
+    auto unpacked = unwrapTensorAtLevel(ivalue.toTensor(), cur_level);
+    const auto& tensor_value = std::get<0>(unpacked);
+    const auto tensor_bdim = std::get<1>(unpacked);
+    if (tensor_bdim.has_value()) {
+      auto candidate_batch_size = tensor_value.size(*tensor_bdim);
+      if (computed_batch_size == -1) {
+        computed_batch_size = candidate_batch_size;
+      }
+      TORCH_INTERNAL_ASSERT(candidate_batch_size == computed_batch_size);
+    }
+
+    tensors->push_back(std::move(unpacked));
+    tensors_pos->push_back(idx);
+  }
+  TORCH_INTERNAL_ASSERT(computed_batch_size > -1);
+  *batch_size = computed_batch_size;
+}
+
+inline void boxed_existing_bdim_all_batch_rule(
+    const c10::OperatorHandle& op, torch::jit::Stack* stack) {
+  const auto& schema = op.schema();
+  const auto num_returns = schema.returns().size();
+  const auto num_arguments = schema.arguments().size();
+
+  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
+  auto maybe_layer = maybeCurrentDynamicLayer();
+  TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
+  int64_t cur_level = maybe_layer->layerId();
+
+  const auto arguments = torch::jit::last(stack, num_arguments);
+  if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) {
+    op.callBoxed(stack);
+    return;
+  }
+
+  int64_t args_begin = stack->size() - num_arguments;
+  SmallVector<UnpackedBatchedTensor, 5> tensor_inputs;
+  SmallVector<int64_t, 5> tensor_pos;
+  int64_t batch_size;
+
+  find_and_unpack_tensors(
+      stack, num_arguments, cur_level,
+      &tensor_inputs, &tensor_pos, &batch_size);
+
+  // For each tensor, ensure it has a bdim and reshape it.
+  for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) {
+    const auto& value = std::get<0>(tensor_inputs[tensor_idx]);
+    auto bdim = std::get<1>(tensor_inputs[tensor_idx]);
+    auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size);
+    if (!bdim.has_value()) {
+      bdim = 0;
+    }
+    (*stack)[args_begin + tensor_pos[tensor_idx]] = reshape_dim_into(*bdim, 0, value_);
+  }
+
+  op.callBoxed(stack);
+
+  for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) {
+    const auto& ret = (*stack)[idx];
+    TORCH_INTERNAL_ASSERT(ret.isTensor(),
+        "This boxed batching rule does not currently support ops that return non-tensor values");
+    (*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level);
+  }
+}
+
+// Use when all tensor arguments accept one (normal) batch dim.
+// This batching rule expands the batch dim on all Tensors, reshapes it into
+// dim 0, calls the op, and then reshapes the batch dim out of dim 0.
+// This is not the most efficient thing; if there are alternatives, please try
+// to use them. Use this only as a last resort.
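To make the comment above concrete, here is the reshape round-trip this fallback performs on a single argument, assuming a batch size of 4 (the standalone function and the sizes are illustrative; the macro that installs the rule follows right after):

```cpp
#include <ATen/ATen.h>
#include <ATen/functorch/BatchRulesHelper.h>

void existing_bdim_roundtrip_sketch() {
  // A tensor whose batch dim (size 4) lives at dim 1.
  auto x = at::ones({3, 4, 5});
  // Fold the batch dim into dim 0: [3, 4, 5] -> [12, 5].
  auto flat = at::functorch::reshape_dim_into(1, 0, x);
  // ... op.callBoxed runs the underlying op on the flattened tensor ...
  // Split the batch dim back out of dim 0: [12, 5] -> [4, 3, 5].
  auto out = at::functorch::reshape_dim_outof(0, 4, flat);
  (void)out;
}
```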
+#define EXISTING_BDIM_ALL_BOXED(op) \ + m.impl(#op, torch::CppFunction::makeFromBoxedFunction()); + +template +inline void boxed_all_tensors_have_optional_bdim( + const c10::OperatorHandle& op, torch::jit::Stack* stack) { + const auto& schema = op.schema(); + const auto num_returns = schema.returns().size(); + const auto num_arguments = schema.arguments().size(); + + c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched); + auto maybe_layer = maybeCurrentDynamicLayer(); + TORCH_INTERNAL_ASSERT(maybe_layer.has_value()); + int64_t cur_level = maybe_layer->layerId(); + + const auto arguments = torch::jit::last(stack, num_arguments); + if (std::none_of(arguments.begin(), arguments.end(), ivalueParticipatesInCurrentLevel)) { + op.callBoxed(stack); + return; + } + + int64_t args_begin = stack->size() - num_arguments; + SmallVector tensor_inputs; + SmallVector tensor_pos; + int64_t batch_size; + + find_and_unpack_tensors( + stack, num_arguments, cur_level, + &tensor_inputs, &tensor_pos, &batch_size); + + optional is_no_batch_dim_case; + + for (const auto tensor_idx : c10::irange(0, tensor_inputs.size())) { + const auto& value = std::get<0>(tensor_inputs[tensor_idx]); + auto bdim = std::get<1>(tensor_inputs[tensor_idx]); + const auto logical_rank = rankWithoutBatchDim(value, bdim); + + if (!is_no_batch_dim_case.has_value()) { + is_no_batch_dim_case = (logical_rank == feature_rank); + } + auto value_ = ensure_has_bdim(value, bdim.has_value(), batch_size); + if (!bdim.has_value()) { + bdim = 0; + } + if (*is_no_batch_dim_case) { + TORCH_INTERNAL_ASSERT(logical_rank == feature_rank); + value_ = moveBatchDimToFront(value_, bdim); + if (tensor_idx == contig_tensor_index) { + value_ = value_.contiguous(); + } + (*stack)[args_begin + tensor_pos[tensor_idx]] = value_; + continue; + } + TORCH_INTERNAL_ASSERT(logical_rank == feature_rank + 1); + value_ = reshape_dim_into(*bdim, 0, value_); + if (tensor_idx == contig_tensor_index) { + value_ = value_.contiguous(); + } + (*stack)[args_begin + tensor_pos[tensor_idx]] = value_; + } + + op.callBoxed(stack); + + for (const auto idx : c10::irange(args_begin, args_begin + num_returns)) { + const auto& ret = (*stack)[idx]; + TORCH_INTERNAL_ASSERT(ret.isTensor(), + "This boxed batching rule does not currently support ops that return non-tensor values"); + if (*is_no_batch_dim_case) { + (*stack)[idx] = makeBatched(ret.toTensor(), 0, cur_level); + } else { + (*stack)[idx] = makeBatched(reshape_dim_outof(0, batch_size, ret.toTensor()), 0, cur_level); + } + } +} + +// Useful for many NN operators. +// The operator must satisfy the following: +// - All arguments must accept an optional batch dim. +// - All arguments must be the same rank +#define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED(feature_rank, op) \ + m.impl(#op, torch::CppFunction::makeFromBoxedFunction>()); + +#define ALL_TENSORS_HAVE_OPTIONAL_BDIM_BOXED_CONTIG1(feature_rank, op, contig_tensor_index) \ + m.impl(#op, \ + torch::CppFunction::makeFromBoxedFunction<\ + boxed_all_tensors_have_optional_bdim<\ + feature_rank, \ + contig_tensor_index>\ + >()); + +template +struct ExistingBdimBatchRuleHelper; + +template +struct ExistingBdimBatchRuleHelper> { + static std::tuple> apply( + const Tensor& self, + optional self_bdim, + T... 
extra_args) { + auto self_ = reshape_dim_into(*self_bdim, 0, self); + auto out = Func(self_, std::forward(extra_args)...); + return std::make_tuple(reshape_dim_outof_symint(0, self.sym_sizes()[*self_bdim], out), 0); + } +}; + +// USAGE: EXISTING_BDIM_BATCH_RULE(at::cholesky_inverse) +// INCORRECT USAGE: EXISTING_BDIM_BATCH_RULE(&at::cholesky_inverse) +// It is important that this macro is not passed a function pointer!! +#define EXISTING_BDIM_BATCH_RULE(fn) SINGLE_ARG(\ + ExistingBdimBatchRuleHelper<\ + decltype(&fn),\ + &fn,\ + c10::guts::function_traits::parameter_types>::apply) + + +#define EXISTING_BDIM(op) \ + VMAP_SUPPORT(op, EXISTING_BDIM_BATCH_RULE(ATEN_FN(op))); + +#define EXISTING_BDIM2(op, overload) \ + VMAP_SUPPORT2(op, overload, EXISTING_BDIM_BATCH_RULE(ATEN_FN2(op, overload))); + +#define INVOKE(object,ptrToMember) ((object).*(ptrToMember)) + + +template +Tensor& unary_inplace_batch_rule(Tensor& self, optional, ExtraArgs... extra_args) { + INVOKE(self, Method)(std::forward(extra_args)...); + return self; +} + +inline int64_t get_bdim_size4( + const Tensor& a_value, optional a_bdim, + const Tensor& b_value, optional b_bdim, + const Tensor& c_value, optional c_bdim, + const Tensor& d_value, optional d_bdim) { + if (a_bdim) + return a_value.size(*a_bdim); + if (b_bdim) + return b_value.size(*b_bdim); + if (c_bdim) + return c_value.size(*c_bdim); + if (d_bdim) + return d_value.size(*d_bdim); + TORCH_INTERNAL_ASSERT(false); +} + +inline int64_t get_bdim_size3( + const Tensor& a_value, optional a_bdim, + const Tensor& b_value, optional b_bdim, + const Tensor& c_value, optional c_bdim) { + if (a_bdim) + return a_value.size(*a_bdim); + if (b_bdim) + return b_value.size(*b_bdim); + if (c_bdim) + return c_value.size(*c_bdim); + TORCH_INTERNAL_ASSERT(false); +} + +inline int64_t get_bdim_size2( + const Tensor& a_value, optional a_bdim, + const Tensor& b_value, optional b_bdim) { + if (a_bdim) + return a_value.size(*a_bdim); + if (b_bdim) + return b_value.size(*b_bdim); + TORCH_INTERNAL_ASSERT(false); +} + +// [start, start + 1, ..., stop - 1] +inline VmapDimVector range(int64_t start, int64_t stop) { + TORCH_INTERNAL_ASSERT(stop >= start); + VmapDimVector dims; + dims.reserve(stop - start); + for (int64_t i = start; i < stop; i++) { + dims.emplace_back(i); + } + return dims; +} +std::tuple _binary_pointwise_helper( + const Tensor& tensor, optional tensor_batch_dim, const Tensor& other, optional other_batch_dim, + bool do_type_promotion=true); + +}} diff --git a/voice_bridge/torch/include/ATen/functorch/BatchedFallback.h b/voice_bridge/torch/include/ATen/functorch/BatchedFallback.h new file mode 100644 index 0000000000000000000000000000000000000000..05d223568a37665dede237a273ab48745cd7764d --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/BatchedFallback.h @@ -0,0 +1,80 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once +#include +#include +#include + +namespace at { +namespace functorch { + +// This file contains code for the vmap fallback (also known as the +// BatchedTensor fallback or the Batched fallback). This code runs +// when an operation doesn't have a batching rule implemented. + +// If an operator doesn't have a batching rule implemented then we fallback +// to this implementation. 
The fallback doesn't work on out= variants or +// view operations; that is, it works for out-of-place operations and +// in-place non-view operations. +// +// For out-of-place operations, the fallback effectively takes all of the +// BatchedTensors in `stack`, slices them, and runs `op` on all of the +// corresponding slices to produce slices of the outputs. The output slices +// then get `torch.stack`ed to create the +// final returns. +// +// The performance of the fallback is not very good because it introduces an +// extra copy from stacking the sliced outputs. Because of this, we prefer to +// write batching rules for operators whenever possible. +void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack); + +// The vmap fallback emits a warning by default, but it may be disabled if +// the user finds it to be too annoying. +TORCH_API bool isVmapFallbackWarningEnabled(); +TORCH_API void setVmapFallbackWarningEnabled(bool enabled); + +// Used for testing. The vmap fallback is enabled by default. When it is disabled, +// it raises an error. +TORCH_API bool isVmapFallbackEnabled(); +TORCH_API void setVmapFallbackEnabled(bool enabled); + +template A vector_to_result(const std::vector& buffer) { + return buffer[0].to(); +} +template std::tuple vector_to_result(const std::vector& buffer) { + return std::make_tuple(buffer[0].to(), buffer[1].to()); +} +template std::tuple vector_to_result(const std::vector& buffer) { + return std::make_tuple(buffer[0].to(), buffer[1].to(), buffer[2].to()); +} + +// slow_fallback is a way to call the vmap fallback inside some boxed kernel. +// There is probably some better way to metaprogram this. +template +Ret slow_fallback(const c10::OperatorHandle& op, ArrayRef args) { + std::vector stack(args.begin(), args.end()); + batchedTensorForLoopFallback(op, &stack); + return vector_to_result(stack); +} + +template +std::tuple slow_fallback(const c10::OperatorHandle& op, ArrayRef args) { + std::vector stack(args.begin(), args.end()); + batchedTensorForLoopFallback(op, &stack); + return vector_to_result(stack); +} + +template +std::tuple slow_fallback(const c10::OperatorHandle& op, ArrayRef args) { + std::vector stack(args.begin(), args.end()); + batchedTensorForLoopFallback(op, &stack); + return vector_to_result(stack); +} + + +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/functorch/BatchedTensorImpl.h b/voice_bridge/torch/include/ATen/functorch/BatchedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..320989604570b8cc17728c98e086fbb6596a64fe --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/BatchedTensorImpl.h @@ -0,0 +1,160 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include + +#include +#include +#include + +namespace at { +namespace functorch { + +using Tensor = at::Tensor; + +// We assume this in a few other places in the codebase, +// but there isn't a centralized definition. +constexpr int64_t kVmapMaxTensorDims = 64; + +// The valid vmap levels range from [0, 64). This effectively means that we +// support a maximum of 64 nested vmaps. +constexpr int64_t kVmapNumLevels = 64; + +// Store this number of elements of BatchDims on the stack. Most people will +// probably use <= 5 nested vmaps, but adjust this number as necessary. 
+constexpr int64_t kBatchDimsStackSize = 5; + +// A BatchedTensorImpl holds an underlying Tensor and a single batch dim +// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a +// BatchedTensorImpl. +// +// The batch dimensions are treated as being "private"; they are not user-visible. +// For example, in the following Tensor, +// bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0) +// dimension 0 is batch dimension. +// +// bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public) +// dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7) tensor. +struct TORCH_API BatchedTensorImpl : public c10::TensorImpl { + explicit BatchedTensorImpl(at::DispatchKeySet key_set, Tensor value, int64_t dim, int64_t level); + + // Returns batch dimension of this tensor + int64_t bdim() const { return bdim_; } + + // Returns batch dimension of this tensor + int64_t level() const { return level_; } + + // BatchedTensorImpl wraps a Tensor + const Tensor& value() const { return value_; } + + // Given a public dimension index, return the dimension index in the underlying + // value() tensor. + // For example, if we have + // bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0) + // bt.actualDim(0) -> 1 + // bt.actualDim(1) -> 2 + // bt.actualDim(2) -> 3 + // bt.actualDim(3) -> Error + int64_t actualDim(int64_t dim, bool wrap_dim = true) const; + + // We have to override this because we opted into CustomStrides + IntArrayRef strides_custom() const override; + SymIntArrayRef sym_strides_custom() const override; + // Override a bunch of methods inherited from TensorImpl to return error messages. + bool is_contiguous_custom(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const override; + void set_size(int64_t dim, int64_t new_size) override; + void set_stride(int64_t dim, int64_t new_stride) override; + void set_storage_offset(int64_t storage_offset) override; +#ifdef DEBUG + bool has_storage() const override; +#endif + + void refreshTensorMetadata(); + + // Used in torchdim. torchdim uses non-lexical BatchedTensor; the way it + // accomplishes this is a hack where it is able to modify the levels of + // BatchedTensor to match the level of the current vmap transform. + void _unsafe_set_level(int64_t level) { + level_ = level; + } + + // Used in batching rule for in-place view operations that can change + // the index of the bdim (think squeeze_, unsqueeze_) + void unsafe_set_bdim(int64_t bdim) { + // NB: you MUST call refreshTensorMetadata after doing this. + bdim_ = bdim; + } + private: + // see NOTE: [BatchedTensorImpl levels invariant] + void checkInvariants() const; + const char* tensorimpl_type_name() const override; + + Tensor value_; + + int64_t level_; + int64_t bdim_; +}; + +// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a +// BatchedTensorImpl. +inline bool isBatchedTensor(const Tensor& tensor) { + return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::FuncTorchBatched); +} + +// It is unsafe to call this on a Tensor that is not backed by a +// BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible. +inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) { + return static_cast(tensor.unsafeGetTensorImpl()); +} + +inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) { + if (!isBatchedTensor(tensor)) { + return nullptr; + } + return unsafeGetBatchedImpl(tensor); +} + +// Returns a bitset. If bit i is set, then that means dim i is a batchdim. 
+inline std::bitset createBatchDimBitset(int64_t dim) { + std::bitset is_bdim; + is_bdim.set(dim); + return is_bdim; +} + +// Creates a bitset for the given level +inline std::bitset createVmapLevelsBitset(int64_t level) { + std::bitset result; + result.set(level); + return result; +} + +// Use this to construct a BatchedTensor from a regular Tensor +TORCH_API Tensor makeBatched(const Tensor& tensor, int64_t dim, int64_t level); + +// Adds a batch dim to `tensor`, returning a BatchedTensor +TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t dim, int64_t level); + +// Certain dispatch keys must be propagated to the BatchedTensor (or, in general, +// any wrapper Tensor subclasses). This is because there are methods on Tensor +// that skip dispatch and check for the presence of a dispatch key (e.g. is_cpu()). +// TODO: should probably contain more (or all?) backend keys +constexpr DispatchKeySet kKeysToPropagateToWrapper({ + DispatchKey::Negative, + DispatchKey::Conjugate, + DispatchKey::XLA, + DispatchKey::CUDA, + DispatchKey::CPU, +}); + +inline DispatchKeySet getKeysToPropagateToWrapper(const Tensor& tensor, DispatchKeySet to_propagate=kKeysToPropagateToWrapper) { + auto key_set = tensor.unsafeGetTensorImpl()->key_set(); + return key_set & kKeysToPropagateToWrapper; +} + +} +} diff --git a/voice_bridge/torch/include/ATen/functorch/BatchingMetaprogramming.h b/voice_bridge/torch/include/ATen/functorch/BatchingMetaprogramming.h new file mode 100644 index 0000000000000000000000000000000000000000..e77960f441feda2d6ed7a5bd3b811d3c5456a0a1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/BatchingMetaprogramming.h @@ -0,0 +1,128 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once +#include +#include + +// This file contains template metaprogramming things that are used for our +// batching rules. +// +// See NOTE: [vmap plumbing] for more details on why this is necessary. +// The plumbing has a bunch of metaprogramming hacks for determining the signature +// of a batching rule from the signature of the operator, many of which use the +// helper functions in this file. 
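A static sanity check of what this machinery computes (a sketch, not part of the header; the alias name is made up): stripping the `optional<int64_t>` that follows each Tensor recovers the operator's parameter list from a batch rule's parameter list.

```cpp
#include <ATen/functorch/BatchingMetaprogramming.h>
#include <type_traits>

using BatchRuleArgs =
    c10::guts::typelist::typelist<at::Tensor, c10::optional<int64_t>, double>;

// remove_batch_dim_after_tensor_t drops the optional<int64_t> that
// immediately follows a Tensor, leaving all other types in place.
static_assert(
    std::is_same<
        at::functorch::remove_batch_dim_after_tensor_t<BatchRuleArgs>,
        c10::guts::typelist::typelist<at::Tensor, double>>::value,
    "Tensor's trailing batch-dim argument should be removed");
```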
+ +namespace at { +namespace functorch { + +// Metaprogramming things +template using typelist = c10::guts::typelist::typelist; +template using head_t = c10::guts::typelist::head_t; +template using concat_t = c10::guts::typelist::concat_t; +template class debug_t; + +// tail operation +template +struct tail final { + static_assert(c10::guts::false_t::value, + "In typelist::tail, the T argument must be typelist<...>."); +}; +template +struct tail> final { + using type = typelist; +}; +template using tail_t = typename tail::type; + +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext { + using type = Next; +}; +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext, Next, Tail> { + using type = Tail; +}; +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext, Next, Tail> { + using type = Tail; +}; +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext, Next, Tail> { + using type = Tail; +}; +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext, optional, Next, Tail> { + using type = Tail; +}; +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext&, optional, Next, Tail> { + using type = Tail; +}; +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext&, optional, Next, Tail> { + using type = Tail; +}; +template +struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext, optional, Next, Tail> { + using type = Tail; +}; +template struct RemoveBatchDimAfterTensor { + using first = head_t; + using next = tail_t; + using second = head_t; + using tail = tail_t; + + using type = concat_t< + typelist, + typename RemoveBatchDimAfterTensor< + typename IfFirstIsTensorAndSecondisBatchDimThenTailElseNext::type + >::type + >; +}; +template struct RemoveBatchDimAfterTensor> { + using type = typelist; +}; +template <> struct RemoveBatchDimAfterTensor> { + using type = typelist<>; +}; +template using remove_batch_dim_after_tensor_t = typename RemoveBatchDimAfterTensor::type; + +template struct UnpackSingleItemTuple { + using type = T; +}; +template struct UnpackSingleItemTuple> { + using type = T; +}; +template using unpack_single_item_tuple_t = typename UnpackSingleItemTuple::type; + +template struct BuildFunctionHelper; +template struct BuildFunctionHelper> { + using type = Return(Args...); +}; +template +struct BuildFunction { + using type = typename BuildFunctionHelper>::type; +}; +template using build_function_t = typename BuildFunction::type; + + +template struct ToOperatorType { + using batch_rule_return_type = typename c10::guts::function_traits::return_type; + using batch_rule_parameter_types = typename c10::guts::function_traits::parameter_types; + + using operator_parameter_types = remove_batch_dim_after_tensor_t; + using operator_return_type = + unpack_single_item_tuple_t< + c10::guts::typelist::to_tuple_t< + remove_batch_dim_after_tensor_t< + c10::guts::typelist::from_tuple_t>>>; + + using type = build_function_t; +}; +template using to_operator_t = typename ToOperatorType::type; + +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/functorch/DynamicLayer.h b/voice_bridge/torch/include/ATen/functorch/DynamicLayer.h new file mode 100644 index 0000000000000000000000000000000000000000..576a9621651a46e47ffa02fb8bec8d8e96fe0abd --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/DynamicLayer.h @@ -0,0 +1,122 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. 
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// Forward declared
+namespace c10 { struct AutogradMetaInterface; }
+
+namespace at {
+namespace functorch {
+
+// This file contains the implementation of functorch's interpreter stack.
+// See NOTE: [functorch interpreter stack] first before reading on.
+//
+// NB: the functorch interpreter stack is also referred to as:
+// - the "dynamic layer stack" -- an older name for "interpreter" was
+//   "dynamic layer".
+// - the "functorch mode stack". You can think of each functorch transform as a
+//   "mode" (in the same sense as torch_dispatch mode or torch_function mode),
+//   and functorch being an implementation of a "mode stack" where the modes
+//   may be arbitrarily composed.
+
+// DynamicLayer is basically the same thing as an Interpreter.
+// It represents a functorch transform and it holds an Interpreter,
+// which contains metadata related to the transform and instructions on
+// how to perform the transform.
+//
+// TODO: we can excise DynamicLayer in favor of Interpreter,
+// but I am going to leave it for now as a compatibility shim to avoid
+// needing to refactor a lot of callsites...
+struct TORCH_API DynamicLayer {
+  explicit DynamicLayer(
+      TransformType transform_type,
+      int64_t layerId,
+      optional<int64_t> batchSize = nullopt,
+      optional<RandomnessType> randomness = nullopt,
+      optional<bool> prev_grad_mode = nullopt,
+      optional<bool> pre_fwd_grad_mode = nullopt,
+      optional<bool> functionalize_add_back_views = nullopt);
+
+  TransformType key() const;
+  int64_t layerId() const;
+
+  const Interpreter& interpreter() const { return interpreter_; }
+  Interpreter& interpreter() { return interpreter_; }
+
+  // Only valid for vmap
+  int64_t batchSize() const;
+  RandomnessType randomness() const;
+
+ private:
+  Interpreter interpreter_;
+};
+
+TORCH_API int64_t initAndPushDynamicLayer(
+    TransformType transform_type,
+    optional<int64_t> batch_size = nullopt,
+    optional<RandomnessType> randomness = nullopt,
+    optional<bool> prev_grad_mode = nullopt,
+    optional<bool> prev_fwd_grad_mode = nullopt,
+    optional<bool> functionalize_add_back_views = nullopt);
+TORCH_API DynamicLayer popDynamicLayerAndDeleteMetadata();
+TORCH_API c10::optional<DynamicLayer> maybeCurrentDynamicLayer();
+TORCH_API const std::vector<DynamicLayer>& getDynamicLayerStack();
+TORCH_API void setDynamicLayerStack(const std::vector<DynamicLayer>& stack);
+TORCH_API void setDynamicLayerFrontBackKeysIncluded(bool included);
+
+// NB: Not lock safe, you should only call this from Python where the GIL will
+// prevent race conditions.
+TORCH_API bool areTransformsActive();
+
+// NOTE: [Life handles and lexically scoped transforms]
+// functorch transforms are lexically scoped.
+// Given a level, we store a "life handle" that is a boolean that tells us if the
+// transform with that level is active or not.
+//
+// functorch's TensorWrapper (for grad transforms) stores a life handle.
+// If a TensorWrapper escapes from the scope of the transform, then somehow
+// it must know it escaped; it can tell by querying the life handle.
+//
+// NB: not lock safe. TODO: does it need a lock?
+TORCH_API std::shared_ptr<bool> getLifeHandleForLevel(int64_t level);
+
+// Returns whether an operator is in-place. An operator is in-place if:
+// 1. The first argument is a Tensor and it is being written to
+// 2. The first argument is being returned
+// 3.
No other arguments are aliased +// Here is an example of an in-place operator: +// add_(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) +TORCH_API bool isInplaceOp(const c10::FunctionSchema& schema); + +// Given the indices of unwrapped inputs and the schema, this returns the indices of any outputs that should remain unwrapped +TORCH_API c10::optional findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input); + +TORCH_API Tensor unwrapIfDead(const Tensor& tensor); + +// Pretty printers +TORCH_API std::ostream& operator<<(std::ostream& os, const DynamicLayer& layer); +TORCH_API std::ostream& operator<<(std::ostream& os, const std::vector& dynamicLayerStack); + +// While a functorch grad transform is active, Tensor.requires_grad_() gets +// disabled. These two functions are the mechanism to controlling that. +TORCH_API void setInplaceRequiresGradAllowed(bool allowed); +TORCH_API bool getInplaceRequiresGradAllowed(); + +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/functorch/FunctionalizeInterpreter.h b/voice_bridge/torch/include/ATen/functorch/FunctionalizeInterpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..4157eb82d84fd51095e5b6d151e76c680f55e177 --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/FunctionalizeInterpreter.h @@ -0,0 +1,22 @@ +#pragma once +#include + +namespace at { namespace functorch { + +// This is the interpreter that handles the functionalize() transform. +// See NOTE: [functorch interpreter stack] for more details. + +struct FunctionalizeInterpreterPtr { + explicit FunctionalizeInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Functionalize); } + TransformType key() const { return base_->key(); } + int64_t level() const { return base_->level(); } + void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack); + void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case); + bool functionalizeAddBackViews() const { + return c10::get(base_->meta()).functionalizeAddBackViews_; + } + private: + const Interpreter* base_; +}; + +}} // namespace at::functorch diff --git a/voice_bridge/torch/include/ATen/functorch/Interpreter.h b/voice_bridge/torch/include/ATen/functorch/Interpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..f521e26f2b64fda31e22dbb13e863f53305d5c73 --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/Interpreter.h @@ -0,0 +1,194 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace at { namespace functorch { + +// NOTE: [functorch interpreter stack] +// +// functorch's dispatching system uses a stack of interpreters. +// Historically we've referred to this as the "DynamicLayerStack". +// +// An interpreter is something that reads in the code it is passed +// and then executes it. We have a different interpreter per-transform: +// the "VmapInterpreter" is responsible for reading in operators (like aten::mv) +// and executing the batched version of it (the batching rule for aten::mv). +// +// Concretely, each interpreter is responsible for two things: +// +// 1) process(ophandle, stack) +// Given an operator handle and a stack of arguments, the interpreter is +// responsible for figuring out how to execute the operation under the semantics +// of the interpreter. For e.g. VmapInterpreter, this is figuring out how to call +// the batching rule. 
+//
+// The batching rules are stored as kernels on the FuncTorchBatched key, so the way
+// VmapInterpreter calls the batching rule is roughly: (A) exclude all
+// dispatch keys aside from the Batched key, (B) redispatch so we get to the
+// Batched key.
+//
+// 2) sendToNextInterpreter(ophandle, stack)
+// The VmapInterpreter, when it sees aten::mv, will process it into a call to
+// aten::mm. It then needs to send the call to aten::mm to the next interpreter
+// in the interpreter stack.
+//
+// The VmapInterpreter just does this via a call to ophandle.callBoxed(stack)
+// and most Interpreters will implement it this way.
+
+enum RandomnessType {
+    Error,      // always errors when calling a random function
+    Same,       // randomness appears the same across batches
+    Different,  // randomness appears different across batches
+    END
+};
+
+enum class TransformType {
+  Torch,  // Unused
+  Vmap,
+  Grad,  // reverse-mode AD, aka vjp
+  Jvp,  // forward-mode AD
+  Functionalize,
+};
+
+std::ostream& operator<<(std::ostream& os, const TransformType& t);
+
+// NOTE: [Interpreter "subclassing" design]
+//
+// How are various Interpreters for different transforms (vmap, grad, ...)
+// implemented?
+//
+// Accessing interpreters is in the hot-path of functorch so we have a constraint
+// that this code must be as fast as possible.
+//
+// As a result, we stay away from virtual methods and this causes our code
+// to look a little funny.
+//
+// `Interpreter` is the struct for Interpreters. It holds ALL of the
+// relevant information (what type of interpreter it is and the metadata).
+// Metadata for each interpreter is represented as a Union (c10::variant)
+// of all possible metadata (VmapInterpreterMeta, GradInterpreterMeta, ...).
+//
+// Given an Interpreter, how do I get a "VmapInterpreter"? You may wish to do this
+// if you want to access the metadata fields (like batchSize and randomness).
+//
+// Each type of interpreter (e.g. Vmap) has a convenience struct
+// (e.g. VmapInterpreterPtr) associated with it.
+//
+// Construct the convenience struct with VmapInterpreterPtr(Interpreter*),
+// and then one can access methods on VmapInterpreterPtr like so:
+// >>> VmapInterpreterPtr(&interpreter).batchSize()
+//
+// Finally, Interpreter::process switches on the type of the interpreter
+// and calls one of {Transform}Interpreter::processImpl under the hood.
+// Same for Interpreter::sendToNextInterpreter :) + +struct VmapInterpreterMeta { + explicit VmapInterpreterMeta(int64_t batchSize, RandomnessType randomness) : + batchSize_(batchSize), randomness_(randomness) {} + int64_t batchSize_; + RandomnessType randomness_; +}; + +struct GradInterpreterMeta { + explicit GradInterpreterMeta(bool prevGradMode): prevGradMode_(prevGradMode) {} + bool prevGradMode_; +}; + +struct JvpInterpreterMeta { + explicit JvpInterpreterMeta(bool prevFwdGradMode) : prevFwdGradMode_(prevFwdGradMode) {} + bool prevFwdGradMode_; +}; + +struct FunctionalizeInterpreterMeta { + explicit FunctionalizeInterpreterMeta(bool functionalizeAddBackViews) : + functionalizeAddBackViews_(functionalizeAddBackViews) {} + bool functionalizeAddBackViews_; +}; + +typedef c10::variant< + int64_t, + GradInterpreterMeta, + JvpInterpreterMeta, + VmapInterpreterMeta, + FunctionalizeInterpreterMeta +> InterpreterMeta; + + +struct Interpreter { + // factory functions + static Interpreter Vmap(int64_t level, int64_t batchSize, RandomnessType randomness) { + return Interpreter(TransformType::Vmap, level, VmapInterpreterMeta(batchSize, randomness)); + } + static Interpreter Grad(int64_t level, bool prevGradMode) { + return Interpreter(TransformType::Grad, level, GradInterpreterMeta(prevGradMode)); + } + static Interpreter Jvp(int64_t level, bool prevFwdGradMode) { + return Interpreter(TransformType::Jvp, level, JvpInterpreterMeta(prevFwdGradMode)); + } + static Interpreter Functionalize(int64_t level, bool functionalizeAddBackViews) { + return Interpreter(TransformType::Functionalize, level, FunctionalizeInterpreterMeta(functionalizeAddBackViews)); + } + + // methods + TransformType key() const { return type_; } + int64_t level() const { return level_; } + const InterpreterMeta& meta() const { return meta_; } + + void process(const c10::OperatorHandle& op, torch::jit::Stack* stack); + void sendToNextInterpreter(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case); + + void saveLocalDispatchKeySet(c10::impl::LocalDispatchKeySet keyset) { + TORCH_INTERNAL_ASSERT(!savedLocalDispatchKeySet_.has_value()); + savedLocalDispatchKeySet_ = std::move(keyset); + } + void clearSavedLocalDispatchKeySet() { + TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value()); + savedLocalDispatchKeySet_ = c10::nullopt; + } + c10::impl::LocalDispatchKeySet getSavedLocalDispatchKeySet() const { + TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value()); + return *savedLocalDispatchKeySet_; + } + + // Please don't use this + explicit Interpreter() = default; + + private: + explicit Interpreter(TransformType type, int64_t level, InterpreterMeta meta): + type_(type), level_(level), meta_(meta) {} + + // fields + TransformType type_; + int64_t level_; + optional savedLocalDispatchKeySet_; + InterpreterMeta meta_; +}; + +// Applies the following for-loop: +// for i in range(begin, end): +// args[i] = func(args[i]) +void foreachTensorInplace(std::vector& args, int64_t begin, int64_t end, + std::function func); + +// Applies the following for-loop: +// for i in range(begin, end): +// if use_flag_relative[i] == 1: <-- treats use_flag_relative as a bitset +// args[i] = func(args[i], i - begin, true) +// args[i] = func(args[i], i - begin) +void foreachTensorInplaceWithFlag(std::vector& args, int64_t begin, int64_t end, + const std::bitset<64> use_flag_relative, std::function func); + +std::vector findUnwrappedInputs(std::vector& args, int64_t begin, int64_t end); + +DispatchKeySet 
keysToExcludeWhenEnteringDynamicLayer(TransformType key);
+
+void setup_dispatch_key_tls(DispatchKeySet exclude, DispatchKeySet include);
+
+void sanityCheckStack(const c10::OperatorHandle& op, torch::jit::Stack* stack);
+
+}} // namespace at::functorch
diff --git a/voice_bridge/torch/include/ATen/functorch/LegacyVmapTransforms.h b/voice_bridge/torch/include/ATen/functorch/LegacyVmapTransforms.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fc05b6c8038c28cd254994ecfd36f9628555720
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/functorch/LegacyVmapTransforms.h
@@ -0,0 +1,188 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#pragma once
+
+#include
+#include
+
+namespace at {
+namespace functorch {
+
+// This file contains the legacy (now-deprecated) batching rule API.
+// Please try to use the new-style batching rule API (see writing_batch_rules.md)
+
+// This file contains abstractions used for transforming *logical* vmap arguments
+// into *physical* arguments. (Keep reading for definitions of these terms).
+
+// NOTE: [Logical vs physical args]
+// Consider the following vmap.
+//   vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
+// This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
+// with batch dims 0 and 2:
+//   BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
+//
+// We say the *logical* view of the tensor has size [3] -- tensors inside
+// `func` appear to have size [3].
+// However, the *physical* underlying tensor (the one passed to vmap) has size
+// [2, 3, 4].
+//
+// This notion of logical vs physical also extends to non-tensor arguments.
+// Consider the previous tensor; let's assume the user called
+// `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
+// dimension they are reducing over is dim 0 but the physical dim is dim 1
+// (the first non-batch dimension).
+
+// Forward declared; see NOTE: [What is a VmapPhysicalView?]
+struct VmapPhysicalView;
+
+// Most PyTorch operators take 4 or fewer inputs.
+constexpr int64_t kVmapTransformStaticInputSize = 4;
+using VmapPhysicalViewVec = SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;
+
+// PyTorch generally advertises good performance for <= 5 dims.
+// (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
+// dimensions to get 8. Adjust this number as necessary.
+constexpr int64_t kVmapStaticDimVecSize = 8;
+using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
+
+// NOTE: [What is a VmapTransform?]
+// A *VmapTransform* converts logical views of tensors to physical views.
+//
+// Batching rules use VmapTransforms to convert logical arguments to
+// physical arguments, then call one or more at:: operators that handle the
+// physical arguments, and then convert the physical result back to a logical
+// argument.
+
+// VmapTransform for operators that take tensors with multiple batch dims.
+// Given one or more logical views on Tensors, `logicalToPhysical`
+// permutes all of the batch dims to the front of the tensor, aligns
+// and expands the batch dims to match each other (according to their `level`),
+// and returns a VmapPhysicalView on the tensor(s).
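A minimal sketch of how a legacy batching rule uses the transform declared just below (the function name is illustrative; `getPhysicalDim`, `getPhysicalToLogicalMap`, and `apply` are declared later in this header):

```cpp
#include <ATen/functorch/LegacyVmapTransforms.h>

// Sum over *logical* dim 0 of a BatchedTensor: move all batch dims to the
// front, reduce over the corresponding physical dim, then map the physical
// result back to a logical BatchedTensor.
at::Tensor sum_logical_dim0_sketch(const at::Tensor& logical_tensor) {
  auto physical_view =
      at::functorch::MultiBatchVmapTransform::logicalToPhysical(logical_tensor);
  auto physical_result =
      physical_view.tensor().sum(physical_view.getPhysicalDim(0));
  return physical_view.getPhysicalToLogicalMap().apply(physical_result);
}
```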
+struct TORCH_API MultiBatchVmapTransform { + static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor); + static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors); +}; + +// VmapTransform for operators that broadcast all inputs. +// Given some logical views on Tensors, `logicalToPhysical`: +// - permutes all of the batch dims to the front of the tensors +// - aligns all the batch dims to the collective levels of all of the tensors. +// If a tensor does not have a batch dim for a vmap level, then it receives +// a size-one dimension for said level. +// - aligns the non-batch dims to have the same dimensionality, adding extra +// size-1 dimensions in between the batch dimensions and the non-batch dimensions +// so that the batch dimensions are lined up from the right. +// +// For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch +// dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap tensors +// of size (B, 1, 2) and (B, 3, 2). +// +// Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns +// VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't +// actually *need* to return a tensor of size (1, 2) for the second tensor +// because the broadcasting operation takes care of that for us, but we do +// it anyways to keep things simple. +struct TORCH_API BroadcastingVmapTransform { + static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors); +}; + +// Forward declared, if you're reading this file head to toe, don't worry about +// it yet. +struct VmapPhysicalToLogicalMap; + +// NOTE: [What is a VmapPhysicalView?] +// VmapPhysicalView represents a physical view on a Tensor. +// +// One can use it to further convert logical dimension indices, logical shapes, +// and more to their physical variants, or convert a new (physical) tensor into +// a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented). +// +// VmapPhysicalView stores a physical tensor with all of its batch dimensions at +// the front and some levels that correspond to said batch dimensions. +// +// The levels bitset specifies which vmap levels correspond to the batch +// dimensions at the front of the tensor. In particular, the number of set bits +// corresponds to the number of batch dimensions on `tensor` and the rightmost +// bit of `levels` specifies the maximum number of nested vmaps we are in at +// this point in time. +// For example, given: +// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3}) +// +// Rightmost bit of `levels` is 3 indicating the number of nested vmaps less +// than or equal to 3. +// bitset: 010100 +// ^ +// | +// levels: 012345 +struct TORCH_API VmapPhysicalView { + VmapPhysicalView(Tensor&& tensor, std::bitset levels) + : levels_(levels), tensor_(tensor) { + // TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor)); + } + + Tensor& tensor() { return tensor_; } + const Tensor& tensor() const { return tensor_; } + + // Maps logical dim indices to physical dim indices. Also does dim wrapping. + // + // For example, given: + // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3}) + // + // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}. + // This is because the size of levels tell us that the first two dimensions + // of `tensor_` are batch dimensions, so a logical dim of `n` is actually + // a physical dim of `n + 2`. 
+ VmapDimVector getPhysicalDims(IntArrayRef logical_dims) const; + int64_t getPhysicalDim(int64_t logical_dim) const; + + // Returns a VmapPhysicalToLogicalMap object. This can be used for + // mapping a physical tensor to a new logical tensor (BatchedTensor) + VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const; + + // Maps a logical shape to a physical shape by pre-pending the batch + // sizes to the logical shape. + VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const; + SymDimVector getPhysicalShape(c10::SymIntArrayRef logical_shape) const; + + int64_t numBatchDims() const; + + private: + int64_t numLogicalDims() const; + + std::bitset levels_; + Tensor tensor_; +}; + +// Convenience struct used for mapping a physical tensor (a non-BatchedTensor) +// to a logical one (BatchedTensor). It holds some levels that are used to do the +// mapping and assumes that the batch dimensions in the physical tensor all +// occur at the front of the tensor. +struct TORCH_API VmapPhysicalToLogicalMap { + VmapPhysicalToLogicalMap(std::bitset levels): levels_(levels) {} + + // Maps a physical tensor to a new logical tensor (BatchedTensor). + // Assumes that all of the "batch dimensions" are at the front + // of the physical tensor. For example, given: + // - x = rank-4 Tensor with size 2, 3, 5, 7 + // - levels = (2, 4) + // Returns: + // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)]) + Tensor apply(const Tensor& physical_tensor) const; + + // Given a vector of physical tensors, + // 1. maps each tensor to a new logical tensor. Assumes that all of the + // "batch dimensions" are at the front of the physical tensors. + // 2. stores the new logical tensors back into the passed-in vector. This is + // to avoid additional dynamic allocations. + void applyInplace(std::vector& physical_tensors) const; + + std::bitset levels_; +}; + + +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/functorch/Macros.h b/voice_bridge/torch/include/ATen/functorch/Macros.h new file mode 100644 index 0000000000000000000000000000000000000000..eb0a763261bf051a814c2bfc128f4edd07732bdf --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/Macros.h @@ -0,0 +1,3 @@ +#pragma once + +#define SINGLE_ARG(...) __VA_ARGS__ diff --git a/voice_bridge/torch/include/ATen/functorch/PlumbingHelper.h b/voice_bridge/torch/include/ATen/functorch/PlumbingHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..9eb486a6eefa0c46d84dbdc8e350113515efc851 --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/PlumbingHelper.h @@ -0,0 +1,61 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. +#pragma once +#include +#include +#include + +// NOTE: [vmap plumbing] +// +// Here's how "batching rules" work. +// - we register kernels to the Batched key +// - these kernels have the same signatures as the original operators. +// For example, at::sin(Tensor self) accepts a Tensor, and the batched kernel +// must also accept a Tensor +// - However, it is more natural for users to write a batching rule like the +// following: sin_batch_rule(Tensor self, optional self_bdim) +// - There is some codegenerated layer (the "plumbing") that wraps the user +// defined batching rule (e.g. sin_batch_rule) in a kernel that can be +// registered to the Batched key. 
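Roughly what the generated plumbing looks like for a unary op, per the list above (a hand-written sketch: `sin_batch_rule` is a user-defined rule following the earlier convention, and the real generated code additionally handles non-participating levels, dispatch-key exclusion, and more):

```cpp
#include <ATen/functorch/PlumbingHelper.h>
#include <ATen/functorch/DynamicLayer.h>

// Assumed user-defined batch rule (see the batch-rule sketch earlier).
std::tuple<at::Tensor, c10::optional<int64_t>> sin_batch_rule(
    const at::Tensor&, c10::optional<int64_t>);

at::Tensor sin_plumbing_sketch(const at::Tensor& self) {
  auto maybe_layer = at::functorch::maybeCurrentDynamicLayer();
  TORCH_INTERNAL_ASSERT(maybe_layer.has_value());
  int64_t cur_level = maybe_layer->layerId();

  // Peel one layer of batching off the input...
  at::Tensor self_value;
  c10::optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) =
      at::functorch::unwrapTensorAtLevel(self, cur_level);

  // ...run the batch rule on the unwrapped value...
  at::Tensor result;
  c10::optional<int64_t> result_bdim;
  std::tie(result, result_bdim) = sin_batch_rule(self_value, self_bdim);

  // ...and re-wrap the result as a BatchedTensor at the current level.
  return at::functorch::makeBatched(result, result_bdim, cur_level);
}
```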
+// +// The plumbing is responsible for wrapping a batching rule into a form that may +// be registered as the kernel for the batched key. + +namespace at { namespace functorch { + +// Create a BatchedTensor given a tensor, bdim, and level +TORCH_API Tensor makeBatched(const Tensor& tensor, optional bdim, int64_t level); + +// Given a Tensor that may or may not be a BatchedTensor, unwrap it. +// If `tensor` is not a BatchedTensor, or is a BatchedTensor but the level +// doesn't match, then this returns (tensor, nullopt). +// Otherwise, it returns (unwrap(tensor), bdim). +TORCH_API std::tuple> unwrapTensorAtLevel(const Tensor& tensor, int64_t level); + +// Creates a vector of BatchedTensor +TORCH_API std::vector makeBatchedVector(const std::vector& tensors, optional bdim, int64_t level); + +// Returns True if ANY tensor in tensors is batched at level +TORCH_API bool isBatchedAtLevel(ITensorListRef tensors, int64_t level); +TORCH_API bool isBatchedAtLevel(const c10::List> maybe_tensors, int64_t level); +TORCH_API bool isBatchedAtLevel(const Tensor& tensor, int64_t level); +TORCH_API bool isBatchedAtLevel(const c10::optional& maybe_tensor, int64_t level); + +// Convenience helper. Returns true if any tensor is batched at level +TORCH_API bool areAnyBatchedAtLevel(ArrayRef> maybe_tensors, int64_t level); + +inline bool ivalueParticipatesInCurrentLevel(const IValue& ivalue) { + if (ivalue.isTensor()) { + auto maybe_level = maybeCurrentDynamicLayer(); + TORCH_INTERNAL_ASSERT(maybe_level.has_value()); + auto current_level = maybe_level->layerId(); + return isBatchedAtLevel(ivalue.toTensor(), current_level); + } + // TODO: should really check this + return false; +} + +}} diff --git a/voice_bridge/torch/include/ATen/functorch/TensorWrapper.h b/voice_bridge/torch/include/ATen/functorch/TensorWrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..25da91fd88e8f33b9e79c80f03aeefbb408a38a8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/TensorWrapper.h @@ -0,0 +1,97 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// All rights reserved. +// +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. + +#pragma once + +#include +#include + +namespace at { +namespace functorch { + +// NOTE: [functorch's TensorWrapper] +// +// Taking better suggestions for a name. TensorWrapper is the wrapper Tensor +// Subclass for functorch's grad-based transforms (grad, vjp, jvp). It is +// analogous to how vmap uses BatchedTensor as the wrapper Tensor subclass. +// +// If you're familiar with the Tensor-Variable merge, TensorWrapper is effectively +// another Variable. +// +// Consider grad(grad(torch.sin))(x). This wraps `x` as TensorWrapper(TensorWrapper(x)). +// The reason why is so that each TensorWrapper can hold its own AutogradMeta and +// participate in a **separate** autograd graph. +// +// There are alternative designs we could have chosen (e.g. each grad transform +// stores a weak map of Tensor -> AutogradMeta); the benefit of the TensorWrapper +// design is that we can re-use existing VariableType kernels (i.e. Autograd kernels) +// without much modification. 
Since a TensorWrapper looks like a regular Tensor, +// the VariableType kernel can pull out the AutogradMeta struct from where it +// expects and extend the autograd graph. + +struct TORCH_API TensorWrapper : public c10::TensorImpl { + explicit TensorWrapper( + c10::DispatchKeySet key_set, + Tensor value, + int64_t level, + std::shared_ptr is_alive, + bool is_immutable = false, // if true, this came from an operation that aliases an immutable tensor + bool use_value_sizes_strides = true); + + // Override a bunch of methods inherited from TensorImpl to return error messages + void set_size(int64_t dim, int64_t new_size) override; + void set_stride(int64_t dim, int64_t new_stride) override; + void set_storage_offset(int64_t storage_offset) override; + + void refreshMetadata(); + + const Tensor& value() const { + return value_; + } + optional level() const { + if (is_alive()) { + return level_; + } + return {}; + } + bool is_immutable() const { + return is_immutable_; + } + bool is_alive() const; + + // Overrides necessary for autograd + c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override; + c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override; + void shallow_copy_from(const c10::intrusive_ptr& impl) override; + + private: + const char* tensorimpl_type_name() const override; + Tensor value_; + int64_t level_; + bool is_immutable_; + + // TensorWrapper stores a flag indicating whether the Grad Interpreter + // that created it is still alive. + // If the Grad Interpreter is no longer alive, the wrapper attempts to behave like + // a regular Tensor. + // + // When we exit the level, this wrapper may be marked as "not alive". + // Wrappers that are not alive: + // 1) May still have autograd metadata on them + // 2) Forward dispatches to the underlying value() + std::shared_ptr is_alive_; +}; + +TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, int64_t level, bool is_immutable=false); +TORCH_API TensorWrapper* maybeGetTensorWrapper(const Tensor& tensor); +TORCH_API void dumpTensor(std::ostream & ss, const Tensor& tensor); +TORCH_API void dumpTensorCout(const Tensor& tensor); +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/functorch/VmapInterpreter.h b/voice_bridge/torch/include/ATen/functorch/VmapInterpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..2e4e6fff212f39d837c19aa6f37cc2135dba1046 --- /dev/null +++ b/voice_bridge/torch/include/ATen/functorch/VmapInterpreter.h @@ -0,0 +1,25 @@ +#pragma once +#include + +namespace at { namespace functorch { + +// This is the interpreter that handles the vmap() transform. +// See NOTE: [functorch interpreter stack] for more details.
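The VmapInterpreterPtr below is a thin, non-owning typed view over a type-erased Interpreter; the pattern looks roughly like this (illustrative stand-ins, not the functorch types):

#include <cassert>
#include <cstdint>

enum class TransformType { Grad, Vmap, Functionalize };

struct Interpreter {
  TransformType key;
  int64_t level;
};

// Non-owning typed view: check the dynamic kind once at construction,
// then expose transform-specific accessors without further checks.
struct VmapView {
  explicit VmapView(const Interpreter* base) : base_(base) {
    assert(base_->key == TransformType::Vmap);
  }
  int64_t level() const { return base_->level; }
 private:
  const Interpreter* base_;
};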
+ +struct VmapInterpreterPtr { + explicit VmapInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Vmap); } + TransformType key() const { return base_->key(); } + int64_t level() const { return base_->level(); } + void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack); + void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case); + int64_t batchSize() const { + return c10::get(base_->meta()).batchSize_; + } + RandomnessType randomness() const { + return c10::get(base_->meta()).randomness_; + } + private: + const Interpreter* base_; +}; + +}} // namespace at::functorch diff --git a/voice_bridge/torch/include/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h b/voice_bridge/torch/include/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h new file mode 100644 index 0000000000000000000000000000000000000000..c764cda19dd5ea20c783de3adb2092a169918cde --- /dev/null +++ b/voice_bridge/torch/include/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include + +// Use of c10::hip namespace here makes hipification easier, because +// I don't have to also fix namespaces. Sorry! +namespace c10 { namespace hip { + +// Takes a valid HIPAllocator (of any sort) and turns it into +// an allocator pretending to be a CUDA allocator. See +// Note [Masquerading as CUDA] +class HIPAllocatorMasqueradingAsCUDA final : public Allocator { + Allocator* allocator_; +public: + explicit HIPAllocatorMasqueradingAsCUDA(Allocator* allocator) + : allocator_(allocator) {} + DataPtr allocate(size_t size) const override { + DataPtr r = allocator_->allocate(size); + r.unsafe_set_device(Device(DeviceType::CUDA, r.device().index())); + return r; + } + DeleterFnPtr raw_deleter() const override { + return allocator_->raw_deleter(); + } +}; + +}} // namespace c10::hip diff --git a/voice_bridge/torch/include/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h b/voice_bridge/torch/include/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h new file mode 100644 index 0000000000000000000000000000000000000000..3aaa9d06c5e91f562382ecd56ea1d3c8a25d41af --- /dev/null +++ b/voice_bridge/torch/include/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include + +namespace c10 { +// forward declaration +class DataPtr; +namespace hip { +namespace HIPCachingAllocatorMasqueradingAsCUDA { + +C10_HIP_API Allocator* get(); +C10_HIP_API void recordStreamMasqueradingAsCUDA(const DataPtr& ptr, HIPStreamMasqueradingAsCUDA stream); + +} // namespace HIPCachingAllocatorMasqueradingAsCUDA +} // namespace hip +} // namespace c10 diff --git a/voice_bridge/torch/include/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h b/voice_bridge/torch/include/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h new file mode 100644 index 0000000000000000000000000000000000000000..c0ef7476196be80947a35a14e94e6a5ca6807b05 --- /dev/null +++ b/voice_bridge/torch/include/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h @@ -0,0 +1,353 @@ +#pragma once + +#include + +// The includes of HIPGuard.h +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +// Use of c10::hip namespace here makes hipification easier, because +// I don't have to also fix namespaces. Sorry! 
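The HIPAllocatorMasqueradingAsCUDA above is the masquerading trick in miniature: delegate the real work, then relabel the device metadata on the way out. A generic sketch of that decorator shape (hypothetical types, not the c10 classes):

#include <cstddef>

enum class DeviceType { CUDA, HIP };

struct Allocation {
  void* ptr;
  DeviceType device;
};

struct Allocator {
  virtual ~Allocator() = default;
  virtual Allocation allocate(std::size_t size) = 0;
};

// Decorator: forwards to a real HIP allocator but stamps the result as
// CUDA, so callers that only know "CUDA" keep working unchanged.
struct MasqueradingAllocator final : Allocator {
  explicit MasqueradingAllocator(Allocator* inner) : inner_(inner) {}
  Allocation allocate(std::size_t size) override {
    Allocation a = inner_->allocate(size); // real HIP allocation
    a.device = DeviceType::CUDA;           // relabel only the metadata
    return a;
  }
 private:
  Allocator* inner_;
};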
+namespace c10 { namespace hip { + +// Note [Masquerading as CUDA] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// c10_hip is very easy to understand: it is HIPified from c10_cuda, +// and anywhere you said CUDA, the source code now says HIP. HIPified +// PyTorch is much harder to understand: it is HIPified from regular +// PyTorch, yes, but NO source-to-source translation from CUDA to +// HIP occurs; instead, anywhere we see "CUDA", it actually means "HIP". +// For example, when you use HIPified PyTorch, you say x.cuda() to +// move a tensor onto ROCm device. We call this situation "HIP +// masquerading as CUDA". +// +// This leads to a very awkward situation when we want to call c10_hip +// code from PyTorch, since c10_hip is expecting things to be called +// HIP, but PyTorch is calling them CUDA (masquerading as HIP). To +// fix this impedance mismatch, we have MasqueradingAsCUDA variants +// for all c10_hip classes. These translate between the "HIP" and "CUDA +// masquerading as HIP" worlds. For example, +// HIPGuardImplMasqueradingAsCUDA (this file) provides something like a +// HIPGuardImpl, but it reports its DeviceType as CUDA (e.g., type() +// returns CUDA, getDevice() reports the current HIP device as a CUDA +// device.) +// +// We should be able to delete all of these classes entirely once +// we switch PyTorch to calling a HIP a HIP. +// +// When you add a new MasqueradingAsCUDA class/function, you need to +// also update the rewrite rules in torch/utils/hipify/cuda_to_hip_mappings.py +// +// +// +// By the way, note that the cpp file associated with this also +// *overwrites* the entry in the DeviceGuardImpl registry for CUDA with +// this HIP implementation. + +struct HIPGuardImplMasqueradingAsCUDA final : public c10::impl::DeviceGuardImplInterface { + static constexpr DeviceType static_type = DeviceType::CUDA; + HIPGuardImplMasqueradingAsCUDA() {} + HIPGuardImplMasqueradingAsCUDA(DeviceType t) { + TORCH_INTERNAL_ASSERT(t == DeviceType::CUDA); + } + DeviceType type() const override { + return DeviceType::CUDA; + } + Device exchangeDevice(Device d) const override { + TORCH_INTERNAL_ASSERT(d.is_cuda()); + Device old_device = getDevice(); + if (old_device.index() != d.index()) { + C10_HIP_CHECK(hipSetDevice(d.index())); + } + return old_device; + } + Device getDevice() const override { + int device; + C10_HIP_CHECK(hipGetDevice(&device)); + return Device(DeviceType::CUDA, device); + } + void setDevice(Device d) const override { + TORCH_INTERNAL_ASSERT(d.is_cuda()); + C10_HIP_CHECK(hipSetDevice(d.index())); + } + void uncheckedSetDevice(Device d) const noexcept override { + C10_HIP_CHECK_WARN(hipSetDevice(d.index())); + } + Stream getStream(Device d) const noexcept override { + return getCurrentHIPStreamMasqueradingAsCUDA(d.index()).unwrap(); + } + Stream getDefaultStream(Device d) const override { + return getDefaultHIPStreamMasqueradingAsCUDA(d.index()); + } + Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) const override { + return getStreamFromPoolMasqueradingAsCUDA(isHighPriority, d.index()); + } + Stream exchangeStream(Stream s) const noexcept override { + HIPStreamMasqueradingAsCUDA cs(s); + auto old_stream = getCurrentHIPStreamMasqueradingAsCUDA(s.device().index()); + setCurrentHIPStreamMasqueradingAsCUDA(cs); + return old_stream.unwrap(); + } + DeviceIndex deviceCount() const noexcept override { + int deviceCnt; + hipError_t _err; + _err = hipGetDeviceCount(&deviceCnt); +#if defined(USE_ROCM) && (ROCM_VERSION < 50201) + if(_err == hipErrorInvalidDevice) + 
return 0; +#endif + if(_err != hipErrorNoDevice && _err != hipSuccess) + C10_HIP_CHECK(_err); + return deviceCnt; + } + + // Event-related functions + // Note: hipEventCreateWithFlags should be called on the same device as + // the recording stream's device. + void createEvent( + hipEvent_t* hip_event, + const EventFlag flag) const { + // Maps PyTorch's Event::Flag to HIP flag + auto hip_flag = hipEventDefault; + switch (flag) { + case EventFlag::PYTORCH_DEFAULT: + case EventFlag::HIP_EVENT_DISABLE_TIMING: + hip_flag = hipEventDisableTiming; + break; + case EventFlag::BACKEND_DEFAULT: + case EventFlag::HIP_EVENT_DEFAULT: + hip_flag = hipEventDefault; + break; + default: + TORCH_CHECK(false, "HIP event received unknown flag"); + } + + C10_HIP_CHECK(hipEventCreateWithFlags(hip_event, hip_flag)); + } + + void destroyEvent( + void* event, + const DeviceIndex device_index) const noexcept override { + if (!event) return; + auto hip_event = static_cast(event); + int orig_device; + C10_HIP_CHECK_WARN(hipGetDevice(&orig_device)); + C10_HIP_CHECK_WARN(hipSetDevice(device_index)); + C10_HIP_CHECK_WARN(hipEventDestroy(hip_event)); + C10_HIP_CHECK_WARN(hipSetDevice(orig_device)); + } + + void record(void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override { + TORCH_CHECK(device_index == -1 || device_index == stream.device_index(), + "Event device index ", + device_index, + " does not match recording stream's device index ", + stream.device_index(), + "."); + + hipEvent_t hip_event = static_cast(*event); + HIPStreamMasqueradingAsCUDA hip_stream{stream}; + + // Moves to stream's device to record + const auto orig_device = getDevice(); + setDevice(stream.device()); + + // Creates the event (lazily) + if (!hip_event) createEvent(&hip_event, flag); + C10_HIP_CHECK(hipEventRecord(hip_event, hip_stream)); + // Makes the void* point to the (possibly just allocated) HIP event + *event = hip_event; + + // Resets device + setDevice(orig_device); + } + + void block( + void* event, + const Stream& stream) const override { + if (!event) return; + hipEvent_t hip_event = static_cast(event); + HIPStreamMasqueradingAsCUDA hip_stream{stream}; + const auto orig_device = getDevice(); + setDevice(stream.device()); + C10_HIP_CHECK(hipStreamWaitEvent( + hip_stream, + hip_event, + /*flags (must be zero)=*/ 0)); + setDevice(orig_device); + } + + bool queryEvent(void* event) const override { + if (!event) return true; + hipEvent_t hip_event = static_cast(event); + const hipError_t err = hipEventQuery(hip_event); + if (err != hipErrorNotReady) C10_HIP_CHECK(err); + else { + // ignore and clear the error if not ready + hipGetLastError(); + } + return (err == hipSuccess); + } + + // Stream-related functions + bool queryStream(const Stream& stream) const override { + HIPStreamMasqueradingAsCUDA hip_stream{stream}; + return hip_stream.query(); + } + + void synchronizeStream(const Stream& stream) const override { + HIPStreamMasqueradingAsCUDA hip_stream{stream}; + hip_stream.synchronize(); + } + + void recordDataPtrOnStream( + const c10::DataPtr& data_ptr, + const Stream& stream) const override { + HIPStreamMasqueradingAsCUDA hip_stream{stream}; + HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA(data_ptr, hip_stream); + } +}; + +// All of the guards which have HIPGuardImpl burned in need to also have +// variants using HIPGuardImplMasqueradingAsCUDA. 
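All of these guards share one RAII contract: record the current device (or stream) at construction, switch to the requested one, and restore the original on scope exit. A minimal model of that contract (not the c10 implementation):

#include <cassert>

int g_current_device = 0; // stand-in for the thread-local runtime state

// Minimal device guard: switch on construction, restore on destruction.
class ScopedDeviceGuard {
 public:
  explicit ScopedDeviceGuard(int device) : original_(g_current_device) {
    g_current_device = device;
  }
  ~ScopedDeviceGuard() { g_current_device = original_; }
  ScopedDeviceGuard(const ScopedDeviceGuard&) = delete;
  ScopedDeviceGuard& operator=(const ScopedDeviceGuard&) = delete;
 private:
  int original_;
};

void example() {
  {
    ScopedDeviceGuard guard(3); // like HIPGuardMasqueradingAsCUDA(3)
    assert(g_current_device == 3);
  }
  assert(g_current_device == 0); // restored on scope exit
}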
+ +/// This code is all a direct copy from c10/cuda/CUDAGuard.h, but with +/// the correct InlineDeviceGuard burned in. Sorry about the +/// copy-pasting. + +struct HIPGuardMasqueradingAsCUDA { + explicit HIPGuardMasqueradingAsCUDA() = delete; + explicit HIPGuardMasqueradingAsCUDA(DeviceIndex device_index) : guard_(device_index) {} + explicit HIPGuardMasqueradingAsCUDA(Device device) : guard_(device) {} + + HIPGuardMasqueradingAsCUDA(const HIPGuardMasqueradingAsCUDA&) = delete; + HIPGuardMasqueradingAsCUDA& operator=(const HIPGuardMasqueradingAsCUDA&) = delete; + HIPGuardMasqueradingAsCUDA(HIPGuardMasqueradingAsCUDA&& other) = delete; + HIPGuardMasqueradingAsCUDA& operator=(HIPGuardMasqueradingAsCUDA&& other) = delete; + + void set_device(Device device) { guard_.set_device(device); } + void reset_device(Device device) { guard_.reset_device(device); } + void set_index(DeviceIndex device_index) { guard_.set_index(device_index); } + Device original_device() const { return guard_.original_device(); } + Device current_device() const { return guard_.current_device(); } + + private: + c10::impl::InlineDeviceGuard guard_; +}; + +struct OptionalHIPGuardMasqueradingAsCUDA { + explicit OptionalHIPGuardMasqueradingAsCUDA() : guard_() {} + explicit OptionalHIPGuardMasqueradingAsCUDA(optional device_opt) : guard_(device_opt) {} + explicit OptionalHIPGuardMasqueradingAsCUDA(optional device_index_opt) : guard_(device_index_opt) {} + + OptionalHIPGuardMasqueradingAsCUDA(const OptionalHIPGuardMasqueradingAsCUDA&) = delete; + OptionalHIPGuardMasqueradingAsCUDA& operator=(const OptionalHIPGuardMasqueradingAsCUDA&) = delete; + OptionalHIPGuardMasqueradingAsCUDA(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete; + OptionalHIPGuardMasqueradingAsCUDA& operator=(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete; + + void set_device(Device device) { guard_.set_device(device); } + void reset_device(Device device) { guard_.reset_device(device); } + void set_index(DeviceIndex device_index) { guard_.set_index(device_index); } + optional original_device() const { return guard_.original_device(); } + optional current_device() const { return guard_.current_device(); } + void reset() { guard_.reset(); } + +private: + c10::impl::InlineOptionalDeviceGuard guard_; +}; + +struct HIPStreamGuardMasqueradingAsCUDA { + explicit HIPStreamGuardMasqueradingAsCUDA() = delete; + explicit HIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {} + HIPStreamGuardMasqueradingAsCUDA(const HIPStreamGuardMasqueradingAsCUDA&) = delete; + HIPStreamGuardMasqueradingAsCUDA& operator=(const HIPStreamGuardMasqueradingAsCUDA&) = delete; + HIPStreamGuardMasqueradingAsCUDA(HIPStreamGuardMasqueradingAsCUDA&& other) = delete; + HIPStreamGuardMasqueradingAsCUDA& operator=(HIPStreamGuardMasqueradingAsCUDA&& other) = delete; + + void reset_stream(Stream stream) { guard_.reset_stream(stream); } + + HIPStreamMasqueradingAsCUDA original_stream() const { + return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.original_stream()); + } + HIPStreamMasqueradingAsCUDA current_stream() const { + return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.current_stream()); + } + + Device current_device() const { return guard_.current_device(); } + Device original_device() const { return guard_.original_device(); } + +private: + c10::impl::InlineStreamGuard guard_; +}; + +struct OptionalHIPStreamGuardMasqueradingAsCUDA { + explicit OptionalHIPStreamGuardMasqueradingAsCUDA() : guard_()
{} + explicit OptionalHIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {} + explicit OptionalHIPStreamGuardMasqueradingAsCUDA(optional stream_opt) : guard_(stream_opt) {} + + OptionalHIPStreamGuardMasqueradingAsCUDA(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete; + OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete; + OptionalHIPStreamGuardMasqueradingAsCUDA(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete; + OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete; + + void reset_stream(Stream stream) { guard_.reset_stream(stream); } + + optional original_stream() const { + auto r = guard_.original_stream(); + if (r.has_value()) { + return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value())); + } else { + return nullopt; + } + } + + optional current_stream() const { + auto r = guard_.current_stream(); + if (r.has_value()) { + return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value())); + } else { + return nullopt; + } + } + + void reset() { guard_.reset(); } + +private: + c10::impl::InlineOptionalStreamGuard guard_; +}; + +struct HIPMultiStreamGuardMasqueradingAsCUDA { + explicit HIPMultiStreamGuardMasqueradingAsCUDA(ArrayRef streams) + : guard_(unwrapStreams(streams)) {} + + HIPMultiStreamGuardMasqueradingAsCUDA(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete; + HIPMultiStreamGuardMasqueradingAsCUDA& operator=(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete; + HIPMultiStreamGuardMasqueradingAsCUDA(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete; + HIPMultiStreamGuardMasqueradingAsCUDA& operator=(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete; + +private: + c10::impl::InlineMultiStreamGuard guard_; + + static std::vector unwrapStreams(ArrayRef hipStreams) { + std::vector streams; + streams.reserve(hipStreams.size()); + for (const HIPStreamMasqueradingAsCUDA& hipStream : hipStreams) { + streams.push_back(hipStream); + } + return streams; + } +}; + +}} // namespace c10::hip diff --git a/voice_bridge/torch/include/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h b/voice_bridge/torch/include/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h new file mode 100644 index 0000000000000000000000000000000000000000..417943da3777d53e5e7615c6b2e2c3c71ae1a3ec --- /dev/null +++ b/voice_bridge/torch/include/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h @@ -0,0 +1,124 @@ +#pragma once + +#include + +// Use of c10::hip namespace here makes hipification easier, because +// I don't have to also fix namespaces. Sorry! +namespace c10 { namespace hip { + +// See Note [Masquerading as CUDA] for motivation + +class HIPStreamMasqueradingAsCUDA { +public: + + enum Unchecked { UNCHECKED }; + + explicit HIPStreamMasqueradingAsCUDA(Stream stream) + : HIPStreamMasqueradingAsCUDA(UNCHECKED, stream) { + // We did the coercion unchecked; check that it was right. + TORCH_CHECK(stream.device().is_cuda() /* !!! */); + } + + explicit HIPStreamMasqueradingAsCUDA(Unchecked, Stream stream) + // Unsafely coerce the "CUDA" stream into a HIP stream + : stream_( + HIPStream( + Stream( + Stream::UNSAFE, + Device(DeviceType::HIP, stream.device_index()), + stream.id()) + ) + ) {} + + // New constructor, just for this. Does NOT coerce. 
+ explicit HIPStreamMasqueradingAsCUDA(HIPStream stream) : stream_(stream) {} + + bool operator==(const HIPStreamMasqueradingAsCUDA& other) const noexcept { + return stream_ == other.stream_; + } + + bool operator!=(const HIPStreamMasqueradingAsCUDA& other) const noexcept { + return stream_ != other.stream_; + } + + operator hipStream_t() const { return stream_.stream(); } + + operator Stream() const { + // Unsafely coerce HIP stream into a "CUDA" stream + return Stream(Stream::UNSAFE, device(), id()); + } + + DeviceIndex device_index() const { return stream_.device_index(); } + + Device device() const { + // Unsafely coerce HIP device into CUDA device + return Device(DeviceType::CUDA, stream_.device_index()); + } + + StreamId id() const { return stream_.id(); } + bool query() const { return stream_.query(); } + void synchronize() const { stream_.synchronize(); } + int priority() const { return stream_.priority(); } + hipStream_t stream() const { return stream_.stream(); } + + Stream unwrap() const { + // Unsafely coerce HIP stream into "CUDA" stream + return Stream(Stream::UNSAFE, device(), id()); + } + + uint64_t pack() const noexcept { + // Unsafely coerce HIP stream into "CUDA" stream before packing + return unwrap().pack(); + } + + static HIPStreamMasqueradingAsCUDA unpack(uint64_t bits) { + // NB: constructor manages CUDA->HIP translation for us + return HIPStreamMasqueradingAsCUDA(Stream::unpack(bits)); + } + + static std::tuple priority_range() { return HIPStream::priority_range(); } + + // New method, gets the underlying HIPStream + HIPStream hip_stream() const { return stream_; } + +private: + HIPStream stream_; +}; + +HIPStreamMasqueradingAsCUDA +inline getStreamFromPoolMasqueradingAsCUDA(const bool isHighPriority = false, DeviceIndex device = -1) { + return HIPStreamMasqueradingAsCUDA(getStreamFromPool(isHighPriority, device)); +} + +HIPStreamMasqueradingAsCUDA +inline getStreamFromExternalMasqueradingAsCUDA(hipStream_t ext_stream, DeviceIndex device) { + return HIPStreamMasqueradingAsCUDA(getStreamFromExternal(ext_stream, device)); +} + +inline HIPStreamMasqueradingAsCUDA getDefaultHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) { + return HIPStreamMasqueradingAsCUDA(getDefaultHIPStream(device_index)); +} + +inline HIPStreamMasqueradingAsCUDA getCurrentHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) { + return HIPStreamMasqueradingAsCUDA(getCurrentHIPStream(device_index)); +} + +inline void setCurrentHIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA stream) { + setCurrentHIPStream(stream.hip_stream()); +} + +inline std::ostream& operator<<(std::ostream& stream, const HIPStreamMasqueradingAsCUDA& s) { + stream << s.hip_stream() << " (masquerading as CUDA)"; + return stream; +} + +}} // namespace c10::hip + +namespace std { + template <> + struct hash { + size_t operator()(c10::hip::HIPStreamMasqueradingAsCUDA s) const noexcept { + return std::hash{}(s.unwrap()); + } + }; +} // namespace std diff --git a/voice_bridge/torch/include/ATen/jit_macros.h b/voice_bridge/torch/include/ATen/jit_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..9af826549021a0853beb83c74b6ac695728ab054 --- /dev/null +++ b/voice_bridge/torch/include/ATen/jit_macros.h @@ -0,0 +1,7 @@ +#pragma once +#include +#include + +// AT_USE_JITERATOR(), controls whether we jit some elementwise kernels +#define AT_USE_JITERATOR() true +#define jiterator_stringify(...) 
std::string(#__VA_ARGS__); diff --git a/voice_bridge/torch/include/ATen/jiterator_macros.h b/voice_bridge/torch/include/ATen/jiterator_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..3aa4c7ebb0af07fd65012d9d531aaa140dd6c212 --- /dev/null +++ b/voice_bridge/torch/include/ATen/jiterator_macros.h @@ -0,0 +1,38 @@ +#pragma once +#include +#include + +#define JITERATOR_HOST_DEVICE C10_HOST_DEVICE +#if defined(_MSC_VER) && defined(__CUDACC__) +// NVRTC on Windows errors if __host__ __device__ attribute is +// present on kernel. +// error: attribute "__host__" does not apply here +// error: attribute "__device__" does not apply here +#define JITERATOR_HOST_DEVICE +#endif + +// jiterator_also_stringify_as macro is used to define code (for CPU/ROCm) +// and generate code string for `jiterator` (only when compiling for CUDA). +// Usage : +// jiterator_also_stringify_as( +// jiterator_code(template T identity(T x) { return x; }), +// identity_string); +// This will define the template `identity` as present in code and +// also define `std::string identity_string` with the code as the string +// if this is being compiled for CUDA. + +// `jiterator_code` macro is to deal with `,` in the kernel code. +// These `,`s confuse the preprocessor into thinking we are passing +// multiple arguments to the macro. +#define jiterator_code(...) __VA_ARGS__ +#if defined(__CUDACC__) || defined(__HIPCC__) +// CPU and CUDA and ROCm case +#define stringify_code(...) #__VA_ARGS__ +#define jiterator_also_stringify_as(code, str_name) \ + code /* define the function */ \ + const std::string str_name = std::string(stringify_code(code)); +#else +// CPU only or CPU and ROCm case +// Only needs the function +#define jiterator_also_stringify_as(code, str_name) code +#endif diff --git a/voice_bridge/torch/include/ATen/native/Activation.h b/voice_bridge/torch/include/ATen/native/Activation.h new file mode 100644 index 0000000000000000000000000000000000000000..ba2dbc0768e8f8277af026a7bb51365ff99abbc8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Activation.h @@ -0,0 +1,90 @@ +#pragma once + +#include + +namespace c10 { +class Scalar; +} + +namespace at { +struct TensorIterator; +struct TensorIteratorBase; +class TensorBase; +} + +namespace at { namespace native { + +// These constants control the approximation behavior of gelu function. 
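For reference, the two non-trivial variants declared below correspond to the standard gelu definitions (a plain-scalar sketch; the actual kernels are vectorized and dispatched per backend):

#include <cmath>

// GeluType::None - exact gelu: x * Phi(x), with Phi the standard normal CDF.
double gelu_exact(double x) {
  return 0.5 * x * (1.0 + std::erf(x / std::sqrt(2.0)));
}

// GeluType::Tanh - tanh approximation:
// 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
double gelu_tanh(double x) {
  const double kPi = 3.14159265358979323846;
  return 0.5 * x *
      (1.0 + std::tanh(std::sqrt(2.0 / kPi) * (x + 0.044715 * x * x * x)));
}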
+enum GeluType { + None, // Baseline Gelu + Tanh, // Tanh Gelu Approximation + END +}; + +static GeluType get_gelutype_enum(const c10::string_view approximate) { + if (approximate == "none") { + return GeluType::None; + } else if (approximate == "tanh") { + return GeluType::Tanh; + } else { + TORCH_CHECK(false, "approximate argument must be either none or tanh."); + } +} + +using structured_activation_fn = void (*)(TensorIteratorBase&); +using structured_activation_backward_fn = void (*)(TensorIteratorBase&); + +using activation_fn = void (*)(TensorIterator&); +using activation_backward_fn = void (*)(TensorIterator&); +using softplus_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&); +using softplus_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&); +using threshold_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&); +using hardtanh_backward_fn = void (*)(TensorIterator&, const c10::Scalar&, const c10::Scalar&); +using hardsigmoid_fn = void(*)(TensorIteratorBase&); +using hardsigmoid_backward_fn = void(*)(TensorIteratorBase&); +using hardswish_fn = void(*)(TensorIterator&); +using hardswish_backward_fn = void(*)(TensorIterator&); +using shrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&); +using softshrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&); +using shrink_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&); +using elu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&); +using elu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&, bool); +using leaky_relu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&); +using leaky_relu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&); +using log_sigmoid_cpu_fn = void (*)(TensorBase&, TensorBase&, const TensorBase&); +using gelu_fn = void (*)(TensorIteratorBase&, GeluType); +using gelu_backward_fn = void (*)(TensorIteratorBase&, GeluType); +using glu_jvp_fn = void (*)(TensorIteratorBase&); + +DECLARE_DISPATCH(elu_fn, elu_stub); +DECLARE_DISPATCH(elu_backward_fn, elu_backward_stub); +DECLARE_DISPATCH(softplus_fn, softplus_stub); +DECLARE_DISPATCH(softplus_backward_fn, softplus_backward_stub); +DECLARE_DISPATCH(log_sigmoid_cpu_fn, log_sigmoid_cpu_stub); +DECLARE_DISPATCH(activation_backward_fn, log_sigmoid_backward_stub); +DECLARE_DISPATCH(threshold_fn, threshold_stub); +DECLARE_DISPATCH(gelu_fn, GeluKernel); +DECLARE_DISPATCH(gelu_backward_fn, GeluBackwardKernel); +DECLARE_DISPATCH(hardtanh_backward_fn, hardtanh_backward_stub); +DECLARE_DISPATCH(hardsigmoid_fn, hardsigmoid_stub); +DECLARE_DISPATCH(hardsigmoid_backward_fn, hardsigmoid_backward_stub); +DECLARE_DISPATCH(hardswish_fn, hardswish_stub); +DECLARE_DISPATCH(hardswish_backward_fn, hardswish_backward_stub); +DECLARE_DISPATCH(shrink_fn, hardshrink_stub); +DECLARE_DISPATCH(softshrink_fn, softshrink_stub); +DECLARE_DISPATCH(shrink_backward_fn, shrink_backward_stub); +DECLARE_DISPATCH(leaky_relu_fn, leaky_relu_stub); +DECLARE_DISPATCH(leaky_relu_backward_fn, leaky_relu_backward_stub); +DECLARE_DISPATCH(structured_activation_fn, glu_stub); +DECLARE_DISPATCH(activation_backward_fn, glu_backward_stub); +DECLARE_DISPATCH(glu_jvp_fn, glu_jvp_stub); +DECLARE_DISPATCH(structured_activation_fn, silu_stub); +DECLARE_DISPATCH(structured_activation_backward_fn, silu_backward_stub); +DECLARE_DISPATCH(structured_activation_fn, mish_stub);
+DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub); +DECLARE_DISPATCH(activation_fn, prelu_cpu_stub); +DECLARE_DISPATCH(activation_backward_fn, prelu_backward_cpu_stub); + +} // namespace native + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/AdaptivePooling.h b/voice_bridge/torch/include/ATen/native/AdaptivePooling.h new file mode 100644 index 0000000000000000000000000000000000000000..f93bb87a49374cd39a7598824f40b0c43058ecc2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/AdaptivePooling.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include + +namespace at { +class Tensor; + +namespace native { + +using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size); +using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output); +DECLARE_DISPATCH(adaptive_avg_pooling_fn, adaptive_avg_pool2d_kernel); +DECLARE_DISPATCH(adaptive_avg_pooling_backward_fn, adaptive_avg_pool2d_backward_kernel); + +using adaptive_max_pooling_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size); +using adaptive_max_pooling_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices); +DECLARE_DISPATCH(adaptive_max_pooling_fn, adaptive_max_pool2d_kernel); +DECLARE_DISPATCH(adaptive_max_pooling_backward_fn, adaptive_max_pool2d_backward_kernel); + +static inline int64_t start_index(int64_t a, int64_t b, int64_t c) { + return (a / b) * c + ((a % b) * c) / b; +} + +static inline int64_t end_index(int64_t a, int64_t b, int64_t c) { + return 1 + ((a + 1) * c - 1) / b; +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/BatchLinearAlgebra.h b/voice_bridge/torch/include/ATen/native/BatchLinearAlgebra.h new file mode 100644 index 0000000000000000000000000000000000000000..955b83b3855a20445ad6737a574afe082a155ba8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/BatchLinearAlgebra.h @@ -0,0 +1,320 @@ +#pragma once + +#include +#include +#include + +// Forward declare TI +namespace at { +class Tensor; +struct TensorIterator; + +namespace native { +enum class TransposeType; +} + +} + +namespace at { namespace native { + +enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss}; + +#if AT_BUILD_WITH_LAPACK() +// Define per-batch functions to be used in the implementation of batched +// linear algebra operations + +template +void lapackCholesky(char uplo, int n, scalar_t *a, int lda, int *info); + +template +void lapackCholeskyInverse(char uplo, int n, scalar_t *a, int lda, int *info); + +template +void lapackEig(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *w, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, value_t *rwork, int *info); + +template +void lapackGeqrf(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info); + +template +void lapackOrgqr(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info); + +template +void lapackOrmqr(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info); + +template +void lapackSyevd(char jobz, char uplo, int n, scalar_t* a, int lda, value_t* w, scalar_t* work, int lwork, value_t* rwork, int lrwork, int* iwork, int liwork, int* info); + +template +void lapackGels(char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int 
ldb, + scalar_t *work, int lwork, int *info); + +template +void lapackGelsd(int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + value_t *s, value_t rcond, int *rank, + scalar_t* work, int lwork, + value_t *rwork, int* iwork, int *info); + +template +void lapackGelsy(int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + int *jpvt, value_t rcond, int *rank, + scalar_t *work, int lwork, value_t* rwork, int *info); + +template +void lapackGelss(int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + value_t *s, value_t rcond, int *rank, + scalar_t *work, int lwork, + value_t *rwork, int *info); + +template +struct lapackLstsq_impl; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGels( + trans, m, n, nrhs, + a, lda, b, ldb, + work, lwork, info); + } +}; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGelsy( + m, n, nrhs, + a, lda, b, ldb, + jpvt, rcond, rank, + work, lwork, rwork, info); + } +}; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGelsd( + m, n, nrhs, + a, lda, b, ldb, + s, rcond, rank, + work, lwork, + rwork, iwork, info); + } +}; + +template +struct lapackLstsq_impl { + static void call( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackGelss( + m, n, nrhs, + a, lda, b, ldb, + s, rcond, rank, + work, lwork, + rwork, info); + } +}; + +template +void lapackLstsq( + char trans, int m, int n, int nrhs, + scalar_t *a, int lda, scalar_t *b, int ldb, + scalar_t *work, int lwork, int *info, // Gels flavor + int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor + value_t *s, // Gelss flavor + int *iwork // Gelsd flavor + ) { + lapackLstsq_impl::call( + trans, m, n, nrhs, + a, lda, b, ldb, + work, lwork, info, + jpvt, rcond, rank, rwork, + s, + iwork); +} + +template +void lapackLuSolve(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info); + +template +void lapackLu(int m, int n, scalar_t *a, int lda, int *ipiv, int *info); + +template +void lapackLdlHermitian( + char uplo, + int n, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* work, + int lwork, + int* info); + +template +void lapackLdlSymmetric( + char uplo, + int n, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* work, + int lwork, + int* info); + +template +void lapackLdlSolveHermitian( + char uplo, + int n, + int nrhs, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* b, + int ldb, + int* info); + +template +void 
lapackLdlSolveSymmetric( + char uplo, + int n, + int nrhs, + scalar_t* a, + int lda, + int* ipiv, + scalar_t* b, + int ldb, + int* info); + +template +void lapackSvd(char jobz, int m, int n, scalar_t *a, int lda, value_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, value_t *rwork, int *iwork, int *info); +#endif + +#if AT_BUILD_WITH_BLAS() +template +void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int nrhs, scalar_t* a, int lda, scalar_t* b, int ldb); +#endif + +using cholesky_fn = void (*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/); +DECLARE_DISPATCH(cholesky_fn, cholesky_stub); + +using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/); + +DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub); + +using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/); + +DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub); + +using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/); +DECLARE_DISPATCH(geqrf_fn, geqrf_stub); + +using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/); +DECLARE_DISPATCH(orgqr_fn, orgqr_stub); + +using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/); +DECLARE_DISPATCH(ormqr_fn, ormqr_stub); + +using linalg_eigh_fn = void (*)( + const Tensor& /*eigenvalues*/, + const Tensor& /*eigenvectors*/, + const Tensor& /*infos*/, + bool /*upper*/, + bool /*compute_eigenvectors*/); +DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub); + +using lstsq_fn = void (*)( + const Tensor& /*a*/, + Tensor& /*b*/, + Tensor& /*rank*/, + Tensor& /*singular_values*/, + Tensor& /*infos*/, + double /*rcond*/, + std::string /*driver_name*/); +DECLARE_DISPATCH(lstsq_fn, lstsq_stub); + +using triangular_solve_fn = void (*)( + const Tensor& /*A*/, + const Tensor& /*B*/, + bool /*left*/, + bool /*upper*/, + TransposeType /*transpose*/, + bool /*unitriangular*/); +DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub); + +using lu_factor_fn = void (*)( + const Tensor& /*input*/, + const Tensor& /*pivots*/, + const Tensor& /*infos*/, + bool /*compute_pivots*/); +DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub); + +using unpack_pivots_fn = void(*)( + TensorIterator& iter, + const int64_t dim_size, + const int64_t max_pivot); +DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub); + +using lu_solve_fn = void (*)( + const Tensor& /*LU*/, + const Tensor& /*pivots*/, + const Tensor& /*B*/, + TransposeType /*trans*/); +DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub); + +using ldl_factor_fn = void (*)( + const Tensor& /*LD*/, + const Tensor& /*pivots*/, + const Tensor& /*info*/, + bool /*upper*/, + bool /*hermitian*/); +DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub); + +using svd_fn = void (*)( + const Tensor& /*A*/, + const bool /*full_matrices*/, + const bool /*compute_uv*/, + const c10::optional& /*driver*/, + const Tensor& /*U*/, + const Tensor& /*S*/, + const Tensor& /*Vh*/, + const Tensor& /*info*/); +DECLARE_DISPATCH(svd_fn, svd_stub); + +using ldl_solve_fn = void (*)( + const Tensor& /*LD*/, + const Tensor& /*pivots*/, + const Tensor& /*result*/, + bool /*upper*/, + bool /*hermitian*/); +DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub); +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/BinaryOps.h 
b/voice_bridge/torch/include/ATen/native/BinaryOps.h new file mode 100644 index 0000000000000000000000000000000000000000..de1c9c8c0bbfe8083a4a7dee0610dcb941eae93a --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/BinaryOps.h @@ -0,0 +1,117 @@ +#pragma once + +#include +#include +#include + +namespace at { +struct TensorIterator; +struct TensorIteratorBase; +} + +namespace at { namespace native { + +inline void alpha_check(const ScalarType dtype, const Scalar& alpha) { + TORCH_CHECK(! alpha.isBoolean() || dtype == ScalarType::Bool, + "Boolean alpha only supported for Boolean results."); + TORCH_CHECK(isFloatingType(dtype) || isComplexType(dtype) + || alpha.isIntegral(true), + "For integral input tensors, argument alpha must not be a floating point number."); + TORCH_CHECK(isComplexType(dtype) || !alpha.isComplex(), + "For non-complex input tensors, argument alpha must not be a complex number.") +} + +// Basic checking for all sub functions. +inline void sub_check(const TensorBase& self, const TensorBase& other) { + TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool, + "Subtraction, the `-` operator, with two bool tensors is not supported. " + "Use the `^` or `logical_xor()` operator instead.") + TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool, + "Subtraction, the `-` operator, with a bool tensor is not supported. " + "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead."); +} + +inline void sub_check(const TensorBase& self, const Scalar& scalar) { + TORCH_CHECK(self.scalar_type() != kBool || !scalar.isBoolean(), + "Subtraction, the `-` operator, with two bool tensors is not supported. " + "Use the `^` or `logical_xor()` operator instead.") + TORCH_CHECK(self.scalar_type() != kBool && !scalar.isBoolean(), + "Subtraction, the `-` operator, with a bool tensor is not supported. 
" + "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead."); +} + +using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha); +using structured_binary_fn_double = void(*)(TensorIteratorBase&, double); +using structured_binary_fn = void(*)(TensorIteratorBase&); + +using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha); +using binary_fn_double = void(*)(TensorIterator&, double); +using binary_fn = void(*)(TensorIterator&); +using binary_clamp_fn_alpha = + void(*)(TensorIterator&, const Scalar& alpha, const Scalar& min_val, const Scalar& max_val); + +// NB: codegenned +DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub); + +DECLARE_DISPATCH(binary_clamp_fn_alpha, add_clamp_stub); +DECLARE_DISPATCH(structured_binary_fn_alpha, sub_stub); +DECLARE_DISPATCH(structured_binary_fn, mul_stub); +DECLARE_DISPATCH(structured_binary_fn, div_true_stub); +DECLARE_DISPATCH(structured_binary_fn, div_floor_stub); +DECLARE_DISPATCH(structured_binary_fn, div_trunc_stub); +DECLARE_DISPATCH(structured_binary_fn, atan2_stub); +DECLARE_DISPATCH(structured_binary_fn, remainder_stub); +DECLARE_DISPATCH(structured_binary_fn, bitwise_and_stub); +DECLARE_DISPATCH(structured_binary_fn, bitwise_or_stub); +DECLARE_DISPATCH(structured_binary_fn, bitwise_xor_stub); +DECLARE_DISPATCH(structured_binary_fn, lshift_stub); +DECLARE_DISPATCH(structured_binary_fn, rshift_stub); +DECLARE_DISPATCH(binary_fn, logical_xor_stub); +DECLARE_DISPATCH(binary_fn, logical_and_stub); +DECLARE_DISPATCH(binary_fn, logical_or_stub); +DECLARE_DISPATCH(structured_binary_fn, lt_stub); +DECLARE_DISPATCH(structured_binary_fn, le_stub); +DECLARE_DISPATCH(structured_binary_fn, gt_stub); +DECLARE_DISPATCH(structured_binary_fn, ge_stub); +DECLARE_DISPATCH(structured_binary_fn, eq_stub); +DECLARE_DISPATCH(structured_binary_fn, ne_stub); +DECLARE_DISPATCH(binary_fn, max_elementwise_stub); +DECLARE_DISPATCH(binary_fn, min_elementwise_stub); +DECLARE_DISPATCH(structured_binary_fn, maximum_stub); +DECLARE_DISPATCH(structured_binary_fn, minimum_stub); +DECLARE_DISPATCH(structured_binary_fn, fmax_stub); +DECLARE_DISPATCH(structured_binary_fn, fmin_stub); +DECLARE_DISPATCH(structured_binary_fn_double, smooth_l1_stub); +DECLARE_DISPATCH(binary_fn_double, huber_stub); +DECLARE_DISPATCH(structured_binary_fn, sigmoid_backward_stub); +DECLARE_DISPATCH(binary_fn_alpha, logit_backward_stub); +DECLARE_DISPATCH(structured_binary_fn, tanh_backward_stub); +DECLARE_DISPATCH(structured_binary_fn, mse_stub); +DECLARE_DISPATCH(structured_binary_fn, fmod_stub); +DECLARE_DISPATCH(structured_binary_fn, logaddexp_stub); +DECLARE_DISPATCH(structured_binary_fn, logaddexp2_stub); +DECLARE_DISPATCH(structured_binary_fn, gcd_stub); +DECLARE_DISPATCH(structured_binary_fn, lcm_stub); +DECLARE_DISPATCH(structured_binary_fn, hypot_stub); +DECLARE_DISPATCH(structured_binary_fn, igamma_stub); +DECLARE_DISPATCH(structured_binary_fn, igammac_stub); +DECLARE_DISPATCH(structured_binary_fn, nextafter_stub); +DECLARE_DISPATCH(structured_binary_fn, heaviside_stub); +DECLARE_DISPATCH(structured_binary_fn, copysign_stub); +DECLARE_DISPATCH(structured_binary_fn, xlogy_stub); +DECLARE_DISPATCH(structured_binary_fn, xlog1py_stub); +DECLARE_DISPATCH(structured_binary_fn, zeta_stub); +DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_t_stub); +DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_u_stub); +DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_v_stub); 
+DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_w_stub); +DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_h_stub); +DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_he_stub); +DECLARE_DISPATCH(structured_binary_fn, laguerre_polynomial_l_stub); +DECLARE_DISPATCH(structured_binary_fn, legendre_polynomial_p_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_t_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub); +DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/BucketizationUtils.h b/voice_bridge/torch/include/ATen/native/BucketizationUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..e23fa126780779609b5489e069a23589d13d6cc6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/BucketizationUtils.h @@ -0,0 +1,167 @@ +#pragma once + +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at { +namespace native { + +// original values given by raw_*. If an original value is not contiguous, will make a contiguous copy to +// the corresponding trimmed_* value. Additionally, if the dtypes of the boundary and input tensor do not +// match, will change them to be a common super type so comparisons are done between the same types. +// For any trimmed_* tensor, if its outgoing value matches what it was incoming (typically null), then the +// corresponding raw_* version should be used since it was already contiguous of the right type. +inline void searchsorted_maybe_trim_input_tensors( + Tensor& trimmed_input, + Tensor& trimmed_boundaries, + Tensor& trimmed_sorter, + const Tensor& raw_input, + const Tensor& raw_boundaries, + const Tensor& raw_sorter) { + bool in_is_contiguous = raw_input.is_contiguous(); + bool bd_is_contiguous = raw_boundaries.is_contiguous(); + bool sort_is_contiguous = raw_sorter.is_contiguous(); + + if (!in_is_contiguous) { + TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the performance due " + "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous input value " + "tensor if possible. This message will only appear once per program."); + trimmed_input = raw_input.contiguous(); + } + if (!bd_is_contiguous) { + TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the performance due " + "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous boundary " + "tensor if possible. This message will only appear once per program."); + trimmed_boundaries = raw_boundaries.contiguous(); + } + if (!sort_is_contiguous) { + TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the performance due " + "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sorter " + "tensor if possible. 
This message will only appear once per program."); + trimmed_sorter = raw_sorter.contiguous(); + } + if (raw_input.dtype() != raw_boundaries.dtype()) { + at::native::ResultTypeState state = {}; + state = at::native::update_result_type_state(raw_boundaries, state); + state = at::native::update_result_type_state(raw_input, state); + ScalarType common_stype = at::native::result_type(state); + + TORCH_INTERNAL_ASSERT(common_stype != ScalarType::Undefined); + if (common_stype != raw_input.scalar_type()) { + trimmed_input = in_is_contiguous ? raw_input.to(common_stype) : trimmed_input.to(common_stype); + } + if (common_stype != raw_boundaries.scalar_type()) { + trimmed_boundaries = bd_is_contiguous ? raw_boundaries.to(common_stype) : trimmed_boundaries.to(common_stype); + } + } +} + +/* unused but needed for internal jagged tensor class */ +inline void searchsorted_maybe_trim_input_tensors( + Tensor& trimmed_input, + Tensor& trimmed_boundaries, + const Tensor& raw_input, + const Tensor& raw_boundaries) { + Tensor trimmed_sorter; + Tensor raw_sorter; + return searchsorted_maybe_trim_input_tensors( + trimmed_input, + trimmed_boundaries, + trimmed_sorter, + raw_input, + raw_boundaries, + raw_sorter); +} + +inline bool searchsorted_dims_matched_before_last_dim(const Tensor& boundaries, const Tensor& input) { + if (boundaries.dim() != input.dim()) { + return false; + } + const auto& dims_bd = boundaries.sizes(); + const auto& dims_in = input.sizes(); + for (int64_t dim = 0; dim + 1 < boundaries.dim(); ++dim) { + if (dims_bd[dim] != dims_in[dim]) { + return false; + } + } + return true; +} + +inline Tensor searchsorted_scalar_tensor(const Scalar& scalar, const c10::Device& device) { + auto tensor = c10::scalar_to_tensor(scalar, device); + // This is to adopt the scalar promotion rules defined in native/TypeProperties.h + // So we have the same type promotion rules as binary operations. 
+ tensor.unsafeGetTensorImpl()->set_wrapped_number(true); + return tensor; +} + +inline void searchsorted_pre_check( + const Tensor& boundaries, + const Tensor& input, + const Tensor& output, + const bool out_int32, + const bool right, + const c10::optional side_opt, + const Tensor& sorter) { + if (side_opt) { + const c10::string_view side = *side_opt; + TORCH_CHECK(side == "left" || side == "right", "torch.searchsorted(): side can only be 'left' or 'right' but ", + "got ", side); + + // assume the user has not explicitly set (right=False, side="right") + TORCH_CHECK(!right || side == "right", "torch.searchsorted(): side and right can't be set to opposites, got side " + "of ", side, " while right was True"); + } + + TORCH_CHECK(boundaries.device() == input.device(), "torch.searchsorted(): boundaries and input value tensors ", + "should have same device type, but got boundaries tensor device type ", boundaries.device(), " and input value ", + "tensor device type ", input.device()); + + if (sorter.defined()) { + TORCH_CHECK(sorter.device() == boundaries.device(), "torch.searchsorted(): sorter and boundary tensors should ", + "have same device type, but got sorter tensor device type ", sorter.device(), " and input value tensor ", + "device type ", boundaries.device()); + + TORCH_CHECK(sorter.sizes() == boundaries.sizes(), "torch.searchsorted(): boundary and sorter must have the same " + "size, but got boundary tensor ", boundaries.sizes(), "and got sorter tensor ", sorter.sizes()); + + TORCH_CHECK(sorter.scalar_type() == ScalarType::Long, "torch.searchsorted(): sorter must be a tensor of long ", + "dtype but got dtype ", sorter.scalar_type()); + } + + TORCH_CHECK(input.dim() > 0 || (input.dim() == 0 && input.numel() == 1 && boundaries.dim() == 1), + "torch.searchsorted(): input value can be a scalar only when boundaries tensor dimension is 1, but we got ", + "boundaries tensor dim(", boundaries.dim(), ") and input value's dim(", input.dim(), ") numel(", + input.numel(), ")"); + + TORCH_CHECK(boundaries.dim() != 0, "torch.searchsorted(): boundaries tensor should have positive dimension, but ", + "got 0 dimension"); + + TORCH_CHECK(boundaries.dim() == 1 || searchsorted_dims_matched_before_last_dim(boundaries, input), + "torch.searchsorted(): boundaries tensor should be 1 dimension or the first N-1 dimensions of boundaries tensor ", + "and input value tensor must match, but we got boundaries tensor ", boundaries.sizes(), " and input value tensor ", + input.sizes()); + + ScalarType output_dtype = output.scalar_type(); + TORCH_CHECK( + (output_dtype == ScalarType::Long && !out_int32) || + (output_dtype == ScalarType::Int && out_int32), + "torch.searchsorted(): output tensor's dtype is wrong, it can only be Int(int32) or Long(int64) depending on ", + "whether out_int32 flag is True, but we got output tensor's dtype ", output_dtype, + " and out_int32 flag is ", (out_int32 ? 
"True" : "False")); + + if (out_int32) { + TORCH_CHECK(boundaries.sizes().back() < INT_MAX, + "torch.searchsorted(): the size of boundaries' last dimension should be less than ", INT_MAX, ", but we got ", + boundaries.sizes().back()); + } +} + +}} diff --git a/voice_bridge/torch/include/ATen/native/CPUBlas.h b/voice_bridge/torch/include/ATen/native/CPUBlas.h new file mode 100644 index 0000000000000000000000000000000000000000..969bfe2afd946e347080740547f2a946a6534ac6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/CPUBlas.h @@ -0,0 +1,164 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +namespace cpublas { + +namespace internal { +void normalize_last_dims( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + int64_t *lda, int64_t *ldb, int64_t *ldc); +} // namespace internal + +using gemm_fn = void(*)( + at::ScalarType type, + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + const Scalar& alpha, + const void *a, int64_t lda, + const void *b, int64_t ldb, + const Scalar& beta, + void *c, int64_t ldc); + +DECLARE_DISPATCH(gemm_fn, gemm_stub); + +template +void gemm( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + at::opmath_type alpha, + const scalar_t *a, int64_t lda, + const scalar_t *b, int64_t ldb, + at::opmath_type beta, + scalar_t *c, int64_t ldc) { + internal::normalize_last_dims(transa, transb, m, n, k, &lda, &ldb, &ldc); + gemm_stub( + kCPU, c10::CppTypeToScalarType::value, + transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); +} + +void gemm( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + double alpha, + const double *a, int64_t lda, + const double *b, int64_t ldb, + double beta, + double *c, int64_t ldc); + +void gemm( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + float alpha, + const float *a, int64_t lda, + const float *b, int64_t ldb, + float beta, + float *c, int64_t ldc); + +void gemm( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + float alpha, + const at::BFloat16 *a, int64_t lda, + const at::BFloat16 *b, int64_t ldb, + float beta, + at::BFloat16 *c, int64_t ldc); + +void gemm( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + c10::complex alpha, + const c10::complex *a, int64_t lda, + const c10::complex *b, int64_t ldb, + c10::complex beta, + c10::complex *c, int64_t ldc); + +void gemm( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + c10::complex alpha, + const c10::complex *a, int64_t lda, + const c10::complex *b, int64_t ldb, + c10::complex beta, + c10::complex *c, int64_t ldc); + +void gemm( + TransposeType transa, TransposeType transb, + int64_t m, int64_t n, int64_t k, + int64_t alpha, + const int64_t *a, int64_t lda, + const int64_t *b, int64_t ldb, + int64_t beta, + int64_t *c, int64_t ldc); + +template +void gemm_batched( + TransposeType transa, TransposeType transb, + int64_t batch_size, int64_t m, int64_t n, int64_t k, + scalar_t alpha, + const scalar_t * const *a, int64_t lda, + const scalar_t * const *b, int64_t ldb, + const scalar_t beta, + scalar_t * const *c, int64_t ldc); + +template +void gemm_batched_with_stride( + TransposeType transa, TransposeType transb, + int64_t batch_size, int64_t m, int64_t n, int64_t k, + scalar_t alpha, + const scalar_t *a, int64_t lda, int64_t batch_stride_a, + const 
scalar_t *b, int64_t ldb, int64_t batch_stride_b, + scalar_t beta, + scalar_t *c, int64_t ldc, int64_t batch_stride_c); + +using axpy_fn = void(*)(at::ScalarType type, int64_t n, const Scalar& a, const void *x, int64_t incx, void *y, int64_t incy); + +DECLARE_DISPATCH(axpy_fn, axpy_stub); + +template +void axpy(int64_t n, scalar_t a, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy){ + if(n == 1) + { + incx = 1; + incy = 1; + } + axpy_stub( + kCPU, c10::CppTypeToScalarType::value, + n, a, x, incx, y, incy); +} + +void axpy(int64_t n, double a, const double *x, int64_t incx, double *y, int64_t incy); +void axpy(int64_t n, float a, const float *x, int64_t incx, float *y, int64_t incy); +void axpy(int64_t n, c10::complex a, const c10::complex *x, int64_t incx, c10::complex *y, int64_t incy); +void axpy(int64_t n, c10::complex a, const c10::complex *x, int64_t incx, c10::complex *y, int64_t incy); + +using copy_fn = void(*)(at::ScalarType type, int64_t n, const void *x, int64_t incx, void *y, int64_t incy); + +DECLARE_DISPATCH(copy_fn, copy_stub); + +template +void copy(int64_t n, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) { + if(n == 1) + { + incx = 1; + incy = 1; + } + copy_stub( + kCPU, c10::CppTypeToScalarType::value, + n, x, incx, y, incy); +} + +void copy(int64_t n, const double *x, int64_t incx, double *y, int64_t incy); +void copy(int64_t n, const float *x, int64_t incx, float *y, int64_t incy); +void copy(int64_t n, const c10::complex *x, int64_t incx, c10::complex *y, int64_t incy); +void copy(int64_t n, const c10::complex *x, int64_t incx, c10::complex *y, int64_t incy); + +}}} // namespace at::native::cpublas diff --git a/voice_bridge/torch/include/ATen/native/CPUFallback.h b/voice_bridge/torch/include/ATen/native/CPUFallback.h new file mode 100644 index 0000000000000000000000000000000000000000..2d4dfc98aa06eddbbd3c3f7fb206fb5b32e59ab1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/CPUFallback.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +// This function implements a boxed fallback to CPU. +// External backends can add their own custom logging on top if it to customize their own CPU fallbacks. +TORCH_API void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack); + +// This is a helper function that backends can use to directly call their boxed CPU fallback +// TODO: update and add a usage example after https://github.com/pytorch/pytorch/pull/58092 lands. +template +struct _call_fallback_fn final {}; + +template +struct _call_fallback_fn final { + static ReturnType call(typename c10::maybe_keep_symint::type... args) { + auto op = c10::Dispatcher::singleton() + // TODO: figure out how to make compiler happy without dynamic casts + .findSchemaOrThrow((const char*) Op::name, (const char*) Op::overload_name) + //.findSchemaOrThrow("a", "b") + .typed::type...)>(); + return c10::impl::BoxedKernelWrapper::type...)>::call( + c10::BoxedKernel::makeFromFunction(), + op, + c10::DispatchKeySet(), // we know that the cpu_fallback doesn't use the dispatch keyset. + // TODO: get std::forward<> to work + args... 
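+          // (sketch of what this call achieves, not additional logic:
+          // BoxedKernelWrapper pushes the unboxed args onto a torch::jit
+          // stack, runs cpu_fallback as a boxed kernel, and pops the result
+          // back out as ReturnType; see call_fallback_fn below for the
+          // intended entry point)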
+ ); + } +}; + +template +using call_fallback_fn_symint = _call_fallback_fn; + +template +using call_fallback_fn = _call_fallback_fn; + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/CanUse32BitIndexMath.h b/voice_bridge/torch/include/ATen/native/CanUse32BitIndexMath.h new file mode 100644 index 0000000000000000000000000000000000000000..26aa7befd06ed7c53edcc66059a0f043f3298341 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/CanUse32BitIndexMath.h @@ -0,0 +1,13 @@ +#pragma once +#include +#include + +namespace at { +class TensorBase; +} + +namespace at { namespace native { + +TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits::max()); + +}} diff --git a/voice_bridge/torch/include/ATen/native/ComplexHelper.h b/voice_bridge/torch/include/ATen/native/ComplexHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..88668d13145c5b6a0b31c7004e5531d94da5e077 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/ComplexHelper.h @@ -0,0 +1,89 @@ +#pragma once + +#include +#include + +// WARNING: this header contains non-inline functions and should be only +// included from ONE cpp file + +namespace at { namespace native { + +// View tensor with new dtype, storage offset, sizes and strides +inline Tensor view_tensor( + const Tensor &tensor, ScalarType dtype, + int64_t offset, IntArrayRef sizes, IntArrayRef strides) { + Storage storage = tensor.storage(); + auto key_set = tensor.key_set().remove(DispatchKey::Conjugate); + auto new_tensor = detail::make_tensor( + c10::TensorImpl::VIEW, std::move(storage), key_set, scalarTypeToTypeMeta(dtype)); + auto * impl = new_tensor.unsafeGetTensorImpl(); + impl->set_storage_offset(offset); + impl->set_sizes_and_strides(sizes, strides); + return new_tensor; +} + +inline DimVector computeStrideForViewAsReal(IntArrayRef oldstride) { + DimVector res(oldstride.size() + 1); + for (const auto i : c10::irange(oldstride.size())) { + res[i] = oldstride[i] * 2; + } + res.back() = 1; + return res; +} + +Tensor _view_as_real_physical(const Tensor& self) { + TORCH_CHECK(self.is_complex(), "view_as_real is only supported for complex tensors"); + auto old_sizes = self.sizes(); + DimVector new_sizes(old_sizes.size() + 1); + std::copy(old_sizes.begin(), old_sizes.end(), new_sizes.begin()); + // last dimension will always have two elements containing the real and imag vals + new_sizes.back() = 2; + auto new_strides = computeStrideForViewAsReal(self.strides()); + auto new_storage_offset = 2 * self.storage_offset(); + const auto float_type = c10::toRealValueType(self.scalar_type()); + auto real_tensor = view_tensor(self, float_type, new_storage_offset, new_sizes, new_strides); + return real_tensor; +} + +// expects as input a complex tensor and returns back a tensor +// with corresponding real dtype containing the complex values +// in the last two dimensions +Tensor view_as_real(const Tensor& self) { + TORCH_CHECK(!self.is_conj(), "view_as_real doesn't work on unresolved conjugated tensors. 
To resolve the conjugate tensor so you can view it as real, use self.resolve_conj(); however, be warned that the resulting tensor will NOT alias the original."); + return _view_as_real_physical(self); +} + +inline DimVector computeStrideForViewAsComplex(IntArrayRef oldstride) { + const int64_t dim = oldstride.size(); + TORCH_CHECK(oldstride[dim-1] == 1, "Tensor must have a last dimension with stride 1"); + + DimVector res(dim - 1); + for (const auto i : c10::irange(res.size())) { + TORCH_CHECK(oldstride[i] % 2 == 0, "Tensor must have a stride divisible by 2 for all but last dimension"); + res[i] = oldstride[i] / 2; + } + return res; +} + +// expects as input a float or double tensor with last dimension of size 2 +// and returns back a tensor with corresponding complex dtype +Tensor view_as_complex(const Tensor& self) { + TORCH_CHECK( + self.scalar_type() == kFloat || self.scalar_type() == kDouble || self.scalar_type() == kHalf, + "view_as_complex is only supported for half, float and double tensors, but got a tensor of scalar type: ", self.scalar_type()); + + auto old_sizes = self.sizes(); + TORCH_CHECK(old_sizes.size() != 0, "Input tensor must have one or more dimensions"); + TORCH_CHECK(old_sizes[old_sizes.size()-1] == 2, "Tensor must have a last dimension of size 2"); + DimVector new_sizes(old_sizes.begin(), old_sizes.end() - 1); + + const auto new_strides = computeStrideForViewAsComplex(self.strides()); + const auto complex_type = c10::toComplexType(self.scalar_type()); + + TORCH_CHECK(self.storage_offset() % 2 == 0, "Tensor must have a storage_offset divisible by 2"); + const auto new_storage_offset = self.storage_offset() / 2; + + return view_tensor(self, complex_type, new_storage_offset, new_sizes, new_strides); +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/CompositeRandomAccessor.h b/voice_bridge/torch/include/ATen/native/CompositeRandomAccessor.h new file mode 100644 index 0000000000000000000000000000000000000000..a5984aa61d08597c66e8bf30811dd02c976a6591 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/CompositeRandomAccessor.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +namespace at { namespace native { + +struct TupleInfoCPU { + template + using tuple = std::tuple; + + template + static constexpr auto tie(Types&... args) noexcept { + return std::tie(args...); + } +}; + +template +using CompositeRandomAccessorCPU = + CompositeRandomAccessor; + +template +void swap( + references_holder rh1, + references_holder rh2 +) { + return std::swap(rh1.data(), rh2.data()); +} + +template +auto get(references_holder rh) -> decltype(std::get(rh.data())) { + return std::get(rh.data()); +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/CompositeRandomAccessorCommon.h b/voice_bridge/torch/include/ATen/native/CompositeRandomAccessorCommon.h new file mode 100644 index 0000000000000000000000000000000000000000..0be75d8244f03ed13a6185901cdbdb46074d01bc --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/CompositeRandomAccessorCommon.h @@ -0,0 +1,261 @@ +#pragma once + +namespace at { namespace native { + +namespace { + +// operator_brackets_proxy is used in +// CompositeRandomAccessor in place of operator[]. +// For some iterators, references returned by operator[] +// could become invalid, operator_brackets_proxy tries to +// resolve that by making accessor[n] to be equivalent to +// *(accessor + n). 
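+// A minimal sketch of the pattern (illustrative, not part of the API):
+// for an accessor `acc`, `acc[n]` cannot hand out a plain reference that
+// might not survive the accessor, so it returns a proxy that re-derives
+// `*(acc + n)` on demand:
+//
+//   auto proxy = acc[n];   // stores a copy of (acc + n), no reference yet
+//   value_type v = proxy;  // conversion operator dereferences now
+//   proxy = v;             // operator= writes through *(acc + n)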
+template <typename Accessor>
+class operator_brackets_proxy {
+  using reference = typename std::iterator_traits<Accessor>::reference;
+  using value_type = typename std::iterator_traits<Accessor>::value_type;
+
+public:
+  C10_HOST_DEVICE
+  operator_brackets_proxy(Accessor const& accessor)
+    : accessor(accessor)
+  {}
+
+  C10_HOST_DEVICE
+  operator reference() {
+    return *accessor;
+  }
+
+  C10_HOST_DEVICE
+  reference operator*() {
+    return *accessor;
+  }
+
+  C10_HOST_DEVICE
+  operator_brackets_proxy& operator=(value_type const& val) {
+    *accessor = val;
+    return *this;
+  }
+
+private:
+  Accessor accessor;
+};
+
+}
+
+// references_holder is used as a surrogate for the
+// references type from std::iterator_traits in CompositeRandomAccessor.
+// It is assumed in CompositeRandomAccessor that
+// References = tuple<Types&...>,
+// Values = tuple<Types...> by default,
+// but they could be anything as long as References could be
+// cast to Values.
+// If you plan to use it with STL, for example, you will need to
+// define `swap` and `get` (aka std::get) methods.
+template <typename Values, typename References>
+class references_holder {
+public:
+  using values = Values;
+  using references = References;
+
+  C10_HOST_DEVICE
+  references_holder(references refs)
+    : refs{refs}
+  {}
+
+  C10_HOST_DEVICE
+  operator references() {
+    return refs;
+  }
+
+  C10_HOST_DEVICE
+  operator values() {
+    return refs;
+  }
+
+  C10_HOST_DEVICE
+  references_holder& operator=(values vals) {
+    refs = vals;
+    return *this;
+  }
+
+  C10_HOST_DEVICE
+  references& data() {
+    return refs;
+  }
+
+protected:
+  references refs;
+};
+
+// CompositeRandomAccessor is essentially a simplified version of
+// a random access iterator over two random access iterators.
+// TupleInfo should contain a variadic type `tuple`, and a method `tie`,
+// which constructs a tuple of references from a variadic list of arguments.
+template <typename KeyAccessor, typename ValueAccessor, typename TupleInfo>
+class CompositeRandomAccessor {
+  using self_type = CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfo>;
+
+  using key_accessor_value_type =
+    typename std::iterator_traits<KeyAccessor>::value_type;
+  using value_accessor_value_type =
+    typename std::iterator_traits<ValueAccessor>::value_type;
+  using key_accessor_reference_type =
+    typename std::iterator_traits<KeyAccessor>::reference;
+  using value_accessor_reference_type =
+    typename std::iterator_traits<ValueAccessor>::reference;
+
+  using composite_value_type = typename TupleInfo::template tuple<
+    key_accessor_value_type,
+    value_accessor_value_type>;
+  using composite_reference = typename TupleInfo::template tuple<
+    key_accessor_reference_type,
+    value_accessor_reference_type>;
+
+public:
+  using value_type = composite_value_type;
+  using reference = references_holder<composite_value_type, composite_reference>;
+  // Note that CompositeRandomAccessor does not hold key and values
+  // in a specific data structure, which means that a pointer to a (key, value)
+  // is not defined. Hence we just use a pointer type of the KeyAccessor.
+  using pointer = typename std::iterator_traits<KeyAccessor>::pointer;
+  using difference_type = typename std::iterator_traits<KeyAccessor>::difference_type;
+  using iterator_category = std::random_access_iterator_tag;
+
+  C10_HOST_DEVICE
+  CompositeRandomAccessor() = default;
+
+  C10_HOST_DEVICE
+  CompositeRandomAccessor(KeyAccessor keys, ValueAccessor values)
+    : keys(keys), values(values)
+  {}
+
+  // Pointer-like operations {
+  C10_HOST_DEVICE
+  reference operator*() const {
+    return TupleInfo::tie(*keys, *values);
+  }
+
+  // operator->() is supposed to return a pointer type.
+  // Since CompositeRandomAccessor does not hold pointers to pairs,
+  // we just return a pointer to a key.
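+  // In practice that means member access only ever sees the key side;
+  // to reach both sides, dereference and use `get` (sketch, `acc` is a
+  // hypothetical CompositeRandomAccessor over keys/values):
+  //
+  //   auto kv = *acc;        // references_holder over (key&, value&)
+  //   auto& k = get<0>(kv);  // key side
+  //   auto& v = get<1>(kv);  // value side, via the free get() overload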
+ C10_HOST_DEVICE + auto* operator->() const { + return keys.operator->(); + } + + C10_HOST_DEVICE + reference operator[](difference_type idx) { + return operator_brackets_proxy( + CompositeRandomAccessor(keys + idx, values + idx) + ); + } + // } + + // Prefix/postfix increment/decrement { + C10_HOST_DEVICE + CompositeRandomAccessor& operator++() { + ++keys; + ++values; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator++(int) { + CompositeRandomAccessor copy(*this); + ++*this; + return copy; + } + + C10_HOST_DEVICE + CompositeRandomAccessor& operator--() { + --keys; + --values; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator--(int) { + CompositeRandomAccessor copy(*this); + --*this; + return copy; + } + // } + + // Arithmetic operations { + C10_HOST_DEVICE + CompositeRandomAccessor& operator+=(difference_type offset) { + keys += offset; + values += offset; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator+(difference_type offset) const { + return CompositeRandomAccessor(keys + offset, values + offset); + } + + C10_HOST_DEVICE + friend CompositeRandomAccessor operator+( + difference_type offset, + const CompositeRandomAccessor& accessor + ) { + return accessor + offset; + } + + C10_HOST_DEVICE + CompositeRandomAccessor& operator-=(difference_type offset) { + keys -= offset; + values -= offset; + return *this; + } + + C10_HOST_DEVICE + CompositeRandomAccessor operator-(difference_type offset) const { + return CompositeRandomAccessor(keys - offset, values - offset); + } + + C10_HOST_DEVICE + difference_type operator-(const CompositeRandomAccessor& other) const { + return keys - other.keys; + } + // } + + // Comparison operators { + C10_HOST_DEVICE + bool operator==(const CompositeRandomAccessor& other) const { + return keys == other.keys; + } + + C10_HOST_DEVICE + bool operator!=(const CompositeRandomAccessor& other) const { + return keys != other.keys; + } + + C10_HOST_DEVICE + bool operator<(const CompositeRandomAccessor& other) const { + return keys < other.keys; + } + + C10_HOST_DEVICE + bool operator<=(const CompositeRandomAccessor& other) const { + return keys <= other.keys; + } + + C10_HOST_DEVICE + bool operator>(const CompositeRandomAccessor& other) const { + return keys > other.keys; + } + + C10_HOST_DEVICE + bool operator>=(const CompositeRandomAccessor& other) const { + return keys >= other.keys; + } + // } + +protected: + KeyAccessor keys; + ValueAccessor values; +}; + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/ConvUtils.h b/voice_bridge/torch/include/ATen/native/ConvUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..a31dbee2bd759d555188a2363144b54af4172023 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/ConvUtils.h @@ -0,0 +1,410 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +using conv_depthwise2d_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, std::array); +DECLARE_DISPATCH(conv_depthwise2d_backward_fn, conv_depthwise2d_backward_stub); +using conv_depthwise3d_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, std::array); +DECLARE_DISPATCH(conv_depthwise3d_backward_fn, conv_depthwise3d_backward_stub); +using cudnn_convolution_backward_fn = 
std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, int64_t, bool, bool, bool, std::array); +DECLARE_DISPATCH(cudnn_convolution_backward_fn, cudnn_convolution_backward_stub); +using mps_convolution_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, int64_t, std::array); +DECLARE_DISPATCH(mps_convolution_backward_fn, mps_convolution_backward_stub); +using cudnn_convolution_transpose_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, std::array); +DECLARE_DISPATCH(cudnn_convolution_transpose_backward_fn, cudnn_convolution_transpose_backward_stub); +using miopen_convolution_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, int64_t, bool, bool, std::array); +DECLARE_DISPATCH(miopen_convolution_backward_fn, miopen_convolution_backward_stub); +using miopen_convolution_transpose_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, std::array); +DECLARE_DISPATCH(miopen_convolution_transpose_backward_fn, miopen_convolution_transpose_backward_stub); +using miopen_depthwise_convolution_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, int64_t, bool, bool, std::array); +DECLARE_DISPATCH(miopen_depthwise_convolution_backward_fn, miopen_depthwise_convolution_backward_stub); +using mkldnn_convolution_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, int64_t, std::array); +DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub); +using slow_conv_dilated2d_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, std::array); +DECLARE_DISPATCH(slow_conv_dilated2d_backward_fn, slow_conv_dilated2d_backward_stub); +using slow_conv_dilated3d_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, std::array); +DECLARE_DISPATCH(slow_conv_dilated3d_backward_fn, slow_conv_dilated3d_backward_stub); +using slow_conv_transpose2d_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array); +DECLARE_DISPATCH(slow_conv_transpose2d_backward_fn, slow_conv_transpose2d_backward_stub); +using slow_conv_transpose3d_backward_fn = std::tuple(*)( + const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef, + at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array); +DECLARE_DISPATCH(slow_conv_transpose3d_backward_fn, slow_conv_transpose3d_backward_stub); + +namespace { + static bool cudnnv8_heuristic_mode_b = c10::utils::check_env("TORCH_CUDNN_USE_HEURISTIC_MODE_B") == true; +} + +static inline bool cudnnv8_enabled_check_debug() { + static bool cudnnv8_flag = c10::utils::check_env("TORCH_CUDNN_V8_API_ENABLED") == true; + static bool cudnnv8_debug = 
c10::utils::check_env("TORCH_CUDNN_V8_API_DEBUG") == true; + static uint8_t cudnnv8_debugcount = 0; + if (cudnnv8_debug == 1 && cudnnv8_debugcount < 10) { + TORCH_WARN("TORCH_CUDNN_V8_DEBUG ON, V8_FLAG: ", cudnnv8_flag, " TORCH_CUDNN_USE_HEURISTIC_MODE B: ", cudnnv8_heuristic_mode_b); + cudnnv8_debugcount++; + } + return cudnnv8_flag == 1; +} + +static inline bool cudnnv8_use_heur_mode_b() { + return cudnnv8_heuristic_mode_b; +} + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct ConvParams { + std::vector stride; + std::vector padding; + std::vector dilation; + bool transposed; + std::vector output_padding; + int groups; + bool benchmark; + bool deterministic; + bool cudnn_enabled; + bool allow_tf32; + + bool is_strided() const; + bool is_dilated() const; + bool is_padded() const; + bool is_output_padding_neg() const; + bool is_output_padding_big() const; + bool is_padding_neg() const; + bool is_stride_nonpos() const; + void view1d_as_2d(); + bool use_cpu_depthwise3x3_winograd(const at::Tensor& input, const at::Tensor& weight, const c10::optional& bias) const; + bool needs_64bit_indexing_no_split(const at::Tensor& input, const at::Tensor& weight) const; + bool use_cudnn(const at::Tensor& input, const at::Tensor& weight) const; + bool use_cudnn_depthwise(const at::Tensor& input, const at::Tensor& weight) const; + bool use_miopen(const at::Tensor& input, const at::Tensor& weight, bool bias_defined) const; + bool use_mkldnn(const at::Tensor& input, const at::Tensor& weight) const; + bool use_nnpack(const at::Tensor& input, const at::Tensor& weight) const; + bool use_xnnpack(const at::Tensor& input, const at::Tensor& weight, + const at::OptionalIntArrayRef bias_sizes_opt) const; + bool use_mps(const at::Tensor& input, const at::Tensor& weight) const; + bool is_depthwise(const at::Tensor& input, const at::Tensor& weight) const; +}; + +enum class ConvBackend { + CudaDepthwise2d, + CudaDepthwise3d, + Cudnn, + CudnnTranspose, + Empty, + Miopen, + MiopenDepthwise, + MiopenTranspose, + Mkldnn, + MkldnnEmpty, + NnpackSpatial, + Overrideable, + Slow2d, + Slow3d, + SlowDilated2d, + SlowDilated3d, + SlowTranspose2d, + SlowTranspose3d, + Winograd3x3Depthwise, + Xnnpack2d, + Mps, + MpsTranspose, +}; + +// Function to select the convolution backend based on the inputs and params. +// This overload is used within the convolution internals but not exposed to python. +// NB: The forward pass provides a bias tensor while the backward pass provides +// a bool indicating whether the bias is defined. This is done to save memory by +// avoiding saving the full bias tensor for backward. +TORCH_API ConvBackend _select_conv_backend( + const Tensor& input, + const Tensor& weight, + const c10::optional& bias_opt, + const at::OptionalIntArrayRef bias_sizes_opt, + const bool need_backward, + const ConvParams& params); + +// For BC reasons, have a copy that does not require bias_opt +TORCH_API ConvBackend select_conv_backend( + const Tensor& input, + const Tensor& weight, + const at::OptionalIntArrayRef bias_sizes_opt, + const bool need_backward, + const ConvParams& params); + +// Overload for selecting the convolution backend from the full set of convolution inputs. +// This overload is exposed to python for testing, etc. 
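+// For example (sketch; tensors and hyper-parameters are illustrative):
+//
+//   ConvBackend backend = select_conv_backend(
+//       input, weight, /*bias_opt=*/c10::nullopt,
+//       /*stride=*/{1, 1}, /*padding=*/{0, 0}, /*dilation=*/{1, 1},
+//       /*transposed=*/false, /*output_padding=*/{0, 0}, /*groups=*/1);
+//   // the result is then switched on, e.g. ConvBackend::Mkldnn vs. Slow2d.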
+TORCH_API ConvBackend select_conv_backend( + const Tensor& input, const Tensor& weight, const c10::optional& bias_opt, + IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, + bool transposed, IntArrayRef output_padding, int64_t groups); + +// --------------------------------------------------------------------- +// +// Math +// +// --------------------------------------------------------------------- + +constexpr int input_batch_size_dim = 0; // also grad_input +constexpr int input_channels_dim = 1; +constexpr int output_batch_size_dim = 0; // also grad_output +constexpr int output_channels_dim = 1; +constexpr int weight_output_channels_dim = 0; +constexpr int weight_input_channels_dim = 1; + +// Often written as 2 + max_dim (extra dims for batch size and channels) +constexpr int max_dim = 3; + +// --------------------------------------------------------------------- +// +// Checking +// +// --------------------------------------------------------------------- + +// Used on pad, stride and dilation +static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name) +{ + TORCH_CHECK(args.size() <= expected_size, + "Too many ", arg_name, " values (", args.size(), ") supplied, expecting ", + expected_size, " (while checking arguments for ", c, ")"); + TORCH_CHECK(args.size() >= expected_size, + "Not enough ", arg_name, " values (", args.size(), ") supplied, expecting ", + expected_size, " (while checking arguments for ", c, ")"); + + auto num_negative_values = std::count_if(args.begin(), args.end(), [](int x){return x < 0;}); + if (num_negative_values > 0){ + std::stringstream ss; + ss << arg_name << " should be greater than zero but got ("; + std::copy(args.begin(), args.end() - 1, std::ostream_iterator(ss,", ")); + ss << args.back() << ")" << " (while checking arguments for " << c << ")"; + AT_ERROR(ss.str()); + } +} + + +// NOTE [ Convolution checks ] +// +// NB: For many call sites, it is not strictly necessary to check all of +// these relationships (for example, for forward convolution, we compute +// the size of output ourselves, so we don't actually need to check +// output. However, writing a single function that does everything +// means we get to reuse it for both forwards and all backwards +// variants, even when the set of "real" inputs varies. The magic of +// relational computing! +// +// (There is one downside, which is that it is slightly harder to write +// error messages which are able to distinguish between real inputs +// (which the user can change) and computed inputs (which the user can +// only indirectly affect). It would be an interesting exercise to +// come up with a general framework to handle such situations.) 
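+//
+// A worked instance of the size arithmetic used by the helpers below
+// (numbers are illustrative): with input_size[d] = 10, kernel 3, padding 1,
+// stride 2, dilation 1,
+//
+//   effective kernel = dilation * (kernel - 1) + 1 = 3
+//   output_size[d]   = (10 + 2*1 - 3) / 2 + 1 = 5
+//
+// Both inputs 9 and 10 map to output 5, which is why conv_input_size
+// additionally takes output_padding: output_padding = 0 recovers 9,
+// output_padding = 1 recovers 10.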
+static void convolution_shape_check( + CheckedFrom c, + const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output, + IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) +{ + check_args(c, padding, input->dim() - 2, "padding"); + check_args(c, stride, padding.size(), "stride"); + check_args(c, dilation, padding.size(), "dilation"); + + // Input + checkDimRange(c, input, 3, 6 /* exclusive */); + checkSize_symint(c, input, input_channels_dim, weight->size(1) * groups); + + // Weight + checkSameDim(c, input, weight); + + // TODO: check that output->size() matches output_sizes + // TODO: check that weight matches output->sizes() + checkSameDim(c, input, output); +} + +// NB: conv_output_size and conv_input_size are not bijections, +// as conv_output_size loses information; this is why conv_input_size +// takes an extra output_padding argument to resolve the ambiguity. + +static inline std::vector conv_output_size( + IntArrayRef input_size, IntArrayRef weight_size, + IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef() +) { + // ASSERT(input_size.size() > 2) + // ASSERT(input_size.size() == weight_size.size()) + bool has_dilation = dilation.size() > 0; + auto dim = input_size.size(); + std::vector output_size(dim); + output_size[0] = input_size[input_batch_size_dim]; + output_size[1] = weight_size[weight_output_channels_dim]; + for (const auto d : c10::irange(2, dim)) { + auto dilation_ = has_dilation ? dilation[d - 2] : 1; + auto kernel = dilation_ * (weight_size[d] - 1) + 1; + output_size[d] = (input_size[d] + (2 * padding[d - 2]) - kernel) / stride[d - 2] + 1; + } + return output_size; +} + +static inline std::vector conv_input_size( + IntArrayRef output_size, IntArrayRef weight_size, + IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups +) { + // ASSERT(output_size.size() > 2) + // ASSERT(output_size.size() == weight_size.size()) + auto dim = output_size.size(); + std::vector input_size(dim); + input_size[0] = output_size[output_batch_size_dim]; + input_size[1] = weight_size[weight_input_channels_dim] * groups; + for (const auto d : c10::irange(2, dim)) { + int kernel = dilation[d - 2] * (weight_size[d] - 1) + 1; + input_size[d] = (output_size[d] - 1) * stride[d - 2] - (2 * padding[d - 2]) + + kernel + output_padding[d - 2]; + } + return input_size; +} + +static inline std::vector conv_weight_size( + IntArrayRef input_size, IntArrayRef output_size, + IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups +) { + auto dim = input_size.size(); + std::vector weight_size(dim); + weight_size[0] = output_size[1]; + weight_size[1] = input_size[1] / groups; + for (const auto d : c10::irange(2, dim)) { + int kernel = input_size[d] - (output_size[d] - 1) * stride[d - 2] + + 2 * padding[d - 2] - output_padding[d - 2]; + weight_size[d] = (kernel - 1) / dilation[d - 2] + 1; + } + return weight_size; +} + +static inline Tensor reshape_bias(int64_t dim, const Tensor& bias) { + std::vector shape(dim, 1); + shape[1] = -1; + return bias.reshape(shape); +} + +static inline at::MemoryFormat cudnn_conv_suggest_memory_format(const at::Tensor& input, const at::Tensor& weight) { + // disable NHWC for float64 input. 
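+  // (illustrative note on how the suggestion is consumed, not a guarantee:
+  // callers typically do
+  //   auto fmt = cudnn_conv_suggest_memory_format(input, weight);
+  //   input = input.contiguous(fmt);
+  // so returning Contiguous here silently opts out of channels-last.)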
+ if (!at::detail::getCUDAHooks().compiledWithCuDNN() || + input.scalar_type() == at::kDouble || + weight.scalar_type() == at::kDouble) { + return at::MemoryFormat::Contiguous; + } + long cudnn_version = at::detail::getCUDAHooks().versionCuDNN(); + auto input_memory_format = input.suggest_memory_format(); + auto weight_memory_format = weight.suggest_memory_format(); + auto weight_ndim = weight.ndimension(); + + bool can_use_cudnn_channels_last_2d = (cudnn_version >= 7603) && (weight_ndim == 4) && ( + (input_memory_format == at::MemoryFormat::ChannelsLast) || + (weight_memory_format == at::MemoryFormat::ChannelsLast) + ); + if (can_use_cudnn_channels_last_2d) { + return at::MemoryFormat::ChannelsLast; + } + + bool can_use_cudnn_channels_last_3d = (cudnn_version >= 8005) && (weight_ndim == 5) && ( + (input_memory_format == at::MemoryFormat::ChannelsLast3d) || + (weight_memory_format == at::MemoryFormat::ChannelsLast3d) + ); + if (can_use_cudnn_channels_last_3d) { + return at::MemoryFormat::ChannelsLast3d; + } + + return at::MemoryFormat::Contiguous; +} + +static inline bool miopen_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) { + + // disable NHWC for float64 input. + if (!at::detail::getCUDAHooks().compiledWithMIOpen() || + input.scalar_type() == at::kDouble || + weight.scalar_type() == at::kDouble) { + return false; + } + + bool can_use_miopen_channels_last_2d = false; +#if defined(USE_ROCM) && (ROCM_VERSION >= 40300) + // TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen + // See #64427 + static c10::optional PYTORCH_MIOPEN_SUGGEST_NHWC = c10::utils::check_env("PYTORCH_MIOPEN_SUGGEST_NHWC"); + + auto input_memory_format = input.suggest_memory_format(); + auto weight_memory_format = weight.suggest_memory_format(); + + can_use_miopen_channels_last_2d = PYTORCH_MIOPEN_SUGGEST_NHWC && *PYTORCH_MIOPEN_SUGGEST_NHWC && ( + ( (input_memory_format == at::MemoryFormat::ChannelsLast) || + (weight_memory_format == at::MemoryFormat::ChannelsLast) ) + ); +#endif + + bool can_use_miopen_channels_last_3d = false; + + return can_use_miopen_channels_last_2d || can_use_miopen_channels_last_3d; +} + +static inline bool mkldnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) { + + // disable NHWC for float64 input. + if (input.scalar_type() == at::kDouble || + weight.scalar_type() == at::kDouble) { + return false; + } + + // disable NHWC for MkldnnCPU tensor. 
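+  // (likely rationale: MkldnnCPU tensors carry oneDNN's own opaque blocked
+  // layout, so the dense channels-last heuristic below does not apply to
+  // them)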
+ if (input.is_mkldnn() || weight.is_mkldnn()) { + return false; + } + + auto input_memory_format = input.suggest_memory_format(); + auto weight_memory_format = weight.suggest_memory_format(); + + bool can_use_mkldnn_channels_last_2d = + (input_memory_format == at::MemoryFormat::ChannelsLast) || + (weight_memory_format == at::MemoryFormat::ChannelsLast); + + // TODO: add channels last 3d support + bool can_use_mkldnn_channels_last_3d = false; + + return can_use_mkldnn_channels_last_2d || can_use_mkldnn_channels_last_3d; +} + +static inline bool thnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) { + + auto input_memory_format = input.suggest_memory_format(); + auto weight_memory_format = weight.suggest_memory_format(); + + bool can_use_thnn_channels_last_2d = input.device().is_cpu() && ( + (input_memory_format == at::MemoryFormat::ChannelsLast) || ( + weight_memory_format == at::MemoryFormat::ChannelsLast)); + + return can_use_thnn_channels_last_2d; +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/ConvolutionMM3d.h b/voice_bridge/torch/include/ATen/native/ConvolutionMM3d.h new file mode 100644 index 0000000000000000000000000000000000000000..9567b5d928c1545ac51ecde6708f608f1183150d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/ConvolutionMM3d.h @@ -0,0 +1,15 @@ +#include + +namespace at { +namespace native { + +std::tuple slow_conv3d_backward_cpu( + const Tensor& grad_output, + const Tensor& self, + const Tensor& weight, + IntArrayRef kernel_size, + IntArrayRef stride, + IntArrayRef padding, + std::array output_mask); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/Copy.h b/voice_bridge/torch/include/ATen/native/Copy.h new file mode 100644 index 0000000000000000000000000000000000000000..14abb32fa5ad4ba3cd8c78084569b313a4a692cd --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Copy.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace at { + +class Tensor; +struct TensorIterator; +class TensorBase; + +namespace native { + +using copy_fn = void (*)(TensorIterator&, bool non_blocking); + +DECLARE_DISPATCH(copy_fn, copy_stub); + +TORCH_API void copy_ignoring_overlaps(const TensorBase &dst, const TensorBase &src); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/Cross.h b/voice_bridge/torch/include/ATen/native/Cross.h new file mode 100644 index 0000000000000000000000000000000000000000..9daee7f2d6c43586630ad38ac731b22ce2416ff3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Cross.h @@ -0,0 +1,14 @@ +#pragma once + +#include + +namespace at { +class Tensor; + +namespace native { + +using cross_fn = void(*)(const Tensor&, const Tensor&, const Tensor&, const int64_t d); + +DECLARE_DISPATCH(cross_fn, cross_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/DilatedConvolutionUtils.h b/voice_bridge/torch/include/ATen/native/DilatedConvolutionUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..51b30a9bc77aedcaae1b9e23f35c45239be7e058 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/DilatedConvolutionUtils.h @@ -0,0 +1,233 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \ + TORCH_CHECK( \ + T.dim() == DIM && T.size(DIM_SIZE) == SIZE, \ + "Need " #T " of dimension ", \ + DIM, \ + " and " #T ".size[", \ + DIM_SIZE, \ + "] == ", \ + SIZE, \ + " but got input to be of shape ", \ + 
T.sizes())
+
+namespace at {
+namespace native {
+namespace internal {
+namespace {
+inline bool all_positive(IntArrayRef& arr) {
+  return std::all_of(
+      arr.begin(), arr.end(), [](int64_t item) { return item > 0; });
+}
+
+inline bool all_nonnegative(std::vector<int64_t>& arr) {
+  return std::all_of(
+      arr.begin(), arr.end(), [](int64_t item) { return item >= 0; });
+}
+
+} // namespace
+
+// calculate the rear part of output tensor sizes
+template <int64_t dim>
+std::vector<int64_t> get_output_size(
+    const Tensor& input,
+    IntArrayRef kernel_size,
+    IntArrayRef stride_size,
+    IntArrayRef pad_size,
+    IntArrayRef dilation_size) {
+  std::vector<int64_t> sizes;
+  for (const auto index : c10::irange(dim)) {
+    sizes.push_back(
+        div_rtn<int64_t>(
+            input.size(index + input.dim() - dim) + 2 * pad_size[index] -
+                (dilation_size[index] * (kernel_size[index] - 1) + 1),
+            stride_size[index]) +
+        1);
+  }
+  return sizes;
+}
+
+// calculate the sizes of output tensor
+template <int64_t dim>
+std::vector<int64_t> get_output_size(
+    const Tensor& input,
+    const Tensor& weight,
+    IntArrayRef kernel_size,
+    IntArrayRef stride_size,
+    IntArrayRef pad_size,
+    IntArrayRef dilation_size) {
+  auto output_size = get_output_size<dim>(
+      input, kernel_size, stride_size, pad_size, dilation_size);
+  output_size.insert(output_size.begin(), weight.size(0));
+  if (input.dim() == dim + 2) {
+    output_size.insert(output_size.begin(), input.size(0));
+  }
+  return output_size;
+}
+/*
+  slow_conv_dilated_shape_check - check user-input to dilated convolution
+  forward and backward functions.
+*/
+template <int64_t dim>
+void slow_conv_dilated_shape_check(
+    const Tensor& input,
+    const Tensor& weight,
+    const Tensor& bias,
+    const Tensor& grad_output,
+    IntArrayRef kernel_size,
+    IntArrayRef stride_size,
+    IntArrayRef pad_size,
+    IntArrayRef dilation_size) {
+  /*
+    When the following tensors are defined:
+
+      bias, grad_weight, grad_output
+
+    then these are assumed to be contiguous without checking,
+    because these tensors are made contiguous by calling the
+    .contiguous() method or by resizing of zero-sized tensors in
+    forward/backward functions.
+
+    When grad_weight is defined then it is assumed, without
+    checking, to have the same shape as weight; see backward
+    functions.
+  */
+  // Check size arguments
+  TORCH_CHECK(
+      kernel_size.size() == dim,
+      "kernel sizes length should be ",
+      dim,
+      ", but got ",
+      kernel_size.size());
+  TORCH_CHECK(
+      stride_size.size() == dim,
+      "strides length should be ",
+      dim,
+      ", but got ",
+      stride_size.size());
+  TORCH_CHECK(
+      dilation_size.size() == dim,
+      "dilations length should be ",
+      dim,
+      ", but got ",
+      dilation_size.size());
+  TORCH_CHECK(
+      pad_size.size() == dim,
+      "pads length should be ",
+      dim,
+      ", but got ",
+      pad_size.size());
+
+  TORCH_CHECK(
+      all_positive(kernel_size),
+      "kernel size should be greater than zero, but got ",
+      kernel_size);
+  TORCH_CHECK(
+      all_positive(stride_size),
+      "stride should be greater than zero, but got ",
+      stride_size);
+  TORCH_CHECK(
+      all_positive(dilation_size),
+      "dilation should be greater than zero, but got ",
+      dilation_size);
+
+  // check input
+  TORCH_CHECK(input.defined(), "input must be defined");
+  bool is_batch = input.dim() == dim + 2;
+  int64_t n = (is_batch ?
2 : 1); + int64_t ndim = n + dim; + if (!is_batch) { + // input dim has to be dim + 1 if not batched + TORCH_CHECK( + input.dim() == dim + 1, + "input must be 4D or 5D tensor but got ", + input.dim(), + "D tensor"); + } + + // check output sizes + auto output_size = get_output_size( + input, kernel_size, stride_size, pad_size, dilation_size); + + TORCH_CHECK( + all_nonnegative(output_size), + "calculated output size ", + output_size, + " is too small (all sizes must be non-negative)"); + + // check weight + TORCH_CHECK(weight.defined(), "weight must be defined"); + TORCH_CHECK( + weight.dim() == dim + 2, + "weight must be ", + dim + 2, + "D tensor but got ", + weight.dim(), + "D tensor dim=", + dim); + TORCH_CHECK( + weight.sizes().slice(2) == kernel_size, + "weight[2:] shape ", + weight.sizes().slice(2), + " must be equal to kernel_size ", + kernel_size); + + TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 1 : 0), weight.size(1)); + + // check bias when present + if (bias.defined()) { + TORCH_CHECK( + bias.dim() == 1, + "bias must be 1D tensor but got ", + bias.dim(), + "D tensor"); + TORCH_CHECK_DIM_SIZE(bias, 1, 0, weight.size(0)); + } + + // check grad_output when present + if (grad_output.defined()) { + TORCH_CHECK( + grad_output.dim() == ndim, + "grad_output must be ", + ndim, + "D tensor but got ", + grad_output.dim(), + "D tensor"); + if (is_batch) { + TORCH_CHECK( + grad_output.size(0) == input.size(0), + "grad_output.size(0)=", + grad_output.size(0), + " must be input.size(0)=", + input.size(0)); + } + TORCH_CHECK( + grad_output.size(n - 1) == weight.size(0), + "grad_output.size(", + n - 1, + ")=", + grad_output.size(n - 1), + " must be weight.size(0)=", + weight.size(0)); + TORCH_CHECK( + grad_output.sizes().slice(n) == output_size, + "grad_output[", + n, + ":] shape", + grad_output.sizes().slice(n), + " must be equal to output size ", + output_size); + } +} + +} // namespace internal +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/DispatchStub.h b/voice_bridge/torch/include/ATen/native/DispatchStub.h new file mode 100644 index 0000000000000000000000000000000000000000..bcbf41fd9d0ff0f4824ce32f7ae501cfdfb216da --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/DispatchStub.h @@ -0,0 +1,302 @@ +#pragma once + +#include +#include +#include + +#include +#include + +// Implements instruction set specific function dispatch. +// +// Kernels that may make use of specialized instruction sets (e.g. AVX2) are +// compiled multiple times with different compiler flags (e.g. -mavx2). A +// DispatchStub contains a table of function pointers for a kernel. At runtime, +// the fastest available kernel is chosen based on the features reported by +// cpuinfo. +// +// Example: +// +// In native/MyKernel.h: +// using fn_type = void(*)(const Tensor& x); +// DECLARE_DISPATCH(fn_type, stub); +// +// In native/MyKernel.cpp +// DEFINE_DISPATCH(stub); +// +// In native/cpu/MyKernel.cpp: +// namespace { +// // use anonymous namespace so that different cpu versions won't conflict +// void kernel(const Tensor& x) { ... } +// } +// REGISTER_DISPATCH(stub, &kernel); +// +// To call: +// stub(kCPU, tensor); +// +// TODO: CPU instruction set selection should be folded into whatever +// the main dispatch mechanism is. 
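+//
+// Runtime selection (sketch for an x86 build with AVX512 and AVX2 kernels
+// compiled in):
+//
+//   stub(kCPU, tensor);
+//
+// probes the table in decreasing order of capability (AVX512, then AVX2,
+// then DEFAULT) and caches the winner in an atomic pointer, so the
+// capability check happens only on the first call.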
+ +// ignore warnings about DispatchStub::DEFAULT, AVX, AVX2 defined elsewhere +#if defined(__clang__) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wundefined-var-template" +#endif + +namespace at { namespace native { + +enum class CPUCapability { + DEFAULT = 0, +#if defined(HAVE_VSX_CPU_DEFINITION) + VSX = 1, +#elif defined(HAVE_ZVECTOR_CPU_DEFINITION) + ZVECTOR = 1, +#else + AVX2 = 1, + AVX512 = 2, +#endif + NUM_OPTIONS +}; + +CPUCapability get_cpu_capability(); + +template +struct DispatchStub; + +/** + * The sole purpose of this class is to outline methods that don't need to be + * specialized or otherwise inlined and duplicated (by the compiler due to + * template expansion), since it causes size bloat if there are a significant + * number of specialization of the DispatchStub<> class. + */ +struct TORCH_API DispatchStubImpl { + void* get_call_ptr( + DeviceType device_type + , void *DEFAULT +#ifdef HAVE_AVX512_CPU_DEFINITION + , void *AVX512 +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , void *AVX2 +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , void *VSX +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , void *ZVECTOR +#endif + ); + + /** + * The CPU Dispatch actual method is chosen in decreasing order of preference by + * DispatchStubImpl::choose_cpu_impl() in case none is found by + * DispatchStubImpl::get_call_ptr() in cpu_dispatch_ptr. + */ + void* choose_cpu_impl( + void *DEFAULT +#ifdef HAVE_AVX512_CPU_DEFINITION + , void *AVX512 +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , void *AVX2 +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , void *VSX +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , void *ZVECTOR +#endif + ); + + // Fixing dispatch error in Windows debug builds. + // See https://github.com/pytorch/pytorch/issues/22681 for more details. + #if defined(_MSC_VER) && defined(_DEBUG) + std::atomic cpu_dispatch_ptr; + void* cuda_dispatch_ptr; + void* hip_dispatch_ptr; + void* mps_dispatch_ptr; + #else + std::atomic cpu_dispatch_ptr{nullptr}; + void* cuda_dispatch_ptr = nullptr; + void* hip_dispatch_ptr = nullptr; + void* mps_dispatch_ptr = nullptr; + #endif +}; + +template +struct DispatchStub { + using FnPtr = rT (*) (Args...); + + DispatchStub() = default; + DispatchStub(const DispatchStub&) = delete; + DispatchStub& operator=(const DispatchStub&) = delete; + +private: + FnPtr get_call_ptr(DeviceType device_type) { + return reinterpret_cast( + impl.get_call_ptr(device_type + , reinterpret_cast(DEFAULT) +#ifdef HAVE_AVX512_CPU_DEFINITION + , reinterpret_cast(AVX512) +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + , reinterpret_cast(AVX2) +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + , reinterpret_cast(VSX) +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + , reinterpret_cast(ZVECTOR) +#endif + ) + ); + } + +public: + template + rT operator()(DeviceType device_type, ArgTypes&&... 
args) { + FnPtr call_ptr = get_call_ptr(device_type); + return (*call_ptr)(std::forward(args)...); + } + + void set_cuda_dispatch_ptr(FnPtr fn_ptr) { + impl.cuda_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + void set_hip_dispatch_ptr(FnPtr fn_ptr) { + impl.hip_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + void set_mps_dispatch_ptr(FnPtr fn_ptr) { + impl.mps_dispatch_ptr = reinterpret_cast(fn_ptr); + } + + static TORCH_API FnPtr DEFAULT; +#ifdef HAVE_AVX512_CPU_DEFINITION + static TORCH_API FnPtr AVX512; +#endif +#ifdef HAVE_AVX2_CPU_DEFINITION + static TORCH_API FnPtr AVX2; +#endif +#ifdef HAVE_VSX_CPU_DEFINITION + static TORCH_API FnPtr VSX; +#endif +#ifdef HAVE_ZVECTOR_CPU_DEFINITION + static TORCH_API FnPtr ZVECTOR; +#endif +private: + DispatchStubImpl impl; +}; + +namespace { +template +struct RegisterCUDADispatch { + RegisterCUDADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + stub.set_cuda_dispatch_ptr(value); + } +}; + +template +struct RegisterMPSDispatch { + RegisterMPSDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + stub.set_mps_dispatch_ptr(value); + } +}; + +template +struct RegisterHIPDispatch { + RegisterHIPDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) { + // TODO: make this point at hip_dispatch_ptr + stub.set_cuda_dispatch_ptr(value); + } +}; + +} // anonymous namespace +// Compiler will complain if you put things like std::tuple in +// the `fn` argument of DECLARE_DISPATCH. Some possible workarounds, e.g., +// adding parentheses and using helper struct to get rid of the parentheses, do +// not work with MSVC. So do a `using`-declaration if you need to pass in such +// `fn`, e.g., grid_sampler_2d_backward_cpu_kernel in GridSampleKernel.h. +#define DECLARE_DISPATCH(fn, name) \ + struct name : DispatchStub { \ + name() = default; \ + name(const name&) = delete; \ + name& operator=(const name&) = delete; \ + }; \ + extern TORCH_API struct name name + +#define DEFINE_DISPATCH(name) struct name name + +#define REGISTER_ARCH_DISPATCH(name, arch, fn) \ + template <> name::FnPtr TORCH_API DispatchStub::arch = fn; + +#ifdef HAVE_AVX512_CPU_DEFINITION +#define REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX512, fn) +#else +#define REGISTER_AVX512_DISPATCH(name, fn) +#endif + +#ifdef HAVE_AVX2_CPU_DEFINITION +#define REGISTER_AVX2_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX2, fn) +#else +#define REGISTER_AVX2_DISPATCH(name, fn) +#endif + +#ifdef HAVE_VSX_CPU_DEFINITION +#define REGISTER_VSX_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, VSX, fn) +#else +#define REGISTER_VSX_DISPATCH(name, fn) +#endif + +#ifdef HAVE_ZVECTOR_CPU_DEFINITION +#define REGISTER_ZVECTOR_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, ZVECTOR, fn) +#else +#define REGISTER_ZVECTOR_DISPATCH(name, fn) +#endif + +// Macro to register the same kernel for all CPU arch types. This is useful +// if a kernel does not benefit from being recompiled across different arch types. 
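+// For example (sketch; `my_stub` and `my_generic_kernel` are placeholders):
+//
+//   REGISTER_ALL_CPU_DISPATCH(my_stub, &my_generic_kernel);
+//
+// expands to one REGISTER_ARCH_DISPATCH per compiled-in arch, pointing
+// every slot of the table at the same generic function.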
+#define REGISTER_ALL_CPU_DISPATCH(name, fn) \ + REGISTER_ARCH_DISPATCH(name, DEFAULT, fn) \ + REGISTER_AVX512_DISPATCH(name, fn) \ + REGISTER_AVX2_DISPATCH(name, fn) \ + REGISTER_VSX_DISPATCH(name, fn) \ + REGISTER_ZVECTOR_DISPATCH(name, fn) + +#define REGISTER_NO_CPU_DISPATCH(name) \ + REGISTER_ALL_CPU_DISPATCH(name, nullptr) + +#define REGISTER_CUDA_DISPATCH(name, fn) \ + static RegisterCUDADispatch name ## __register(name, fn); + +#define REGISTER_HIP_DISPATCH(name, fn) \ + static RegisterHIPDispatch name ## __register(name, fn); + +#define REGISTER_MPS_DISPATCH(name, fn) \ + static RegisterMPSDispatch name ## __register(name, fn); + +// NB: This macro must be used in an actual 'cu' file; if you try using +// it from a 'cpp' file it will not work! +#if defined(__CUDACC__) +#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn) +#elif defined(__HIPCC__) +// TODO: cut this over to HIP dispatch once we stop pretending that CUDA +// is HIP in the PyTorch HIPify build. +#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn) +// #define REGISTER_DISPATCH(name, fn) REGISTER_HIP_DISPATCH(name, fn) +#elif defined(__OBJC__) && defined(USE_MPS) +// NB: this macro must be used from a 'mm' file in order to dispatch a MPS kernel +#define REGISTER_DISPATCH(name, fn) REGISTER_MPS_DISPATCH(name, fn) +#elif defined(CPU_CAPABILITY) +#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn) +#define REGISTER_NO_AVX512_DISPATCH(name) \ + REGISTER_AVX512_DISPATCH(name, nullptr) +#endif + + +}} // namespace at::native + + +#if defined(__clang__) +#pragma clang diagnostic pop +#endif diff --git a/voice_bridge/torch/include/ATen/native/Distance.h b/voice_bridge/torch/include/ATen/native/Distance.h new file mode 100644 index 0000000000000000000000000000000000000000..c2d881ae66f6af001c255d23cb1acd613af70d5f --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Distance.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace at { +class Tensor; + +namespace native { + +using pdist_forward_fn = void(*)(Tensor&, const Tensor&, const double p); +using pdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&); +using cdist_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p); +using cdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&); + +DECLARE_DISPATCH(pdist_forward_fn, pdist_forward_stub); +DECLARE_DISPATCH(pdist_backward_fn, pdist_backward_stub); +DECLARE_DISPATCH(cdist_fn, cdist_stub); +DECLARE_DISPATCH(cdist_backward_fn, cdist_backward_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/DistributionTemplates.h b/voice_bridge/torch/include/ATen/native/DistributionTemplates.h new file mode 100644 index 0000000000000000000000000000000000000000..15e2be8c8f271961e00657878ab438ff2d3bfb0c --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/DistributionTemplates.h @@ -0,0 +1,352 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +namespace templates { + +// ==================================================== Random ======================================================== + +// The purpose of `update_from` and `update_to` is to find the closest valid int64_t number that can be used as actual `from`. 
+// The current implementation of `random_` uses uint64_t arithmetics and casts the result to the target dtype(scalar_t). +// This casting can result in generating numbers that happen to be greater or equal to `to` value. For instance: +// +// auto actual = torch::empty({3, 3}, torch::half); +// actual.random_(0, 65504); +// +// If random's uint64_t arithmetics produces 65503 as a random value after casting to torch::half it becomes 65504 +// and violates the requirement that random value must be less than `to`. To resolve this issue `update_from` and `update_to` +// moves `from` to the right and `to` to the left to the next closest value that won't go outside [from, to) after casting to +// the target dtype. For `to` = 65504 it moves left for (1 << (log2(to) - 11 + 1)) = 32 and becomes 65472, which is previous +// available number for torch::half dtype. +template +int64_t update_from(int64_t from) { + static_assert( + std::is_floating_point::value || + std::is_same::value || + std::is_same::value, "scalar_t must be floating-point type"); + const auto from_plus_1 = static_cast(static_cast(from + 1)); + if (from_plus_1 < from) { + int64_t from_ = std::abs(from + 1); + int n = 0; + while (from_ >>= 1) ++n; + // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult) + from = from_plus_1 + (1LL << (n - std::numeric_limits::digits + 1)); + } + return from; +} + +template +int64_t update_to(int64_t to) { + static_assert( + std::is_floating_point::value || + std::is_same::value || + std::is_same::value, "scalar_t must be floating-point type"); + const auto to_minus_1 = static_cast(static_cast(to - 1)); + if (to_minus_1 >= to) { + int64_t to_ = std::abs(to - 1); + int n = 0; + while (to_ >>= 1) ++n; + // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult) + to = to_minus_1 - (1LL << (n - std::numeric_limits::digits + 1)); + } + return to; +} + +template class random_kernel, typename RNG> +at::Tensor& random_impl(at::Tensor& self, c10::optional generator) { + auto iter = at::TensorIterator::borrowing_nullary_op(self); + random_kernel()(iter, generator); + return self; +} + +#define CHECK_OUT_OF_BOUNDS(var, name, min, max, dtype) \ + TORCH_CHECK(var >= min && var <= max, name , " is out of bounds for ", dtype); \ + +#define WARN_OUT_OF_BOUNDS(var, name, digits, dtype) \ + if (var < -(1LL << digits) || var > (1LL << digits)) { \ + TORCH_WARN(name , " is out of bounds [-(2^", digits, "), 2^", digits, "]. ", \ + "Due to precision limitations ", dtype, " can support discrete uniform distribution only within this range. 
", \ + "This warning will become an error in version 1.7 release, please fix the code in advance"); \ + } + +static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) { + const auto scalar_type = typeMetaToScalarType(dtype); + if (isFloatingType(scalar_type)) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] { + const auto min = static_cast(std::numeric_limits::lowest()); + const auto max = static_cast(std::numeric_limits::max()); + CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype); + CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype); + + constexpr auto digits = std::numeric_limits::digits; + WARN_OUT_OF_BOUNDS(from, "from", digits, dtype); + WARN_OUT_OF_BOUNDS(to_inc, "to - 1", digits, dtype); + }); + } else if (isIntegralType(scalar_type, /*includeBool=*/true)) { + AT_DISPATCH_INTEGRAL_TYPES_AND(at::ScalarType::Bool, scalar_type, "check_random_integral_bounds", [&]() { + const auto min = static_cast(std::numeric_limits::lowest()); + const auto max = static_cast(std::numeric_limits::max()); + CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype); + CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype); + }); + } else { + TORCH_CHECK(false, "check_random_bounds handles only integral, floating-point and boolean types"); + } +} + +template class random_from_to_kernel, typename RNG> +at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, c10::optional to_opt, c10::optional generator) { + uint64_t range = 0; + auto iter = at::TensorIterator::borrowing_nullary_op(self); + if (to_opt.has_value()) { + // [from, to) + int64_t to = *to_opt; + TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to); + if (isFloatingType(iter.dtype())) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_update_from_to", [&] { + from = update_from(from); + to = update_to(to); + TORCH_CHECK(from < to, "random_ expects 'from' casted to dtype to be less than 'to' casted to dtype, but got from=", from, " >= to=", to); + }); + } + check_from_to_in_range(from, to - 1, self.dtype()); + range = static_cast(to) - static_cast(from); + random_from_to_kernel()(iter, range, from, generator); + } else if (from != std::numeric_limits::lowest()) { + // [from, std::numeric_limits::max()] + int64_t to_inc = 0; + if (isFloatingType(iter.dtype())) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_from_to_range_calc", [&] { + constexpr int64_t scalar_t_max = static_cast(1) << std::numeric_limits::digits; + to_inc = scalar_t_max > std::numeric_limits::max() ? 
std::numeric_limits::max() : static_cast(scalar_t_max); + from = update_from(from); + TORCH_CHECK(from < to_inc, "random_ expects 'from' casted to dtype to be less than or equal to 'to_inc' casted to dtype, but got from=", from, " > to_inc=", to_inc); + }); + } else if (isIntegralType(iter.dtype(), /*includeBool=*/true)) { + AT_DISPATCH_INTEGRAL_TYPES_AND(at::ScalarType::Bool, self.scalar_type(), "random_from_to_range_calc", [&] { + if (std::is_same::value) { + to_inc = static_cast(true); + } else { + to_inc = static_cast(std::numeric_limits::max()); + } + }); + } else { + TORCH_CHECK(false, "random_from_to_impl handles only integral, floating-point and boolean types"); + } + check_from_to_in_range(from, to_inc, self.dtype()); + range = static_cast(to_inc) - static_cast(from) + 1; + random_from_to_kernel()(iter, range, from, generator); + } else { + // [std::numeric_limits::lowest(), std::numeric_limits::max()] + // range = 2^64 + random_from_to_kernel()(iter, generator); + } + return self; +} + +// ==================================================== Normal ======================================================== + +#define CHECK_NORMAL_TENSOR_STD(std) \ + do { \ + TORCH_CHECK( \ + !std.is_complex(), \ + "normal expects standard deviation to be non-complex"); \ + TORCH_CHECK( \ + std.numel() == 0 || std.is_meta() || std.min().ge(0).item(), \ + "normal expects all elements of std >= 0.0"); \ + } while (0) + +#define CHECK_NORMAL_STD(std) \ + TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std); + +template class normal_kernel, typename RNG> +Tensor& normal_impl_(Tensor& self, double mean, double std, c10::optional gen) { + CHECK_NORMAL_STD(std); + if (self.is_complex()) { + auto float_tensor = at::view_as_real(self); + // variance for normal distribution of the real and imaginary values + // is half of the input variance + normal_kernel()(float_tensor, mean, std/(std::sqrt(2)), gen); + } else { + normal_kernel()(self, mean, std, gen); + } + return self; +} + +template class normal_kernel, typename RNG> +Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, c10::optional gen) { + CHECK_NORMAL_STD(std); + auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous); + auto shape = at::infer_size(mean.sizes(), std_tensor.sizes()); + at::native::resize_output(output, shape); + normal_impl_(output, 0, std, gen); + output.add_(mean); + return output; +} + +template class normal_kernel, typename RNG> +Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional gen) { + CHECK_NORMAL_TENSOR_STD(std); + auto mean_tensor = at::full({}, mean, output.options()); + auto shape = at::infer_size(mean_tensor.sizes(), std.sizes()); + at::native::resize_output(output, shape); + normal_impl_(output, 0, 1, gen); + // CUDA NB: addcmul_out copies the tensor to be added into the output. + // The previous function here was addcmul_out(output, mean_tensor, output, std, 1); + // The third argument is not a constant reference and hence the samples in output are overwritten. 
+template<template<typename> class normal_kernel, typename RNG>
+Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, c10::optional<Generator> gen) {
+  CHECK_NORMAL_TENSOR_STD(std);
+  auto mean_tensor = at::full({}, mean, output.options());
+  auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
+  at::native::resize_output(output, shape);
+  normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
+  // CUDA NB: addcmul_out copies the tensor to be added into the output.
+  // The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
+  // The third argument is not a constant reference and hence the samples in output are overwritten.
+  // Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
+  output.mul_(std).add_(mean_tensor);
+  return output;
+}
+
+template<template<typename> class normal_kernel, typename RNG>
+Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+  CHECK_NORMAL_TENSOR_STD(std);
+  auto shape = at::infer_size(mean.sizes(), std.sizes());
+  at::native::resize_output(output, shape);
+  normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
+  // CUDA NB: addcmul_out copies the tensor to be added into the output.
+  // The previous function here was addcmul_out(output, mean, output, std, 1);
+  // The third argument is not a constant reference and hence the samples in output are overwritten.
+  // Consequently, the computation performed is mean + mean * std instead of mean + output * std
+  output.mul_(std).add_(mean);
+  return output;
+}
+
+template<template<typename> class normal_kernel, typename RNG>
+Tensor normal_impl(const Tensor& mean, double std, c10::optional<Generator> gen) {
+  CHECK_NORMAL_STD(std);
+  Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
+  normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
+  return ret;
+}
+
+template<template<typename> class normal_kernel, typename RNG>
+Tensor normal_impl(double mean, const Tensor& std, c10::optional<Generator> gen) {
+  CHECK_NORMAL_TENSOR_STD(std);
+  Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
+  normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
+  return ret;
+}
+
+template<template<typename> class normal_kernel, typename RNG>
+Tensor normal_impl(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
+  CHECK_NORMAL_TENSOR_STD(std);
+  auto shape = at::infer_size(mean.sizes(), std.sizes());
+  Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
+  normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
+  return ret;
+}
+
+// ==================================================== Uniform =======================================================
+
+template<template<typename> class uniform_kernel, typename RNG>
+at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, c10::optional<Generator> generator) {
+  if (self.is_complex()) {
+    auto float_tensor = at::view_as_real(self);
+    uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);
+  } else {
+    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] {
+      const auto dtype = self.dtype();
+      const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
+      const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
+      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
+      CHECK_OUT_OF_BOUNDS(to, "to", min, max, dtype);
+      TORCH_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to);
+      TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
+            "uniform_ expects to-from <= std::numeric_limits<", toString(self.scalar_type()),
+            ">::max(), but found to=", to, " and from=", from,
+            " which result in to-from to exceed the limit");
+      from = std::min(std::max(from, min), max);
+      to = std::max(std::min(to, max), min);
+    });
+    auto iter = at::TensorIterator::borrowing_nullary_op(self);
+    uniform_kernel<RNG>()(iter, from, to, generator);
+  }
+  return self;
+}
+
+// ================================================== LogNormal =======================================================
+
+template<template<typename> class log_normal_kernel, typename RNG>
+at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, c10::optional<Generator> gen) {
+  TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
+  auto iter = TensorIterator::borrowing_nullary_op(self);
+  log_normal_kernel<RNG>()(iter, mean, std, gen);
+  return self;
+}
+
+// =================================================== Geometric ======================================================
+
+template<template<typename> class geometric_kernel, typename RNG>
+Tensor& geometric_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
+  TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
+  auto iter = TensorIterator::borrowing_nullary_op(self);
+  geometric_kernel<RNG>()(iter, p, gen);
+  return self;
+}
+
+// ================================================== Exponential =====================================================
+
+template<template<typename> class exponential_kernel, typename RNG>
+Tensor& exponential_impl_(Tensor& self, double lambda, c10::optional<Generator> gen) {
+  TORCH_CHECK(lambda >= 0.0, "exponential_ expects lambda >= 0.0, but found lambda=", lambda);
+  auto iter = TensorIterator::borrowing_nullary_op(self);
+  exponential_kernel<RNG>()(iter, lambda, gen);
+  return self;
+}
+
+// ==================================================== Cauchy ========================================================
+
+template<template<typename> class cauchy_kernel, typename RNG>
+Tensor& cauchy_impl_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
+  auto iter = TensorIterator::borrowing_nullary_op(self);
+  cauchy_kernel<RNG>()(iter, median, sigma, gen);
+  return self;
+}
+
+// ==================================================== Bernoulli =====================================================
+
+template<template<typename> class bernoulli_tensor_kernel, typename RNG>
+Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, c10::optional<Generator> gen) {
+  NoNamesGuard guard;
+  at::assert_no_internal_overlap(self);
+  bernoulli_tensor_kernel<RNG>()(self, p_, gen);
+  return self;
+}
+
+template<template<typename> class bernoulli_scalar_kernel, typename RNG>
+Tensor& bernoulli_impl_(Tensor& self, double p, c10::optional<Generator> gen) {
+  TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
+  at::assert_no_internal_overlap(self);
+  bernoulli_scalar_kernel<RNG>()(self, p, gen);
+  return self;
+}
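+// A minimal sketch of the inverse-CDF pattern behind the exponential_ and
+// geometric_ kernels dispatched above, assuming <cmath> is transitively
+// available here. `Uniform01` stands for any callable returning doubles in
+// [0, 1) (e.g. a lambda around an RNG); the names below are illustrative only
+// and are not used elsewhere in ATen.
+namespace inverse_cdf_example {
+
+// Exponential(lambda): X = -log1p(-U) / lambda, U ~ Uniform[0, 1).
+template <typename Uniform01>
+double sample_exponential(Uniform01&& next, double lambda) {
+  return -std::log1p(-next()) / lambda;
+}
+
+// Geometric(p) on {1, 2, ...}: K = ceil(log(1 - U) / log1p(-p)), i.e. the
+// number of Bernoulli(p) trials up to and including the first success.
+template <typename Uniform01>
+int64_t sample_geometric(Uniform01&& next, double p) {
+  const double u = 1.0 - next();  // u in (0, 1], avoids log(0)
+  const auto k = static_cast<int64_t>(std::ceil(std::log(u) / std::log1p(-p)));
+  return k < 1 ? 1 : k;
+}
+
+} // namespace inverse_cdf_example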
+template<template<typename> class bernoulli_tensor_kernel, typename RNG>
+Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, c10::optional<Generator> gen) {
+  // result.resize_as_(self) requires self to have same dtype as result, so we
+  // use resize_ instead.
+  // TODO: Fix resize_as_. See pytorch/pytorch#11665.
+  result.resize_(self.sizes());
+  bernoulli_impl_<bernoulli_tensor_kernel, RNG>(result, self, gen);
+  namedinference::propagate_names(result, self);
+  return result;
+}
+
+#undef CHECK_OUT_OF_BOUNDS
+#undef WARN_OUT_OF_BOUNDS
+
+}}}
diff --git a/voice_bridge/torch/include/ATen/native/Distributions.h b/voice_bridge/torch/include/ATen/native/Distributions.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c334157eba9f54f33b34ab45eab92567812d2ae
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/Distributions.h
@@ -0,0 +1,518 @@
+#pragma once
+
+#include <ATen/native/Math.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/MathConstants.h>
+
+// ROCM hcc doesn't work well with using std:: in kernel functions
+#if defined(__CUDA_ARCH__)
+#include <c10/cuda/CUDAMathCompat.h>
+#define compat_exp c10::cuda::compat::exp
+#define compat_ceil c10::cuda::compat::ceil
+#define compat_floor c10::cuda::compat::floor
+#define compat_log c10::cuda::compat::log
+#define compat_pow c10::cuda::compat::pow
+#define compat_sqrt c10::cuda::compat::sqrt
+#define compat_tan c10::cuda::compat::tan
+#define compat_abs c10::cuda::compat::abs
+#define compat_log1p c10::cuda::compat::log1p
+#elif defined(__HIPCC__)
+#include <c10/hip/HIPMathCompat.h>
+#define compat_exp c10::hip::compat::exp
+#define compat_ceil c10::hip::compat::ceil
+#define compat_floor c10::hip::compat::floor
+#define compat_log c10::hip::compat::log
+#define compat_pow c10::hip::compat::pow
+#define compat_sqrt c10::hip::compat::sqrt
+#define compat_tan c10::hip::compat::tan
+#define compat_abs c10::hip::compat::abs
+#define compat_log1p c10::hip::compat::log1p
+#else
+#define compat_exp std::exp
+#define compat_ceil std::ceil
+#define compat_floor std::floor
+#define compat_log std::log
+#define compat_pow std::pow
+#define compat_sqrt std::sqrt
+#define compat_tan std::tan
+#define compat_abs std::abs
+#define compat_log1p std::log1p
+#endif
+
+namespace {
+
+#if !defined(__CUDA_ARCH__) && !defined(__HIPCC__)
+// we cannot use std::isnan directly due to some incompatibility of
+// gcc constexpr'ing and nvcc
+using std::isnan;
+#endif
+
+// Here sampler_t should be function type scalar_t(void). For gpu
+// "sampler" is a device function, but since ROCM doesn't have
+// equivalent to nvstd::function, we use a template type parameter to
+// capture it.
+template<typename scalar_t, typename sampler_t>
+struct BaseSampler {
+  sampler_t sampler;
+  C10_DEVICE BaseSampler(const sampler_t& sampler): sampler(sampler) {}
+  C10_DEVICE scalar_t sample() {
+    return sampler();
+  }
+};
+
+// The function `sample_gamma` is adapted from Numpy's distributions.c
+// implementation. It is MIT licensed, so here is the copyright:
+
+/* Copyright 2005 Robert Kern (robert.kern@gmail.com)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +template +C10_DEVICE scalar_t sample_gamma(scalar_t alpha, BaseSampler& standard_uniform, BaseSampler& standard_normal) { + accscalar_t scale = 1.0f; + + // Boost alpha for higher acceptance probability. + if (alpha < 1.0f) { + if (alpha == 0.f) return 0.f; + scale *= compat_pow(1 - standard_uniform.sample(), 1.0f / alpha); + alpha += 1.0f; + } + + // This implements the acceptance-rejection method of Marsaglia and Tsang (2000) + // doi:10.1145/358407.358414 + const accscalar_t d = alpha - 1.0f / 3.0f; + const accscalar_t c = 1.0f / compat_sqrt(9.0f * d); + for (;;) { + accscalar_t x, y; + do { + x = standard_normal.sample(); + y = 1.0f + c * x; + } while (y <= 0); + const accscalar_t v = y * y * y; + const accscalar_t u = 1 - standard_uniform.sample(); + const accscalar_t xx = x * x; + if (u < 1.0f - 0.0331f * xx * xx) + return static_cast(scale * d * v); + if (compat_log(u) < 0.5f * xx + d * (1.0f - v + compat_log(v))) + return static_cast(scale * d * v); + } +} + +/* the functions stirling_approx_tail, binomial_inversion, and btrs are adapted + * from TensorFlow's random_binomial_op.cc implementation. That code is under + * copyright: 2019 The TensorFlow Authors. + * + * It was released under the Apache License, Version 2.0 (the "License"), available at: + * http://www.apache.org/licenses/LICENSE-2.0 + */ + +template +C10_DEVICE scalar_t stirling_approx_tail(scalar_t k) { + const static scalar_t kTailValues[] = { + 0.0810614667953272, + 0.0413406959554092, + 0.0276779256849983, + 0.02079067210376509, + 0.0166446911898211, + 0.0138761288230707, + 0.0118967099458917, + 0.0104112652619720, + 0.00925546218271273, + 0.00833056343336287 + }; + if (k <= 9) { + return kTailValues[static_cast(k)]; + } + scalar_t kp1sq = (k + 1) * (k + 1); + return (1.0 / 12 - (1.0 / 360 - 1.0 / 1260 / kp1sq) / kp1sq) / (k + 1); +} + + +template +C10_DEVICE scalar_t binomial_inversion(scalar_t count, scalar_t prob, BaseSampler& standard_uniform) { + accscalar_t U; + accscalar_t geom_sum = 0; + scalar_t num_geom = 0; + + accscalar_t logprob = compat_log1p(-prob); + + while (1) { + U = standard_uniform.sample(); + accscalar_t geom = compat_ceil(compat_log(U) / logprob); + geom_sum += geom; + if (geom_sum > count) { + break; + } + num_geom = num_geom + 1; + } + return num_geom; +} + +template +C10_DEVICE scalar_t btrs(scalar_t count, scalar_t prob, BaseSampler& standard_uniform) { + scalar_t k; + accscalar_t U, V, us; + + // This is spq in the paper. + const accscalar_t stddev = compat_sqrt(count * prob * (1 - prob)); + + // Other coefficients for Transformed Rejection sampling. + const accscalar_t b = 1.15 + 2.53 * stddev; + const accscalar_t a = -0.0873 + 0.0248 * b + 0.01 * prob; + const accscalar_t c = count * prob + 0.5; + const accscalar_t v_r = 0.92 - 4.2 / b; + const accscalar_t r = prob / (1 - prob); + + const accscalar_t alpha = (2.83 + 5.1 / b) * stddev; + const accscalar_t m = compat_floor((count + 1) * prob); + + while (1) { + U = standard_uniform.sample() - 0.5; + V = standard_uniform.sample(); + + us = 0.5 - compat_abs(U); + k = static_cast(compat_floor((2 * a / us + b) * U + c)); + + // Reject non-sensical answers. 
+ if (k < 0 || k > count) { + continue; + } + // Region for which the box is tight, and we can return our calculated value. + // This should happen 0.86 * v_r times. In the limit as n * p is large, + // the acceptance rate converges to ~79% (and in the lower regime it is ~24%). + if (us >= 0.07 && V <= v_r) { + return k; + } + + // This deviates from Hormann's BTRS algorithm, as there is a log missing. + // For all (u, v) pairs outside of the bounding box, this calculates the + // transformed-reject ratio. + V = compat_log(V * alpha / (a / (us * us) + b)); + accscalar_t upperbound = + ((m + 0.5) * compat_log((m + 1) / (r * (count - m + 1))) + + (count + 1) * compat_log((count - m + 1) / (count - k + 1)) + + (k + 0.5) * compat_log(r * (count - k + 1) / (k + 1)) + + stirling_approx_tail(m) + stirling_approx_tail(count - m) - + stirling_approx_tail(k) - stirling_approx_tail(count - k)); + + if (V <= upperbound) { + return k; + } + } +} + +template +C10_DEVICE scalar_t sample_binomial(scalar_t count, scalar_t prob, BaseSampler& standard_uniform) { + if (count <= 0.0 || prob <= 0.0) { + return 0; + } else if (prob >= 1.0) { + return count; + } else if (prob <= 0.5) { + if (count * prob >= 10.0) { + // btrs + return btrs(count, prob, standard_uniform); + } else { + // binomial inversion + return binomial_inversion(count, prob, standard_uniform); + } + } else if (prob > 0.5) { + scalar_t qprob = 1.0 - prob; + if (count * qprob >= 10.0) { + // btrs + return count - btrs(count, qprob, standard_uniform); + } else { + // count - binomial inversion + return count - binomial_inversion(count, qprob, standard_uniform); + } + } else { + // prob is nan? + return static_cast(NAN); + } +} + +/* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library] in ATen/native/Math.h. + */ +template +C10_DEVICE static inline scalar_t digamma_one(scalar_t x) { + constexpr accscalar_t PSI_10 = 2.25175258906672110764; + if (x == 0) { + return INFINITY; + } + accscalar_t additional_summand = 0; + int x_is_integer = x == compat_floor(x); + if (x < 0) { + if (x_is_integer) { + return INFINITY; + } + // it is more standard to write this as recursion, but + // nvcc does not like that + additional_summand = -c10::pi / + compat_tan(c10::pi * x); + x = 1 - x; + } + + // Push x to be >= 10 + accscalar_t result = 0; + while (x < 10) { + result -= 1 / x; + x += 1; + } + if (x == 10) { + return result + PSI_10 + additional_summand; + } + + // Compute asymptotic digamma + static const accscalar_t A[] = { + 8.33333333333333333333E-2, + -2.10927960927960927961E-2, + 7.57575757575757575758E-3, + -4.16666666666666666667E-3, + 3.96825396825396825397E-3, + -8.33333333333333333333E-3, + 8.33333333333333333333E-2, + }; + + accscalar_t y = 0; + if (x < 1.0e17f) { + accscalar_t z = 1.0 / (x * x); + y = z * polevl(z, A, 6); + } + return static_cast( + result + compat_log(x) - (0.5f / x) - y + additional_summand); +} + +// Computes the reparameterized gradient -(d/dalpha cdf(x;alpha)) / pdf(x;alpha) +// for random number x drawn from a standard Gamma distribution Gamma(alpha). +template +C10_HOST_DEVICE scalar_t standard_gamma_grad_one(scalar_t alpha_, scalar_t x_) { + // Use a Taylor series expansion for small x. 
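+  // (Identity used by the small-x branch below, stated here for reference:
+  // term-by-term integration of t^(alpha-1) e^(-t) gives, for the lower
+  // incomplete gamma function,
+  //   gamma(alpha, x) = x^alpha * sum_i (-x)^i / (i! * (alpha + i)),
+  // so `series1` accumulates sum_i (-x)^i / (i! * (alpha + i)) and `series2`
+  // its negated alpha-derivative, sum_i (-x)^i / (i! * (alpha + i)^2), both
+  // truncated after six terms, which is accurate since x < 0.8.)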
+ accscalar_t x = static_cast(x_); + accscalar_t alpha = static_cast(alpha_); + if (x < 0.8f) { + accscalar_t numer = 1; + accscalar_t denom = alpha; + auto series1 = numer / denom; + auto series2 = numer / (denom * denom); + for (int i = 1; i <= 5; ++i) { + numer *= -x / static_cast(i); + denom += 1; + series1 += numer / denom; + series2 += numer / (denom * denom); + } + const auto pow_x_alpha = compat_pow(x, alpha); + const auto gamma_pdf = compat_pow(x, alpha - 1) * compat_exp(-x); + const auto gamma_cdf = pow_x_alpha * series1; + const auto gamma_cdf_alpha = + (compat_log(x) - digamma_one(alpha)) * + gamma_cdf - + pow_x_alpha * series2; + const auto result = -gamma_cdf_alpha / gamma_pdf; + return isnan(result) ? static_cast( 0.f ) : static_cast(result); + } + + // Use a Rice saddle point expansion for large alpha. + if (alpha > 8.0f) { + if (0.9f * alpha <= x && x <= 1.1f * alpha) { + const auto numer_1 = 1 + 24 * alpha * (1 + 12 * alpha); + const auto numer_2 = 1440 * (alpha * alpha) + 6 * x * (53 - 120 * x) + - 65 * x * x / alpha + alpha * (107 + 3600 * x); + const auto denom = 1244160 * (alpha * alpha) * (alpha * alpha); + return static_cast(numer_1 * numer_2 / denom); + } + const auto denom = compat_sqrt(8 * alpha); + const auto term2 = denom / (alpha - x); + const auto term3 = compat_pow( + x - alpha - alpha * compat_log(x / alpha), + static_cast(-1.5)); + const auto term23 = (x < alpha) ? term2 - term3 : term2 + term3; + const auto term1 = compat_log(x / alpha) * term23 - + compat_sqrt(2 / alpha) * (alpha + x) / ((alpha - x) * (alpha - x)); + const auto stirling = 1 + 1 / (12 * alpha) * (1 + 1 / (24 * alpha)); + const auto numer = x * term1; + return static_cast(-stirling * numer / denom); + } + + // Use a bivariate rational approximation to the reparameterized gradient. + const auto u = compat_log(x / alpha); + const auto v = compat_log(alpha); + static const accscalar_t coef_uv[3][8] = { + {0.16009398, -0.094634809, 0.025146376, -0.0030648343, + 1, 0.32668115, 0.10406089, 0.0014179084}, + {0.53487893, 0.1298071, 0.065735949, -0.0015649758, + 0.16639465, 0.020070113, -0.0035938915, -0.00058392623}, + {0.040121004, -0.0065914022, -0.0026286047, -0.0013441777, + 0.017050642, -0.0021309326, 0.00085092367, -1.5247877e-07}, + }; + accscalar_t coef_v[8]; + for (int i = 0; i < 8; ++ i) { + coef_v[i] = coef_uv[0][i] + u * (coef_uv[1][i] + u * coef_uv[2][i]); + } + const auto p = coef_v[0] + v * (coef_v[1] + v * (coef_v[2] + v * coef_v[3])); + const auto q = coef_v[4] + v * (coef_v[5] + v * (coef_v[6] + v * coef_v[7])); + return static_cast(compat_exp(p / q)); +} + +// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha. +// Assumes x is close to zero and uses a Taylor expansion. +template +C10_DEVICE static inline scalar_t _beta_grad_alpha_small(scalar_t x, scalar_t alpha, scalar_t beta) { + const scalar_t factor = digamma_one(alpha) + - digamma_one(alpha + beta) - compat_log(x); + scalar_t numer = 1; + scalar_t series = numer / alpha * (factor + 1 / alpha); + for (int i = 1; i <= 10; ++i) { + scalar_t casted_i = static_cast(i); + numer *= (casted_i - beta) * x / casted_i; + const scalar_t denom = alpha + casted_i; + series += numer / denom * (factor + 1 / denom); + } + const scalar_t result = x * compat_pow(1 - x, -beta) * series; + return isnan(result) ? static_cast( 0.f ) : result; +} + +// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt beta. +// Assumes x is close to zero and uses a Taylor expansion. 
+template +C10_DEVICE static inline scalar_t _beta_grad_beta_small(scalar_t x, scalar_t alpha, scalar_t beta) { + const scalar_t factor = digamma_one(alpha + beta) - digamma_one(beta); + scalar_t numer = 1, betas = 1, dbetas = 0, series = factor / alpha; + for (int i = 1; i <= 8; ++i) { + scalar_t casted_i = static_cast(i); + numer *= -x / casted_i; + dbetas = dbetas * (beta - casted_i) + betas; + betas = betas * (beta - casted_i); + series += numer / (alpha + casted_i) * (dbetas + factor * betas); + } + const scalar_t result = -compat_pow(1 - x, 1 - beta) * series; + return isnan(result) ? static_cast( 0.f ) : result; +} + +// Approximate reparameterized gradient of Beta(x,alpha,beta) wrt alpha. +// Assumes alpha and beta are both large and uses a Rice saddle point expansion. +// To ensure numerical stability, this computation is performed at higher precision. +template +C10_DEVICE static inline scalar_t _beta_grad_alpha_mid(accscalar_t x, accscalar_t alpha, accscalar_t beta) { + const accscalar_t total = alpha + beta; + const accscalar_t mean = alpha / total; + const accscalar_t std = compat_sqrt(alpha * beta / (total + 1)) / total; + if (mean - 0.1 * std <= x && x <= mean + 0.1 * std) { + // Avoid the singularity at x = mean. + const accscalar_t poly = 47 * x * (beta * beta) * (beta * beta) + alpha * ( + (43 + 20 * (16 + 27 * beta) * x) * (beta * beta) * beta + alpha * ( + 3 * (59 + 180 * beta - 90 * x) * (beta * beta) + alpha * ( + (453 + 1620 * beta * (1 - x) - 455 * x) * beta + alpha * ( + 8 * (1 - x) * (135 * beta - 11))))); + const accscalar_t prefactor_num = (1 + 12 * alpha) * (1 + 12 * beta) / (total * total); + const accscalar_t prefactor_den = 12960 * alpha * alpha * alpha * beta * beta * (1 + 12 * total); + return prefactor_num / (1 - x) * poly / prefactor_den; + } + const accscalar_t prefactor = -x / compat_sqrt(2 * alpha * beta / total); + const accscalar_t stirling = (1 + 1 / (12 * alpha) + 1 / (288 * alpha * alpha)) + * (1 + 1 / (12 * beta) + 1 / (288 * beta * beta)) + / (1 + 1 / (12 * total) + 1 / (288 * total * total)); + const accscalar_t term1_num = 2 * (alpha * alpha) * (x - 1) + alpha * beta * (x - 1) - x * (beta * beta); + const accscalar_t axbx = alpha * (x - 1) + beta * x; + const accscalar_t term1_den = compat_sqrt(2 * alpha / beta) * compat_pow(total, static_cast(1.5f)) * axbx * axbx; + const accscalar_t term1 = term1_num / term1_den; + const accscalar_t term2 = 0.5f * compat_log(alpha / (total * x)); + const accscalar_t term3_num = compat_sqrt(8 * alpha * beta / total); + const accscalar_t term3_den = beta * x + alpha * (x - 1); + const accscalar_t term3 = term3_num / term3_den; + const accscalar_t term4_base = beta * compat_log(beta / (total * (1 - x))) + + alpha * compat_log(alpha / (total * x)); + const accscalar_t term4 = compat_pow(term4_base, static_cast(-1.5f)); + const accscalar_t term1234 = term1 + term2 * (term3 + (x < mean ? term4 : -term4)); + return static_cast(stirling * prefactor * term1234); +} + +// Computes a scaled reparameterized gradient +// -(d/dalpha cdf(x;alpha,beta)) / pdf(x;alpha,beta) / (1-x) +// for random number x drawn from a Beta distribution Beta(alpha,beta). +// This function inputs total=alpha+beta to make it easy to implement +// Dirichlet reparameterized gradients in terms of Betas. 
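+// (Why the (x, alpha, total) parameterization suffices: by the aggregation
+// property of the Dirichlet distribution, if (x_1, ..., x_K) is distributed as
+// Dirichlet(alpha_1, ..., alpha_K) with total = sum_k alpha_k, then each
+// coordinate marginally satisfies x_j ~ Beta(alpha_j, total - alpha_j), so a
+// Beta gradient written in terms of (x, alpha, total) can be applied
+// coordinate-wise to reparameterize Dirichlet samples.)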
+template +C10_HOST_DEVICE static inline scalar_t dirichlet_grad_one(scalar_t x, scalar_t alpha, scalar_t total) { + accscalar_t x_ = static_cast(x); + accscalar_t alpha_ = static_cast(alpha); + accscalar_t total_ = static_cast(total); + + const scalar_t beta = total - alpha; + const accscalar_t beta_ = total_ - alpha_; + const scalar_t boundary = total * x * (1 - x); + + // Use an asymptotic approximation for x close to 0. + if (x <= 0.5f && boundary < 2.5f) { + return _beta_grad_alpha_small(x, alpha, beta); + } + + // Use an asymptotic approximation for x close to 1. + if (x >= 0.5f && boundary < 0.75f) { + return -_beta_grad_beta_small(1 - x, beta, alpha); + } + + // Use an asymptotic approximation when alpha and (total - alpha) are both large. + if (alpha > 6 && beta > 6) { + return _beta_grad_alpha_mid(x_, alpha_, beta_); + } + + // Use a rational correction to an analytic approximation. + static const accscalar_t c[2][3][3][4] = { + {{{1.003668233, -0.01061107488, -0.0657888334, 0.01201642863}, + {0.6336835991, -0.3557432599, 0.05486251648, -0.001465281033}, + {-0.03276231906, 0.004474107445, 0.002429354597, -0.0001557569013}}, + {{0.221950385, -0.3187676331, 0.01799915743, 0.01074823814}, + {-0.2951249643, 0.06219954479, 0.01535556598, 0.001550077057}, + {0.02155310298, 0.004170831599, 0.001292462449, 6.976601077e-05}}, + {{-0.05980841433, 0.008441916499, 0.01085618172, 0.002319392565}, + {0.02911413504, 0.01400243777, -0.002721828457, 0.000751041181}, + {0.005900514878, -0.001936558688, -9.495446725e-06, 5.385558597e-05}}}, + {{{1, -0.02924021934, -0.04438342661, 0.007285809825}, + {0.6357567472, -0.3473456711, 0.05454656494, -0.002407477521}, + {-0.03301322327, 0.004845219414, 0.00231480583, -0.0002307248149}}, + {{0.5925320577, -0.1757678135, 0.01505928619, 0.000564515273}, + {0.1014815858, -0.06589186703, 0.01272886114, -0.0007316646956}, + {-0.007258481865, 0.001096195486, 0.0003934994223, -4.12701925e-05}}, + {{0.06469649321, -0.0236701437, 0.002902096474, -5.896963079e-05}, + {0.001925008108, -0.002869809258, 0.0008000589141, -6.063713228e-05}, + {-0.0003477407336, 6.959756487e-05, 1.097287507e-05, -1.650964693e-06}}}, + }; + const accscalar_t u = compat_log(x_); + const accscalar_t a = compat_log(alpha_) - u; + const accscalar_t b = compat_log(total_) - a; + const accscalar_t pow_u[3] = {1, u, u * u}; + const accscalar_t pow_a[3] = {1, a, a * a}; + accscalar_t p = 0.0; + accscalar_t q = 0.0; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + const accscalar_t ua = pow_u[i] * pow_a[j]; + p += ua * (c[0][i][j][0] + b * (c[0][i][j][1] + b * (c[0][i][j][2] + b * c[0][i][j][3]))); + q += ua * (c[1][i][j][0] + b * (c[1][i][j][1] + b * (c[1][i][j][2] + b * c[1][i][j][3]))); + } + } + const accscalar_t approx = x_ * (digamma_one(total_) - digamma_one(alpha_)) / beta_; + return static_cast(p / q * approx); +} + +} // namespace diff --git a/voice_bridge/torch/include/ATen/native/EmbeddingBag.h b/voice_bridge/torch/include/ATen/native/EmbeddingBag.h new file mode 100644 index 0000000000000000000000000000000000000000..6600c661d46ada19a626672727085f9750cb1c9d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/EmbeddingBag.h @@ -0,0 +1,140 @@ +#include +#include + +#ifdef USE_FBGEMM +#include +#endif + +namespace at { +namespace native { + +void check_arguments( + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const int64_t mode, + const c10::optional& per_sample_weights, + bool include_last_offset); + +void make_bag_size_out( + Tensor& 
bag_size_out, + const Tensor& offsets, + const Tensor& indices, + const int64_t mode, + const bool include_last_offset, + const bool requires_grad); + +void make_max_indices_out( + Tensor& max_indices_out, + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const Tensor& bag_size, + const int64_t mode, + bool include_last_offset); + +void make_offset2bag_out( + Tensor& offset2bag, + Tensor& output, + const Tensor& weight, + const Tensor& indices, + const Tensor& offsets, + const int64_t mode, + const c10::optional& per_sample_weights, + const int64_t padding_idx = -1); + +#ifdef USE_FBGEMM + +template +struct _CallbackAndBlockSize { + using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature::Type; + + int64_t blockSize = -1; + TCallback callback = nullptr; + + static TCallback generateCallback(int64_t block_size) { + return fbgemm::GenerateEmbeddingSpMDM( + block_size, + has_weight, + /* normalize_by_lengths */false, + /* prefetch */16, + /* is_weight_positional */false, + /* use_offsets */true); + } + + _CallbackAndBlockSize() = default; + + explicit _CallbackAndBlockSize(c10::optional maybe_block_size) + : blockSize(maybe_block_size.value_or(-1)) + , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr) + {} +}; + +template +struct _EmbeddingBagKernelCacheImpl : private StorageMixins... { + + _EmbeddingBagKernelCacheImpl() = default; + // use each of the mixins to store corresponding kernel and block size + explicit _EmbeddingBagKernelCacheImpl(c10::optional maybe_block_size) + : StorageMixins(maybe_block_size)... + {} + + // this method is thread safe (call sites may call from different threads) + template + typename _CallbackAndBlockSize::TCallback + getCallback(int64_t block_size) const { + // if the cache doesn't store the kernel for the incoming block size + // (so it is different from the one stored in corresponding mixin) + // regenerate the kernel (not writing it into the cache so we avoid locks) + if (block_size != _CallbackAndBlockSize::blockSize) { + return _CallbackAndBlockSize::generateCallback(block_size); + } + // else retrieve the cached kernel from the corresponding mixin + return _CallbackAndBlockSize::callback; + } +}; + +// instantiate the cache with the list of storage mixins +// for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file +using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl< + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize, + _CallbackAndBlockSize>; +#else +struct _EmbeddingBagKernelCache { + explicit _EmbeddingBagKernelCache(c10::optional /* maybe_block_size */) {} +}; +#endif + +void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag, + Tensor& bag_size, Tensor* max_indices, + const Tensor &weight, const Tensor &indices, + const Tensor &offsets, const int64_t mode = 0, + const c10::optional& per_sample_weights = c10::nullopt, + bool include_last_offset = false, + int64_t padding_idx = -1, + _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr); + +void _embedding_bag_cpu_out( + at::Tensor& output, + at::Tensor& offset2bag, + at::Tensor& bag_size, + at::Tensor* p_max_indices, + const at::Tensor& weight, + const at::Tensor& indices, + const at::Tensor& offsets, + const bool scale_grad_by_freq, + const int64_t mode, + const bool sparse, + const c10::optional& per_sample_weights, + const bool 
include_last_offset,
+    const c10::optional<int64_t>& padding_idx,
+    _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
+
+} // native
+} // at
diff --git a/voice_bridge/torch/include/ATen/native/Fill.h b/voice_bridge/torch/include/ATen/native/Fill.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6de9580ae7c33340d2929c4c5f743e4aaf42339
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/Fill.h
@@ -0,0 +1,21 @@
+// Functions that fill Tensors with constants. Implementations are in Fill.cpp.
+
+#pragma once
+
+#include <ATen/native/DispatchStub.h>
+
+namespace c10 {
+class Scalar;
+}
+
+namespace at {
+class Tensor;
+struct TensorIterator;
+
+namespace native {
+
+DECLARE_DISPATCH(void(*)(TensorIterator&, const c10::Scalar&), fill_stub);
+
+Tensor& fill_out(Tensor& self, const Scalar& value);
+
+}} // namespace at::native
diff --git a/voice_bridge/torch/include/ATen/native/ForeachUtils.h b/voice_bridge/torch/include/ATen/native/ForeachUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..033052f401f6bd91efb5f3c1cd71f4defce90ccb
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/ForeachUtils.h
@@ -0,0 +1,137 @@
+#pragma once
+
+#include <ATen/core/Tensor.h>
+#include <c10/util/irange.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/NativeFunctions.h>
+#else
+#include <ATen/ops/result_type_native.h>
+#endif
+
+namespace at {
+namespace native {
+namespace {
+// Check if the tensor list has either a boolean tensor or an integral tensor
+bool has_integral_tensor(TensorList tensors, const bool includeBool) {
+  return std::any_of(tensors.begin(), tensors.end(),
+    [&includeBool](const auto & t) { return at::isIntegralType(t.scalar_type(), includeBool); });
+}
+// Check if the tensor list has bool tensors
+bool has_bool_tensor(TensorList tensors) {
+  return std::any_of(tensors.begin(), tensors.end(),
+    [](const auto & t) -> bool { return t.scalar_type() == ScalarType::Bool; });
+}
+
+// Check foreach API restrictions
+// - Tensor lists must be non-empty.
+// - All TensorLists and ScalarLists must have the same number of elements.
+// - Corresponding tensors must have the same size (see the standalone sketch below).
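+// A minimal standalone sketch of the length/shape rules listed above, phrased
+// over plain size vectors instead of TensorList (a hypothetical helper for
+// illustration only, not used elsewhere; assumes <vector> is transitively
+// available via the includes above):
+inline bool foreach_shapes_match_example(
+    const std::vector<std::vector<int64_t>>& a,
+    const std::vector<std::vector<int64_t>>& b) {
+  if (a.empty() || b.empty() || a.size() != b.size()) {
+    return false;                    // lists non-empty and of equal length
+  }
+  for (size_t i = 0; i < a.size(); ++i) {
+    if (a[i] != b[i]) {
+      return false;                  // corresponding tensors have the same size
+    }
+  }
+  return true;
+}
+// The check_foreach_api_restrictions overloads below enforce these rules for
+// one, two and three tensor lists, plus an optional scalar list.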
+void check_foreach_api_restrictions(TensorList tensors) { + TORCH_CHECK(tensors.size() > 0, "Tensor list must have at least one tensor."); +} + +void check_foreach_api_restrictions(TensorList tensors, ArrayRef scalars) { + check_foreach_api_restrictions(tensors); + TORCH_CHECK(tensors.size() == scalars.size(), "Tensor list must have same number of elements as scalar list."); +} + +void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2) { + TORCH_CHECK(tensors1.size() > 0, "Tensor list must have at least one tensor."); + TORCH_CHECK(tensors2.size() > 0, "Tensor list must have at least one tensor."); + TORCH_CHECK(tensors1.size() == tensors2.size(), "Tensor lists must have the same number of tensors, got ", tensors1.size(), " and ", tensors2.size()); +} + +void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2, TensorList tensors3) { + TORCH_CHECK(tensors1.size() > 0, "Tensor list must have at least one tensor."); + TORCH_CHECK(tensors2.size() > 0, "Tensor list must have at least one tensor."); + TORCH_CHECK(tensors3.size() > 0, "Tensor list must have at least one tensor."); + TORCH_CHECK(tensors1.size() == tensors2.size(), "Tensor lists must have the same number of tensors, got ", tensors1.size(), " and ", tensors2.size()); + TORCH_CHECK(tensors1.size() == tensors3.size(), "Tensor lists must have the same number of tensors, got ", tensors1.size(), " and ", tensors3.size()); +} + +void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2, TensorList tensors3, ArrayRef scalars) { + check_foreach_api_restrictions(tensors1, tensors2, tensors3); + TORCH_CHECK(tensors1.size() == scalars.size(), "Tensor list must have same number of elements as scalar list, got ", tensors1.size(), " and ", scalars.size()); +} + +// To go via 'fast' path, several conditions must be satisfied +// - All tensors in all lists must have the same dtype. +// - All tensors must be on the same device +// - All tensors must have strided layout +// - All tensors must be non-overlapping and dense +// - Resulting tensor must have the same dtype as the input one + +// Please, make sure to call check_foreach_api_restrictions before calling this method. +// There is a set of preconditions that have to be satisfied. +bool check_fast_path_restrictions( + ArrayRef tensorLists, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + const auto expected_dtype = tensorLists[0][0].dtype(); + const auto expected_device = tensorLists[0][0].device(); + + auto is_tensor_okay = [&](const Tensor& tensor) { + return tensor.dtype() == expected_dtype && + tensor.device() == expected_device && + tensor.layout() == at::kStrided && + tensor.is_non_overlapping_and_dense(); + }; + + for (const auto& tensorList : tensorLists) { + for (const auto& tensor : tensorList) { + if (!is_tensor_okay(tensor)) { + return false; + } + } + } + + // Check if corresponding tensors in tensor lists have the same sizes and strides. + for (const auto& tensor_list : tensorLists) { + for (const auto j : c10::irange(tensorLists[0].size())) { + if (tensorLists[0][j].sizes() != tensor_list[j].sizes()) { + return false; + } + if (tensorLists[0][j].strides() != tensor_list[j].strides()) { + return false; + } + } + } + + // This function has already checked that `tensorList[j][i]` for all j, i has the same dtype + // using `is_tensor_okay` function above. 
+ // This means we only need to check if {tensorList[0][0], tensorList[0][1], tensorList[0][2], ...} + // do type promotion with scalarLIst. + for (const auto i : c10::irange(tensorLists[0].size())) { + // For division, integer inputs will result in float. + if (does_op_promote_integer_inputs_to_float) { + if (at::isIntegralType(tensorLists[0][i].scalar_type(), /*includeBool*/ true)) { + return false; + } + } + if (scalarList.size() > 0) { + const auto& scalar = scalarList.size() == 1 ? scalarList[0] : scalarList[i]; + const auto& tensor = tensorLists[0][i]; + // note(mkozuki): This check might be responsible for `_foreach_add(bool_tensors, bool_tensors)` + // being pushed to slow path. + if (tensor.scalar_type() != at::native::result_type(scalar, tensor)) { + return false; + } + } + } + + return true; +} + +bool can_use_fast_route(ArrayRef tensorLists, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + return check_fast_path_restrictions(tensorLists, scalarList, does_op_promote_integer_inputs_to_float); +} + +bool can_use_fast_route(TensorList tensors1, TensorList tensors2, bool does_op_promote_integer_inputs_to_float = false) { + return can_use_fast_route({tensors1, tensors2}, {}, does_op_promote_integer_inputs_to_float); +} + +} +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/FunctionOfAMatrixUtils.h b/voice_bridge/torch/include/ATen/native/FunctionOfAMatrixUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..68b26ed1381133db9de0ba7cb2187578fb7d680d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/FunctionOfAMatrixUtils.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include + +namespace at { +struct TensorIterator; + +namespace native { + +using _compute_linear_combination_fn = void(*)( + TensorIterator& iter, + int64_t in_stride, + int64_t coeff_stride, + int64_t num_summations +); + +DECLARE_DISPATCH(_compute_linear_combination_fn, _compute_linear_combination_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/GridSampler.h b/voice_bridge/torch/include/ATen/native/GridSampler.h new file mode 100644 index 0000000000000000000000000000000000000000..f4a735032430a150f549a928c5bcc7bf566702dc --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/GridSampler.h @@ -0,0 +1,298 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace at { namespace native { + +using detail::GridSamplerInterpolation; +using detail::GridSamplerPadding; + +// Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value, +// where we view each pixel as an area between (idx - 0.5) and (idx + 0.5). +// if align_corners: -1 and +1 get sent to the centers of the corner pixels +// -1 --> 0 +// +1 --> (size - 1) +// scale_factor = (size - 1) / 2 +// if not align_corners: -1 and +1 get sent to the image edges +// -1 --> -0.5 +// +1 --> (size - 1) + 0.5 == size - 0.5 +// scale_factor = size / 2 +template +static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size, + bool align_corners) { + if (align_corners) { + // unnormalize coord from [-1, 1] to [0, size - 1] + return ((coord + 1) / 2) * (size - 1); + } else { + // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] + return ((coord + 1) * size - 1) / 2; + } +} + +// grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize +// except that it also returns the `d output / d input` via pointer argument +// `grad_in`. 
+// This is useful in the backward pass of grid_sampler. +template +static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size, + bool align_corners, scalar_t *grad_in) { + if (align_corners) { + // unnormalize coord from [-1, 1] to [0, size - 1] + *grad_in = static_cast(size - 1) / 2; + return ((coord + 1) / 2) * (size - 1); + } else { + // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] + *grad_in = static_cast(size) / 2; + return ((coord + 1) * size - 1) / 2; + } +} + +// Clips coordinates to between 0 and clip_limit - 1 +template +static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) { + return std::min(static_cast(clip_limit - 1), std::max(in, static_cast(0))); +} + +// clip_coordinates_set_grad works similarly to clip_coordinates except that +// it also returns the `d output / d input` via pointer argument `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit, + scalar_t *grad_in) { + // Note that it is important for the gradient calculation that borders + // are considered out of bounds. + if (in <= static_cast(0)) { + *grad_in = static_cast(0); + return static_cast(0); + } else { + scalar_t max = static_cast(clip_limit - 1); + if (in >= max) { + *grad_in = static_cast(0); + return max; + } else { + *grad_in = static_cast(1); + return in; + } + } +} + +// Reflects coordinates until they fall between low and high (inclusive). +// The bounds are passed as twice their value so that half-integer values +// can be represented as ints. +template +static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low, + int64_t twice_high) { + if (twice_low == twice_high) { + return static_cast(0); + } + scalar_t min = static_cast(twice_low) / 2; + scalar_t span = static_cast(twice_high - twice_low) / 2; + in = std::fabs(in - min); + // `fmod` returns same sign as `in`, which is positive after the `fabs` above. + scalar_t extra = std::fmod(in, span); + int flips = static_cast(std::floor(in / span)); + if (flips % 2 == 0) { + return extra + min; + } else { + return span - extra + min; + } +} + +// reflect_coordinates_set_grad works similarly to reflect_coordinates except +// that it also returns the `d output / d input` via pointer argument +// `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low, + int64_t twice_high, scalar_t *grad_in) { + if (twice_low == twice_high) { + *grad_in = static_cast(0); + return static_cast(0); + } + int grad_in_mult_; + scalar_t min = static_cast(twice_low) / 2; + scalar_t span = static_cast(twice_high - twice_low) / 2; + in = in - min; + if (in < static_cast(0)) { + grad_in_mult_ = -1; + in = -in; + } else { + grad_in_mult_ = 1; + } + // `fmod` returns same sign as `in`, which is positive after the `if` above. 
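+  // (Worked example: size = 5 with align_corners = true gives twice_low = 0,
+  // twice_high = 8, so min = 0 and span = 4. For in = 6.2: extra =
+  // fmod(6.2, 4) = 2.2 and flips = floor(6.2 / 4) = 1, an odd flip count, so
+  // the reflected coordinate is span - extra + min = 1.8 and the gradient
+  // sign is negated.)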
+ scalar_t extra = std::fmod(in, span); + int flips = static_cast(std::floor(in / span)); + if (flips % 2 == 0) { + *grad_in = static_cast(grad_in_mult_); + return extra + min; + } else { + *grad_in = static_cast(-grad_in_mult_); + return span - extra + min; + } +} + +// Mapping the out-of-boundary points back into boundary +// This would only affect padding_mode=border or reflection +template +static inline scalar_t compute_coordinates(scalar_t coord, int64_t size, + GridSamplerPadding padding_mode, + bool align_corners) { + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates(coord, 0, 2*(size - 1)); + } else { + coord = reflect_coordinates(coord, -1, 2*size - 1); + } + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } + return coord; +} + +// Computes the pixel source index value for a grid coordinate +template +static inline scalar_t grid_sampler_compute_source_index( + scalar_t coord, + int64_t size, + GridSamplerPadding padding_mode, + bool align_corners) { + coord = grid_sampler_unnormalize(coord, size, align_corners); + coord = compute_coordinates(coord, size, padding_mode, align_corners); + return coord; +} + +// grid_sampler_compute_source_index_set_grad works similarly to +// grid_sampler_compute_source_index except that it also returns the +// `d output / d input` via pointer argument `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static inline scalar_t grid_sampler_compute_source_index_set_grad( + scalar_t coord, + int64_t size, + GridSamplerPadding padding_mode, + bool align_corners, + scalar_t *grad_in) { + scalar_t grad_clip, grad_refl; + coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_clip; + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl); + } else { + coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl); + } + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_refl * grad_clip; + } + return coord; +} + +static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) { + return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; +} + +template +static inline scalar_t get_value_bounded( + scalar_t* data, + scalar_t x, + scalar_t y, + int64_t W, + int64_t H, + int64_t sW, + int64_t sH, + GridSamplerPadding padding_mode, + bool align_corners) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int64_t ix = static_cast(x); + int64_t iy = static_cast(y); + + if (within_bounds_2d(iy, ix, H, W)) { + return data[iy * sH + ix * sW]; + } + return static_cast(0); +} + +template +static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w, + int64_t sH, int64_t sW, int64_t H, 
int64_t W, + scalar_t delta) { + if (within_bounds_2d(h, w, H, W)) { + data[h * sH + w * sW] += delta; + } +} + +template +static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w, + int64_t sD, int64_t sH, int64_t sW, + int64_t D, int64_t H, int64_t W, + scalar_t delta) { + if (within_bounds_3d(d, h, w, D, H, W)) { + data[d * sD + h * sH + w * sW] += delta; + } +} + +template +static inline void add_value_bounded( + scalar_t* data, + scalar_t x, + scalar_t y, + int64_t W, + int64_t H, + int64_t sW, + int64_t sH, + scalar_t delta, + GridSamplerPadding padding_mode, + bool align_corners) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int64_t ix = static_cast(x); + int64_t iy = static_cast(y); + + safe_add_2d(data, iy, ix, sH, sW, H, W, delta); +} + +// Calculate the differential of the cubic convolution, i.e. `d coeff / d x` +template +static inline void get_cubic_coefficients_grad( + scalar_t coeffs[4], + scalar_t t) { + + // Must be the same as forward calculation in + // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients + scalar_t A = -0.75; + + scalar_t x; + x = -1 - t; // 1 < x = |-1 - tx| < 2 + coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A; + x = -t; // x = |0 - tx| <= 1 + coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 1 - t; // x = |1 - tx| <= 1 + coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 2 - t; // 1 < x = |2 - tx| < 2 + coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A; +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/GridSamplerUtils.h b/voice_bridge/torch/include/ATen/native/GridSamplerUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..0b6f29de8c427368a6bb8d94de604d45fc1b71bc --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/GridSamplerUtils.h @@ -0,0 +1,109 @@ +#pragma once + +// See NOTE: [Tensor vs. TensorBase] +// https://github.com/pytorch/pytorch/pull/66979 +#include +#include +#include + +namespace at { namespace native { + +namespace detail { + +enum class GridSamplerInterpolation {Bilinear, Nearest, Bicubic}; +enum class GridSamplerPadding {Zeros, Border, Reflection}; + +} // namespace detail + +using detail::GridSamplerInterpolation; +using detail::GridSamplerPadding; + +namespace { + +// See NOTE [ grid_sampler Native Functions ]. 
+void check_grid_sampler_common( + const TensorBase& input, + const TensorBase& grid +) { + auto input_opt = input.options(); + auto grid_opt = grid.options(); + + TORCH_CHECK( + input.defined(), + "grid_sampler(): expected input to not be undefined"); + TORCH_CHECK( + grid.defined(), + "grid_sampler(): expected grid to not be undefined"); + TORCH_CHECK( + input_opt.device() == grid_opt.device(), + "grid_sampler(): expected input and grid to be on same device, but input " + "is on ", input_opt.device(), " and grid is on ", grid_opt.device()); + TORCH_CHECK( + input_opt.layout() == kStrided && grid_opt.layout() == kStrided, + "grid_sampler(): expected input and grid to have torch.strided layout, but " + "input has ", input_opt.layout(), " and grid has ", grid_opt.layout()); + TORCH_CHECK( + input.size(0) == grid.size(0), + "grid_sampler(): expected grid and input to have same batch size, but got " + "input with sizes ", input.sizes(), " and grid with sizes ", grid.sizes()); + TORCH_CHECK( + grid.size(-1) == input.dim() - 2, + "grid_sampler(): expected grid to have size ", input.dim() - 2, " in last " + "dimension, but got grid with sizes ", grid.sizes()); + + for (const auto i : c10::irange(2, input.dim())) { + TORCH_CHECK(input.size(i) > 0, + "grid_sampler(): expected input to have non-empty spatial dimensions, " + "but input has sizes ", input.sizes(), " with dimension ", i, " being " + "empty"); + } +} + +// See NOTE [ grid_sampler Native Functions ]. +void check_grid_sampler_2d( + const TensorBase& input, + const TensorBase& grid +) { + TORCH_CHECK( + input.dim() == 4 && input.dim() == grid.dim(), + "grid_sampler(): expected 4D input and grid with same number of " + "dimensions, but got input with sizes ", input.sizes(), + " and grid with sizes ", grid.sizes()); +} + +// See NOTE [ grid_sampler Native Functions ]. +void check_grid_sampler_3d( + const TensorBase& input, + const TensorBase& grid, + int64_t interpolation_mode +) { + TORCH_CHECK( + input.dim() == 5 && input.dim() == grid.dim(), + "grid_sampler(): expected 5D input and grid with same number of " + "dimensions, but got input with sizes ", input.sizes(), + " and grid with sizes ", grid.sizes()); + TORCH_CHECK( + !(input.dim() == 5 && + static_cast(interpolation_mode) == + GridSamplerInterpolation::Bicubic), + "grid_sampler(): bicubic interpolation only supports 4D input"); +} + +// See NOTE [ grid_sampler Native Functions ]. +// cudnn does not support inputs larger than 1024. 
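+// (The "1024" refers to the channel dimension: the predicate below requires
+// input.size(1) <= 1024 for the 4D NCHW input, on top of cudnn_is_acceptable
+// for both tensors and 32-bit index math for both input and grid; all of the
+// conditions must hold for the cuDNN path to be taken.)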
+bool cond_cudnn_grid_sampler( + const TensorBase& input, + const TensorBase& grid +) { + return ( + at::native::cudnn_is_acceptable(input) && + at::native::cudnn_is_acceptable(grid) && + at::native::canUse32BitIndexMath(input) && + at::native::canUse32BitIndexMath(grid) && + input.dim() == 4 && + input.size(1) <= 1024); +} + +} // anonymous namespace + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/Histogram.h b/voice_bridge/torch/include/ATen/native/Histogram.h new file mode 100644 index 0000000000000000000000000000000000000000..9df0aafafc18de4fe36c8c792a5fae106cc81325 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Histogram.h @@ -0,0 +1,16 @@ +#pragma once + +#include +#include + +#include + +namespace at { namespace native { + +using histogramdd_fn = void(*)(const Tensor&, const c10::optional&, bool, Tensor&, const TensorList&); +using histogramdd_linear_fn = void(*)(const Tensor&, const c10::optional&, bool, Tensor&, const TensorList&, bool); + +DECLARE_DISPATCH(histogramdd_fn, histogramdd_stub); +DECLARE_DISPATCH(histogramdd_linear_fn, histogramdd_linear_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/IndexKernel.h b/voice_bridge/torch/include/ATen/native/IndexKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..41b4efc5f441872489282a8cac028e513c0fcb9e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/IndexKernel.h @@ -0,0 +1,40 @@ +#pragma once +#include + +namespace at { +class Tensor; +class TensorBase; +struct TensorIterator; +struct TensorIteratorBase; +} + +namespace c10 { +class Scalar; +} + +namespace at { namespace native { + +using index_fn = void(*)(TensorIteratorBase &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides); +using index_fill_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source); +using index_copy_fn = void(*)(TensorIterator & iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride); +using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArrayRef indexed_strides, bool accumulate); +using put_fn = void(*)(TensorIterator & iter, const TensorBase& self, const bool accumulate); +using take_fn = void(*)(TensorIterator & iter, const TensorBase& input); +using flip_fn = void(*)(TensorIterator &, const bool); +using masked_fill_fn = void(*)(TensorIterator &, const Scalar& scalar); +using masked_select_fn = void(*)(TensorIterator &, int64_t orig_stride); +using masked_scatter_fn = void(*)(TensorIterator &, const TensorBase &); + +DECLARE_DISPATCH(index_fn, index_stub); +DECLARE_DISPATCH(index_fill_fn, index_fill_stub); +DECLARE_DISPATCH(index_copy_fn, index_copy_stub); +DECLARE_DISPATCH(index_put_fn, index_put_stub); +DECLARE_DISPATCH(put_fn, put_stub); +DECLARE_DISPATCH(take_fn, take_stub); +DECLARE_DISPATCH(flip_fn, flip_stub); +DECLARE_DISPATCH(masked_fill_fn, masked_fill_stub); +DECLARE_DISPATCH(masked_select_fn, masked_select_serial_stub); +DECLARE_DISPATCH(masked_select_fn, masked_select_stub); +DECLARE_DISPATCH(masked_scatter_fn, masked_scatter_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/IndexingUtils.h b/voice_bridge/torch/include/ATen/native/IndexingUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..500df7966d8e0e9d94c5c2b8dafabd77632fba59 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/IndexingUtils.h @@ -0,0 +1,154 @@ +#pragma once +#include +#include 
+#include <ATen/native/TensorIterator.h>
+#include <ATen/core/List.h>
+#include <c10/util/irange.h>
+
+namespace at { namespace native {
+
+[[noreturn]]
+static void invalid_mask(const Tensor & self, int64_t idx, const Tensor & mask, int64_t maskIdx) {
+  TORCH_CHECK_INDEX(false, "The shape of the mask ", mask.sizes(), " at index ", maskIdx,
+  " does not match the shape of the indexed tensor ", self.sizes(), " at index ", idx);
+}
+
+
+static C10_UNUSED std::vector<Tensor> expandTensors(const Tensor & self, IOptTensorListRef indices) {
+  // If indices come in as ByteTensor or BoolTensor (masks), expand them into the equivalent indexing by LongTensors
+  std::vector<Tensor> result;
+  for (const auto& index_opt : indices) {
+    if (!index_opt.has_value()) {
+      result.emplace_back();
+    } else {
+      const auto& index = *index_opt;
+      if (index.scalar_type() == kByte || index.scalar_type() == kBool) {
+        if (index.scalar_type() == kByte) {
+          TORCH_WARN("indexing with dtype torch.uint8 is now deprecated," \
+          " please use a dtype torch.bool instead.");
+        }
+        // The sizes of the ByteTensor mask or bool tensor must match the sizes of the
+        // corresponding dimensions in self
+        for (const auto j : c10::irange(index.dim())) {
+          int64_t srcIdx = result.size() + j;
+          if (index.size(j) != self.size(srcIdx)) {
+            invalid_mask(self, srcIdx, index, j);
+          }
+        }
+        // Replace with nonzeros
+        auto nonzero = index.nonzero();
+        for (const auto j : c10::irange(index.dim())) {
+          result.emplace_back(nonzero.select(1, j));
+        }
+      } else {
+        result.emplace_back(std::move(index));
+      }
+    }
+  }
+  return result;
+}
+
+static C10_UNUSED void checkIndexTensorTypes(IOptTensorListRef indices) {
+  for (const auto& tensor : indices) {
+    if (tensor.has_value() && tensor->defined()) {
+      auto scalarType = tensor->scalar_type();
+      if (scalarType != kLong && scalarType != kByte && scalarType != kBool) {
+        TORCH_CHECK_INDEX(false, "tensors used as indices must be long, byte or bool tensors");
+      }
+    }
+  }
+}
+
+inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<Tensor> list) {
+  torch::List<c10::optional<Tensor>> result;
+  result.reserve(list.size());
+  for (const Tensor& a : list) {
+    result.push_back(a);
+  }
+  return result;
+}
+
+inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<IValue> list) {
+  torch::List<c10::optional<Tensor>> result;
+  result.reserve(list.size());
+  for (const IValue& a : list) {
+    result.push_back(a.isTensor() ? c10::optional<Tensor>(a.toTensor()) : c10::optional<Tensor>());
+  }
+  return result;
+}
+
+static C10_UNUSED bool hasContiguousSubspace(TensorList tl) {
+  // true if all the non-null tensors are adjacent
+  auto isDefined = [](const Tensor & tensor){ return tensor.defined(); };
+  auto isNull = [](const Tensor & tensor){ return !tensor.defined(); };
+  auto start = std::find_if(tl.begin(), tl.end(), isDefined);
+  auto stop = std::find_if(tl.rbegin(), tl.rend(), isDefined);
+  auto it = std::find_if(start, stop.base(), isNull);
+  return it == stop.base();
+}
+
+
+// Transposes the tensor and indices together so that all the non-null indices
+// index the first k dimensions of the tensor. Returns the transposed tensor
+// and the reordered indices.
For example: +// transposeToFront(tensor, {nullptr, a, nullptr, b}) +// returns +// tensor.permute([1, 3, 0, 2]), {a, b, nullptr, nullptr} +static C10_UNUSED std::tuple> +transposeToFront(Tensor self, TensorList indices) { + std::vector dims; + std::vector transposedIndices; + dims.reserve(self.dim()); + for (const auto i : c10::irange(self.dim())) { + if (indices[i].defined()) { + dims.push_back(i); + transposedIndices.emplace_back(indices[i]); + } + } + for (const auto i : c10::irange(self.dim())) { + if (!indices[i].defined()) { + dims.push_back(i); + transposedIndices.emplace_back(); + } + } + return std::make_tuple(self.permute(dims), std::move(transposedIndices)); +} + +inline std::tuple, std::vector> +transposeToFrontAndInvPerm(Tensor self, TensorList indices) { + std::vector dims; + std::vector invPerm; + std::vector transposedIndices; + dims.reserve(self.dim()); + invPerm.resize(self.dim()); + for (const auto i : c10::irange(self.dim())) { + if (indices[i].defined()) { + dims.push_back(i); + transposedIndices.emplace_back(indices[i]); + } + } + for (const auto i : c10::irange(self.dim())) { + if (!indices[i].defined()) { + dims.push_back(i); + transposedIndices.emplace_back(); + } + } + for (const auto i : c10::irange(self.dim())) { + invPerm[dims[i]] = i; + } + return std::make_tuple(self.permute(dims), std::move(transposedIndices), std::move(invPerm)); +} + +struct AdvancedIndex { + AdvancedIndex(const Tensor& src, TensorList indices); + + Tensor src; + std::vector indices; + DimVector indexed_sizes; + DimVector indexed_strides; + int64_t dims_before; + int64_t dims_after; +}; + + +}} diff --git a/voice_bridge/torch/include/ATen/native/Lerp.h b/voice_bridge/torch/include/ATen/native/Lerp.h new file mode 100644 index 0000000000000000000000000000000000000000..f24032f5e38d894d33cebe830853b618b6779d66 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Lerp.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include + +namespace at { +namespace native { + +using lerp_fn_scalar = void (*)( + at::TensorIteratorBase& iter, + const Scalar& weight); + +using lerp_fn_tensor = void (*)( + at::TensorIteratorBase& iter); + +DECLARE_DISPATCH(lerp_fn_scalar, lerp_kernel_scalar_weight); +DECLARE_DISPATCH(lerp_fn_tensor, lerp_kernel_tensor_weight); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/LinearAlgebra.h b/voice_bridge/torch/include/ATen/native/LinearAlgebra.h new file mode 100644 index 0000000000000000000000000000000000000000..304fbb8e6847846ccbda19dd9f8b0ce7d1ce47e2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/LinearAlgebra.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include + +namespace c10 { +class Scalar; +} + +namespace at { +struct TensorIterator; +} + +namespace at { namespace native { + +using addr_fn = void (*)(TensorIterator &, const Scalar& beta, const Scalar& alpha); +DECLARE_DISPATCH(addr_fn, addr_stub); +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/LinearAlgebraUtils.h b/voice_bridge/torch/include/ATen/native/LinearAlgebraUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..351bc33f65900cbe3435c9cc5499db4d21d463de --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/LinearAlgebraUtils.h @@ -0,0 +1,625 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include 
+#include +#include +#include +#endif + +namespace at { namespace native { + +static inline c10::MaybeOwned expect_resolved_conj(const Tensor& tensor) { + if (tensor.is_conj()) { + return c10::MaybeOwned::owned(tensor.resolve_conj()); + } else { + return c10::MaybeOwned::borrowed(tensor); + } +} + +static inline DimVector batched_matrix_contiguous_strides( + const IntArrayRef sizes, + const bool f_contig = false) { + // f_contig chooses between the strides of a batch of Fortran (F-contiguous) + // and C-contiguous matrices + auto strides = c10::contiguous_strides(sizes); + auto dim = strides.size(); + + if (f_contig && dim >= 2) { + // Fix the strides of the last two dimensions, so that we return + // C-contiguous batches of F-contiguous matrices. + strides[dim - 1] = std::max(sizes[dim - 2], static_cast(1)); + strides[dim - 2] = 1; + } + return strides; +} + +/* + * Clones a Tensor so that the following conditions hold: + * If we think of a Tensor of having size (B, M, N), where B is any number + * of batch dimensions, then: + * - Each (M, N) matrix is in column major form + * - Let Tensor P have size (B, M, N) and Q have size (B, M', N'). + * Then when laid out in memory, the M by N matrix starting at + * P.data_ptr()[B * M * N] is of the same corresponding batch as the M' by N' + * matrix starting at Q.data_ptr()[B * M' * N']. + */ +static inline Tensor cloneBatchedColumnMajor(const Tensor& src) { + // If src is already in batched column major format, then + // this will be efficient (no reordering of the data will occur) + // because the first transpose will make the tensor contiguous, + // and cloning a contiguous tensor is fast. + auto result = src.mT().clone(at::MemoryFormat::Contiguous); + result.transpose_(-2, -1); + return result; +} + +/* + * contig chooses between C-contig (true) and F-contig (false) + */ +static inline c10::MaybeOwned borrow_else_clone(const bool cond, const Tensor& borrow, const Tensor& clone, const bool contig) { + return cond ? c10::MaybeOwned::borrowed(borrow) + : c10::MaybeOwned::owned(contig ? clone.clone(MemoryFormat::Contiguous) + : cloneBatchedColumnMajor(clone)); +} + +/* + * This method is designed to be a faster alternative to + * `cloneBatchedColumnMajor` with some additional features, + * namely: + * 1. It uses `copy` instead of `clone` which could be much faster. + * 2. `nrows` parameter used to create inputs with the number of rows larger + * than the original input, which is required for some LAPACK/MAGMA methods. + * 3. `desired_batch_size` is used to create copies with the batch size + * which is either the original batch size of the input, or its larger + * broadcasted shape. + */ +static inline Tensor copyBatchedColumnMajor(const Tensor& src, int64_t nrows = -1, + at::OptionalIntArrayRef desired_batch_sizes = c10::nullopt) { + nrows = (nrows == -1) ? src.size(-2) : nrows; + auto copy_sizes = desired_batch_sizes.has_value() + ? desired_batch_sizes.value().vec() + : IntArrayRef(src.sizes().data(), src.dim() - 2).vec(); + copy_sizes.insert(copy_sizes.end(), {nrows, src.size(-1)}); + const auto copy_strides = batched_matrix_contiguous_strides(copy_sizes, /*f-contig*/true); + auto copy = at::empty_strided(copy_sizes, copy_strides, src.options()); + copy.narrow(-2, 0, src.size(-2)).copy_(src); + return copy; +} + +/* + * Given batches of matrices with arbitrary batch dim, + * computes the number of batches. 
+ */ +static inline int64_t batchCount(const Tensor& batched_matrices) { + int64_t result = 1; + for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) { + result *= batched_matrices.size(i); + } + return result; +} + +// Computes the number of elements of a matrix in a batched matrix tensor +static inline int64_t matrixStride(const Tensor& batched_matrices) { + return batched_matrices.size(-1) * batched_matrices.size(-2); +} + +// Validates input shapes for operations on batches of square matrices (inverse, cholesky, symeig, eig) +static inline void checkIsMatrix(const Tensor& A, const char* const f_name, const char* const arg_name = "A") { + TORCH_CHECK(A.dim() >= 2, f_name, ": The input tensor ", arg_name, " must have at least 2 dimensions."); +} +static inline void squareCheckInputs(const Tensor& self, const char* const f_name, const char* const arg_name = "A") { + checkIsMatrix(self, f_name, arg_name); + TORCH_CHECK(self.size(-1) == self.size(-2), + f_name, + ": ", arg_name, " must be batches of square matrices, " + "but they are ", self.size(-2), " by ", self.size(-1), " matrices"); +} + +static inline void checkInputsSolver(const Tensor& A, + const Tensor& B, + const bool left, + const char* const f_name) { + squareCheckInputs(A, f_name, "A"); + checkIsMatrix(B, f_name, "B"); + TORCH_CHECK(left ? A.size(-2) == B.size(-2) : A.size(-1) == B.size(-1), + f_name, ": Incompatible shapes of A and B for the equation ", + left ? "AX = B" : "XA = B", + " (", A.size(-2), "x", A.size(-1), " and ", B.size(-2), "x", B.size(-1), ")"); +} + +static inline bool is_row_or_column_contiguous(const Tensor& t) { + // This could be made more general, similar to how it's checked in matmul, which would allow us to + // elide the copy with strides such as (6, 12, 1, 3) or (3, 1, 9), but this is quite tricky. + // We choose to be conservative for simplicity + return t.is_contiguous() || t.transpose(-2, -1).is_contiguous(); +} + +static inline TransposeType to_transpose_type(const bool contig, const bool conj) { + if (conj) { + if (contig) { TORCH_INTERNAL_ASSERT(false, "Invalid transpose type"); } + else { return TransposeType::ConjTranspose; } + } else { + if (contig) { return TransposeType::NoTranspose; } + else { return TransposeType::Transpose; } + } +} + + +// This function is designed to be used with linear algebra methods that minimize +// L(ax - b) = 0, where L is generally the identity map (`solve`, for example) +// or the L2 norm (`lstsq`). +// It is expected that `a` and `b` are contiguous tensors of column-major matrices +// (so that a.view({-1, a.size(-2), a.size(-1)}) succeeds, same for `b`), +// with the following additional properties: +// +// 1. a.dim() == b.dim() +// 2. a.shape[:-2] broadcasts over b.shape[:-2] +// 3. a.size(i) <= b.size(i) for i=0,..., a.dim() - 3 (only for batch dimensions) +// +// MAGMA/LAPACK modify tensor `a` in-place, and the main goal of this method +// is to be memory efficient, which means that if there exists an index i such that +// a.shape[i] < b.shape[i], 0 <= i <= a.dim() - 3, +// then instead of materializing copies of `a` in the broadcasted shape, we keep +// a buffer copy of `a` along with flags that check whether specific batch dimension +// indices for `a` were already accessed. If they were, we copy the data from the buffer +// into `a`. The number of copies does not exceed +// prod(max(a.shape[:-2], b.shape[:-2]) - a.shape[:-2] + 1) +// and this value is attained by tensors with non-empty batch dimensions. +// A standalone sketch of this buffering strategy appears just below.
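// Editorial sketch (not part of the original header): the copy-on-reuse
// strategy described above, reduced to plain std:: types. All names here are
// hypothetical stand-ins; `stride` plays the role of matrixStride(a), and the
// data vector stands in for the 3d view of `a`.
#include <algorithm>
#include <cstdint>
#include <vector>

struct BroadcastScratchSketch {
  std::vector<double> buffer;      // pristine copy of `a`'s data
  std::vector<bool> was_accessed;  // one flag per linear batch index of `a`
  int64_t stride;                  // elements per batch entry

  BroadcastScratchSketch(const std::vector<double>& a_data, int64_t a_batches)
      : buffer(a_data),
        was_accessed(a_batches, false),
        stride(a_batches > 0 ? static_cast<int64_t>(a_data.size()) / a_batches : 0) {}

  // Call before an in-place routine touches batch `idx` of `a`: the first
  // access uses `a` as-is, every later access restores it from the buffer,
  // so at most one extra copy of `a` is ever kept alive.
  void prepare(std::vector<double>& a_data, int64_t idx) {
    if (!was_accessed[idx]) {
      was_accessed[idx] = true;
    } else {
      std::copy(buffer.begin() + idx * stride,
                buffer.begin() + (idx + 1) * stride,
                a_data.begin() + idx * stride);
    }
  }
};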
+// +// func_t `f` is a callable that is being supplied with +// scalar_t* a_working_ptr, scalar_t* b_working_ptr, int64_t a_linear_batch_idx. +// a_working_ptr and b_working_ptr can directly be passed to LAPACK/MAGMA routines, +// and a_linear_batch_idx is an index in the 3d representation which corresponds to +// the memory a_working_ptr points to, in other words: +// a_working_ptr == a.view({-1, a.size(-2), a.size(-1)}.select(0, a_linear_batch_idx).data_ptr(); +// a_linear_batch_idx is useful to store metadata related to `a`, such as, for example, +// its rank or singular values (see linalg_lstsq). +template +void batch_iterator_with_broadcasting(const Tensor& a, const Tensor& b, const func_t& f) { + IntArrayRef a_batch_sizes(a.sizes().data(), a.dim() - 2); + IntArrayRef b_batch_sizes(b.sizes().data(), b.dim() - 2); + + auto a_linear_batch_idx = at::arange(batchCount(a)).view(a_batch_sizes); + auto b_linear_batch_idx = at::arange(batchCount(b)).view(b_batch_sizes); + + TensorIterator iter = TensorIteratorConfig() + .set_check_mem_overlap(false) + .check_all_same_dtype(false) + .resize_outputs(false) + .add_output(b_linear_batch_idx) + .add_input(a_linear_batch_idx) + .build(); + + auto m = a.size(-2); + auto n = a.size(-1); + auto a_3d = a.view({batchCount(a), m, n}); + auto b_3d = b.view({batchCount(b), b.size(-2), b.size(-1)}); + + auto a_broadcasts_over_b = (a_batch_sizes != b_batch_sizes); + Tensor a_buffer, a_was_accessed, a_buffer_3d; + std::function check_if_copy_needed_for_a + = [](int64_t /*a_curr_linear_batch_idx*/){}; + if (a_broadcasts_over_b) { + a_buffer = at::empty_strided(a.sizes(), a.strides(), a.options()) + .copy_(a); + a_was_accessed = at::zeros(batchCount(a), at::kBool); + a_buffer_3d = a_buffer.view({batchCount(a), m, n}); + check_if_copy_needed_for_a = [&](int64_t a_curr_linear_batch_idx) { + auto* a_was_accessed_flag = a_was_accessed + .select(0, a_curr_linear_batch_idx) + .data_ptr(); + if (!(*a_was_accessed_flag)) { + *a_was_accessed_flag = true; + } + else { + a_3d.select(0, a_curr_linear_batch_idx) + .copy_(a_buffer_3d.select(0, a_curr_linear_batch_idx)); + } + }; + } + + auto loop = [&](char** data, const int64_t* strides, int64_t nelems) { + auto* b_batch_idx_ptr = data[0]; + auto* a_batch_idx_ptr = data[1]; + + for (const auto elem C10_UNUSED : c10::irange(nelems)) { + auto b_curr_linear_batch_idx = *reinterpret_cast(b_batch_idx_ptr); + auto a_curr_linear_batch_idx = *reinterpret_cast(a_batch_idx_ptr); + + check_if_copy_needed_for_a(a_curr_linear_batch_idx); + + auto* a_working_ptr = a_3d.select(0, a_curr_linear_batch_idx) + .data_ptr(); + auto* b_working_ptr = b_3d.select(0, b_curr_linear_batch_idx) + .data_ptr(); + f(a_working_ptr, b_working_ptr, a_curr_linear_batch_idx); + + b_batch_idx_ptr += strides[0]; + a_batch_idx_ptr += strides[1]; + } + }; + iter.serial_for_each(loop, {0, batchCount(b)}); +} + +// Returns the epsilon value for floating types except half +static inline double _get_epsilon(const ScalarType& sc_type) { + switch (sc_type) { + case at::ScalarType::Float: + return static_cast(std::numeric_limits::epsilon()); + case at::ScalarType::Double: + return std::numeric_limits::epsilon(); + default: + AT_ERROR("This function doesn't handle types other than float and double"); + } +} + +// Validates input shapes and devices +// for linear solve methods (solve, cholesky_solve, lu_solve, triangular_solve) +static inline void linearSolveCheckInputs(const Tensor& self, const Tensor& A, const char* name) { + TORCH_CHECK(self.device() == A.device(), + 
"Expected b and A to be on the same device, but found b on ", + self.device(), " and A on ", A.device(), " instead."); + + TORCH_CHECK(self.scalar_type() == A.scalar_type(), + "Expected b and A to have the same dtype, but found b of type ", + self.scalar_type(), " and A of type ", A.scalar_type(), " instead."); + + TORCH_CHECK(A.size(-1) == A.size(-2), + "A must be batches of square matrices, " + "but they are ", A.size(-2), " by ", A.size(-1), " matrices"); + + TORCH_CHECK(A.size(-1) == self.size(-2), + "Incompatible matrix sizes for ", name, ": each A " + "matrix is ", A.size(-1), " by ", A.size(-1), + " but each b matrix is ", self.size(-2), " by ", self.size(-1)); +} + +static inline void checkFloatingOrComplex(const Tensor& t, const char* const f_name, const bool allow_low_precision_dtypes=true) { + auto dtype = t.scalar_type(); + TORCH_CHECK((at::isFloatingType(dtype) || at::isComplexType(dtype)), + f_name, ": Expected a floating point or complex tensor as input. Got ", dtype); + if (!allow_low_precision_dtypes) { + TORCH_CHECK(dtype == kFloat || dtype == kDouble || dtype == kComplexFloat || dtype == kComplexDouble, + f_name, ": Low precision dtypes not supported. Got ", dtype); + } +} + + +// Checks if all the Tensors in a TensorList are of the same dimensions +static inline void checkAllSameDim(TensorList tensors, int64_t dim) { + for (auto &t : tensors) { + TORCH_CHECK(t.dim() == dim, "Tensor dimension is ", t.dim(), ", expected ", dim, " instead."); + } +} + +static inline std::tuple, std::vector> _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2) { + // broadcast the batch dimensions of arg1 and arg2. + IntArrayRef arg1_batch_sizes(arg1.sizes().data(), arg1.ndimension() - 2); + IntArrayRef arg2_batch_sizes(arg2.sizes().data(), arg2.ndimension() - 2); + std::vector expand_batch_portion = infer_size(arg1_batch_sizes, arg2_batch_sizes); + + std::vector arg1_expand_size({expand_batch_portion}); + arg1_expand_size.insert(arg1_expand_size.end(), { arg1.size(-2), arg1.size(-1) }); + + std::vector arg2_expand_size({expand_batch_portion}); + arg2_expand_size.insert(arg2_expand_size.end(), { arg2.size(-2), arg2.size(-1) }); + return std::make_tuple(std::move(arg1_expand_size), std::move(arg2_expand_size)); +} + +static inline std::tuple _linalg_broadcast_batch_dims(const Tensor& arg1, const Tensor& arg2, const char* name) { + // If there's no name we assume we don't want to check the errors + if (name != nullptr) { + linearSolveCheckInputs(arg1, arg2, name); + } + + std::vector arg1_expand_size, arg2_expand_size; + std::tie(arg1_expand_size, arg2_expand_size) = at::native::_linalg_broadcast_batch_dims(arg1, arg2); + + auto arg1_broadcasted = arg1_expand_size == arg1.sizes() ? arg1 : arg1.expand(arg1_expand_size); + auto arg2_broadcasted = arg2_expand_size == arg2.sizes() ? arg2 : arg2.expand(arg2_expand_size); + return std::make_tuple(arg1_broadcasted, arg2_broadcasted); +} + +static inline std::vector broadcast_batch_size(const Tensor& t1, const Tensor& t2, int64_t n_batch_dims) { + IntArrayRef t1_batch_sizes(t1.sizes().data(), n_batch_dims); + IntArrayRef t2_batch_sizes(t2.sizes().data(), n_batch_dims); + auto broadcasted_batch_sizes = infer_size(t1_batch_sizes, t2_batch_sizes); + return broadcasted_batch_sizes; +} + +// Return a permutation with the given axes moved to the end. 
+static inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) { + const std::vector a = axes.vec(); + const int64_t ndim = self.ndimension(); + std::vector perm; + + for (const auto i : c10::irange(ndim)) { + auto it = std::find(a.begin(), a.end(), i); + if (it == a.end()) { + perm.push_back(i); + } + } + for (auto i : a) { + perm.push_back(i); + } + + TORCH_CHECK((int64_t)perm.size() == ndim, + "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim); + + return self.permute(perm); +} + +// parse the "mode" param in linalg_qr: return a tuple of bools (compute_q, reduced) +static inline std::tuple _parse_qr_mode(c10::string_view mode) { + bool compute_q; + bool reduced; + if (mode == "reduced") { + compute_q = true; + reduced = true; + } else if (mode == "complete") { + compute_q = true; + reduced = false; + } else if (mode == "r") { + compute_q = false; + reduced = true; // this is actually irrelevant in this mode + } else { + TORCH_CHECK(false, "qr received unrecognized mode '", mode, + "' but expected one of 'reduced' (default), 'r', or 'complete'"); + } + return std::make_tuple(compute_q, reduced); +} + +// Function to compute sizes, strides and the extra columns for the Q matrix in the QR Decomposition +static inline std::tuple _compute_geometry_for_Q( + const Tensor& input, + bool reduced) { + int64_t m = input.size(-2), n = input.size(-1); + int64_t n_columns_q; + + // We need to compute the required size of Q based on the `reduced` option + DimVector q_sizes(input.sizes()); + if (!reduced && m > n) { + q_sizes[input.dim() - 1] = m; + n_columns_q = m; + } else { + q_sizes[input.dim() - 1] = n; + n_columns_q = std::min(m, n); + } + auto q_strides = batched_matrix_contiguous_strides(q_sizes, /*f-contig*/true); + return std::make_tuple(q_sizes, q_strides, n_columns_q); +} + +static inline bool svd_uses_cusolver(const Tensor& A) { + // if cusolver is available, it is used unconditionally + return A.is_cuda() + && at::globalContext().hasCuSOLVER() + && at::globalContext().linalgPreferredBackend() != at::LinalgBackend::Magma; +} + + +// Function used instead of .to so that the original strides are retained +// .to doesn't retain strides and make the output tensor contiguous +static inline Tensor same_stride_to(const Tensor& original_tensor, const at::TensorOptions& options) { + auto strided_to = at::empty_strided(original_tensor.sizes(), + original_tensor.strides(), + options); + strided_to.copy_(original_tensor); + return strided_to; +} + +// Creates a dimension permutation array that can be given to `at::permute()`, which will shift +// the two specified dimensions to the end of a tensor, without changing the order of +// the other dimensions. `dim1` will be placed at the very end, and `dim0` will be +// placed just to the left of it. +// +// For instance, given a 4-D tensor, dimensions 1 and 3 can be shifted to the end by +// calling `create_dim_backshift_permutation(1, 3, 4)`. The resulting vector will +// be `vec(0, 2, 1, 3)`. 
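// Worked usage of the helper below (an editorial sketch; apply_perm_sketch is
// a hypothetical stand-in for applying at::permute to a plain size vector):
#include <cstdint>
#include <vector>

static std::vector<int64_t> apply_perm_sketch(const std::vector<int64_t>& sizes,
                                              const std::vector<int64_t>& perm) {
  std::vector<int64_t> out(perm.size());
  for (size_t i = 0; i < perm.size(); i++) {
    out[i] = sizes[perm[i]];  // position i takes the size of source dim perm[i]
  }
  return out;
}
// With sizes {7, 8, 9, 10} and create_dim_backshift_permutation(1, 3, 4)
// returning {0, 2, 1, 3}, the permuted sizes are {7, 9, 8, 10}: dim 3 ends up
// last, with dim 1 just to its left.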
+static inline std::vector<int64_t> create_dim_backshift_permutation(int64_t dim0, int64_t dim1, int64_t ndim) { + TORCH_CHECK( + (dim0 != dim1) && (dim0 < ndim) && (dim0 >= 0) && (dim1 < ndim) && (dim1 >= 0), + "duplicate or invalid dimensions"); + std::vector<int64_t> permutation(ndim); + int64_t cur_permuted_dim = 0; + for (const auto dim_ind : c10::irange(ndim)) { + if ((dim_ind != dim0) && (dim_ind != dim1)) { + permutation[cur_permuted_dim++] = dim_ind; + } + } + permutation[cur_permuted_dim++] = dim0; + permutation[cur_permuted_dim] = dim1; + return permutation; +} + +// Creates a dimension permutation array that can be given to `at::permute()`, which +// will reverse a given permutation. +// The reverse permutation array is created by swapping the indices and their +// associated values from the given permutation array. +static inline std::vector<int64_t> create_reverse_permutation(std::vector<int64_t> permutation) { + int64_t ndim = permutation.size(); + std::vector<int64_t> reverse_permutation(ndim); + for (const auto dim_ind : c10::irange(ndim)) { + reverse_permutation[permutation[dim_ind]] = dim_ind; + } + return reverse_permutation; +} + +// Compute R-work array size for MAGMA/LAPACK cgesdd/zgesdd +// See https://github.com/Reference-LAPACK/lapack/blob/122506cd8b6ce050a200920c3d4c0b153b150fd8/SRC/cgesdd.f#L186 +static inline int64_t computeLRWorkDim(const char jobz, int64_t m, int64_t n) { + auto mn = std::min(m, n); + auto mx = std::max(m, n); + if (jobz == 'N') { +#ifdef __APPLE__ + // According to `vecLib.framework/Headers/clapack.h` Accelerate.framework is based on LAPACK 3.2.1 + return 7 * mn; +#else + // This setting is valid for LAPACK 3.6+ + return 5 * mn; +#endif + } + if (mx > 10 * mn) { + return 5 * mn * mn + 5 * mn; + } + return std::max(5 * mn * mn + 5 * mn, 2 * mx * mn + 2 * mn * mn + mn); +} + +// This function checks whether the uplo argument input is valid +// Allowed strings are "u", "U", "l", "L" +static inline void checkUplo(const c10::string_view uplo) { + // To use std::toupper safely with plain chars (or signed chars), the argument should first be converted to unsigned char + char uplo_uppercase = static_cast<char>(std::toupper(static_cast<unsigned char>(uplo[0]))); + TORCH_CHECK(uplo.size() == 1 && (uplo_uppercase == 'U' || uplo_uppercase == 'L'), + "Expected UPLO argument to be 'L' or 'U', but got ", uplo); +} + +static inline void checkSameDevice(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") { + TORCH_CHECK( + result.device() == input.device(), + fn_name, + ": Expected ", result_name, " and input tensors to be on the same device, but got ", + result_name, " on ", result.device(), " and input on ", input.device()); +} + +// Check the dtype of result and input tensors (for _out variants). +// Most linear algebra functions have the same dtype for input and output +// (either floating or complex type input), so we can check whether input's dtype can be cast to result's dtype. +// According to https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch +// c10::canCast is used for checking the "safe copy" dtype requirements.
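// Simplified editorial sketch of that "safe copy" rule (c10::canCast also
// handles bool and low-precision cases; this hypothetical version only orders
// the three coarse categories: integral < floating < complex):
#include <stdexcept>

enum class DtypeKindSketch { Integral = 0, Floating = 1, Complex = 2 };

static bool can_cast_sketch(DtypeKindSketch from, DtypeKindSketch to) {
  // casting never drops a category: float -> complex is fine,
  // complex -> float is not
  return static_cast<int>(from) <= static_cast<int>(to);
}

static void check_linalg_compatible_dtype_sketch(DtypeKindSketch input,
                                                 DtypeKindSketch result) {
  if (!can_cast_sketch(input, result)) {
    throw std::invalid_argument(
        "result dtype is not safely castable from the input dtype");
  }
}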
+static inline void checkLinalgCompatibleDtype(const std::string& fn_name, Tensor result, Tensor input, const std::string& result_name = "result") { + bool can_cast = c10::canCast(input.scalar_type(), result.scalar_type()); + TORCH_CHECK( + can_cast, + fn_name, + ": Expected ", result_name, " to be safely castable from ", input.scalar_type(), " dtype, but got ", + result_name, " with dtype ", result.scalar_type()); +} + +// Alternatively, we can check whether the specific expected output type (result_type) can be safely casted to out tensor dtype (out_type) +static inline void checkLinalgCompatibleDtype(const std::string& fn_name, ScalarType out_type, ScalarType result_type, const std::string& out_name = "result") { + bool can_cast = c10::canCast(result_type, out_type); + TORCH_CHECK( + can_cast, + fn_name, + ": Expected ", out_name, " to be safely castable from ", result_type, " dtype, but got ", + out_name, " with dtype ", out_type); +} + +static inline void checkNotComplexTolerance(const Tensor& tol, const c10::string_view f_name, const c10::string_view tol_name) { + TORCH_CHECK(!at::isComplexType(tol.scalar_type()), + f_name, ": ", tol_name, " tensor of complex type is not supported. Got ", tol.scalar_type()); +} + +/* + Two types of 'other' tensors are supported when solving + a system of linear equations matmul(input, x) = other: + * 1-dimensional (1D) tensor or batch of 1D tensors (vector case) + * 2-dimensional (2D) tensor or batch of 2D tensors (matrix case). + The original torch.solve supported only the matrix case, while NumPy works for both cases. + For the batched input we need to be able to distinguish them. + Let input.shape = (batch_dimensions, m, n), then 'other' is of vector type if other.shape == (batch_dimensions, m). + This rule is compatible with NumPy, see https://github.com/numpy/numpy/blob/v1.20.0/numpy/linalg/linalg.py#L384-L389 +*/ +static inline bool linalg_solve_is_vector_rhs(const Tensor& input, const Tensor& other) { + auto expected_batched_rhs_shape = IntArrayRef(input.sizes().data(), input.dim() - 1); // input.shape[:-1] + bool vector_case = other.dim() == 1 || (input.dim() - 1 == other.dim() && other.sizes().equals(expected_batched_rhs_shape)); + return vector_case; +} + +/* + Computes linear indices for a tensor with original_shape to access its elements like it was a materialized broadcast tensor. +*/ +static inline Tensor get_linear_indices(int64_t numel, IntArrayRef original_shape, IntArrayRef broadcast_shape) { + TensorOptions options = at::TensorOptions().dtype(at::kLong).device(at::kCPU); + return at::arange(numel, options).view(original_shape).broadcast_to(broadcast_shape).contiguous(); +} + +class BroadcastLinearIndices { + private: + Tensor linear_indices_; + bool is_broadcasting_; + + public: + BroadcastLinearIndices( + int64_t numel, + IntArrayRef original_shape, + IntArrayRef broadcast_shape) { + // The assumption is that the broadcast_shape is a materialized broadcast + // shape of the original_shape. We need to compute the linear indices + // compatible with the original_shape to access the elements in the original + // tensor corresponding to the broadcast tensor. + is_broadcasting_ = !original_shape.equals(broadcast_shape); + if (is_broadcasting_) { + linear_indices_ = + get_linear_indices(numel, original_shape, broadcast_shape); + } + } + int64_t operator()(int64_t broadcast_linear_index) { + return is_broadcasting_ + ? 
linear_indices_.data_ptr()[broadcast_linear_index] + : broadcast_linear_index; + } +}; + +static inline bool is_blas_compatible_column_major_order(const Tensor& input) { + IntArrayRef input_strides = input.strides(); + IntArrayRef input_sizes = input.sizes(); + auto ndim = input.dim(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2); + if (ndim > 3) { + return input.transpose(-2, -1).is_contiguous(); + } + auto leading_dimension = input_strides[ndim - 1]; + auto rows = input_sizes[ndim - 2]; + bool batch_stride_compatible = true; + if (ndim == 3) { + auto cols = input_sizes[ndim - 1]; + batch_stride_compatible = + input_strides[ndim - 3] >= leading_dimension * cols; + } + return (input_strides[ndim - 2] == 1) && + (leading_dimension >= std::max(1, rows)) && + batch_stride_compatible; +} + +static inline bool is_blas_compatible_row_major_order(const Tensor& input) { + IntArrayRef input_strides = input.strides(); + IntArrayRef input_sizes = input.sizes(); + auto ndim = input.dim(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(ndim >= 2); + if (ndim > 3) { + return input.is_contiguous(); + } + auto leading_dimension = input_strides[ndim - 2]; + auto cols = input_sizes[ndim - 1]; + bool batch_stride_compatible = true; + if (ndim == 3) { + auto rows = input_sizes[ndim - 2]; + batch_stride_compatible = + input_strides[ndim - 3] >= leading_dimension * rows; + } + return (input_strides[ndim - 1] == 1) && + (leading_dimension >= std::max(1, cols)) && + batch_stride_compatible; +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/LossMulti.h b/voice_bridge/torch/include/ATen/native/LossMulti.h new file mode 100644 index 0000000000000000000000000000000000000000..54736bcc123b2539a1212f87fd0d354560c8405d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/LossMulti.h @@ -0,0 +1,72 @@ +#include +#include +#include + +#pragma once + +namespace at { namespace native { +namespace { + static C10_UNUSED void multilabel_margin_loss_shape_check( + int64_t& nframe, + int64_t& dim, + const int64_t& ndims, + TensorArg& target_arg, + const Tensor& input, + const Tensor& target) { + bool valid_inputs = (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0; + TORCH_CHECK( + valid_inputs, + "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ", + input.sizes()); + + if (ndims <= 1) { + nframe = 1; + dim = ndims == 0 ? 1 : input.size(0); + TORCH_CHECK( + valid_inputs && target.dim() <= 1 && target.numel() == dim, + "inconsistent size ", + target.sizes(), + " for ", + target_arg); + } else { + nframe = input.size(0); + dim = input.size(1); + TORCH_CHECK( + valid_inputs && target.dim() == 2 && target.size(0) == nframe && + target.size(1) == dim, + "inconsistent size ", + target.sizes(), + " for ", + target_arg); + } + } + + static C10_UNUSED void multi_margin_loss_shape_check( + int64_t& nframe, + int64_t& dim, + const int64_t& ndims, + TensorArg& target_arg, + const Tensor& input, + const Tensor& target) { + bool valid_inputs = (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0; + if (ndims <= 1) { + nframe = 1; + dim = ndims == 0 ? 
1 : input.size(0); + } else { + nframe = input.size(0); + dim = input.size(1); + } + + TORCH_CHECK( + valid_inputs, + "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ", + input.sizes()); + TORCH_CHECK( + valid_inputs && target.dim() <= 1 && target.numel() == nframe, + "inconsistent target size, got: ", + target.sizes()); + } + + +} // anonymous namespace +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/Math.h b/voice_bridge/torch/include/ATen/native/Math.h new file mode 100644 index 0000000000000000000000000000000000000000..1fba036f35472c40bfffa6ad11a7fdc6033f1a14 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Math.h @@ -0,0 +1,3876 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion") +#endif + +/* The next function is taken from https://github.com/antelopeusersgroup/antelope_contrib/blob/master/lib/location/libgenloc/erfinv.c. +Below is the copyright. +Output was modified to be inf or -inf when input is 1 or -1. */ + + +/* + Copyright (c) 2014 Indiana University + All rights reserved. + + Written by Prof. Gary L. Pavlis, Dept. of Geol. Sci., + Indiana University, Bloomington, IN + + This software is licensed under the New BSD license: + + Redistribution and use in source and binary forms, + with or without modification, are permitted provided + that the following conditions are met: + + Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + + Redistributions in binary form must reproduce the + above copyright notice, this list of conditions and + the following disclaimer in the documentation and/or + other materials provided with the distribution. + + Neither the name of Indiana University nor + the names of its contributors may be used to endorse + or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED + WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +namespace { +/* + * This function is derived from the implementation of the i0e function in the + * Cephes Math Library. See note [3-Clause BSD License for the Cephes Math + * Library]. + * + * Computes an approximation of the exponentially scaled zeroth order modified + * Bessel function of the first kind. The approximation is actually two + * (sub)approximations, both using a Chebyshev polynomial expansion. One + * approximates the function over [0, 8], and the other over (8, infinity). 
This + * function takes the absolute value of all inputs to convert them into the + * domain of the approximation. + */ +jiterator_also_stringify_as(jiterator_code( + template + JITERATOR_HOST_DEVICE T chbevl(T x, const T array[], const int len) { + T b0, b1, b2; + + b0 = array[0]; + b1 = 0; + + for (int i = 1; i < len; ++i) { + b2 = b1; + b1 = b0; + b0 = x * b1 - b2 + array[i]; + } + + return T{0.5} * (b0 - b2); + } + + template + JITERATOR_HOST_DEVICE T calc_i0e(T _x) { + T x = fabs(_x); + + if (x <= T{8.0}) { + static const T coefficients[] = { + -4.41534164647933937950E-18, 3.33079451882223809783E-17, + -2.43127984654795469359E-16, 1.71539128555513303061E-15, + -1.16853328779934516808E-14, 7.67618549860493561688E-14, + -4.85644678311192946090E-13, 2.95505266312963983461E-12, + -1.72682629144155570723E-11, 9.67580903537323691224E-11, + -5.18979560163526290666E-10, 2.65982372468238665035E-9, + -1.30002500998624804212E-8, 6.04699502254191894932E-8, + -2.67079385394061173391E-7, 1.11738753912010371815E-6, + -4.41673835845875056359E-6, 1.64484480707288970893E-5, + -5.75419501008210370398E-5, 1.88502885095841655729E-4, + -5.76375574538582365885E-4, 1.63947561694133579842E-3, + -4.32430999505057594430E-3, 1.05464603945949983183E-2, + -2.37374148058994688156E-2, 4.93052842396707084878E-2, + -9.49010970480476444210E-2, 1.71620901522208775349E-1, + -3.04682672343198398683E-1, 6.76795274409476084995E-1}; + + T y = (x / T{2.0}) - T{2.0}; + return chbevl(y, coefficients, int{30}); + } + + // x > 8 + static const T coefficients[] = { + -7.23318048787475395456E-18, -4.83050448594418207126E-18, + 4.46562142029675999901E-17, 3.46122286769746109310E-17, + -2.82762398051658348494E-16, -3.42548561967721913462E-16, + 1.77256013305652638360E-15, 3.81168066935262242075E-15, + -9.55484669882830764870E-15, -4.15056934728722208663E-14, + 1.54008621752140982691E-14, 3.85277838274214270114E-13, + 7.18012445138366623367E-13, -1.79417853150680611778E-12, + -1.32158118404477131188E-11, -3.14991652796324136454E-11, + 1.18891471078464383424E-11, 4.94060238822496958910E-10, + 3.39623202570838634515E-9, 2.26666899049817806459E-8, + 2.04891858946906374183E-7, 2.89137052083475648297E-6, + 6.88975834691682398426E-5, 3.36911647825569408990E-3, + 8.04490411014108831608E-1}; + + return chbevl(T{32.0} / x - T{2.0}, coefficients, int{25}) / sqrt(x); + }), + i0e_string); // i0e_string +} + +#define CENTRAL_RANGE 0.7 + +template +static inline typename std::enable_if::value, T>::type +calc_erfinv(T y) { +/* Function to calculate inverse error function. Rational approximation +is used to generate an initial approximation, which is then improved to +full accuracy by two steps of Newton's method. Code is a direct +translation of the erfinv m file in matlab version 2.0. +Author: Gary L. 
Pavlis, Indiana University +Date: February 1996 +*/ + T x, z, num, dem; /*working variables */ + /* coefficients in rational expansion */ + T a[4] = { T(0.886226899), T(-1.645349621), T(0.914624893), T(-0.140543331) }; + T b[4] = { T(-2.118377725), T(1.442710462), T(-0.329097515), T(0.012229801) }; + T c[4] = { T(-1.970840454), T(-1.624906493), T(3.429567803), T(1.641345311) }; + T d[2] = { T(3.543889200), T(1.637067800) }; + T y_abs = std::abs(y); + if(y_abs > 1.0) return std::numeric_limits::quiet_NaN(); +#ifdef _WIN32 + // error C2039: '_copysign': is not a member of 'std' + if(y_abs == 1.0) return copysign(std::numeric_limits::infinity(), y); +#else + if(y_abs == 1.0) return std::copysign(std::numeric_limits::infinity(), y); +#endif + if(y_abs <= static_cast(CENTRAL_RANGE)) { + z = y * y; + num = (((a[3]*z + a[2])*z + a[1])*z + a[0]); + dem = ((((b[3]*z + b[2])*z + b[1])*z +b[0]) * z + static_cast(1.0)); + x = y * num / dem; + } + else{ + z = std::sqrt(-std::log((static_cast(1.0)-y_abs)/static_cast(2.0))); + num = ((c[3]*z + c[2])*z + c[1]) * z + c[0]; + dem = (d[1]*z + d[0])*z + static_cast(1.0); +#ifdef _WIN32 + // error C2039: '_copysign': is not a member of 'std' + x = copysign(num, y) / dem; +#else + x = std::copysign(num, y) / dem; +#endif + } + /* Two steps of Newton-Raphson correction */ + x = x - (std::erf(x) - y) / ((static_cast(2.0)/static_cast(std::sqrt(c10::pi)))*std::exp(-x*x)); + x = x - (std::erf(x) - y) / ((static_cast(2.0)/static_cast(std::sqrt(c10::pi)))*std::exp(-x*x)); + + return(x); +} + +#undef CENTRAL_RANGE + +/* + * Note [3-Clause BSD License for the Cephes Math Library] + * Code derived from implementations in the Cephes Math Library should mention its derivation and reference + * this note (ex. 'This function is derived from the implementation of X in the Cephes Math Library. See note + * [3-Clause BSD License for the Cephes Math Library]. The license is: + * Copyright (c) 2018, Steven Moshier + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Steven Moshier BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* + * This function is derived from the implementation of the zeta function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + */ +template +C10_HOST_DEVICE static inline scalar_t zeta(scalar_t x, scalar_t q) __ubsan_ignore_float_divide_by_zero__ { + using acc_t = at::acc_type; + const acc_t MACHEP = acc_t{1.11022302462515654042E-16}; + constexpr acc_t zero = acc_t{0.0}; + constexpr acc_t half = acc_t{0.5}; + constexpr acc_t one = acc_t{1.0}; + static const acc_t A[] = { + 12.0, + -720.0, + 30240.0, + -1209600.0, + 47900160.0, + -1.8924375803183791606e9, /*1.307674368e12/691*/ + 7.47242496e10, + -2.950130727918164224e12, /*1.067062284288e16/3617*/ + 1.1646782814350067249e14, /*5.109094217170944e18/43867*/ + -4.5979787224074726105e15, /*8.028576626982912e20/174611*/ + 1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/ + -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/ + }; + + int i = 0; + acc_t a, b, k, s, t, w; + if (x == one) { + return std::numeric_limits::infinity(); + } + + if (x < one) { + return std::numeric_limits::quiet_NaN(); + } + + if (q <= zero) { + if (q == ::floor(q)) { + return std::numeric_limits::infinity(); + } + if (x != ::floor(x)) { + return std::numeric_limits::quiet_NaN(); + } + } + + s = ::pow(q, -x); + a = q; + i = 0; + b = zero; + while ((i < 9) || (a <= acc_t{9.0})) { + i += 1; + a += one; + b = ::pow(a, -x); + s += b; + if ((-MACHEP * s < b) && (b < MACHEP * s)) { + return static_cast(s); + } + }; + + w = a; + s += b * w / (x - one); + s -= half * b; + a = one; + k = zero; + for (int i = 0; i < 12; i++) { + a *= x + k; + b /= w; + t = a * b / A[i]; + s = s + t; + t = ::fabs(t / s); + if (t < MACHEP) { + return static_cast(s); + } + k += one; + a *= x + k; + b /= w; + k += one; + } + return static_cast(s); +} + +/* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + * + * Evaluates polynomial of degree N: + * + * 2 N + * y = C + C x + C x +...+ C x + * 0 1 2 N + * + * Coefficients are stored in reverse order: + * + * coef[0] = C , ..., coef[N] = C . + * N 0 + */ +template +C10_HOST_DEVICE static inline T polevl(const T x, const T A[], size_t len) { + T result = 0; + for (size_t i = 0; i <= len; i++) { + result = result * x + A[i]; + } + return result; +} + +static inline double trigamma(double x) __ubsan_ignore_float_divide_by_zero__ { + double sign = +1; + double result = 0; + if (x < 0.5) { + sign = -1; + const double sin_pi_x = sin(c10::pi * x); + result -= (c10::pi * c10::pi) / (sin_pi_x * sin_pi_x); + x = 1 - x; + } + for (int i = 0; i < 6; ++i) { + result += 1 / (x * x); + x += 1; + } + const double ixx = 1 / (x*x); + result += (1 + 1 / (2*x) + ixx * (1./6 - ixx * (1./30 - ixx * (1./42)))) / x; + return sign * result; +} + +static inline float trigamma(float x) __ubsan_ignore_float_divide_by_zero__ { + float sign = +1; + float result = 0; + if (x < 0.5f) { + sign = -1; + const float sin_pi_x = sinf(c10::pi * x); + result -= (c10::pi * c10::pi) / (sin_pi_x * sin_pi_x); + x = 1 - x; + } + for (int i = 0; i < 6; ++i) { + result += 1 / (x * x); + x += 1; + } + const float ixx = 1 / (x*x); + result += (1 + 1 / (2*x) + ixx * (1.f/6 - ixx * (1.f/30 - ixx * (1.f/42)))) / x; + return sign * result; +} + +/* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. 
+ * See note [3-Clause BSD License for the Cephes Math Library]. + */ +static inline double calc_digamma(double x) { + // [C++ Standard Reference: Gamma Function] https://en.cppreference.com/w/cpp/numeric/math/tgamma + static double PSI_10 = 2.25175258906672110764; + if (x == 0) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is ±0, ±∞ is returned + return std::copysign(INFINITY, -x); + } + + bool x_is_integer = x == trunc(x); + if (x < 0) { + if (x_is_integer) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is a negative integer, NaN is returned + return std::numeric_limits::quiet_NaN(); + } + // Extracts the fractional part of x as r, since tan(pi * r) is more numerically + // accurate than tan(pi * x). While these operations are mathematically equivalent + // since both x and r are in radians and tan() has a periodicity of pi, in practice + // the computation of pi * x is a source of error (when |x| > 1). + double q, r; + r = std::modf(x, &q); + return calc_digamma(1 - x) - c10::pi / tan(c10::pi * r); + } + + // Push x to be >= 10 + double result = 0; + while (x < 10) { + result -= 1 / x; + x += 1; + } + if (x == 10) { + return result + PSI_10; + } + + // Compute asymptotic digamma + static const double A[] = { + 8.33333333333333333333E-2, + -2.10927960927960927961E-2, + 7.57575757575757575758E-3, + -4.16666666666666666667E-3, + 3.96825396825396825397E-3, + -8.33333333333333333333E-3, + 8.33333333333333333333E-2, + }; + + double y = 0; + if (x < 1.0e17) { + double z = 1.0 / (x * x); + y = z * polevl(z, A, 6); + } + return result + log(x) - (0.5 / x) - y; +} + +/* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + */ +static inline float calc_digamma(float x) { + // See [C++ Standard Reference: Gamma Function] + static float PSI_10 = 2.25175258906672110764f; + if (x == 0) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is ±0, ±∞ is returned + return std::copysign(INFINITY, -x); + } + + bool x_is_integer = x == truncf(x); + if (x < 0) { + if (x_is_integer) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is a negative integer, NaN is returned + return std::numeric_limits::quiet_NaN(); + } + // Extracts the fractional part of x as r, since tan(pi * r) is more numerically + // accurate than tan(pi * x). While these operations are mathematically equivalent + // since both x and r are in radians and tan() has a periodicity of pi, in practice + // the computation of pi * x is a source of error (when |x| > 1). 
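    // Illustrative worked example (editorial, assuming x = -6.8; not in the
    // original source): std::modf splits x into q = -6 and r = -0.8, and the
    // reflection below evaluates
    //   digamma(-6.8) = digamma(1 - (-6.8)) - pi / tan(pi * (-0.8)).
    // Since tan has period pi, tan(pi * r) equals tan(pi * x) exactly, but
    // the product pi * r carries far less rounding error than pi * x when
    // |x| is large.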
+ double q, r; + r = std::modf(x, &q); + float pi_over_tan_pi_x = (float)(c10::pi / tan(c10::pi * r)); + return calc_digamma(1 - x) - pi_over_tan_pi_x; + } + + // Push x to be >= 10 + float result = 0; + while (x < 10) { + result -= 1 / x; + x += 1; + } + if (x == 10) { + return result + PSI_10; + } + + // Compute asymptotic digamma + static const float A[] = { + 8.33333333333333333333E-2f, + -2.10927960927960927961E-2f, + 7.57575757575757575758E-3f, + -4.16666666666666666667E-3f, + 3.96825396825396825397E-3f, + -8.33333333333333333333E-3f, + 8.33333333333333333333E-2f, + }; + + float y = 0; + if (x < 1.0e17f) { + float z = 1 / (x * x); + y = z * polevl(z, A, 6); + } + return result + logf(x) - (0.5f / x) - y; +} + +template +static inline C10_HOST_DEVICE scalar_t calc_polygamma(scalar_t x, int n) { + // already blocked if n <= 1 + const auto one = scalar_t{1}; + return ((n % 2) ? one : -one) * + ::exp(::lgamma(static_cast(n) + one)) * + zeta(static_cast(n + 1), x); +} + +// regularized lower incomplete gamma +// the regularized lower, upper incomplete gamma, as well as their +// helper functions follow SciPy's implementation + +/* References + * [igam1] "The Digital Library of Mathematical Functions", dlmf.nist.gov + * [igam2] Maddock et. al., "Incomplete Gamma Functions", + * https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html + */ + +/* + * This implementation of the regularized incomplete gamma functions and + * their helper functions are derived from the implementation of SciPy's + * gammainc, Cephes's igam and igamc, and Boost's Lanczos approximations. + * See NOTICE for the licenses. + */ +template +static scalar_t ratevl(scalar_t x, const scalar_t num[], int64_t M, + const scalar_t denom[], int64_t N) { + // evaluating rational function, i.e., the ratio of two polynomials + // the coefficients for numerator are given by `num` while coeffs for + // denumerator are given by `denom` + + int64_t i, dir; + scalar_t y, num_ans, denom_ans; + scalar_t absx = std::fabs(x); + const scalar_t *p; + + if (absx > 1) { + /* Evaluate as a polynomial in 1/x. */ + dir = -1; + p = num + M; + y = 1 / x; + } + else { + dir = 1; + p = num; + y = x; + } + + /* Evaluate the numerator */ + num_ans = *p; + p += dir; + for (i = 1; i <= M; i++) { + num_ans = num_ans * y + *p; + p += dir; + } + /* Evaluate the denominator */ + if (absx > 1) { + p = denom + N; + } + else { + p = denom; + } + + denom_ans = *p; + p += dir; + for (i = 1; i <= N; i++) { + denom_ans = denom_ans * y + *p; + p += dir; + } + if (absx > 1) { + i = N - M; + return std::pow(x, i) * num_ans / denom_ans; + } + else { + return num_ans / denom_ans; + } +} + +// SciPy's lanczos implementation is taken from Boost +/* (C) Copyright John Maddock 2006. + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. See + * https://www.boost.org/LICENSE_1_0.txt or see NOTICE. 
+ */ +template +static scalar_t lanczos_sum_expg_scaled(scalar_t x) { + // lanczos approximation + static const scalar_t lanczos_sum_expg_scaled_num[13] = { + 0.006061842346248906525783753964555936883222, + 0.5098416655656676188125178644804694509993, + 19.51992788247617482847860966235652136208, + 449.9445569063168119446858607650988409623, + 6955.999602515376140356310115515198987526, + 75999.29304014542649875303443598909137092, + 601859.6171681098786670226533699352302507, + 3481712.15498064590882071018964774556468, + 14605578.08768506808414169982791359218571, + 43338889.32467613834773723740590533316085, + 86363131.28813859145546927288977868422342, + 103794043.1163445451906271053616070238554, + 56906521.91347156388090791033559122686859 + }; + static const scalar_t lanczos_sum_expg_scaled_denom[13] = { + 1., + 66., + 1925., + 32670., + 357423., + 2637558., + 13339535., + 45995730., + 105258076., + 150917976., + 120543840., + 39916800., + 0. + }; + return ratevl(x, lanczos_sum_expg_scaled_num, + sizeof(lanczos_sum_expg_scaled_num) / sizeof(lanczos_sum_expg_scaled_num[0]) - 1, + lanczos_sum_expg_scaled_denom, + sizeof(lanczos_sum_expg_scaled_denom) / sizeof(lanczos_sum_expg_scaled_denom[0]) - 1); +} + +template +static scalar_t _igam_helper_fac(scalar_t a, scalar_t x) { + // compute x^a * exp(-a) / gamma(a) + // corrected from (15) and (16) in [igam2] by replacing exp(x - a) with + // exp(a - x). + + scalar_t ax, fac, res, num, numfac; + static scalar_t MAXLOG = std::is_same::value ? + 7.09782712893383996843E2 : 88.72283905206835; + static scalar_t EXP1 = 2.718281828459045; + static scalar_t lanczos_g = 6.024680040776729583740234375; + + if (std::fabs(a - x) > 0.4 * std::fabs(a)) { + ax = a * std::log(x) - x - std::lgamma(a); + if (ax < -MAXLOG) { + return 0.0; + } + return std::exp(ax); + } + + fac = a + lanczos_g - 0.5; + res = std::sqrt(fac / EXP1) / lanczos_sum_expg_scaled(a); + + if ((a < 200) && (x < 200)) { + res *= std::exp(a - x) * std::pow(x / fac, a); + } + else { + num = x - a - lanczos_g + 0.5; + numfac = num / fac; + res *= std::exp(a * (std::log1p(numfac) - numfac) + x * (0.5 - lanczos_g) / fac); + } + return res; +} + +template +static scalar_t _igam_helper_series(scalar_t a, scalar_t x) { + // Compute igam using DLMF 8.11.4. [igam1] + static scalar_t MACHEP = std::is_same::value ? + 1.11022302462515654042E-16 : 5.9604644775390625E-8; + static int MAXITER = 2000; + + int i; + scalar_t ans, ax, c, r; + + ax = _igam_helper_fac(a, x); + if (ax == 0.0) { + return 0.0; + } + + /* power series */ + r = a; + c = 1.0; + ans = 1.0; + + for (i = 0; i < MAXITER; i++) { + r += 1.0; + c *= x / r; + ans += c; + if (c <= MACHEP * ans) { + break; + } + } + return (ans * ax / a); +} + +template +static scalar_t _igamc_helper_series(scalar_t a, scalar_t x) { + // Compute igamc using DLMF 8.7.3 [igam1]. This is related to the series in + // _igam_helper_series but extra care is taken to avoid cancellation. + + int n; + scalar_t fac = 1; + scalar_t sum = 0; + scalar_t term, logx; + static scalar_t MAXITER = 2000; + static scalar_t MACHEP = std::is_same::value ? 
+ 1.11022302462515654042E-16 : 5.9604644775390625E-8; + + for (n = 1; n < MAXITER; n++) { + fac *= -x / n; + term = fac / (a + n); + sum += term; + if (std::fabs(term) <= MACHEP * std::fabs(sum)) { + break; + } + } + + logx = std::log(x); + term = -std::expm1(a * logx - std::lgamma(1+a)); + return term - std::exp(a * logx - std::lgamma(a)) * sum; +} + +template +static scalar_t _igam_helper_asymptotic_series(scalar_t a, scalar_t x, bool igam) { + // Compute igam/igamc using DLMF 8.12.3/8.12.4 [igam1] + static const scalar_t d[25][25] = + {{-3.3333333333333333e-1, 8.3333333333333333e-2, -1.4814814814814815e-2, + 1.1574074074074074e-3, 3.527336860670194e-4, -1.7875514403292181e-4, + 3.9192631785224378e-5, -2.1854485106799922e-6, -1.85406221071516e-6, + 8.296711340953086e-7, -1.7665952736826079e-7, 6.7078535434014986e-9, + 1.0261809784240308e-8, -4.3820360184533532e-9, 9.1476995822367902e-10, + -2.551419399494625e-11, -5.8307721325504251e-11, 2.4361948020667416e-11, + -5.0276692801141756e-12, 1.1004392031956135e-13, 3.3717632624009854e-13, + -1.3923887224181621e-13, 2.8534893807047443e-14, -5.1391118342425726e-16, + -1.9752288294349443e-15}, + {-1.8518518518518519e-3, -3.4722222222222222e-3, 2.6455026455026455e-3, + -9.9022633744855967e-4, 2.0576131687242798e-4, -4.0187757201646091e-7, + -1.8098550334489978e-5, 7.6491609160811101e-6, -1.6120900894563446e-6, + 4.6471278028074343e-9, 1.378633446915721e-7, -5.752545603517705e-8, + 1.1951628599778147e-8, -1.7543241719747648e-11, -1.0091543710600413e-9, + 4.1627929918425826e-10, -8.5639070264929806e-11, 6.0672151016047586e-14, + 7.1624989648114854e-12, -2.9331866437714371e-12, 5.9966963656836887e-13, + -2.1671786527323314e-16, -4.9783399723692616e-14, 2.0291628823713425e-14, + -4.13125571381061e-15}, + {4.1335978835978836e-3, -2.6813271604938272e-3, 7.7160493827160494e-4, + 2.0093878600823045e-6, -1.0736653226365161e-4, 5.2923448829120125e-5, + -1.2760635188618728e-5, 3.4235787340961381e-8, 1.3721957309062933e-6, + -6.298992138380055e-7, 1.4280614206064242e-7, -2.0477098421990866e-10, + -1.4092529910867521e-8, 6.228974084922022e-9, -1.3670488396617113e-9, + 9.4283561590146782e-13, 1.2872252400089318e-10, -5.5645956134363321e-11, + 1.1975935546366981e-11, -4.1689782251838635e-15, -1.0940640427884594e-12, + 4.6622399463901357e-13, -9.905105763906906e-14, 1.8931876768373515e-17, + 8.8592218725911273e-15}, + {6.4943415637860082e-4, 2.2947209362139918e-4, -4.6918949439525571e-4, + 2.6772063206283885e-4, -7.5618016718839764e-5, -2.3965051138672967e-7, + 1.1082654115347302e-5, -5.6749528269915966e-6, 1.4230900732435884e-6, + -2.7861080291528142e-11, -1.6958404091930277e-7, 8.0994649053880824e-8, + -1.9111168485973654e-8, 2.3928620439808118e-12, 2.0620131815488798e-9, + -9.4604966618551322e-10, 2.1541049775774908e-10, -1.388823336813903e-14, + -2.1894761681963939e-11, 9.7909989511716851e-12, -2.1782191880180962e-12, + 6.2088195734079014e-17, 2.126978363279737e-13, -9.3446887915174333e-14, + 2.0453671226782849e-14}, + {-8.618882909167117e-4, 7.8403922172006663e-4, -2.9907248030319018e-4, + -1.4638452578843418e-6, 6.6414982154651222e-5, -3.9683650471794347e-5, + 1.1375726970678419e-5, 2.5074972262375328e-10, -1.6954149536558306e-6, + 8.9075075322053097e-7, -2.2929348340008049e-7, 2.956794137544049e-11, + 2.8865829742708784e-8, -1.4189739437803219e-8, 3.4463580499464897e-9, + -2.3024517174528067e-13, -3.9409233028046405e-10, 1.8602338968504502e-10, + -4.356323005056618e-11, 1.2786001016296231e-15, 4.6792750266579195e-12, + -2.1492464706134829e-12, 
4.9088156148096522e-13, -6.3385914848915603e-18, + -5.0453320690800944e-14}, + {-3.3679855336635815e-4, -6.9728137583658578e-5, 2.7727532449593921e-4, + -1.9932570516188848e-4, 6.7977804779372078e-5, 1.419062920643967e-7, + -1.3594048189768693e-5, 8.0184702563342015e-6, -2.2914811765080952e-6, + -3.252473551298454e-10, 3.4652846491085265e-7, -1.8447187191171343e-7, + 4.8240967037894181e-8, -1.7989466721743515e-14, -6.3061945000135234e-9, + 3.1624176287745679e-9, -7.8409242536974293e-10, 5.1926791652540407e-15, + 9.3589442423067836e-11, -4.5134262161632782e-11, 1.0799129993116827e-11, + -3.661886712685252e-17, -1.210902069055155e-12, 5.6807435849905643e-13, + -1.3249659916340829e-13}, + {5.3130793646399222e-4, -5.9216643735369388e-4, 2.7087820967180448e-4, + 7.9023532326603279e-7, -8.1539693675619688e-5, 5.6116827531062497e-5, + -1.8329116582843376e-5, -3.0796134506033048e-9, 3.4651553688036091e-6, + -2.0291327396058604e-6, 5.7887928631490037e-7, 2.338630673826657e-13, + -8.8286007463304835e-8, 4.7435958880408128e-8, -1.2545415020710382e-8, + 8.6496488580102925e-14, 1.6846058979264063e-9, -8.5754928235775947e-10, + 2.1598224929232125e-10, -7.6132305204761539e-16, -2.6639822008536144e-11, + 1.3065700536611057e-11, -3.1799163902367977e-12, 4.7109761213674315e-18, + 3.6902800842763467e-13}, + {3.4436760689237767e-4, 5.1717909082605922e-5, -3.3493161081142236e-4, + 2.812695154763237e-4, -1.0976582244684731e-4, -1.2741009095484485e-7, + 2.7744451511563644e-5, -1.8263488805711333e-5, 5.7876949497350524e-6, + 4.9387589339362704e-10, -1.0595367014026043e-6, 6.1667143761104075e-7, + -1.7562973359060462e-7, -1.2974473287015439e-12, 2.695423606288966e-8, + -1.4578352908731271e-8, 3.887645959386175e-9, -3.8810022510194121e-17, + -5.3279941738772867e-10, 2.7437977643314845e-10, -6.9957960920705679e-11, + 2.5899863874868481e-17, 8.8566890996696381e-12, -4.403168815871311e-12, + 1.0865561947091654e-12}, + {-6.5262391859530942e-4, 8.3949872067208728e-4, -4.3829709854172101e-4, + -6.969091458420552e-7, 1.6644846642067548e-4, -1.2783517679769219e-4, + 4.6299532636913043e-5, 4.5579098679227077e-9, -1.0595271125805195e-5, + 6.7833429048651666e-6, -2.1075476666258804e-6, -1.7213731432817145e-11, + 3.7735877416110979e-7, -2.1867506700122867e-7, 6.2202288040189269e-8, + 6.5977038267330006e-16, -9.5903864974256858e-9, 5.2132144922808078e-9, + -1.3991589583935709e-9, 5.382058999060575e-16, 1.9484714275467745e-10, + -1.0127287556389682e-10, 2.6077347197254926e-11, -5.0904186999932993e-18, + -3.3721464474854592e-12}, + {-5.9676129019274625e-4, -7.2048954160200106e-5, 6.7823088376673284e-4, + -6.4014752602627585e-4, 2.7750107634328704e-4, 1.8197008380465151e-7, + -8.4795071170685032e-5, 6.105192082501531e-5, -2.1073920183404862e-5, + -8.8585890141255994e-10, 4.5284535953805377e-6, -2.8427815022504408e-6, + 8.7082341778646412e-7, 3.6886101871706965e-12, -1.5344695190702061e-7, + 8.862466778790695e-8, -2.5184812301826817e-8, -1.0225912098215092e-14, + 3.8969470758154777e-9, -2.1267304792235635e-9, 5.7370135528051385e-10, + -1.887749850169741e-19, -8.0931538694657866e-11, 4.2382723283449199e-11, + -1.1002224534207726e-11}, + {1.3324454494800656e-3, -1.9144384985654775e-3, 1.1089369134596637e-3, + 9.932404122642299e-7, -5.0874501293093199e-4, 4.2735056665392884e-4, + -1.6858853767910799e-4, -8.1301893922784998e-9, 4.5284402370562147e-5, + -3.127053674781734e-5, 1.044986828530338e-5, 4.8435226265680926e-11, + -2.1482565873456258e-6, 1.329369701097492e-6, -4.0295693092101029e-7, + -1.7567877666323291e-13, 
7.0145043163668257e-8, -4.040787734999483e-8, + 1.1474026743371963e-8, 3.9642746853563325e-18, -1.7804938269892714e-9, + 9.7480262548731646e-10, -2.6405338676507616e-10, 5.794875163403742e-18, + 3.7647749553543836e-11}, + {1.579727660730835e-3, 1.6251626278391582e-4, -2.0633421035543276e-3, + 2.1389686185689098e-3, -1.0108559391263003e-3, -3.9912705529919201e-7, + 3.6235025084764691e-4, -2.8143901463712154e-4, 1.0449513336495887e-4, + 2.1211418491830297e-9, -2.5779417251947842e-5, 1.7281818956040463e-5, + -5.6413773872904282e-6, -1.1024320105776174e-11, 1.1223224418895175e-6, + -6.8693396379526735e-7, 2.0653236975414887e-7, 4.6714772409838506e-14, + -3.5609886164949055e-8, 2.0470855345905963e-8, -5.8091738633283358e-9, + -1.332821287582869e-16, 9.0354604391335133e-10, -4.9598782517330834e-10, + 1.3481607129399749e-10}, + {-4.0725121195140166e-3, 6.4033628338080698e-3, -4.0410161081676618e-3, + -2.183732802866233e-6, 2.1740441801254639e-3, -1.9700440518418892e-3, + 8.3595469747962458e-4, 1.9445447567109655e-8, -2.5779387120421696e-4, + 1.9009987368139304e-4, -6.7696499937438965e-5, -1.4440629666426572e-10, + 1.5712512518742269e-5, -1.0304008744776893e-5, 3.304517767401387e-6, + 7.9829760242325709e-13, -6.4097794149313004e-7, 3.8894624761300056e-7, + -1.1618347644948869e-7, -2.816808630596451e-15, 1.9878012911297093e-8, + -1.1407719956357511e-8, 3.2355857064185555e-9, 4.1759468293455945e-20, + -5.0423112718105824e-10}, + {-5.9475779383993003e-3, -5.4016476789260452e-4, 8.7910413550767898e-3, + -9.8576315587856125e-3, 5.0134695031021538e-3, 1.2807521786221875e-6, + -2.0626019342754683e-3, 1.7109128573523058e-3, -6.7695312714133799e-4, + -6.9011545676562133e-9, 1.8855128143995902e-4, -1.3395215663491969e-4, + 4.6263183033528039e-5, 4.0034230613321351e-11, -1.0255652921494033e-5, + 6.612086372797651e-6, -2.0913022027253008e-6, -2.0951775649603837e-13, + 3.9756029041993247e-7, -2.3956211978815887e-7, 7.1182883382145864e-8, + 8.925574873053455e-16, -1.2101547235064676e-8, 6.9350618248334386e-9, + -1.9661464453856102e-9}, + {1.7402027787522711e-2, -2.9527880945699121e-2, 2.0045875571402799e-2, + 7.0289515966903407e-6, -1.2375421071343148e-2, 1.1976293444235254e-2, + -5.4156038466518525e-3, -6.3290893396418616e-8, 1.8855118129005065e-3, + -1.473473274825001e-3, 5.5515810097708387e-4, 5.2406834412550662e-10, + -1.4357913535784836e-4, 9.9181293224943297e-5, -3.3460834749478311e-5, + -3.5755837291098993e-12, 7.1560851960630076e-6, -4.5516802628155526e-6, + 1.4236576649271475e-6, 1.8803149082089664e-14, -2.6623403898929211e-7, + 1.5950642189595716e-7, -4.7187514673841102e-8, -6.5107872958755177e-17, + 7.9795091026746235e-9}, + {3.0249124160905891e-2, 2.4817436002649977e-3, -4.9939134373457022e-2, + 5.9915643009307869e-2, -3.2483207601623391e-2, -5.7212968652103441e-6, + 1.5085251778569354e-2, -1.3261324005088445e-2, 5.5515262632426148e-3, + 3.0263182257030016e-8, -1.7229548406756723e-3, 1.2893570099929637e-3, + -4.6845138348319876e-4, -1.830259937893045e-10, 1.1449739014822654e-4, + -7.7378565221244477e-5, 2.5625836246985201e-5, 1.0766165333192814e-12, + -5.3246809282422621e-6, 3.349634863064464e-6, -1.0381253128684018e-6, + -5.608909920621128e-15, 1.9150821930676591e-7, -1.1418365800203486e-7, + 3.3654425209171788e-8}, + {-9.9051020880159045e-2, 1.7954011706123486e-1, -1.2989606383463778e-1, + -3.1478872752284357e-5, 9.0510635276848131e-2, -9.2828824411184397e-2, + 4.4412112839877808e-2, 2.7779236316835888e-7, -1.7229543805449697e-2, + 1.4182925050891573e-2, -5.6214161633747336e-3, 
-2.39598509186381e-9, + 1.6029634366079908e-3, -1.1606784674435773e-3, 4.1001337768153873e-4, + 1.8365800754090661e-11, -9.5844256563655903e-5, 6.3643062337764708e-5, + -2.076250624489065e-5, -1.1806020912804483e-13, 4.2131808239120649e-6, + -2.6262241337012467e-6, 8.0770620494930662e-7, 6.0125912123632725e-16, + -1.4729737374018841e-7}, + {-1.9994542198219728e-1, -1.5056113040026424e-2, 3.6470239469348489e-1, + -4.6435192311733545e-1, 2.6640934719197893e-1, 3.4038266027147191e-5, + -1.3784338709329624e-1, 1.276467178337056e-1, -5.6213828755200985e-2, + -1.753150885483011e-7, 1.9235592956768113e-2, -1.5088821281095315e-2, + 5.7401854451350123e-3, 1.0622382710310225e-9, -1.5335082692563998e-3, + 1.0819320643228214e-3, -3.7372510193945659e-4, -6.6170909729031985e-12, + 8.4263617380909628e-5, -5.5150706827483479e-5, 1.7769536448348069e-5, + 3.8827923210205533e-14, -3.53513697488768e-6, 2.1865832130045269e-6, + -6.6812849447625594e-7}, + {7.2438608504029431e-1, -1.3918010932653375, 1.0654143352413968, + 1.876173868950258e-4, -8.2705501176152696e-1, 8.9352433347828414e-1, + -4.4971003995291339e-1, -1.6107401567546652e-6, 1.9235590165271091e-1, + -1.6597702160042609e-1, 6.8882222681814333e-2, 1.3910091724608687e-8, + -2.146911561508663e-2, 1.6228980898865892e-2, -5.9796016172584256e-3, + -1.1287469112826745e-10, 1.5167451119784857e-3, -1.0478634293553899e-3, + 3.5539072889126421e-4, 8.1704322111801517e-13, -7.7773013442452395e-5, + 5.0291413897007722e-5, -1.6035083867000518e-5, 1.2469354315487605e-14, + 3.1369106244517615e-6}, + {1.6668949727276811, 1.165462765994632e-1, -3.3288393225018906, + 4.4692325482864037, -2.6977693045875807, -2.600667859891061e-4, + 1.5389017615694539, -1.4937962361134612, 6.8881964633233148e-1, + 1.3077482004552385e-6, -2.5762963325596288e-1, 2.1097676102125449e-1, + -8.3714408359219882e-2, -7.7920428881354753e-9, 2.4267923064833599e-2, + -1.7813678334552311e-2, 6.3970330388900056e-3, 4.9430807090480523e-11, + -1.5554602758465635e-3, 1.0561196919903214e-3, -3.5277184460472902e-4, + 9.3002334645022459e-14, 7.5285855026557172e-5, -4.8186515569156351e-5, + 1.5227271505597605e-5}, + {-6.6188298861372935, 1.3397985455142589e+1, -1.0789350606845146e+1, + -1.4352254537875018e-3, 9.2333694596189809, -1.0456552819547769e+1, + 5.5105526029033471, 1.2024439690716742e-5, -2.5762961164755816, + 2.3207442745387179, -1.0045728797216284, -1.0207833290021914e-7, + 3.3975092171169466e-1, -2.6720517450757468e-1, 1.0235252851562706e-1, + 8.4329730484871625e-10, -2.7998284958442595e-2, 2.0066274144976813e-2, + -7.0554368915086242e-3, 1.9402238183698188e-12, 1.6562888105449611e-3, + -1.1082898580743683e-3, 3.654545161310169e-4, -5.1290032026971794e-11, + -7.6340103696869031e-5}, + {-1.7112706061976095e+1, -1.1208044642899116, 3.7131966511885444e+1, + -5.2298271025348962e+1, 3.3058589696624618e+1, 2.4791298976200222e-3, + -2.061089403411526e+1, 2.088672775145582e+1, -1.0045703956517752e+1, + -1.2238783449063012e-5, 4.0770134274221141, -3.473667358470195, + 1.4329352617312006, 7.1359914411879712e-8, -4.4797257159115612e-1, + 3.4112666080644461e-1, -1.2699786326594923e-1, -2.8953677269081528e-10, + 3.3125776278259863e-2, -2.3274087021036101e-2, 8.0399993503648882e-3, + -1.177805216235265e-9, -1.8321624891071668e-3, 1.2108282933588665e-3, + -3.9479941246822517e-4}, + {7.389033153567425e+1, -1.5680141270402273e+2, 1.322177542759164e+2, + 1.3692876877324546e-2, -1.2366496885920151e+2, 1.4620689391062729e+2, + -8.0365587724865346e+1, -1.1259851148881298e-4, 4.0770132196179938e+1, + 
-3.8210340013273034e+1, 1.719522294277362e+1, 9.3519707955168356e-7, + -6.2716159907747034, 5.1168999071852637, -2.0319658112299095, + -4.9507215582761543e-9, 5.9626397294332597e-1, -4.4220765337238094e-1, + 1.6079998700166273e-1, -2.4733786203223402e-8, -4.0307574759979762e-2, + 2.7849050747097869e-2, -9.4751858992054221e-3, 6.419922235909132e-6, + 2.1250180774699461e-3}, + {2.1216837098382522e+2, 1.3107863022633868e+1, -4.9698285932871748e+2, + 7.3121595266969204e+2, -4.8213821720890847e+2, -2.8817248692894889e-2, + 3.2616720302947102e+2, -3.4389340280087117e+2, 1.7195193870816232e+2, + 1.4038077378096158e-4, -7.52594195897599e+1, 6.651969984520934e+1, + -2.8447519748152462e+1, -7.613702615875391e-7, 9.5402237105304373, + -7.5175301113311376, 2.8943997568871961, -4.6612194999538201e-7, + -8.0615149598794088e-1, 5.8483006570631029e-1, -2.0845408972964956e-1, + 1.4765818959305817e-4, 5.1000433863753019e-2, -3.3066252141883665e-2, + 1.5109265210467774e-2}, + {-9.8959643098322368e+2, 2.1925555360905233e+3, -1.9283586782723356e+3, + -1.5925738122215253e-1, 1.9569985945919857e+3, -2.4072514765081556e+3, + 1.3756149959336496e+3, 1.2920735237496668e-3, -7.525941715948055e+2, + 7.3171668742208716e+2, -3.4137023466220065e+2, -9.9857390260608043e-6, + 1.3356313181291573e+2, -1.1276295161252794e+2, 4.6310396098204458e+1, + -7.9237387133614756e-6, -1.4510726927018646e+1, 1.1111771248100563e+1, + -4.1690817945270892, 3.1008219800117808e-3, 1.1220095449981468, + -7.6052379926149916e-1, 3.6262236505085254e-1, 2.216867741940747e-1, + 4.8683443692930507e-1}}; + + int k, n, sgn; + int maxpow = 0; + static scalar_t MACHEP = std::is_same::value ? + 1.11022302462515654042E-16 : 5.9604644775390625E-8; + scalar_t lambda = x / a; + scalar_t sigma = (x - a) / a; + scalar_t eta, res, ck, ckterm, term, absterm; + scalar_t absoldterm = INFINITY; + scalar_t etapow[25] = {1}; + scalar_t sum = 0; + scalar_t afac = 1; + + if (igam) { + sgn = -1; + } + else { + sgn = 1; + } + + if (lambda > 1) { + eta = std::sqrt(-2 * (std::log1p(sigma) - sigma)); + } + else if (lambda < 1) { + eta = -std::sqrt(-2 * (std::log1p(sigma) - sigma)); + } + else { + eta = 0; + } + res = 0.5 * std::erfc(sgn * eta * std::sqrt(a / 2)); + + for (k = 0; k < 25; k++) { + ck = d[k][0]; + for (n = 1; n < 25; n++) { + if (n > maxpow) { + etapow[n] = eta * etapow[n-1]; + maxpow += 1; + } + ckterm = d[k][n]*etapow[n]; + ck += ckterm; + if (std::fabs(ckterm) < MACHEP * std::fabs(ck)) { + break; + } + } + term = ck * afac; + absterm = std::fabs(term); + if (absterm > absoldterm) { + break; + } + sum += term; + if (absterm < MACHEP * std::fabs(sum)) { + break; + } + absoldterm = absterm; + afac /= a; + } + res += sgn * std::exp(-0.5 * a * eta * eta) * sum / std::sqrt(2 * c10::pi * a); + + return res; +} + +template +static scalar_t _igamc_helper_continued_fraction(scalar_t a, scalar_t x) { + // Compute igamc using DLMF 8.9.2. [igam1] + int i; + scalar_t ans, ax, c, yc, r, t, y, z; + scalar_t pk, pkm1, pkm2, qk, qkm1, qkm2; + int MAXITER = 2000; + static scalar_t MACHEP = std::is_same::value ? + 1.11022302462515654042E-16 : 5.9604644775390625E-8; + static scalar_t BIG = std::is_same::value ? + 4.503599627370496e15 : 16777216.; + static scalar_t BIGINV = std::is_same::value ? 
+      2.22044604925031308085e-16 : 5.9604644775390625E-8;
+
+  ax = _igam_helper_fac(a, x);
+  if (ax == 0.0) {
+    return 0.0;
+  }
+
+  /* continued fraction */
+  y = 1.0 - a;
+  z = x + y + 1.0;
+  c = 0.0;
+  pkm2 = 1.0;
+  qkm2 = x;
+  pkm1 = x + 1.0;
+  qkm1 = z * x;
+  ans = pkm1 / qkm1;
+
+  for (i = 0; i < MAXITER; i++) {
+    c += 1.0;
+    y += 1.0;
+    z += 2.0;
+    yc = y * c;
+    pk = pkm1 * z - pkm2 * yc;
+    qk = qkm1 * z - qkm2 * yc;
+    if (qk != 0) {
+      r = pk / qk;
+      t = std::fabs((ans - r) / r);
+      ans = r;
+    }
+    else {
+      t = 1.0;
+    }
+    pkm2 = pkm1;
+    pkm1 = pk;
+    qkm2 = qkm1;
+    qkm1 = qk;
+    if (std::fabs(pk) > BIG) {
+      pkm2 *= BIGINV;
+      pkm1 *= BIGINV;
+      qkm2 *= BIGINV;
+      qkm1 *= BIGINV;
+    }
+    if (t <= MACHEP) {
+      break;
+    }
+  }
+  return ans * ax;
+}
+
+template <typename scalar_t>
+static inline scalar_t calc_igammac(scalar_t a, scalar_t x) {
+  /* the calculation of the regularized upper incomplete gamma function
+   * is done differently based on the values of a and x:
+   * - if x and/or a is at the boundary of the defined region, then assign the
+   *   result at the boundary
+   * - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
+   *   Large Parameter (see DLMF 8.12.4 [igam1])
+   * - if x > 1.1 and x < a, using the subtraction from the regularized lower
+   *   incomplete gamma
+   * - otherwise, calculate the series from [igam2] eq (5)
+   */
+  scalar_t absxma_a;
+
+  static scalar_t SMALL = 20.0;
+  static scalar_t LARGE = 200.0;
+  static scalar_t SMALLRATIO = 0.3;
+  static scalar_t LARGERATIO = 4.5;
+
+  // note that in SciPy, a and x are non-negative, with exclusive 0s (i.e.,
+  // at most 1 of them can be 0), where igammac(0, x) = 0.0 iff x > 0.
+  if ((x < 0) || (a < 0)) {
+    // out of the defined region of the function
+    return std::numeric_limits<scalar_t>::quiet_NaN();
+  }
+  else if (a == 0) {
+    if (x > 0) {
+      return 0.0;
+    }
+    else {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+  }
+  else if (x == 0) {
+    return 1.0;
+  }
+  else if (std::isinf(a)) {
+    if (std::isinf(x)) {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+    return 1.0;
+  }
+  else if (std::isinf(x)) {
+    return 0.0;
+  }
+
+  absxma_a = std::fabs(x - a) / a;
+  if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
+    return _igam_helper_asymptotic_series(a, x, 0);
+  }
+  else if ((a > LARGE) && (absxma_a < LARGERATIO / std::sqrt(a))) {
+    return _igam_helper_asymptotic_series(a, x, 0);
+  }
+
+  if (x > 1.1) {
+    if (x < a) {
+      return 1.0 - _igam_helper_series(a, x);
+    }
+    else {
+      return _igamc_helper_continued_fraction(a, x);
+    }
+  }
+  else if (x <= 0.5) {
+    if (-0.4 / std::log(x) < a) {
+      return 1.0 - _igam_helper_series(a, x);
+    }
+    else {
+      return _igamc_helper_series(a, x);
+    }
+  }
+  else {
+    if (x * 1.1 < a) {
+      return 1.0 - _igam_helper_series(a, x);
+    }
+    else {
+      return _igamc_helper_series(a, x);
+    }
+  }
+}
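+
+// Editorial sketch (hypothetical helper, not part of the original port):
+// calc_igammac above and calc_igamma below are complementary, with
+// P(a, x) + Q(a, x) == 1, which is why several dispatch branches return
+// 1.0 minus the other function's series when that side converges faster.
+// As a concrete use of the upper function, the survival function of a
+// chi-square distribution follows directly from it:
+template <typename scalar_t>
+static inline scalar_t chi2_sf_sketch(scalar_t chi2, scalar_t dof) {
+  // P[X > chi2] for X ~ ChiSquare(dof) equals Q(dof / 2, chi2 / 2).
+  return calc_igammac(dof / scalar_t(2), chi2 / scalar_t(2));
+}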
+
+template <typename scalar_t>
+static inline scalar_t calc_igamma(scalar_t a, scalar_t x) {
+  /* the calculation of the regularized lower incomplete gamma function
+   * is done differently based on the values of a and x:
+   * - if x and/or a is at the boundary of the defined region, then assign the
+   *   result at the boundary
+   * - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
+   *   Large Parameter (see DLMF 8.12.3 [igam1])
+   * - if x > 1 and x > a, using the subtraction from the regularized upper
+   *   incomplete gamma
+   * - otherwise, calculate the series from [igam2] eq (4)
+   */
+  scalar_t absxma_a;
+  static scalar_t SMALL = 20.0;
+  static scalar_t LARGE = 200.0;
+  static scalar_t SMALLRATIO = 0.3;
+  static scalar_t LARGERATIO = 4.5;
+
+  // boundary values following SciPy
+  // note that in SciPy, a and x are non-negative, with exclusive 0s (i.e.,
+  // at most 1 of them can be 0), where igamma(0, x) = 1.0 iff x > 0.
+  if ((x < 0) || (a < 0)) {
+    // out of the defined region of the function
+    return std::numeric_limits<scalar_t>::quiet_NaN();
+  }
+  else if (a == 0) {
+    if (x > 0) {
+      return 1.0;
+    }
+    else {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+  }
+  else if (x == 0) {
+    return 0.0; // zero integration limit
+  }
+  else if (std::isinf(a)) {
+    if (std::isinf(x)) {
+      return std::numeric_limits<scalar_t>::quiet_NaN();
+    }
+    return 0.0;
+  }
+  else if (std::isinf(x)) {
+    return 1.0;
+  }
+
+  /* Asymptotic regime where a ~ x. See [igam2] */
+  absxma_a = std::fabs(x - a) / a;
+  if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
+    return _igam_helper_asymptotic_series(a, x, 1);
+  }
+  else if ((a > LARGE) && (absxma_a < LARGERATIO / std::sqrt(a))) {
+    return _igam_helper_asymptotic_series(a, x, 1);
+  }
+
+  if ((x > 1.0) && (x > a)) {
+    return 1.0 - calc_igammac(a, x);
+  }
+
+  return _igam_helper_series(a, x);
+}
+
+template <>
+C10_UNUSED c10::BFloat16 calc_igamma<c10::BFloat16>(c10::BFloat16 a, c10::BFloat16 x) {
+  return calc_igamma<float>(float(a), float(x));
+}
+
+template <>
+C10_UNUSED c10::Half calc_igamma<c10::Half>(c10::Half a, c10::Half x) {
+  return calc_igamma<float>(float(a), float(x));
+}
+
+template <>
+C10_UNUSED c10::BFloat16 calc_igammac<c10::BFloat16>(c10::BFloat16 a, c10::BFloat16 x) {
+  return calc_igammac<float>(float(a), float(x));
+}
+
+template <>
+C10_UNUSED c10::Half calc_igammac<c10::Half>(c10::Half a, c10::Half x) {
+  return calc_igammac<float>(float(a), float(x));
+}
+
+inline c10::BFloat16 calc_erfinv(c10::BFloat16 a) { return calc_erfinv(float(a)); }
+
+template <typename T>
+static T abs_impl(T v) {
+  return std::abs(v);
+}
+
+template <>
+C10_UNUSED uint8_t abs_impl(uint8_t v) {
+  return v;
+}
+
+template <typename T>
+static inline typename std::enable_if<std::is_integral<T>::value, T>::type
+calc_gcd(T a, T b) {
+  a = abs_impl(a);
+  b = abs_impl(b);
+  while (a != 0) {
+    T c = a;
+    a = b % a;
+    b = c;
+  }
+  return b;
+}
+
+/*
+ * This function is derived from the implementation of the chbevl function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Evaluates the series
+ *
+ *       len-1
+ *        - '
+ *  y  =   >   array[i] T (x/2)
+ *        -             i
+ *       i=0
+ *
+ * of Chebyshev polynomials Ti at argument x/2.
+ *
+ * Coefficients are stored in reverse order, i.e. the zero order term is last in the array. Note len is the number of
+ * coefficients, not the order.
+ *
+ * If coefficients are for the interval a to b, x must have been transformed to x -> 2(2x - b - a)/(b-a) before
+ * entering the routine. This maps x from (a, b) to (-1, 1), over which the Chebyshev polynomials are defined.
+ *
+ * If the coefficients are for the inverted interval, in which (a, b) is mapped to (1/b, 1/a), the transformation
+ * required is x -> 2(2ab/x - b - a)/(b-a). If b is infinity, this becomes x -> 4a/x - 1.
+ */
+template <typename T>
+static inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+chbevl(const T x, const T array[], size_t len) {
+  T b0, b1, b2;
+
+  b0 = array[0];
+  b1 = static_cast<T>(0.0);
+
+  for (size_t i = 1; i < len; ++i) {
+    b2 = b1;
+    b1 = b0;
+    b0 = x * b1 - b2 + array[i];
+  }
+
+  return (static_cast<T>(0.5) * (b0 - b2));
+}
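+
+// Editorial sketch (hypothetical helper, not part of the Cephes port): chbevl
+// is always fed a transformed argument as described above. For coefficients
+// fitted on [0, 8], the mapping reduces to y = x / 2 - 2, which is exactly how
+// calc_i0 and calc_i1 below call it:
+template <typename T>
+static inline T chbevl_on_0_8_sketch(T x, const T array[], size_t len) {
+  // Map x in (0, 8) onto the doubled Chebyshev domain before evaluating.
+  return chbevl(x / T{2.0} - T{2.0}, array, len);
+}
+
+/*
+ * This function is derived from the implementation of the i0 function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes an approximation of the zeroth order modified Bessel function of the first kind.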
+ * The approximation is actually two (sub)approximations, both using a Chebyshev polynomial expansion. + * One approximates the function over [0, 8], and the other over (8, infinity). This function takes the absolute value + * of all inputs to convert them into the domain of the approximation. + */ +template +static inline std::tuple chebyshev_coefficients_i0e_A() { + /* Chebyshev coefficients for exp(-x) I0(x) + * in the interval [0,8]. + * + * lim(x->0){ exp(-x) I0(x) } = 1. + */ + static const T coeff[] = { + -4.41534164647933937950E-18, 3.33079451882223809783E-17, + -2.43127984654795469359E-16, 1.71539128555513303061E-15, + -1.16853328779934516808E-14, 7.67618549860493561688E-14, + -4.85644678311192946090E-13, 2.95505266312963983461E-12, + -1.72682629144155570723E-11, 9.67580903537323691224E-11, + -5.18979560163526290666E-10, 2.65982372468238665035E-9, + -1.30002500998624804212E-8, 6.04699502254191894932E-8, + -2.67079385394061173391E-7, 1.11738753912010371815E-6, + -4.41673835845875056359E-6, 1.64484480707288970893E-5, + -5.75419501008210370398E-5, 1.88502885095841655729E-4, + -5.76375574538582365885E-4, 1.63947561694133579842E-3, + -4.32430999505057594430E-3, 1.05464603945949983183E-2, + -2.37374148058994688156E-2, 4.93052842396707084878E-2, + -9.49010970480476444210E-2, 1.71620901522208775349E-1, + -3.04682672343198398683E-1, 6.76795274409476084995E-1}; + return std::make_tuple(coeff, 30); +}; + +template +static inline std::tuple chebyshev_coefficients_i0e_B() { + /* Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + * in the inverted interval [8,infinity]. + * + * lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + */ + static const T coeff[] = { + -7.23318048787475395456E-18, -4.83050448594418207126E-18, + 4.46562142029675999901E-17, 3.46122286769746109310E-17, + -2.82762398051658348494E-16, -3.42548561967721913462E-16, + 1.77256013305652638360E-15, 3.81168066935262242075E-15, + -9.55484669882830764870E-15, -4.15056934728722208663E-14, + 1.54008621752140982691E-14, 3.85277838274214270114E-13, + 7.18012445138366623367E-13, -1.79417853150680611778E-12, + -1.32158118404477131188E-11, -3.14991652796324136454E-11, + 1.18891471078464383424E-11, 4.94060238822496958910E-10, + 3.39623202570838634515E-9, 2.26666899049817806459E-8, + 2.04891858946906374183E-7, 2.89137052083475648297E-6, + 6.88975834691682398426E-5, 3.36911647825569408990E-3, + 8.04490411014108831608E-1}; + + return std::make_tuple(coeff, 25); +}; + +template +static inline typename std::enable_if::value, std::tuple>::type +chebyshev_coefficients_i1e_A() { + /* Chebyshev coefficients for exp(-x) I1(x) + * in the interval [0,8]. + * + * lim(x->0){ exp(-x) I1(x) / x } = 1/2. 
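+ *
+ * (Editorial note: this limit is why calc_i1 and calc_i1e below multiply the
+ * [0, 8] Chebyshev sum by x.)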
+ */ + static const T coeff[] = { + 2.77791411276104639959E-18, -2.11142121435816608115E-17, + 1.55363195773620046921E-16, -1.10559694773538630805E-15, + 7.60068429473540693410E-15, -5.04218550472791168711E-14, + 3.22379336594557470981E-13, -1.98397439776494371520E-12, + 1.17361862988909016308E-11, -6.66348972350202774223E-11, + 3.62559028155211703701E-10, -1.88724975172282928790E-9, + 9.38153738649577178388E-9, -4.44505912879632808065E-8, + 2.00329475355213526229E-7, -8.56872026469545474066E-7, + 3.47025130813767847674E-6, -1.32731636560394358279E-5, + 4.78156510755005422638E-5, -1.61760815825896745588E-4, + 5.12285956168575772895E-4, -1.51357245063125314899E-3, + 4.15642294431288815669E-3, -1.05640848946261981558E-2, + 2.47264490306265168283E-2, -5.29459812080949914269E-2, + 1.02643658689847095384E-1, -1.76416518357834055153E-1, + 2.52587186443633654823E-1}; + return std::make_tuple(coeff, 29); +}; + +template +static inline typename std::enable_if::value, std::tuple>::type +chebyshev_coefficients_i1e_A() { + /* Chebyshev coefficients for exp(-x) I1(x) + * in the interval [0,8]. + * + * lim(x->0){ exp(-x) I1(x) / x } = 1/2. + */ + static const T coeff[] = { + 9.38153738649577178388E-9f, + -4.44505912879632808065E-8f, + 2.00329475355213526229E-7f, + -8.56872026469545474066E-7f, + 3.47025130813767847674E-6f, + -1.32731636560394358279E-5f, + 4.78156510755005422638E-5f, + -1.61760815825896745588E-4f, + 5.12285956168575772895E-4f, + -1.51357245063125314899E-3f, + 4.15642294431288815669E-3f, + -1.05640848946261981558E-2f, + 2.47264490306265168283E-2f, + -5.29459812080949914269E-2f, + 1.02643658689847095384E-1f, + -1.76416518357834055153E-1f, + 2.52587186443633654823E-1f}; + return std::make_tuple(coeff, 17); +}; + +template +static inline typename std::enable_if::value, std::tuple>::type +chebyshev_coefficients_i1e_B() { + /* Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + * in the inverted interval [8,infinity]. + * + * lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + */ + static const T coeff[] = { + 7.51729631084210481353E-18, 4.41434832307170791151E-18, + -4.65030536848935832153E-17, -3.20952592199342395980E-17, + 2.96262899764595013876E-16, 3.30820231092092828324E-16, + -1.88035477551078244854E-15, -3.81440307243700780478E-15, + 1.04202769841288027642E-14, 4.27244001671195135429E-14, + -2.10154184277266431302E-14, -4.08355111109219731823E-13, + -7.19855177624590851209E-13, 2.03562854414708950722E-12, + 1.41258074366137813316E-11, 3.25260358301548823856E-11, + -1.89749581235054123450E-11, -5.58974346219658380687E-10, + -3.83538038596423702205E-9, -2.63146884688951950684E-8, + -2.51223623787020892529E-7, -3.88256480887769039346E-6, + -1.10588938762623716291E-4, -9.76109749136146840777E-3, + 7.78576235018280120474E-1}; + + return std::make_tuple(coeff, 25); +}; + +template +static inline typename std::enable_if::value, std::tuple>::type +chebyshev_coefficients_i1e_B() { + /* Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + * in the inverted interval [8,infinity]. + * + * lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). 
+ */
+  static const T coeff[] = {
+      -3.83538038596423702205E-9f,
+      -2.63146884688951950684E-8f,
+      -2.51223623787020892529E-7f,
+      -3.88256480887769039346E-6f,
+      -1.10588938762623716291E-4f,
+      -9.76109749136146840777E-3f,
+      7.78576235018280120474E-1f};
+
+  return std::make_tuple(coeff, 7);
+};
+
+template <typename T>
+static inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+calc_i0(T _x) {
+  T x = std::abs(_x);
+
+  if (x <= T{8.0}) {
+    auto coeff_pair = chebyshev_coefficients_i0e_A<T>();
+    auto A = std::get<0>(coeff_pair);
+    auto len = std::get<1>(coeff_pair);
+    T y = (x / T{2.0}) - T{2.0};
+    return static_cast<T>(std::exp(x) * chbevl(y, A, len));
+  }
+  auto coeff_pair = chebyshev_coefficients_i0e_B<T>();
+  auto B = std::get<0>(coeff_pair);
+  auto len = std::get<1>(coeff_pair);
+  return std::exp(x) * chbevl(T{32.0} / x - T{2.0}, B, len) / std::sqrt(x);
+}
+
+// Upcast bfloat16 input to float for numerical accuracy purposes
+static inline c10::BFloat16 calc_i0(c10::BFloat16 a) { return calc_i0(static_cast<float>(a)); }
+
+/*
+ * This function is derived from the implementation of the i1 function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes an approximation of the first order modified Bessel function of the first kind.
+ * The approximation is actually two (sub)approximations, both using a Chebyshev polynomial expansion.
+ * One approximates the function over [0, 8], and the other over (8, infinity). This function takes the absolute value
+ * of all inputs to convert them into the domain of the approximation.
+ */
+template <typename T>
+static inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+calc_i1(T _x) {
+  T x = std::abs(_x);
+
+  if (x <= T{8.0}) {
+    auto coeff_pair = chebyshev_coefficients_i1e_A<T>();
+    auto A = std::get<0>(coeff_pair);
+    auto len = std::get<1>(coeff_pair);
+    T y = (x / T{2.0}) - T{2.0};
+    const T out = std::exp(x) * x * chbevl(y, A, len);
+    return (_x < T{0.0}) ? -out : out;
+  }
+  auto coeff_pair = chebyshev_coefficients_i1e_B<T>();
+  auto B = std::get<0>(coeff_pair);
+  auto len = std::get<1>(coeff_pair);
+  const T out = (std::exp(x) * chbevl(T{32.0} / x - T{2.0}, B, len)) / std::sqrt(x);
+  return (_x < T{0.0}) ? -out : out;
+}
+
+/*
+ * This function is derived from the implementation of the i1e function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes an approximation of the exponentially scaled first order modified Bessel function of the first kind.
+ * The approximation is actually two (sub)approximations, both using a Chebyshev polynomial expansion.
+ * One approximates the function over [0, 8], and the other over (8, infinity). This function takes the absolute value
+ * of all inputs to convert them into the domain of the approximation.
+ */
+template <typename T>
+static inline typename std::enable_if<std::is_floating_point<T>::value, T>::type
+calc_i1e(T _x) {
+  T x = std::abs(_x);
+
+  if (x <= T{8.0}) {
+    auto coeff_pair = chebyshev_coefficients_i1e_A<T>();
+    auto A = std::get<0>(coeff_pair);
+    auto len = std::get<1>(coeff_pair);
+    T y = (x / T{2.0}) - T{2.0};
+    const T out = chbevl(y, A, len) * x;
+    return (_x < T{0.0}) ? -out : out;
+  }
+  auto coeff_pair = chebyshev_coefficients_i1e_B<T>();
+  auto B = std::get<0>(coeff_pair);
+  auto len = std::get<1>(coeff_pair);
+  const auto out = chbevl(T{32.0} / x - T{2.0}, B, len) / std::sqrt(x);
+  return (_x < T{0.0}) ? -out : out;
+}
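+
+// Editorial note (hypothetical check, not part of the port): calc_i1e is the
+// exponentially scaled variant of calc_i1, related by i1(x) == exp(|x|) * i1e(x);
+// the scaled form is preferred at large |x| because it avoids overflowing exp.
+template <typename T>
+static inline T i1_from_i1e_sketch(T x) {
+  // Undo the exponential scaling; matches calc_i1(x) up to rounding as long
+  // as exp(|x|) itself stays finite.
+  return std::exp(std::abs(x)) * calc_i1e(x);
+}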
+
+/*
+ * This function is derived from the implementation of the ndtri function in the Cephes Math Library.
+ * See note [3-Clause BSD License for the Cephes Math Library].
+ *
+ * Computes the argument, x, for which the area under the Gaussian probability density function
+ * (integrated from minus infinity to x) is equal to y.
+ */
+template <typename T>
+static inline C10_HOST_DEVICE T calc_ndtri(T y0) {
+
+  /* sqrt(2pi) */
+  constexpr T s2pi = 2.50662827463100050242E0;
+  constexpr T one = 1;
+  constexpr T zero = 0;
+
+  /* approximation for 0 <= |y - 0.5| <= 3/8 */
+  static const T P0[5] = {
+      -5.99633501014107895267E1,
+      9.80010754185999661536E1,
+      -5.66762857469070293439E1,
+      1.39312609387279679503E1,
+      -1.23916583867381258016E0,
+  };
+
+  static const T Q0[9] = {
+      1.00000000000000000000E0,
+      1.95448858338141759834E0,
+      4.67627912898881538453E0,
+      8.63602421390890590575E1,
+      -2.25462687854119370527E2,
+      2.00260212380060660359E2,
+      -8.20372256168333339912E1,
+      1.59056225126211695515E1,
+      -1.18331621121330003142E0,
+  };
+
+  /* Approximation for interval z = sqrt(-2 log y ) between 2 and 8
+   * i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14.
+   */
+  static const T P1[9] = {
+      4.05544892305962419923E0,
+      3.15251094599893866154E1,
+      5.71628192246421288162E1,
+      4.40805073893200834700E1,
+      1.46849561928858024014E1,
+      2.18663306850790267539E0,
+      -1.40256079171354495875E-1,
+      -3.50424626827848203418E-2,
+      -8.57456785154685413611E-4,
+  };
+
+  static const T Q1[9] = {
+      1.00000000000000000000E0,
+      1.57799883256466749731E1,
+      4.53907635128879210584E1,
+      4.13172038254672030440E1,
+      1.50425385692907503408E1,
+      2.50464946208309415979E0,
+      -1.42182922854787788574E-1,
+      -3.80806407691578277194E-2,
+      -9.33259480895457427372E-4,
+  };
+
+  /* Approximation for interval z = sqrt(-2 log y ) between 8 and 64
+   * i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890.
+   */
+
+  static const T P2[9] = {
+      3.23774891776946035970E0,
+      6.91522889068984211695E0,
+      3.93881025292474443415E0,
+      1.33303460815807542389E0,
+      2.01485389549179081538E-1,
+      1.23716634817820021358E-2,
+      3.01581553508235416007E-4,
+      2.65806974686737550832E-6,
+      6.23974539184983293730E-9,
+  };
+
+  static const T Q2[9] = {
+      1.00000000000000000000E0,
+      6.02427039364742014255E0,
+      3.67983563856160859403E0,
+      1.37702099489081330271E0,
+      2.16236993594496635890E-1,
+      1.34204006088543189037E-2,
+      3.28014464682127739104E-4,
+      2.89247864745380683936E-6,
+      6.79019408009981274425E-9,
+  };
+
+  if (y0 == zero) {
+    return -std::numeric_limits<T>::infinity();
+  }
+  if (y0 == one) {
+    return std::numeric_limits<T>::infinity();
+  }
+  if (y0 < zero || y0 > one) {
+    return std::numeric_limits<T>::quiet_NaN();
+  }
+  bool code = true;
+  T y = y0;
+  if (y > one - T{0.13533528323661269189}) { /* 0.135...
= exp(-2) */ + y = one - y; + code = false; + } + + if (y > T{0.13533528323661269189}) { + y = y - T{0.5}; + const T y2 = y * y; + T x = y + y * (y2 * polevl(y2, P0, 4) / polevl(y2, Q0, 8)); + return (x * s2pi); + } + + T x = ::sqrt(T{-2.0} * ::log(y)); + const T x0 = x - ::log(x) / x; + + const T z = one / x; + T x1; + if (x < T{8.0}) /* y > exp(-32) = 1.2664165549e-14 */ + { + x1 = z * polevl(z, P1, 8) / polevl(z, Q1, 8); + } else { + x1 = z * polevl(z, P2, 8) / polevl(z, Q2, 8); + } + x = x0 - x1; + if (code) { + x = -x; + } + return x; +} + +/* The next function is taken from http://ab-initio.mit.edu/Faddeev */ + +/* Copyright (c) 2012 Massachusetts Institute of Technology + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* erfcx(x) = exp(x^2) erfc(x) function, for real x, written by + Steven G. Johnson, October 2012. + + This function combines a few different ideas. + + First, for x > 50, it uses a continued-fraction expansion (same as + for the Faddeeva function, but with algebraic simplifications for z=i*x). + + Second, for 0 <= x <= 50, it uses Chebyshev polynomial approximations, + but with two twists: + + a) It maps x to y = 4 / (4+x) in [0,1]. This simple transformation, + inspired by a similar transformation in the octave-forge/specfun + erfcx by Soren Hauberg, results in much faster Chebyshev convergence + than other simple transformations I have examined. + + b) Instead of using a single Chebyshev polynomial for the entire + [0,1] y interval, we break the interval up into 100 equal + subintervals, with a switch/lookup table, and use much lower + degree Chebyshev polynomials in each subinterval. This greatly + improves performance in my tests. + + For x < 0, we use the relationship erfcx(-x) = 2 exp(x^2) - erfc(x), + with the usual checks for overflow etcetera. + + Performance-wise, it seems to be substantially faster than either + the SLATEC DERFC function [or an erfcx function derived therefrom] + or Cody's CALERF function (from netlib.org/specfun), while + retaining near machine precision in accuracy. */ + +/* Given y100=100*y, where y = 4/(4+x) for x >= 0, compute erfc(x). + + Uses a look-up table of 100 different Chebyshev polynomials + for y intervals [0,0.01], [0.01,0.02], ...., [0.99,1], generated + with the help of Maple and a little shell script. This allows + the Chebyshev polynomials to be of significantly lower degree (about 1/4) + compared to fitting the whole [0,1] interval with a single polynomial. 
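+
+   (Editorial worked example, added for clarity: x = 1 gives y = 4/(4+1) = 0.8,
+   hence y100 = 80, so the switch below lands in case 80, whose variable
+   t = 2*y100 - 161 sweeps [-1, 1) as y100 runs over the [80, 81) subinterval.)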
*/ + + +template +C10_HOST_DEVICE static inline typename std::enable_if::value, T>::type +erfcx_y100(T y100) +{ + switch (static_cast(y100)) { +case 0: { +T t = 2*y100 - 1; +return 0.70878032454106438663e-3 + (0.71234091047026302958e-3 + (0.35779077297597742384e-5 + (0.17403143962587937815e-7 + (0.81710660047307788845e-10 + (0.36885022360434957634e-12 + 0.15917038551111111111e-14 * t) * t) * t) * t) * t) * t; +} +case 1: { +T t = 2*y100 - 3; +return 0.21479143208285144230e-2 + (0.72686402367379996033e-3 + (0.36843175430938995552e-5 + (0.18071841272149201685e-7 + (0.85496449296040325555e-10 + (0.38852037518534291510e-12 + 0.16868473576888888889e-14 * t) * t) * t) * t) * t) * t; +} +case 2: { +T t = 2*y100 - 5; +return 0.36165255935630175090e-2 + (0.74182092323555510862e-3 + (0.37948319957528242260e-5 + (0.18771627021793087350e-7 + (0.89484715122415089123e-10 + (0.40935858517772440862e-12 + 0.17872061464888888889e-14 * t) * t) * t) * t) * t) * t; +} +case 3: { +T t = 2*y100 - 7; +return 0.51154983860031979264e-2 + (0.75722840734791660540e-3 + (0.39096425726735703941e-5 + (0.19504168704300468210e-7 + (0.93687503063178993915e-10 + (0.43143925959079664747e-12 + 0.18939926435555555556e-14 * t) * t) * t) * t) * t) * t; +} +case 4: { +T t = 2*y100 - 9; +return 0.66457513172673049824e-2 + (0.77310406054447454920e-3 + (0.40289510589399439385e-5 + (0.20271233238288381092e-7 + (0.98117631321709100264e-10 + (0.45484207406017752971e-12 + 0.20076352213333333333e-14 * t) * t) * t) * t) * t) * t; +} +case 5: { +T t = 2*y100 - 11; +return 0.82082389970241207883e-2 + (0.78946629611881710721e-3 + (0.41529701552622656574e-5 + (0.21074693344544655714e-7 + (0.10278874108587317989e-9 + (0.47965201390613339638e-12 + 0.21285907413333333333e-14 * t) * t) * t) * t) * t) * t; +} +case 6: { +T t = 2*y100 - 13; +return 0.98039537275352193165e-2 + (0.80633440108342840956e-3 + (0.42819241329736982942e-5 + (0.21916534346907168612e-7 + (0.10771535136565470914e-9 + (0.50595972623692822410e-12 + 0.22573462684444444444e-14 * t) * t) * t) * t) * t) * t; +} +case 7: { +T t = 2*y100 - 15; +return 0.11433927298290302370e-1 + (0.82372858383196561209e-3 + (0.44160495311765438816e-5 + (0.22798861426211986056e-7 + (0.11291291745879239736e-9 + (0.53386189365816880454e-12 + 0.23944209546666666667e-14 * t) * t) * t) * t) * t) * t; +} +case 8: { +T t = 2*y100 - 17; +return 0.13099232878814653979e-1 + (0.84167002467906968214e-3 + (0.45555958988457506002e-5 + (0.23723907357214175198e-7 + (0.11839789326602695603e-9 + (0.56346163067550237877e-12 + 0.25403679644444444444e-14 * t) * t) * t) * t) * t) * t; +} +case 9: { +T t = 2*y100 - 19; +return 0.14800987015587535621e-1 + (0.86018092946345943214e-3 + (0.47008265848816866105e-5 + (0.24694040760197315333e-7 + (0.12418779768752299093e-9 + (0.59486890370320261949e-12 + 0.26957764568888888889e-14 * t) * t) * t) * t) * t) * t; +} +case 10: { +T t = 2*y100 - 21; +return 0.16540351739394069380e-1 + (0.87928458641241463952e-3 + (0.48520195793001753903e-5 + (0.25711774900881709176e-7 + (0.13030128534230822419e-9 + (0.62820097586874779402e-12 + 0.28612737351111111111e-14 * t) * t) * t) * t) * t) * t; +} +case 11: { +T t = 2*y100 - 23; +return 0.18318536789842392647e-1 + (0.89900542647891721692e-3 + (0.50094684089553365810e-5 + (0.26779777074218070482e-7 + (0.13675822186304615566e-9 + (0.66358287745352705725e-12 + 0.30375273884444444444e-14 * t) * t) * t) * t) * t) * t; +} +case 12: { +T t = 2*y100 - 25; +return 0.20136801964214276775e-1 + (0.91936908737673676012e-3 + (0.51734830914104276820e-5 + 
(0.27900878609710432673e-7 + (0.14357976402809042257e-9 + (0.70114790311043728387e-12 + 0.32252476000000000000e-14 * t) * t) * t) * t) * t) * t; +} +case 13: { +T t = 2*y100 - 27; +return 0.21996459598282740954e-1 + (0.94040248155366777784e-3 + (0.53443911508041164739e-5 + (0.29078085538049374673e-7 + (0.15078844500329731137e-9 + (0.74103813647499204269e-12 + 0.34251892320000000000e-14 * t) * t) * t) * t) * t) * t; +} +case 14: { +T t = 2*y100 - 29; +return 0.23898877187226319502e-1 + (0.96213386835900177540e-3 + (0.55225386998049012752e-5 + (0.30314589961047687059e-7 + (0.15840826497296335264e-9 + (0.78340500472414454395e-12 + 0.36381553564444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 15: { +T t = 2*y100 - 31; +return 0.25845480155298518485e-1 + (0.98459293067820123389e-3 + (0.57082915920051843672e-5 + (0.31613782169164830118e-7 + (0.16646478745529630813e-9 + (0.82840985928785407942e-12 + 0.38649975768888888890e-14 * t) * t) * t) * t) * t) * t; +} +case 16: { +T t = 2*y100 - 33; +return 0.27837754783474696598e-1 + (0.10078108563256892757e-2 + (0.59020366493792212221e-5 + (0.32979263553246520417e-7 + (0.17498524159268458073e-9 + (0.87622459124842525110e-12 + 0.41066206488888888890e-14 * t) * t) * t) * t) * t) * t; +} +case 17: { +T t = 2*y100 - 35; +return 0.29877251304899307550e-1 + (0.10318204245057349310e-2 + (0.61041829697162055093e-5 + (0.34414860359542720579e-7 + (0.18399863072934089607e-9 + (0.92703227366365046533e-12 + 0.43639844053333333334e-14 * t) * t) * t) * t) * t) * t; +} +case 18: { +T t = 2*y100 - 37; +return 0.31965587178596443475e-1 + (0.10566560976716574401e-2 + (0.63151633192414586770e-5 + (0.35924638339521924242e-7 + (0.19353584758781174038e-9 + (0.98102783859889264382e-12 + 0.46381060817777777779e-14 * t) * t) * t) * t) * t) * t; +} +case 19: { +T t = 2*y100 - 39; +return 0.34104450552588334840e-1 + (0.10823541191350532574e-2 + (0.65354356159553934436e-5 + (0.37512918348533521149e-7 + (0.20362979635817883229e-9 + (0.10384187833037282363e-11 + 0.49300625262222222221e-14 * t) * t) * t) * t) * t) * t; +} +case 20: { +T t = 2*y100 - 41; +return 0.36295603928292425716e-1 + (0.11089526167995268200e-2 + (0.67654845095518363577e-5 + (0.39184292949913591646e-7 + (0.21431552202133775150e-9 + (0.10994259106646731797e-11 + 0.52409949102222222221e-14 * t) * t) * t) * t) * t) * t; +} +case 21: { +T t = 2*y100 - 43; +return 0.38540888038840509795e-1 + (0.11364917134175420009e-2 + (0.70058230641246312003e-5 + (0.40943644083718586939e-7 + (0.22563034723692881631e-9 + (0.11642841011361992885e-11 + 0.55721092871111111110e-14 * t) * t) * t) * t) * t) * t; +} +case 22: { +T t = 2*y100 - 45; +return 0.40842225954785960651e-1 + (0.11650136437945673891e-2 + (0.72569945502343006619e-5 + (0.42796161861855042273e-7 + (0.23761401711005024162e-9 + (0.12332431172381557035e-11 + 0.59246802364444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 23: { +T t = 2*y100 - 47; +return 0.43201627431540222422e-1 + (0.11945628793917272199e-2 + (0.75195743532849206263e-5 + (0.44747364553960993492e-7 + (0.25030885216472953674e-9 + (0.13065684400300476484e-11 + 0.63000532853333333334e-14 * t) * t) * t) * t) * t) * t; +} +case 24: { +T t = 2*y100 - 49; +return 0.45621193513810471438e-1 + (0.12251862608067529503e-2 + (0.77941720055551920319e-5 + (0.46803119830954460212e-7 + (0.26375990983978426273e-9 + (0.13845421370977119765e-11 + 0.66996477404444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 25: { +T t = 2*y100 - 51; +return 0.48103121413299865517e-1 + (0.12569331386432195113e-2 + 
(0.80814333496367673980e-5 + (0.48969667335682018324e-7 + (0.27801515481905748484e-9 + (0.14674637611609884208e-11 + 0.71249589351111111110e-14 * t) * t) * t) * t) * t) * t; +} +case 26: { +T t = 2*y100 - 53; +return 0.50649709676983338501e-1 + (0.12898555233099055810e-2 + (0.83820428414568799654e-5 + (0.51253642652551838659e-7 + (0.29312563849675507232e-9 + (0.15556512782814827846e-11 + 0.75775607822222222221e-14 * t) * t) * t) * t) * t) * t; +} +case 27: { +T t = 2*y100 - 55; +return 0.53263363664388864181e-1 + (0.13240082443256975769e-2 + (0.86967260015007658418e-5 + (0.53662102750396795566e-7 + (0.30914568786634796807e-9 + (0.16494420240828493176e-11 + 0.80591079644444444445e-14 * t) * t) * t) * t) * t) * t; +} +case 28: { +T t = 2*y100 - 57; +return 0.55946601353500013794e-1 + (0.13594491197408190706e-2 + (0.90262520233016380987e-5 + (0.56202552975056695376e-7 + (0.32613310410503135996e-9 + (0.17491936862246367398e-11 + 0.85713381688888888890e-14 * t) * t) * t) * t) * t) * t; +} +case 29: { +T t = 2*y100 - 59; +return 0.58702059496154081813e-1 + (0.13962391363223647892e-2 + (0.93714365487312784270e-5 + (0.58882975670265286526e-7 + (0.34414937110591753387e-9 + (0.18552853109751857859e-11 + 0.91160736711111111110e-14 * t) * t) * t) * t) * t) * t; +} +case 30: { +T t = 2*y100 - 61; +return 0.61532500145144778048e-1 + (0.14344426411912015247e-2 + (0.97331446201016809696e-5 + (0.61711860507347175097e-7 + (0.36325987418295300221e-9 + (0.19681183310134518232e-11 + 0.96952238400000000000e-14 * t) * t) * t) * t) * t) * t; +} +case 31: { +T t = 2*y100 - 63; +return 0.64440817576653297993e-1 + (0.14741275456383131151e-2 + (0.10112293819576437838e-4 + (0.64698236605933246196e-7 + (0.38353412915303665586e-9 + (0.20881176114385120186e-11 + 0.10310784480000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 32: { +T t = 2*y100 - 65; +return 0.67430045633130393282e-1 + (0.15153655418916540370e-2 + (0.10509857606888328667e-4 + (0.67851706529363332855e-7 + (0.40504602194811140006e-9 + (0.22157325110542534469e-11 + 0.10964842115555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 33: { +T t = 2*y100 - 67; +return 0.70503365513338850709e-1 + (0.15582323336495709827e-2 + (0.10926868866865231089e-4 + (0.71182482239613507542e-7 + (0.42787405890153386710e-9 + (0.23514379522274416437e-11 + 0.11659571751111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 34: { +T t = 2*y100 - 69; +return 0.73664114037944596353e-1 + (0.16028078812438820413e-2 + (0.11364423678778207991e-4 + (0.74701423097423182009e-7 + (0.45210162777476488324e-9 + (0.24957355004088569134e-11 + 0.12397238257777777778e-13 * t) * t) * t) * t) * t) * t; +} +case 35: { +T t = 2*y100 - 71; +return 0.76915792420819562379e-1 + (0.16491766623447889354e-2 + (0.11823685320041302169e-4 + (0.78420075993781544386e-7 + (0.47781726956916478925e-9 + (0.26491544403815724749e-11 + 0.13180196462222222222e-13 * t) * t) * t) * t) * t) * t; +} +case 36: { +T t = 2*y100 - 73; +return 0.80262075578094612819e-1 + (0.16974279491709504117e-2 + (0.12305888517309891674e-4 + (0.82350717698979042290e-7 + (0.50511496109857113929e-9 + (0.28122528497626897696e-11 + 0.14010889635555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 37: { +T t = 2*y100 - 75; +return 0.83706822008980357446e-1 + (0.17476561032212656962e-2 + (0.12812343958540763368e-4 + (0.86506399515036435592e-7 + (0.53409440823869467453e-9 + (0.29856186620887555043e-11 + 0.14891851591111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 38: { +T t = 2*y100 - 77; +return 0.87254084284461718231e-1 + 
(0.17999608886001962327e-2 + (0.13344443080089492218e-4 + (0.90900994316429008631e-7 + (0.56486134972616465316e-9 + (0.31698707080033956934e-11 + 0.15825697795555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 39: { +T t = 2*y100 - 79; +return 0.90908120182172748487e-1 + (0.18544478050657699758e-2 + (0.13903663143426120077e-4 + (0.95549246062549906177e-7 + (0.59752787125242054315e-9 + (0.33656597366099099413e-11 + 0.16815130613333333333e-13 * t) * t) * t) * t) * t) * t; +} +case 40: { +T t = 2*y100 - 81; +return 0.94673404508075481121e-1 + (0.19112284419887303347e-2 + (0.14491572616545004930e-4 + (0.10046682186333613697e-6 + (0.63221272959791000515e-9 + (0.35736693975589130818e-11 + 0.17862931591111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 41: { +T t = 2*y100 - 83; +return 0.98554641648004456555e-1 + (0.19704208544725622126e-2 + (0.15109836875625443935e-4 + (0.10567036667675984067e-6 + (0.66904168640019354565e-9 + (0.37946171850824333014e-11 + 0.18971959040000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 42: { +T t = 2*y100 - 85; +return 0.10255677889470089531e0 + (0.20321499629472857418e-2 + (0.15760224242962179564e-4 + (0.11117756071353507391e-6 + (0.70814785110097658502e-9 + (0.40292553276632563925e-11 + 0.20145143075555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 43: { +T t = 2*y100 - 87; +return 0.10668502059865093318e0 + (0.20965479776148731610e-2 + (0.16444612377624983565e-4 + (0.11700717962026152749e-6 + (0.74967203250938418991e-9 + (0.42783716186085922176e-11 + 0.21385479360000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 44: { +T t = 2*y100 - 89; +return 0.11094484319386444474e0 + (0.21637548491908170841e-2 + (0.17164995035719657111e-4 + (0.12317915750735938089e-6 + (0.79376309831499633734e-9 + (0.45427901763106353914e-11 + 0.22696025653333333333e-13 * t) * t) * t) * t) * t) * t; +} +case 45: { +T t = 2*y100 - 91; +return 0.11534201115268804714e0 + (0.22339187474546420375e-2 + (0.17923489217504226813e-4 + (0.12971465288245997681e-6 + (0.84057834180389073587e-9 + (0.48233721206418027227e-11 + 0.24079890062222222222e-13 * t) * t) * t) * t) * t) * t; +} +case 46: { +T t = 2*y100 - 93; +return 0.11988259392684094740e0 + (0.23071965691918689601e-2 + (0.18722342718958935446e-4 + (0.13663611754337957520e-6 + (0.89028385488493287005e-9 + (0.51210161569225846701e-11 + 0.25540227111111111111e-13 * t) * t) * t) * t) * t) * t; +} +case 47: { +T t = 2*y100 - 95; +return 0.12457298393509812907e0 + (0.23837544771809575380e-2 + (0.19563942105711612475e-4 + (0.14396736847739470782e-6 + (0.94305490646459247016e-9 + (0.54366590583134218096e-11 + 0.27080225920000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 48: { +T t = 2*y100 - 97; +return 0.12941991566142438816e0 + (0.24637684719508859484e-2 + (0.20450821127475879816e-4 + (0.15173366280523906622e-6 + (0.99907632506389027739e-9 + (0.57712760311351625221e-11 + 0.28703099555555555556e-13 * t) * t) * t) * t) * t) * t; +} +case 49: { +T t = 2*y100 - 99; +return 0.13443048593088696613e0 + (0.25474249981080823877e-2 + (0.21385669591362915223e-4 + (0.15996177579900443030e-6 + (0.10585428844575134013e-8 + (0.61258809536787882989e-11 + 0.30412080142222222222e-13 * t) * t) * t) * t) * t) * t; +} +case 50: { +T t = 2*y100 - 101; +return 0.13961217543434561353e0 + (0.26349215871051761416e-2 + (0.22371342712572567744e-4 + (0.16868008199296822247e-6 + (0.11216596910444996246e-8 + (0.65015264753090890662e-11 + 0.32210394506666666666e-13 * t) * t) * t) * t) * t) * t; +} +case 51: { +T t = 2*y100 - 103; +return 0.14497287157673800690e0 + 
(0.27264675383982439814e-2 + (0.23410870961050950197e-4 + (0.17791863939526376477e-6 + (0.11886425714330958106e-8 + (0.68993039665054288034e-11 + 0.34101266222222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 52: { +T t = 2*y100 - 105; +return 0.15052089272774618151e0 + (0.28222846410136238008e-2 + (0.24507470422713397006e-4 + (0.18770927679626136909e-6 + (0.12597184587583370712e-8 + (0.73203433049229821618e-11 + 0.36087889048888888890e-13 * t) * t) * t) * t) * t) * t; +} +case 53: { +T t = 2*y100 - 107; +return 0.15626501395774612325e0 + (0.29226079376196624949e-2 + (0.25664553693768450545e-4 + (0.19808568415654461964e-6 + (0.13351257759815557897e-8 + (0.77658124891046760667e-11 + 0.38173420035555555555e-13 * t) * t) * t) * t) * t) * t; +} +case 54: { +T t = 2*y100 - 109; +return 0.16221449434620737567e0 + (0.30276865332726475672e-2 + (0.26885741326534564336e-4 + (0.20908350604346384143e-6 + (0.14151148144240728728e-8 + (0.82369170665974313027e-11 + 0.40360957457777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 55: { +T t = 2*y100 - 111; +return 0.16837910595412130659e0 + (0.31377844510793082301e-2 + (0.28174873844911175026e-4 + (0.22074043807045782387e-6 + (0.14999481055996090039e-8 + (0.87348993661930809254e-11 + 0.42653528977777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 56: { +T t = 2*y100 - 113; +return 0.17476916455659369953e0 + (0.32531815370903068316e-2 + (0.29536024347344364074e-4 + (0.23309632627767074202e-6 + (0.15899007843582444846e-8 + (0.92610375235427359475e-11 + 0.45054073102222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 57: { +T t = 2*y100 - 115; +return 0.18139556223643701364e0 + (0.33741744168096996041e-2 + (0.30973511714709500836e-4 + (0.24619326937592290996e-6 + (0.16852609412267750744e-8 + (0.98166442942854895573e-11 + 0.47565418097777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 58: { +T t = 2*y100 - 117; +return 0.18826980194443664549e0 + (0.35010775057740317997e-2 + (0.32491914440014267480e-4 + (0.26007572375886319028e-6 + (0.17863299617388376116e-8 + (0.10403065638343878679e-10 + 0.50190265831111111110e-13 * t) * t) * t) * t) * t) * t; +} +case 59: { +T t = 2*y100 - 119; +return 0.19540403413693967350e0 + (0.36342240767211326315e-2 + (0.34096085096200907289e-4 + (0.27479061117017637474e-6 + (0.18934228504790032826e-8 + (0.11021679075323598664e-10 + 0.52931171733333333334e-13 * t) * t) * t) * t) * t) * t; +} +case 60: { +T t = 2*y100 - 121; +return 0.20281109560651886959e0 + (0.37739673859323597060e-2 + (0.35791165457592409054e-4 + (0.29038742889416172404e-6 + (0.20068685374849001770e-8 + (0.11673891799578381999e-10 + 0.55790523093333333334e-13 * t) * t) * t) * t) * t) * t; +} +case 61: { +T t = 2*y100 - 123; +return 0.21050455062669334978e0 + (0.39206818613925652425e-2 + (0.37582602289680101704e-4 + (0.30691836231886877385e-6 + (0.21270101645763677824e-8 + (0.12361138551062899455e-10 + 0.58770520160000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 62: { +T t = 2*y100 - 125; +return 0.21849873453703332479e0 + (0.40747643554689586041e-2 + (0.39476163820986711501e-4 + (0.32443839970139918836e-6 + (0.22542053491518680200e-8 + (0.13084879235290858490e-10 + 0.61873153262222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 63: { +T t = 2*y100 - 127; +return 0.22680879990043229327e0 + (0.42366354648628516935e-2 + (0.41477956909656896779e-4 + (0.34300544894502810002e-6 + (0.23888264229264067658e-8 + (0.13846596292818514601e-10 + 0.65100183751111111110e-13 * t) * t) * t) * t) * t) * t; +} +case 64: { +T t = 2*y100 - 129; +return 
0.23545076536988703937e0 + (0.44067409206365170888e-2 + (0.43594444916224700881e-4 + (0.36268045617760415178e-6 + (0.25312606430853202748e-8 + (0.14647791812837903061e-10 + 0.68453122631111111110e-13 * t) * t) * t) * t) * t) * t; +} +case 65: { +T t = 2*y100 - 131; +return 0.24444156740777432838e0 + (0.45855530511605787178e-2 + (0.45832466292683085475e-4 + (0.38352752590033030472e-6 + (0.26819103733055603460e-8 + (0.15489984390884756993e-10 + 0.71933206364444444445e-13 * t) * t) * t) * t) * t) * t; +} +case 66: { +T t = 2*y100 - 133; +return 0.25379911500634264643e0 + (0.47735723208650032167e-2 + (0.48199253896534185372e-4 + (0.40561404245564732314e-6 + (0.28411932320871165585e-8 + (0.16374705736458320149e-10 + 0.75541379822222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 67: { +T t = 2*y100 - 135; +return 0.26354234756393613032e0 + (0.49713289477083781266e-2 + (0.50702455036930367504e-4 + (0.42901079254268185722e-6 + (0.30095422058900481753e-8 + (0.17303497025347342498e-10 + 0.79278273368888888890e-13 * t) * t) * t) * t) * t) * t; +} +case 68: { +T t = 2*y100 - 137; +return 0.27369129607732343398e0 + (0.51793846023052643767e-2 + (0.53350152258326602629e-4 + (0.45379208848865015485e-6 + (0.31874057245814381257e-8 + (0.18277905010245111046e-10 + 0.83144182364444444445e-13 * t) * t) * t) * t) * t) * t; +} +case 69: { +T t = 2*y100 - 139; +return 0.28426714781640316172e0 + (0.53983341916695141966e-2 + (0.56150884865255810638e-4 + (0.48003589196494734238e-6 + (0.33752476967570796349e-8 + (0.19299477888083469086e-10 + 0.87139049137777777779e-13 * t) * t) * t) * t) * t) * t; +} +case 70: { +T t = 2*y100 - 141; +return 0.29529231465348519920e0 + (0.56288077305420795663e-2 + (0.59113671189913307427e-4 + (0.50782393781744840482e-6 + (0.35735475025851713168e-8 + (0.20369760937017070382e-10 + 0.91262442613333333334e-13 * t) * t) * t) * t) * t) * t; +} +case 71: { +T t = 2*y100 - 143; +return 0.30679050522528838613e0 + (0.58714723032745403331e-2 + (0.62248031602197686791e-4 + (0.53724185766200945789e-6 + (0.37827999418960232678e-8 + (0.21490291930444538307e-10 + 0.95513539182222222221e-13 * t) * t) * t) * t) * t) * t; +} +case 72: { +T t = 2*y100 - 145; +return 0.31878680111173319425e0 + (0.61270341192339103514e-2 + (0.65564012259707640976e-4 + (0.56837930287837738996e-6 + (0.40035151353392378882e-8 + (0.22662596341239294792e-10 + 0.99891109760000000000e-13 * t) * t) * t) * t) * t) * t; +} +case 73: { +T t = 2*y100 - 147; +return 0.33130773722152622027e0 + (0.63962406646798080903e-2 + (0.69072209592942396666e-4 + (0.60133006661885941812e-6 + (0.42362183765883466691e-8 + (0.23888182347073698382e-10 + 0.10439349811555555556e-12 * t) * t) * t) * t) * t) * t; +} +case 74: { +T t = 2*y100 - 149; +return 0.34438138658041336523e0 + (0.66798829540414007258e-2 + (0.72783795518603561144e-4 + (0.63619220443228800680e-6 + (0.44814499336514453364e-8 + (0.25168535651285475274e-10 + 0.10901861383111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 75: { +T t = 2*y100 - 151; +return 0.35803744972380175583e0 + (0.69787978834882685031e-2 + (0.76710543371454822497e-4 + (0.67306815308917386747e-6 + (0.47397647975845228205e-8 + (0.26505114141143050509e-10 + 0.11376390933333333333e-12 * t) * t) * t) * t) * t) * t; +} +case 76: { +T t = 2*y100 - 153; +return 0.37230734890119724188e0 + (0.72938706896461381003e-2 + (0.80864854542670714092e-4 + (0.71206484718062688779e-6 + (0.50117323769745883805e-8 + (0.27899342394100074165e-10 + 0.11862637614222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 77: { +T t = 2*y100 - 155; 
+return 0.38722432730555448223e0 + (0.76260375162549802745e-2 + (0.85259785810004603848e-4 + (0.75329383305171327677e-6 + (0.52979361368388119355e-8 + (0.29352606054164086709e-10 + 0.12360253370666666667e-12 * t) * t) * t) * t) * t) * t; +} +case 78: { +T t = 2*y100 - 157; +return 0.40282355354616940667e0 + (0.79762880915029728079e-2 + (0.89909077342438246452e-4 + (0.79687137961956194579e-6 + (0.55989731807360403195e-8 + (0.30866246101464869050e-10 + 0.12868841946666666667e-12 * t) * t) * t) * t) * t) * t; +} +case 79: { +T t = 2*y100 - 159; +return 0.41914223158913787649e0 + (0.83456685186950463538e-2 + (0.94827181359250161335e-4 + (0.84291858561783141014e-6 + (0.59154537751083485684e-8 + (0.32441553034347469291e-10 + 0.13387957943111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 80: { +T t = 2*y100 - 161; +return 0.43621971639463786896e0 + (0.87352841828289495773e-2 + (0.10002929142066799966e-3 + (0.89156148280219880024e-6 + (0.62480008150788597147e-8 + (0.34079760983458878910e-10 + 0.13917107176888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 81: { +T t = 2*y100 - 163; +return 0.45409763548534330981e0 + (0.91463027755548240654e-2 + (0.10553137232446167258e-3 + (0.94293113464638623798e-6 + (0.65972492312219959885e-8 + (0.35782041795476563662e-10 + 0.14455745872000000000e-12 * t) * t) * t) * t) * t) * t; +} +case 82: { +T t = 2*y100 - 165; +return 0.47282001668512331468e0 + (0.95799574408860463394e-2 + (0.11135019058000067469e-3 + (0.99716373005509038080e-6 + (0.69638453369956970347e-8 + (0.37549499088161345850e-10 + 0.15003280712888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 83: { +T t = 2*y100 - 167; +return 0.49243342227179841649e0 + (0.10037550043909497071e-1 + (0.11750334542845234952e-3 + (0.10544006716188967172e-5 + (0.73484461168242224872e-8 + (0.39383162326435752965e-10 + 0.15559069118222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 84: { +T t = 2*y100 - 169; +return 0.51298708979209258326e0 + (0.10520454564612427224e-1 + (0.12400930037494996655e-3 + (0.11147886579371265246e-5 + (0.77517184550568711454e-8 + (0.41283980931872622611e-10 + 0.16122419680000000000e-12 * t) * t) * t) * t) * t) * t; +} +case 85: { +T t = 2*y100 - 171; +return 0.53453307979101369843e0 + (0.11030120618800726938e-1 + (0.13088741519572269581e-3 + (0.11784797595374515432e-5 + (0.81743383063044825400e-8 + (0.43252818449517081051e-10 + 0.16692592640000000000e-12 * t) * t) * t) * t) * t) * t; +} +case 86: { +T t = 2*y100 - 173; +return 0.55712643071169299478e0 + (0.11568077107929735233e-1 + (0.13815797838036651289e-3 + (0.12456314879260904558e-5 + (0.86169898078969313597e-8 + (0.45290446811539652525e-10 + 0.17268801084444444444e-12 * t) * t) * t) * t) * t) * t; +} +case 87: { +T t = 2*y100 - 175; +return 0.58082532122519320968e0 + (0.12135935999503877077e-1 + (0.14584223996665838559e-3 + (0.13164068573095710742e-5 + (0.90803643355106020163e-8 + (0.47397540713124619155e-10 + 0.17850211608888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 88: { +T t = 2*y100 - 177; +return 0.60569124025293375554e0 + (0.12735396239525550361e-1 + (0.15396244472258863344e-3 + (0.13909744385382818253e-5 + (0.95651595032306228245e-8 + (0.49574672127669041550e-10 + 0.18435945564444444444e-12 * t) * t) * t) * t) * t) * t; +} +case 89: { +T t = 2*y100 - 179; +return 0.63178916494715716894e0 + (0.13368247798287030927e-1 + (0.16254186562762076141e-3 + (0.14695084048334056083e-5 + (0.10072078109604152350e-7 + (0.51822304995680707483e-10 + 0.19025081422222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 90: { +T t = 2*y100 
- 181; +return 0.65918774689725319200e0 + (0.14036375850601992063e-1 + (0.17160483760259706354e-3 + (0.15521885688723188371e-5 + (0.10601827031535280590e-7 + (0.54140790105837520499e-10 + 0.19616655146666666667e-12 * t) * t) * t) * t) * t) * t; +} +case 91: { +T t = 2*y100 - 183; +return 0.68795950683174433822e0 + (0.14741765091365869084e-1 + (0.18117679143520433835e-3 + (0.16392004108230585213e-5 + (0.11155116068018043001e-7 + (0.56530360194925690374e-10 + 0.20209663662222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 92: { +T t = 2*y100 - 185; +return 0.71818103808729967036e0 + (0.15486504187117112279e-1 + (0.19128428784550923217e-3 + (0.17307350969359975848e-5 + (0.11732656736113607751e-7 + (0.58991125287563833603e-10 + 0.20803065333333333333e-12 * t) * t) * t) * t) * t) * t; +} +case 93: { +T t = 2*y100 - 187; +return 0.74993321911726254661e0 + (0.16272790364044783382e-1 + (0.20195505163377912645e-3 + (0.18269894883203346953e-5 + (0.12335161021630225535e-7 + (0.61523068312169087227e-10 + 0.21395783431111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 94: { +T t = 2*y100 - 189; +return 0.78330143531283492729e0 + (0.17102934132652429240e-1 + (0.21321800585063327041e-3 + (0.19281661395543913713e-5 + (0.12963340087354341574e-7 + (0.64126040998066348872e-10 + 0.21986708942222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 95: { +T t = 2*y100 - 191; +return 0.81837581041023811832e0 + (0.17979364149044223802e-1 + (0.22510330592753129006e-3 + (0.20344732868018175389e-5 + (0.13617902941839949718e-7 + (0.66799760083972474642e-10 + 0.22574701262222222222e-12 * t) * t) * t) * t) * t) * t; +} +case 96: { +T t = 2*y100 - 193; +return 0.85525144775685126237e0 + (0.18904632212547561026e-1 + (0.23764237370371255638e-3 + (0.21461248251306387979e-5 + (0.14299555071870523786e-7 + (0.69543803864694171934e-10 + 0.23158593688888888889e-12 * t) * t) * t) * t) * t) * t; +} +case 97: { +T t = 2*y100 - 195; +return 0.89402868170849933734e0 + (0.19881418399127202569e-1 + (0.25086793128395995798e-3 + (0.22633402747585233180e-5 + (0.15008997042116532283e-7 + (0.72357609075043941261e-10 + 0.23737194737777777778e-12 * t) * t) * t) * t) * t) * t; +} +case 98: { +T t = 2*y100 - 197; +return 0.93481333942870796363e0 + (0.20912536329780368893e-1 + (0.26481403465998477969e-3 + (0.23863447359754921676e-5 + (0.15746923065472184451e-7 + (0.75240468141720143653e-10 + 0.24309291271111111111e-12 * t) * t) * t) * t) * t) * t; +} +case 99: { +T t = 2*y100 - 199; +return 0.97771701335885035464e0 + (0.22000938572830479551e-1 + (0.27951610702682383001e-3 + (0.25153688325245314530e-5 + (0.16514019547822821453e-7 + (0.78191526829368231251e-10 + 0.24873652355555555556e-12 * t) * t) * t) * t) * t) * t; +} + } + // we only get here if y = 1, i.e. |x| < 4*eps, in which case + // erfcx is within 1e-15 of 1.. 
+ return 1.0; +} + +template +C10_HOST_DEVICE static inline typename std::enable_if::value, T>::type +calc_erfcx(T x) +{ + if (at::_isnan(x)) { + return x; + } + + if (x >= 0) { + if (x > 50) { // continued-fraction expansion is faster + const T ispi = 0.56418958354775628694807945156; // 1 / sqrt(pi) + if (x > 5e7) { // 1-term expansion, important to avoid overflow + return ispi / x; + } + /* 5-term expansion (rely on compiler for CSE), simplified from: + ispi / (x+0.5/(x+1/(x+1.5/(x+2/x)))) */ + return ispi*((x*x) * (x*x+4.5) + 2) / (x * ((x*x) * (x*x+5) + 3.75)); + } + return erfcx_y100(400/(4+x)); + } + else { + if (x < -26.7) { + return std::numeric_limits::infinity(); + } + else if (x < -6.1) { + return 2*exp(x*x); + } + else { + return 2*exp(x*x) - erfcx_y100(400/(4-x)); + } + } +} + +/* + * Logarithm of Gaussian cumulative distribution function. + + * This implementation of log_ndtr and its helper functions + * follow SciPy's implementation + * See NOTICE for the licenses. + */ +template +static inline C10_HOST_DEVICE T calc_log_ndtr(T x) { + T t = x * M_SQRT1_2; + if (x < T{-1.0}) { + return std::log(calc_erfcx(-t) / 2) - t * t; + } else { + return std::log1p(-std::erfc(t) / 2); + } +} + +template +static inline C10_HOST_DEVICE T airy_ai_forward(T x) { + static const T AN[] = { + +3.46538101525629032477e-01, + +1.20075952739645805542e+01, + +7.62796053615234516538e+01, + +1.68089224934630576269e+02, + +1.59756391350164413639e+02, + +7.05360906840444183113e+01, + +1.40264691163389668864e+01, + +9.99999999999999995305e-01, + }; + + static const T AD[] = { + +5.67594532638770212846e-01, + +1.47562562584847203173e+01, + +8.45138970141474626562e+01, + +1.77318088145400459522e+02, + +1.64234692871529701831e+02, + +7.14778400825575695274e+01, + +1.40959135607834029598e+01, + +1.00000000000000000470e+00, + }; + + static const T AFN[] = { + -1.31696323418331795333e-01, + -6.26456544431912369773e-01, + -6.93158036036933542233e-01, + -2.79779981545119124951e-01, + -4.91900132609500318020e-02, + -4.06265923594885404393e-03, + -1.59276496239262096340e-04, + -2.77649108155232920844e-06, + -1.67787698489114633780e-08, + }; + + static const T AFD[] = { + +1.33560420706553243746e+01, + +3.26825032795224613948e+01, + +2.67367040941499554804e+01, + +9.18707402907259625840e+00, + +1.47529146771666414581e+00, + +1.15687173795188044134e-01, + +4.40291641615211203805e-03, + +7.54720348287414296618e-05, + +4.51850092970580378464e-07, + }; + + static const T AGN[] = { + +1.97339932091685679179e-02, + +3.91103029615688277255e-01, + +1.06579897599595591108e+00, + +9.39169229816650230044e-01, + +3.51465656105547619242e-01, + +6.33888919628925490927e-02, + +5.85804113048388458567e-03, + +2.82851600836737019778e-04, + +6.98793669997260967291e-06, + +8.11789239554389293311e-08, + +3.41551784765923618484e-10, + }; + + static const T AGD[] = { + +9.30892908077441974853e+00, + +1.98352928718312140417e+01, + +1.55646628932864612953e+01, + +5.47686069422975497931e+00, + +9.54293611618961883998e-01, + +8.64580826352392193095e-02, + +4.12656523824222607191e-03, + +1.01259085116509135510e-04, + +1.17166733214413521882e-06, + +4.91834570062930015649e-09, + }; + + int domain_flag = 0; + + T ai; + + if (std::isinf(x)) { + return std::numeric_limits::quiet_NaN(); + } + + if (x > T(103.892)) { + return T(0.0); + } + + T f; + T g; + T k; + + if (x < T(-2.09)) { + T z = T(1.0) / (T(-2.0) * x * std::sqrt(-x) / T(3.0)); + + T afn = 0.0; + + for (uint8_t index = 0; index <= 8; index++) { + afn = afn * (z * z) + AFN[index]; + } + 
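The large switch above tabulates degree-6 polynomial fits of erfcx over 100 subintervals (evaluated in Horner form), while calc_erfcx handles the tails with a continued-fraction expansion. A standalone plausibility check for the large-x branch (a hypothetical test harness, not part of this diff; the constant and rational expression are copied from the code above, and the tolerance is an assumption based on the expansion's asymptotic accuracy):

```cpp
#include <cassert>
#include <cmath>
#include <cstdio>

// Compare the 5-term continued-fraction expansion of erfcx against the
// naive product exp(x^2) * erfc(x) while the latter is still representable.
int main() {
  const double ispi = 0.56418958354775628694807945156; // 1 / sqrt(pi)
  for (double x = 8.0; x <= 20.0; x += 1.0) {
    double cf = ispi * ((x * x) * (x * x + 4.5) + 2) /
                (x * ((x * x) * (x * x + 5) + 3.75));
    double naive = std::exp(x * x) * std::erfc(x); // finite for x <= ~26
    assert(std::fabs(cf - naive) / naive < 1e-6);
  }
  std::printf("ok\n");
}
```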
+ T afd = 0.0; + + for (uint8_t index = 0; index <= 8; index++) { + afd = afd * (z * z) + AFD[index]; + } + + T agn = 0.0; + + for (uint8_t index = 0; index <= 10 + 0; index++) { + agn = agn * (z * z) + AGN[index]; + } + + T agd = 0.0; + + for (uint8_t index = 0; index <= 10 - 1; index++) { + agd = agd * (z * z) + AGD[index]; + } + + T t = T(-2.0) * x * std::sqrt(-x) / T(3.0) + T(0.25) * M_PI; + + return T(5.64189583547756286948e-01) / std::sqrt(std::sqrt(-x)) * (std::sin(t) * (T(1.0) + z * z * afn / afd) - std::cos(t) * (z * agn / agd)); + } + + if (x >= T(2.09)) { + domain_flag = 5; + + T zeta = T(2.0) * x * std::sqrt(x) / T(3.0); + + T an = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + an = an * (T(1.0) / zeta) + AN[index]; + } + + T ad = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + ad = ad * (T(1.0) / zeta) + AD[index]; + } + + ai = T(5.64189583547756286948e-01) * (an / ad) / (T(2.0) * std::sqrt(std::sqrt(x)) * std::exp(zeta)); + + if (x > T(8.3203353)) { + return ai; + } + } + + f = 1.0; + g = x; + k = 1.0; + + T m = 1.0; + T n = x; + T t = 1.0; + T z = x * x * x; + + while (t > std::numeric_limits::epsilon()) { + m *= z; + k += T(1.0); + m /= k; + n *= z; + k += T(1.0); + n /= k; + m /= k; + f += m; + k += T(1.0); + n /= k; + g += n; + + t = std::abs(m / f); + } + + if ((domain_flag & 1) == 0) { + return T(0.355028053887817239260) * f - T(0.258819403792806798405) * g; + } + + return ai; +} // T airy_ai(T x) + +template +static inline C10_HOST_DEVICE T bessel_j0_forward(T x) { + static const T PP[] = { + +7.96936729297347051624e-04, + +8.28352392107440799803e-02, + +1.23953371646414299388e+00, + +5.44725003058768775090e+00, + +8.74716500199817011941e+00, + +5.30324038235394892183e+00, + +9.99999999999999997821e-01, + }; + + static const T PQ[] = { + +9.24408810558863637013e-04, + +8.56288474354474431428e-02, + +1.25352743901058953537e+00, + +5.47097740330417105182e+00, + +8.76190883237069594232e+00, + +5.30605288235394617618e+00, + +1.00000000000000000218e+00, + }; + + static const T QP[] = { + -1.13663838898469149931e-02, + -1.28252718670509318512e+00, + -1.95539544257735972385e+01, + -9.32060152123768231369e+01, + -1.77681167980488050595e+02, + -1.47077505154951170175e+02, + -5.14105326766599330220e+01, + -6.05014350600728481186e+00, + }; + + static const T QQ[] = { + +6.43178256118178023184e+01, + +8.56430025976980587198e+02, + +3.88240183605401609683e+03, + +7.24046774195652478189e+03, + +5.93072701187316984827e+03, + +2.06209331660327847417e+03, + +2.42005740240291393179e+02, + }; + + static const T RP[] = { + -4.79443220978201773821e+09, + +1.95617491946556577543e+12, + -2.49248344360967716204e+14, + +9.70862251047306323952e+15, + }; + + static const T RQ[] = { + +4.99563147152651017219e+02, + +1.73785401676374683123e+05, + +4.84409658339962045305e+07, + +1.11855537045356834862e+10, + +2.11277520115489217587e+12, + +3.10518229857422583814e+14, + +3.18121955943204943306e+16, + +1.71086294081043136091e+18, + }; + + if (x < T(0)) { + x = -x; + } + + if (x <= T(5.0)) { + if (x < T(0.00001)) { + return T(1.0) - x * x / T(4.0); + } + + T rp = 0.0; + + for (uint8_t index = 0; index <= 3; index++) { + rp = rp * (x * x) + RP[index]; + } + + T rq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + rq = rq * (x * x) + RQ[index]; + } + + return (x * x - T(5.78318596294678452118e+00)) * (x * x - T(3.04712623436620863991e+01)) * rp / rq; + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(25.0) / (x * x)) + PP[index]; + } + + 
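The long decimal literals in airy_ai_forward are closed forms rather than tuned constants: 5.64189583547756286948e-01 is 1/sqrt(pi), and the two coefficients combining the power series f and g are Ai(0) and -Ai'(0). A small derivation sketch (standalone, illustrative only):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // 1/sqrt(pi), used in both asymptotic branches:
  std::printf("%.20f\n", 1.0 / std::sqrt(M_PI));
  // Ai(0) = 3^(-2/3) / Gamma(2/3) = 0.355028053887817239260...
  std::printf("%.20f\n", 1.0 / (std::pow(3.0, 2.0 / 3.0) * std::tgamma(2.0 / 3.0)));
  // -Ai'(0) = 3^(-1/3) / Gamma(1/3) = 0.258819403792806798405...
  std::printf("%.20f\n", 1.0 / (std::pow(3.0, 1.0 / 3.0) * std::tgamma(1.0 / 3.0)));
}
```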
T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(25.0) / (x * x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(25.0) / (x * x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(25.0) / (x * x)) + QQ[index]; + } + + return (pp / pq * std::cos(x - T(0.785398163397448309615660845819875721)) - T(5.0) / x * (qp / qq) * std::sin(x - T(0.785398163397448309615660845819875721))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_j0_forward(T x) + +template +static inline C10_HOST_DEVICE T bessel_j1_forward(T x) { + static const T PP[] = { + +7.62125616208173112003e-04, + +7.31397056940917570436e-02, + +1.12719608129684925192e+00, + +5.11207951146807644818e+00, + +8.42404590141772420927e+00, + +5.21451598682361504063e+00, + +1.00000000000000000254e+00, + }; + + static const T PQ[] = { + +5.71323128072548699714e-04, + +6.88455908754495404082e-02, + +1.10514232634061696926e+00, + +5.07386386128601488557e+00, + +8.39985554327604159757e+00, + +5.20982848682361821619e+00, + +9.99999999999999997461e-01, + }; + + static const T QP[] = { + +5.10862594750176621635e-02, + +4.98213872951233449420e+00, + +7.58238284132545283818e+01, + +3.66779609360150777800e+02, + +7.10856304998926107277e+02, + +5.97489612400613639965e+02, + +2.11688757100572135698e+02, + +2.52070205858023719784e+01, + }; + + static const T QQ[] = { + +7.42373277035675149943e+01, + +1.05644886038262816351e+03, + +4.98641058337653607651e+03, + +9.56231892404756170795e+03, + +7.99704160447350683650e+03, + +2.82619278517639096600e+03, + +3.36093607810698293419e+02, + }; + + static const T RP[] = { + -8.99971225705559398224e+08, + +4.52228297998194034323e+11, + -7.27494245221818276015e+13, + +3.68295732863852883286e+15, + }; + + static const T RQ[] = { + +6.20836478118054335476e+02, + +2.56987256757748830383e+05, + +8.35146791431949253037e+07, + +2.21511595479792499675e+10, + +4.74914122079991414898e+12, + +7.84369607876235854894e+14, + +8.95222336184627338078e+16, + +5.32278620332680085395e+18, + }; + + if (x < T(0.0)) { + return -bessel_j1_forward(-x); + } + + if (x <= T(5.0)) { + T rp = 0.0; + + for (uint8_t index = 0; index <= 3; index++) { + rp = rp * (x * x) + RP[index]; + } + + T rq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + rq = rq * (x * x) + RQ[index]; + } + + return rp / rq * x * (x * x - T(1.46819706421238932572e+01)) * (x * x - T(4.92184563216946036703e+01)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(5.0) / x * (T(5.0) / x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(5.0) / x * (T(5.0) / x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(5.0) / x * (T(5.0) / x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(5.0) / x * (T(5.0) / x)) + QQ[index]; + } + + return (pp / pq * std::cos(x - T(2.356194490192344928846982537459627163)) - T(5.0) / x * (qp / qq) * std::sin(x - T(2.356194490192344928846982537459627163))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_j1_forward(T x) + +template +static inline C10_HOST_DEVICE T bessel_y0_forward(T x) { + static const T PP[] = { + +7.96936729297347051624e-04, + +8.28352392107440799803e-02, + +1.23953371646414299388e+00, + +5.44725003058768775090e+00, + +8.74716500199817011941e+00, + 
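The Bessel kernels' magic numbers are likewise closed forms: the large-x branches implement the standard asymptotic form sqrt(2/(pi*x)) * (P*cos(x - phi) - Q*sin(x - phi)) with phase phi = pi/4 for order 0 and 3*pi/4 for order 1, and the Y-functions use a 2/pi log term. A quick sketch that reproduces the literals (standalone, illustrative only):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  std::printf("%.15f\n", 2.0 / M_PI);            // 0.636619772367581 (2/pi, Y0/Y1 log term)
  std::printf("%.15f\n", std::sqrt(2.0 / M_PI)); // 0.797884560802865 (amplitude factor)
  std::printf("%.15f\n", M_PI / 4.0);            // 0.785398163397448 (J0/Y0 phase)
  std::printf("%.15f\n", 3.0 * M_PI / 4.0);      // 2.356194490192345 (J1/Y1 phase)
}
```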
+5.30324038235394892183e+00, + +9.99999999999999997821e-01, + }; + + static const T PQ[] = { + +9.24408810558863637013e-04, + +8.56288474354474431428e-02, + +1.25352743901058953537e+00, + +5.47097740330417105182e+00, + +8.76190883237069594232e+00, + +5.30605288235394617618e+00, + +1.00000000000000000218e+00, + }; + + static const T QP[] = { + -1.13663838898469149931e-02, + -1.28252718670509318512e+00, + -1.95539544257735972385e+01, + -9.32060152123768231369e+01, + -1.77681167980488050595e+02, + -1.47077505154951170175e+02, + -5.14105326766599330220e+01, + -6.05014350600728481186e+00, + }; + + static const T QQ[] = { + +6.43178256118178023184e+01, + +8.56430025976980587198e+02, + +3.88240183605401609683e+03, + +7.24046774195652478189e+03, + +5.93072701187316984827e+03, + +2.06209331660327847417e+03, + +2.42005740240291393179e+02, + }; + + static const T YP[] = { + +1.55924367855235737965e+04, + -1.46639295903971606143e+07, + +5.43526477051876500413e+09, + -9.82136065717911466409e+11, + +8.75906394395366999549e+13, + -3.46628303384729719441e+15, + +4.42733268572569800351e+16, + -1.84950800436986690637e+16, + }; + + static const T YQ[] = { + +1.04128353664259848412e+03, + +6.26107330137134956842e+05, + +2.68919633393814121987e+08, + +8.64002487103935000337e+10, + +2.02979612750105546709e+13, + +3.17157752842975028269e+15, + +2.50596256172653059228e+17, + }; + + if (x <= T(5.0)) { + if (x == T(0.0)) { + return -std::numeric_limits::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits::quiet_NaN(); + } + + T yp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + yp = yp * (x * x) + YP[index]; + } + + T yq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + yq = yq * (x * x) + YQ[index]; + } + + return yp / yq + (T(0.636619772367581343075535053490057448) * std::log(x) * bessel_j0_forward(x)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(25.0) / (x * x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(25.0) / (x * x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(25.0) / (x * x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(25.0) / (x * x)) + QQ[index]; + } + + return (pp / pq * std::sin(x - T(0.785398163397448309615660845819875721)) + T(5.0) / x * (qp / qq) * std::cos(x - T(0.785398163397448309615660845819875721))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_y0_forward(T x) + +template +static inline C10_HOST_DEVICE T bessel_y1_forward(T x) { + static const T PP[] = { + +7.62125616208173112003e-04, + +7.31397056940917570436e-02, + +1.12719608129684925192e+00, + +5.11207951146807644818e+00, + +8.42404590141772420927e+00, + +5.21451598682361504063e+00, + +1.00000000000000000254e+00, + }; + + static const T PQ[] = { + +5.71323128072548699714e-04, + +6.88455908754495404082e-02, + +1.10514232634061696926e+00, + +5.07386386128601488557e+00, + +8.39985554327604159757e+00, + +5.20982848682361821619e+00, + +9.99999999999999997461e-01, + }; + + static const T QP[] = { + +5.10862594750176621635e-02, + +4.98213872951233449420e+00, + +7.58238284132545283818e+01, + +3.66779609360150777800e+02, + +7.10856304998926107277e+02, + +5.97489612400613639965e+02, + +2.11688757100572135698e+02, + +2.52070205858023719784e+01, + }; + + static const T QQ[] = { + +7.42373277035675149943e+01, + +1.05644886038262816351e+03, + +4.98641058337653607651e+03, + 
+9.56231892404756170795e+03, + +7.99704160447350683650e+03, + +2.82619278517639096600e+03, + +3.36093607810698293419e+02, + }; + + static const T YP[] = { + +1.26320474790178026440e+09, + -6.47355876379160291031e+11, + +1.14509511541823727583e+14, + -8.12770255501325109621e+15, + +2.02439475713594898196e+17, + -7.78877196265950026825e+17, + }; + + static const T YQ[] = { + +5.94301592346128195359e+02, + +2.35564092943068577943e+05, + +7.34811944459721705660e+07, + +1.87601316108706159478e+10, + +3.88231277496238566008e+12, + +6.20557727146953693363e+14, + +6.87141087355300489866e+16, + +3.97270608116560655612e+18, + }; + + if (x <= T(5.0)) { + if (x == T(0.0)) { + return -std::numeric_limits::infinity(); + } + + if (x <= T(0.0)) { + return std::numeric_limits::quiet_NaN(); + } + + T yp = 0.0; + + for (uint8_t index = 0; index <= 5; index++) { + yp = yp * (x * x) + YP[index]; + } + + T yq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + yq = yq * (x * x) + YQ[index]; + } + + return x * (yp / yq) + (T(0.636619772367581343075535053490057448) * (bessel_j1_forward(x) * std::log(x) - T(1.0) / x)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(5.0) / x * (T(5.0) / x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(5.0) / x * (T(5.0) / x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(5.0) / x * (T(5.0) / x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(5.0) / x * (T(5.0) / x)) + QQ[index]; + } + + return (pp / pq * std::sin(x - T(2.356194490192344928846982537459627163)) + T(5.0) / x * (qp / qq) * std::cos(x - T(2.356194490192344928846982537459627163))) * T(0.797884560802865355879892119868763737) / std::sqrt(x); +} // bessel_y1_forward(T x) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_t_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 6) && (std::abs(x) < T(1.0))) { + return std::cos(n * std::acos(x)); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_t_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_t_forward(T x, T n) { + return chebyshev_polynomial_t_forward(x, static_cast(n)); +} // chebyshev_polynomial_t_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_u_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return n + 1; + } + + return -(n + 1); + } + + if ((n > 8) && (std::abs(x) < T(1.0))) { + if (std::sin(std::acos(x)) != T(0.0)) { + return std::sin((n + 1) * std::acos(x)) / std::sin(std::acos(x)); + } + + return (n + 1) * std::cos((n + 1) * std::acos(x)) / x; + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x; + } + + T p = T(1.0); + T q = x + x; + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_u_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_u_forward(T x, T n) { + return chebyshev_polynomial_u_forward(x, static_cast(n)); +} // 
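The Chebyshev kernels above switch between the trigonometric identity T_n(x) = cos(n*acos(x)) and the three-term recurrence T_{k+1} = 2x*T_k - T_{k-1}. The two branches must agree on (-1, 1); a standalone consistency check (hypothetical test values and tolerance):

```cpp
#include <cassert>
#include <cmath>

int main() {
  for (double x = -0.95; x < 1.0; x += 0.1) {
    double p = 1.0, q = x; // T_0(x), T_1(x)
    for (int n = 2; n <= 20; ++n) {
      double r = 2.0 * x * q - p; // same update as the kernel's (x + x) * q - p
      p = q;
      q = r;
      assert(std::fabs(r - std::cos(n * std::acos(x))) < 1e-9);
    }
  }
}
```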
chebyshev_polynomial_u_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_v_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0)) { + return T(1.0); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if ((n > 8) && (std::abs(x) < T(1.0))) { + if (std::sin(std::acos(x) / T(2.0)) != T(1.0)) { + return std::cos((n + T(0.5)) * std::acos(x)) / std::cos(std::acos(x) / T(2.0)); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_v_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_v_forward(T x, T n) { + return chebyshev_polynomial_v_forward(x, static_cast(n)); +} // chebyshev_polynomial_v_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_w_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0)) { + return n + n + 1; + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 8) && (std::abs(x) < T(1.0))) { + if (std::cos(std::acos(x) / T(2.0)) != T(1.0)) { + return std::sin((n + T(0.5)) * std::acos(x)) / std::sin(std::acos(x) / T(2.0)); + } + + if (x > T(0.0)) { + return n + n + 1; + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x + T(1.0); + } + + T p = T(1.0); + T q = x + x + T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; +} // chebyshev_polynomial_w_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T chebyshev_polynomial_w_forward(T x, T n) { + return chebyshev_polynomial_w_forward(x, static_cast(n)); +} // chebyshev_polynomial_w_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T hermite_polynomial_h_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x; + } + + T p = T(1.0); + T q = x + x; + T r; + + for (int64_t k = 2; k < n + n; k += 2) { + r = (x + x) * q - k * p; + p = q; + q = r; + } + + return r; +} // hermite_polynomial_h_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T hermite_polynomial_h_forward(T x, T n) { + return hermite_polynomial_h_forward(x, static_cast(n)); +} // hermite_polynomial_h_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T hermite_polynomial_he_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = x * q - k * p; + p = q; + q = r; + } + + return r; +} // hermite_polynomial_he_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T hermite_polynomial_he_forward(T x, T n) { + return hermite_polynomial_he_forward(x, static_cast(n)); +} // hermite_polynomial_he_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T laguerre_polynomial_l_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(0.0)) { + return T(1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return T(1.0) - x; + } + + T p = 
T(1.0); + T q = T(1.0) - x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = (((k + k) + (T(1.0) - x)) * q - k * p) / (k + 1); + p = q; + q = r; + } + + return r; +} // laguerre_polynomial_l_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T laguerre_polynomial_l_forward(T x, T n) { + return laguerre_polynomial_l_forward(x, static_cast(n)); +} // laguerre_polynomial_l_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T legendre_polynomial_p_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (std::abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = ((k + k + 1) * x * q - k * p) / (k + 1); + p = q; + q = r; + } + + return r; +} // legendre_polynomial_p_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T legendre_polynomial_p_forward(T x, T n) { + return legendre_polynomial_p_forward(x, static_cast(n)); +} // legendre_polynomial_p_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T modified_bessel_i0_forward(T x) { + static const T A[] = { + -4.41534164647933937950e-18, + +3.33079451882223809783e-17, + -2.43127984654795469359e-16, + +1.71539128555513303061e-15, + -1.16853328779934516808e-14, + +7.67618549860493561688e-14, + -4.85644678311192946090e-13, + +2.95505266312963983461e-12, + -1.72682629144155570723e-11, + +9.67580903537323691224e-11, + -5.18979560163526290666e-10, + +2.65982372468238665035e-09, + -1.30002500998624804212e-08, + +6.04699502254191894932e-08, + -2.67079385394061173391e-07, + +1.11738753912010371815e-06, + -4.41673835845875056359e-06, + +1.64484480707288970893e-05, + -5.75419501008210370398e-05, + +1.88502885095841655729e-04, + -5.76375574538582365885e-04, + +1.63947561694133579842e-03, + -4.32430999505057594430e-03, + +1.05464603945949983183e-02, + -2.37374148058994688156e-02, + +4.93052842396707084878e-02, + -9.49010970480476444210e-02, + +1.71620901522208775349e-01, + -3.04682672343198398683e-01, + +6.76795274409476084995e-01, + }; + + static const T B[] = { + -7.23318048787475395456e-18, + -4.83050448594418207126e-18, + +4.46562142029675999901e-17, + +3.46122286769746109310e-17, + -2.82762398051658348494e-16, + -3.42548561967721913462e-16, + +1.77256013305652638360e-15, + +3.81168066935262242075e-15, + -9.55484669882830764870e-15, + -4.15056934728722208663e-14, + +1.54008621752140982691e-14, + +3.85277838274214270114e-13, + +7.18012445138366623367e-13, + -1.79417853150680611778e-12, + -1.32158118404477131188e-11, + -3.14991652796324136454e-11, + +1.18891471078464383424e-11, + +4.94060238822496958910e-10, + +3.39623202570838634515e-09, + +2.26666899049817806459e-08, + +2.04891858946906374183e-07, + +2.89137052083475648297e-06, + +6.88975834691682398426e-05, + +3.36911647825569408990e-03, + +8.04490411014108831608e-01, + }; + + T p; + T q = 0.0; + + if (std::abs(x) <= T(8.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 30; index++) { + p = q; + q = a; + a = ((std::abs(x) / T(2.0)) - T(2.0)) * q - p + A[index]; + } + + return std::exp(std::abs(x)) * (T(0.5) * (a - p)); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(32.0) / std::abs(x) - T(2.0)) * q - p + B[index]; + } + + return std::exp(std::abs(x)) * (T(0.5) * (b - p)) / std::sqrt(std::abs(x)); +} // modified_bessel_i0_forward(T x) + +template +static inline C10_HOST_DEVICE T 
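The Legendre kernel uses Bonnet's recurrence, (k+1) P_{k+1}(x) = (2k+1) x P_k(x) - k P_{k-1}(x), in the same rolling (p, q, r) style as the Laguerre kernel above it. A quick standalone check against the closed forms of P_2 and P_3 (sketch, arbitrary grid):

```cpp
#include <cassert>
#include <cmath>

int main() {
  for (double x = -1.0; x <= 1.0; x += 0.125) {
    double p = 1.0, q = x; // P_0(x), P_1(x)
    for (int k = 1; k < 3; ++k) {
      double r = ((2 * k + 1) * x * q - k * p) / (k + 1);
      p = q;
      q = r;
    }
    // p now holds P_2(x) = (3x^2 - 1)/2, q holds P_3(x) = (5x^3 - 3x)/2
    assert(std::fabs(p - (3 * x * x - 1) / 2) < 1e-12);
    assert(std::fabs(q - (5 * x * x * x - 3 * x) / 2) < 1e-12);
  }
}
```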
modified_bessel_i1_forward(T x) { + static const T A[] = { + +2.77791411276104639959e-18, + -2.11142121435816608115e-17, + +1.55363195773620046921e-16, + -1.10559694773538630805e-15, + +7.60068429473540693410e-15, + -5.04218550472791168711e-14, + +3.22379336594557470981e-13, + -1.98397439776494371520e-12, + +1.17361862988909016308e-11, + -6.66348972350202774223e-11, + +3.62559028155211703701e-10, + -1.88724975172282928790e-09, + +9.38153738649577178388e-09, + -4.44505912879632808065e-08, + +2.00329475355213526229e-07, + -8.56872026469545474066e-07, + +3.47025130813767847674e-06, + -1.32731636560394358279e-05, + +4.78156510755005422638e-05, + -1.61760815825896745588e-04, + +5.12285956168575772895e-04, + -1.51357245063125314899e-03, + +4.15642294431288815669e-03, + -1.05640848946261981558e-02, + +2.47264490306265168283e-02, + -5.29459812080949914269e-02, + +1.02643658689847095384e-01, + -1.76416518357834055153e-01, + +2.52587186443633654823e-01, + }; + + static const T B[] = { + +7.51729631084210481353e-18, + +4.41434832307170791151e-18, + -4.65030536848935832153e-17, + -3.20952592199342395980e-17, + +2.96262899764595013876e-16, + +3.30820231092092828324e-16, + -1.88035477551078244854e-15, + -3.81440307243700780478e-15, + +1.04202769841288027642e-14, + +4.27244001671195135429e-14, + -2.10154184277266431302e-14, + -4.08355111109219731823e-13, + -7.19855177624590851209e-13, + +2.03562854414708950722e-12, + +1.41258074366137813316e-11, + +3.25260358301548823856e-11, + -1.89749581235054123450e-11, + -5.58974346219658380687e-10, + -3.83538038596423702205e-09, + -2.63146884688951950684e-08, + -2.51223623787020892529e-07, + -3.88256480887769039346e-06, + -1.10588938762623716291e-04, + -9.76109749136146840777e-03, + +7.78576235018280120474e-01, + }; + + T p; + T q = 0.0; + + if (std::abs(x) <= T(8.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 29; index++) { + p = q; + q = a; + a = ((std::abs(x) / T(2.0)) - T(2.0)) * q - p + A[index]; + } + + if (x < T(0.0)) { + return -(T(0.5) * (a - p) * std::abs(x) * std::exp(std::abs(x))); + } + + return T(0.5) * (a - p) * std::abs(x) * std::exp(std::abs(x)); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(32.0) / std::abs(x) - T(2.0)) * q - p + B[index]; + } + + if (x < T(0.0)) { + return -(std::exp(std::abs(x)) * (T(0.5) * (b - p)) / std::sqrt(std::abs(x))); + } + + return std::exp(std::abs(x)) * (T(0.5) * (b - p)) / std::sqrt(std::abs(x)); +} // modified_bessel_i1_forward(T x) + +template +static inline C10_HOST_DEVICE T modified_bessel_k0_forward(T x) { + static const T A[] = { + +1.37446543561352307156e-16, + +4.25981614279661018399e-14, + +1.03496952576338420167e-11, + +1.90451637722020886025e-09, + +2.53479107902614945675e-07, + +2.28621210311945178607e-05, + +1.26461541144692592338e-03, + +3.59799365153615016266e-02, + +3.44289899924628486886e-01, + -5.35327393233902768720e-01, + }; + + static const T B[] = { + +5.30043377268626276149e-18, + -1.64758043015242134646e-17, + +5.21039150503902756861e-17, + -1.67823109680541210385e-16, + +5.51205597852431940784e-16, + -1.84859337734377901440e-15, + +6.34007647740507060557e-15, + -2.22751332699166985548e-14, + +8.03289077536357521100e-14, + -2.98009692317273043925e-13, + +1.14034058820847496303e-12, + -4.51459788337394416547e-12, + +1.85594911495471785253e-11, + -7.95748924447710747776e-11, + +3.57739728140030116597e-10, + -1.69753450938905987466e-09, + +8.57403401741422608519e-09, + -4.66048989768794782956e-08, + +2.76681363944501510342e-07, + 
-1.83175552271911948767e-06, + +1.39498137188764993662e-05, + -1.28495495816278026384e-04, + +1.56988388573005337491e-03, + -3.14481013119645005427e-02, + +2.44030308206595545468e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 10; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return T(0.5) * (a - p) - std::log(0.5 * x) * modified_bessel_i0_forward(x); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return std::exp(-x) * (T(0.5) * (b - p)) / std::sqrt(x); +} // modified_bessel_k0_forward(T x) + +template +static inline C10_HOST_DEVICE T modified_bessel_k1_forward(T x) { + static const T A[] = { + -7.02386347938628759343e-18, + -2.42744985051936593393e-15, + -6.66690169419932900609e-13, + -1.41148839263352776110e-10, + -2.21338763073472585583e-08, + -2.43340614156596823496e-06, + -1.73028895751305206302e-04, + -6.97572385963986435018e-03, + -1.22611180822657148235e-01, + -3.53155960776544875667e-01, + +1.52530022733894777053e+00, + }; + + static const T B[] = { + -5.75674448366501715755e-18, + +1.79405087314755922667e-17, + -5.68946255844285935196e-17, + +1.83809354436663880070e-16, + -6.05704724837331885336e-16, + +2.03870316562433424052e-15, + -7.01983709041831346144e-15, + +2.47715442448130437068e-14, + -8.97670518232499435011e-14, + +3.34841966607842919884e-13, + -1.28917396095102890680e-12, + +5.13963967348173025100e-12, + -2.12996783842756842877e-11, + +9.21831518760500529508e-11, + -4.19035475934189648750e-10, + +2.01504975519703286596e-09, + -1.03457624656780970260e-08, + +5.74108412545004946722e-08, + -3.50196060308781257119e-07, + +2.40648494783721712015e-06, + -1.93619797416608296024e-05, + +1.95215518471351631108e-04, + -2.85781685962277938680e-03, + +1.03923736576817238437e-01, + +2.72062619048444266945e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 11; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return std::log(T(0.5) * x) * modified_bessel_i1_forward(x) + T(0.5) * (a - p) / x; + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return std::exp(-x) * (T(0.5) * (b - p)) / std::sqrt(x); +} // modified_bessel_k1_forward(T x) + +template +static inline C10_HOST_DEVICE T scaled_modified_bessel_k0_forward(T x) { + static const T A[] = { + +1.37446543561352307156e-16, + +4.25981614279661018399e-14, + +1.03496952576338420167e-11, + +1.90451637722020886025e-09, + +2.53479107902614945675e-07, + +2.28621210311945178607e-05, + +1.26461541144692592338e-03, + +3.59799365153615016266e-02, + +3.44289899924628486886e-01, + -5.35327393233902768720e-01, + }; + + static const T B[] = { + +5.30043377268626276149e-18, + -1.64758043015242134646e-17, + +5.21039150503902756861e-17, + -1.67823109680541210385e-16, + +5.51205597852431940784e-16, + -1.84859337734377901440e-15, + +6.34007647740507060557e-15, + -2.22751332699166985548e-14, + +8.03289077536357521100e-14, + -2.98009692317273043925e-13, + +1.14034058820847496303e-12, + -4.51459788337394416547e-12, + 
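The (p, q, a) updates in these modified-Bessel kernels are Clenshaw's recurrence for a Chebyshev series, with the coefficient tables stored highest-order first (Cephes convention, T_0 coefficient halved on return) and the argument w = 2u inlined as (x*x - 2), (|x|/2 - 2), (8/x - 2) or (32/|x| - 2) depending on the branch. A generic form of the pattern, with a tiny self-check (a sketch of the technique, not code from the header):

```cpp
#include <cassert>
#include <cmath>

template <typename T>
T chebevl(T w, const T* c, int n) {
  T a = c[0], p = T(0), q = T(0);
  for (int i = 1; i < n; ++i) {
    p = q;
    q = a;
    a = w * q - p + c[i]; // identical shape to the kernel loops above
  }
  return T(0.5) * (a - p); // = c[n-1]/2 + sum_{k>=1} c[n-1-k] * T_k(u)
}

int main() {
  // Two-term check: 2*T_1(u) + (4/2)*T_0(u) = 2u + 2.
  const double c[] = {2.0, 4.0};
  for (double u = -1.0; u <= 1.0; u += 0.25) {
    assert(std::fabs(chebevl(2.0 * u, c, 2) - (2.0 * u + 2.0)) < 1e-12);
  }
}
```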
+1.85594911495471785253e-11, + -7.95748924447710747776e-11, + +3.57739728140030116597e-10, + -1.69753450938905987466e-09, + +8.57403401741422608519e-09, + -4.66048989768794782956e-08, + +2.76681363944501510342e-07, + -1.83175552271911948767e-06, + +1.39498137188764993662e-05, + -1.28495495816278026384e-04, + +1.56988388573005337491e-03, + -3.14481013119645005427e-02, + +2.44030308206595545468e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint64_t index = 1; index < 10; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return (T(0.5) * (a - p) - std::log(T(0.5) * x) * modified_bessel_i0_forward(x)) * std::exp(x); + } + + T b = B[0]; + + for (uint64_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return T(0.5) * (b - p) / std::sqrt(x); +} // T scaled_modified_bessel_k0_forward(T x) + +template +static inline C10_HOST_DEVICE T scaled_modified_bessel_k1_forward(T x) { + static const T A[] = { + -7.02386347938628759343e-18, + -2.42744985051936593393e-15, + -6.66690169419932900609e-13, + -1.41148839263352776110e-10, + -2.21338763073472585583e-08, + -2.43340614156596823496e-06, + -1.73028895751305206302e-04, + -6.97572385963986435018e-03, + -1.22611180822657148235e-01, + -3.53155960776544875667e-01, + +1.52530022733894777053e+00, + }; + + static const T B[] = { + -5.75674448366501715755e-18, + +1.79405087314755922667e-17, + -5.68946255844285935196e-17, + +1.83809354436663880070e-16, + -6.05704724837331885336e-16, + +2.03870316562433424052e-15, + -7.01983709041831346144e-15, + +2.47715442448130437068e-14, + -8.97670518232499435011e-14, + +3.34841966607842919884e-13, + -1.28917396095102890680e-12, + +5.13963967348173025100e-12, + -2.12996783842756842877e-11, + +9.21831518760500529508e-11, + -4.19035475934189648750e-10, + +2.01504975519703286596e-09, + -1.03457624656780970260e-08, + +5.74108412545004946722e-08, + -3.50196060308781257119e-07, + +2.40648494783721712015e-06, + -1.93619797416608296024e-05, + +1.95215518471351631108e-04, + -2.85781685962277938680e-03, + +1.03923736576817238437e-01, + +2.72062619048444266945e+00, + }; + + if (x == T(0.0)) { + return std::numeric_limits::infinity(); + } + + if (x < T(0.0)) { + return std::numeric_limits::quiet_NaN(); + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint64_t index = 1; index < 11; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return (std::log(T(0.5) * x) * modified_bessel_i1_forward(x) + T(0.5) * (a - p) / x) * std::exp(x); + } + + T b = B[0]; + + for (uint64_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return (T(0.5) * (b - p) / std::sqrt(x)); +} // T scaled_modified_bessel_k1_forward(T x) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_t_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return T(1.0); + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 6) && (std::abs(x + x - T(1.0)) < T(1.0))) { + return std::cos(n * std::acos(x + x - T(1.0))); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + 
x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; +} // shifted_chebyshev_polynomial_t_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_t_forward(T x, T n) { + return shifted_chebyshev_polynomial_t_forward(x, static_cast(n)); +} // shifted_chebyshev_polynomial_t_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_u_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return n + 1; + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return n + 1; + } + + return -(n + 1); + } + + if ((n > 6) && (std::abs(x + x - T(1.0)) < T(1.0))) { + if (std::sin(std::acos(x + x - T(1.0))) != T(0.0)) { + return std::sin((n + 1) * std::acos(x + x - T(1.0))) / std::sin(std::acos(x + x - T(1.0))); + } + + return (n + 1) * std::cos((n + 1) * std::acos(x + x - T(1.0))) / (x + x - T(1.0)); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; +} // shifted_chebyshev_polynomial_u_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_u_forward(T x, T n) { + return shifted_chebyshev_polynomial_u_forward(x, static_cast(n)); +} // shifted_chebyshev_polynomial_u_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_v_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return T(1.0); + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return (n + n + 1); + } + + return -(n + n + 1); + } + + if ((n > 6) && (std::abs(x + x - T(1.0)) < T(1.0))) { + if (std::sin(std::acos(x + x - T(1.0)) / T(2.0)) != T(1.0)) { + return std::cos(((n) + T(0.5)) * std::acos(x + x - T(1.0))) / std::cos(std::acos(x + x - T(1.0)) / T(2.0)); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)) - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)) - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; +} // shifted_chebyshev_polynomial_v_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_v_forward(T x, T n) { + return shifted_chebyshev_polynomial_v_forward(x, static_cast(n)); +} // shifted_chebyshev_polynomial_v_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_w_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return n + n + 1; + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 4) && (std::abs(x + x - T(1.0)) < T(1.0))) { + if (std::cos(std::acos(x + x - T(1.0)) / T(2.0)) != T(1.0)) { + return std::sin((n + T(0.5)) * std::acos(x + x - T(1.0))) / std::sin(std::acos(x + x - T(1.0)) / T(2.0)); + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)) + T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)) + T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + 
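The shifted polynomials are the ordinary Chebyshev polynomials composed with u = 2x - 1, i.e. T*_n(x) = T_n(2x - 1), which is why the recurrences above repeatedly form (x + x - 1) and double it. A standalone check of the recurrence against the trigonometric definition (sketch, arbitrary grid):

```cpp
#include <cassert>
#include <cmath>

int main() {
  for (double x = 0.05; x < 1.0; x += 0.1) {
    const double u = 2.0 * x - 1.0;
    double p = 1.0, q = u; // T*_0(x), T*_1(x)
    for (int n = 2; n <= 16; ++n) {
      double r = 2.0 * u * q - p;
      p = q;
      q = r;
      assert(std::fabs(r - std::cos(n * std::acos(u))) < 1e-10);
    }
  }
}
```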
+ return r; +} // shifted_chebyshev_polynomial_w_forward(T x, int64_t n) + +template +static inline C10_HOST_DEVICE T shifted_chebyshev_polynomial_w_forward(T x, T n) { + return shifted_chebyshev_polynomial_w_forward(x, static_cast(n)); +} // shifted_chebyshev_polynomial_w_forward(T x, T n) + +template +static inline C10_HOST_DEVICE T spherical_bessel_j0_forward(T x) { + if (std::isinf(x)) { + return T(0.0); + } + + if (std::abs(x) < T(0.5)) { + return T(1.0) + x * x * (T(-1.0) / T(6.0) + x * x * (T(1.0) / T(120.0) + x * x * (T(-1.0) / T(5040.0) + x * x * (T(1.0) / T(362880.0) + x * x * (T(-1.0) / T(39916800.0) + x * x * (T(1.0) / T(6227020800.0))))))); + } + + return std::sin(x) / x; +} // T spherical_bessel_j0_forward(T x) + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/voice_bridge/torch/include/ATen/native/MathBitFallThroughLists.h b/voice_bridge/torch/include/ATen/native/MathBitFallThroughLists.h new file mode 100644 index 0000000000000000000000000000000000000000..97b0854d82d0a2fec6bb708db767d81273ec7bcc --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/MathBitFallThroughLists.h @@ -0,0 +1,71 @@ +#pragma once + +namespace at { +// views and their in-place version ops +#define TORCH_VIEW_FNS(m) \ + m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \ + m.impl("detach", torch::CppFunction::makeFallthrough()); \ + m.impl("detach_", torch::CppFunction::makeFallthrough()); \ + m.impl("diagonal", torch::CppFunction::makeFallthrough()); \ + m.impl("expand", torch::CppFunction::makeFallthrough()); \ + m.impl("expand_as", torch::CppFunction::makeFallthrough()); \ + m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \ + m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \ + m.impl("narrow", torch::CppFunction::makeFallthrough()); \ + m.impl("permute", torch::CppFunction::makeFallthrough()); \ + m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("select.int", torch::CppFunction::makeFallthrough()); \ + m.impl("squeeze", torch::CppFunction::makeFallthrough()); \ + m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("transpose_", torch::CppFunction::makeFallthrough()); \ + m.impl("t", torch::CppFunction::makeFallthrough()); \ + m.impl("t_", torch::CppFunction::makeFallthrough()); \ + m.impl("real", torch::CppFunction::makeFallthrough()); \ + m.impl("imag", torch::CppFunction::makeFallthrough()); \ + m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \ + m.impl("unflatten.int", torch::CppFunction::makeFallthrough()); \ + m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("unfold", torch::CppFunction::makeFallthrough()); \ + m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \ + m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \ + m.impl("view_as", torch::CppFunction::makeFallthrough()); \ + m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \ + m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \ + m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \ + m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \ + m.impl("swapdims", torch::CppFunction::makeFallthrough()); \ + m.impl("chunk", torch::CppFunction::makeFallthrough()); \ + m.impl("reshape", torch::CppFunction::makeFallthrough()); \ + m.impl("alias", 
torch::CppFunction::makeFallthrough()); \ + m.impl("hsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \ + m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \ + m.impl("conj", torch::CppFunction::makeFallthrough()); \ + m.impl("_conj", torch::CppFunction::makeFallthrough()); \ + m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \ + m.impl("resize_", torch::CppFunction::makeFallthrough()); + +#define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \ + m.impl("empty_like", torch::CppFunction::makeFallthrough()); \ + m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \ + m.impl("empty.out", torch::CppFunction::makeFallthrough()); \ + m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \ + m.impl("full_like", torch::CppFunction::makeFallthrough()); \ + m.impl("stride.int", torch::CppFunction::makeFallthrough()); \ + m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("size.int", torch::CppFunction::makeFallthrough()); \ + m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \ + m.impl("is_complex", torch::CppFunction::makeFallthrough()); \ + m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \ + m.impl("requires_grad_", torch::CppFunction::makeFallthrough()); +} + +#define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \ + m.impl("as_strided", torch::CppFunction::makeFallthrough()); \ + m.impl("view", torch::CppFunction::makeFallthrough()); diff --git a/voice_bridge/torch/include/ATen/native/MathBitsFallback.h b/voice_bridge/torch/include/ATen/native/MathBitsFallback.h new file mode 100644 index 0000000000000000000000000000000000000000..4e9c2d9e98b182e54ea683a7b7ea3f6d864ab765 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/MathBitsFallback.h @@ -0,0 +1,151 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +// This fallback should only be used for operations that are self inverse and have a corresponding tensor +// bit (internally implemented using DispatchKey) to maintain the state on tensor using tensor bit. +// Currently there are two tensor bits that trigger this fallback: conjugate bit and negative bit. +// Conjugate bit is set on a tensor when `.conj()` is called and neg bit is set on a tensor when `.conj().imag` is called. + +// NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit. +struct MathOpFallback { + MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(op_name_) {} + virtual bool is_bit_set(const Tensor&) = 0; + void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) { + /* + Situations to handle: + 1. Out-of-place operation. Easy: materialize all inputs and + call it a day. + 2. Inplace operation. Desugar x.add_(2) into x.conj_().add_(2).conj_(). + Materialize other inputs as in (1). + 3. out= operation. Desugar add(x, 2, out=y) into y.copy_(add(x, 2)) + Materialize other inputs as in (1). + + It is important to be able to tell if we READ from an argument and if we + WRITE to an argument. 
Conservative approach is to assume that we always + READ from an argument, but in out= operations you can skip + conjugating inputs on entry that never get used. In the current schema we + can't easily tell if the operation is an in-place or an out= operation. + + Note: + 1. Mutable tensorlists containing tensors whose math bit is set to true are disallowed. + 2. Mutable tensors with math bit set to true are unconditionally cloned to ensure + correct behavior in the case when the mutable tensor shares memory with non-mutable arguments. + + If we were to in-place resolve the math bit for mutable inputs, then the non-mutable inputs sharing partial or full memory + with these mutable inputs would read wrong values in the following cases: + 1. Non-mutable inputs have their math bit set to false. + 2. Math bit for mutable input(s) is resolved before the non-mutable inputs (with bit set to true and sharing memory + with one or more mutable arg(s)) are cloned. + At the end, the final values of the mutable arguments from the stack are copied into the original mutable tensor inputs. + */ + const auto& arguments = op.schema().arguments(); + const auto num_arguments = arguments.size(); + const auto stack_start = stack->size() - num_arguments; + + c10::optional<bool> is_write; + for (const auto i : c10::irange(num_arguments)) { + // Three possible states: + // 1. alias_info has no value --> out-of-place operation + // 2. alias_info does have a value, alias_info->is_write=True --> in-place or out= operation + // 3. alias_info does have a value, alias_info->is_write=False --> view operation + const AliasInfo* alias_info = arguments[i].alias_info(); + if (alias_info != nullptr) { + if (is_write.has_value()) { + TORCH_CHECK(*is_write == alias_info->isWrite(), + "Unsupported operator for ", op_name, " fallback: ", op.schema().name(), + op_name, " fallback doesn't work for operators with a mix of " + "mutable and non-mutable inputs that alias with outputs, " + "this must be implemented manually. " + "If you got this error on a core op, please report a bug to PyTorch."); + } else { + is_write = alias_info->isWrite(); + } + } + } + + if (is_write.has_value() && !*is_write) { + // We assume that view operators automatically handle the math bit + // correctly by propagating the dispatch key in key_set. + // This is not necessarily always right, so you should test these cases.
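In user-visible terms, the fallback implements the desugaring described in the comment above. A sketch of the conjugate-bit case using the public ATen C++ API (illustrative only; whether a particular op actually routes through this boxed fallback depends on which kernels are conj-aware in a given build):

```cpp
#include <ATen/ATen.h>
#include <cassert>

int main() {
  at::Tensor x = at::randn({3}, at::kComplexFloat);
  at::Tensor y = x.conj(); // lazy: only sets the conjugate bit on a view
  assert(y.is_conj());
  // Conceptually, an in-place op on y is desugared by the fallback:
  // y is cloned/materialized, add_ runs on plain memory, and the result
  // is copied back so the mutation stays visible through the view of x.
  y.add_(1);
  return 0;
}
```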
+ op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack); + return; + } + + // Mutable inputs with math bit set to True and their clones + std::vector<std::pair<Tensor, Tensor>> mutable_inputs_with_their_clones; + for (const auto i : c10::irange(num_arguments)) { + auto& ivalue = (*stack)[stack_start + i]; + if (!(ivalue.isTensor() || ivalue.isTensorList())) { + continue; + } + const auto& argument = arguments[i]; + bool mut_arg = false; + if (argument.alias_info()) { + // Was already tested by the is_write loop above + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(argument.alias_info()->isWrite()); + mut_arg = true; + } + if (ivalue.isTensor()) { + if (!is_bit_set(ivalue.toTensor())) { + continue; + } + auto tensor = std::move(ivalue).toTensor(); + auto resolved_tensor = at::clone(tensor); + if (mut_arg) { + TORCH_CHECK(mutable_inputs_with_their_clones.empty(), op_name, " fallback does not support operators with more than one mutable tensor with ", + op_name, " bit set to true."); + mutable_inputs_with_their_clones.emplace_back(std::make_pair(std::move(tensor), resolved_tensor)); + } + (*stack)[stack_start + i] = std::move(resolved_tensor); + } else if (ivalue.isTensorList()) { + auto tensors = std::move(ivalue).toTensorList(); + for (const auto j : c10::irange(tensors.size())) { + const auto& tensor = tensors[j]; + if (!is_bit_set(tensor)) { + continue; + } + TORCH_CHECK(!mut_arg, " fallback doesn't currently support mutable TensorLists with ", + op_name, " inputs. Please materialize all the ", op_name, " input tensor(s) in the mutable TensorList inputs before calling ", + op.schema().name()); + tensors[j] = at::clone(tensor); + } + (*stack)[stack_start + i] = std::move(tensors); + } + } + + op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack); + + TORCH_INTERNAL_ASSERT(mutable_inputs_with_their_clones.size() <= 1); + + for (std::pair<Tensor, Tensor> mut_tensors : mutable_inputs_with_their_clones) { + auto& mutable_input = mut_tensors.first; + auto& cloned_mutable_input = mut_tensors.second; + auto& ivalue = (*stack)[stack_start]; + auto returned_output = std::move(ivalue).toTensor(); + + // sanity check to ensure that the tensor in the stack aliases the cloned_mutable_input + TORCH_INTERNAL_ASSERT(cloned_mutable_input.is_same(returned_output)); + + // necessary for out= arg + at::native::resize_output(mutable_input, returned_output.sizes()); + + mutable_input.copy_(returned_output); + (*stack)[stack_start] = std::move(mutable_input); + } + } + + virtual ~MathOpFallback() = default; + + DispatchKey key; + string op_name; +}; +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/MaxPooling.h b/voice_bridge/torch/include/ATen/native/MaxPooling.h new file mode 100644 index 0000000000000000000000000000000000000000..e133ad5939c84a46a9d48dab066a106d26018e73 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/MaxPooling.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include + +namespace at { +namespace native { + +// TODO(Heitor) Template by dimension +struct PoolingParams1D { + int64_t NB; // Number of batches + int64_t NC; // Number of channels + int64_t IW; // Input width + int64_t OW; // Output width + int64_t KW; // Kernel width + int64_t SJ; // Column stride + int64_t PJ; // Column padding + int64_t DJ; // Column dilation + + // Return index of input element for the given kernel and output index + inline int64_t index(int64_t kj, int64_t oj) const { + return oj * SJ + kj * DJ - PJ; + } + + // Return index of first output within bounds 
for this kernel index + inline int64_t valid_output_start(int64_t kj) const { + int64_t ij = index(kj, 0); + return ij < 0 ? at::divup(-ij, SJ) : 0; + } + + // Return index one past last output within bounds for this kernel index + inline int64_t valid_output_end(int64_t kj) const { + int64_t ij = index(kj, OW - 1); + return ij >= IW ? OW - at::divup(ij - (IW - 1), SJ) : OW; + } +}; + +using pooling_fn = void (*)(Tensor&, const Tensor&, const PoolingParams1D&); + +DECLARE_DISPATCH(pooling_fn, max_pool1d_stub); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/NonEmptyUtils.h b/voice_bridge/torch/include/ATen/native/NonEmptyUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..bd830cb67816081eab07d903f2f7a68373a62cbd --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/NonEmptyUtils.h @@ -0,0 +1,27 @@ +#include +#include +#include + +namespace at { namespace native { + +inline int64_t ensure_nonempty_dim(int64_t dim) { + return std::max<int64_t>(dim, 1); +} + +inline int64_t ensure_nonempty_size(const TensorBase &t, int64_t dim) { + return t.dim() == 0 ? 1 : t.size(dim); +} + +inline int64_t ensure_nonempty_stride(const TensorBase &t, int64_t dim) { + return t.dim() == 0 ? 1 : t.stride(dim); +} + +using IdxVec = std::vector<int64_t>; +inline IdxVec ensure_nonempty_vec(IdxVec vec) { + if (vec.size() == 0) { + vec.push_back(1); + } + return vec; +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/NonSymbolicBC.h b/voice_bridge/torch/include/ATen/native/NonSymbolicBC.h new file mode 100644 index 0000000000000000000000000000000000000000..8feda50770ba26d900429bbd271f6a9da372b518 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/NonSymbolicBC.h @@ -0,0 +1,12 @@ +#pragma once +#include +#include +#include + +namespace at { +namespace native { +// This file contains non-symbolic signatures for ops whose signatures we have sym-intified. +// However, in certain cases (such as static runtime), we call the native versions of the ops directly. +// In those cases, we will duplicate the signature here with non-symbolic ints, and also duplicate the C++ implementation. 
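The PoolingParams1D helpers above encode the 1-D index arithmetic index(kj, oj) = oj*SJ + kj*DJ - PJ and derive, per kernel offset, the half-open range of output columns whose input index stays in bounds. A standalone verification sketch (hypothetical shape parameters; divup mirrors at::divup, i.e. ceiling division):

```cpp
#include <cassert>
#include <cstdint>

static int64_t divup(int64_t a, int64_t b) { return (a + b - 1) / b; }

int main() {
  const int64_t IW = 11, OW = 5, KW = 3, SJ = 2, PJ = 1, DJ = 2;
  for (int64_t kj = 0; kj < KW; ++kj) {
    auto index = [&](int64_t oj) { return oj * SJ + kj * DJ - PJ; };
    int64_t ij0 = index(0);
    int64_t start = ij0 < 0 ? divup(-ij0, SJ) : 0;       // valid_output_start
    int64_t ijl = index(OW - 1);
    int64_t end = ijl >= IW ? OW - divup(ijl - (IW - 1), SJ) : OW; // valid_output_end
    for (int64_t oj = start; oj < end; ++oj) {
      assert(index(oj) >= 0 && index(oj) < IW); // every kept output is in bounds
    }
  }
}
```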
+TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape);
+}}
diff --git a/voice_bridge/torch/include/ATen/native/Normalization.h b/voice_bridge/torch/include/ATen/native/Normalization.h
new file mode 100644
index 0000000000000000000000000000000000000000..9500852799ef0b8546152a4c7c178289d8a03a37
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/Normalization.h
@@ -0,0 +1,12 @@
+#pragma once
+
+#include
+#include
+
+namespace at {
+namespace native {
+
+using renorm_scale_factor_fn = void (*) (TensorIteratorBase& iter, double maxnorm);
+DECLARE_DISPATCH(renorm_scale_factor_fn, renorm_scale_factor_stub);
+
+}} // namespace at::native
diff --git a/voice_bridge/torch/include/ATen/native/PointwiseOps.h b/voice_bridge/torch/include/ATen/native/PointwiseOps.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2e2d44db2af1e74eda272afa6fa80729ab2a2db
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/PointwiseOps.h
@@ -0,0 +1,28 @@
+// Ternary and higher-order pointwise operations
+#pragma once
+
+#include
+
+namespace c10 {
+class Scalar;
+}
+
+namespace at {
+
+struct TensorIterator;
+struct TensorIteratorBase;
+
+namespace native {
+
+using pointwise_fn = void (*)(TensorIterator&, const Scalar& scalar);
+using structured_pointwise_fn = void (*)(TensorIteratorBase&, const Scalar& scalar);
+using pointwise_fn_double = void (*)(TensorIterator&, const Scalar&, double);
+
+DECLARE_DISPATCH(structured_pointwise_fn, addcmul_stub);
+DECLARE_DISPATCH(structured_pointwise_fn, addcdiv_stub);
+DECLARE_DISPATCH(pointwise_fn_double, smooth_l1_backward_stub);
+DECLARE_DISPATCH(pointwise_fn_double, huber_backward_stub);
+DECLARE_DISPATCH(pointwise_fn, mse_backward_stub);
+
+} // namespace native
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/native/Pool.h b/voice_bridge/torch/include/ATen/native/Pool.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf5b45b365d0559d4c3f1a8bc0623f6eb1eb3de2
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/Pool.h
@@ -0,0 +1,324 @@
+#include
+#include
+#include
+#include
+#include
+
+#pragma once
+
+namespace at {
+namespace native {
+
+using max_pool2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input,
+    int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
+using max_pool2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
+
+DECLARE_DISPATCH(max_pool2d_fn, max_pool2d_kernel);
+DECLARE_DISPATCH(max_pool2d_backward_fn, max_pool2d_backward_kernel);
+
+// average pooling has same signature for forward and backward
+using avg_pool2d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH,
+    int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
+using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH,
+    int dW, int dH, int padW, int padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
+
+DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel);
+DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel);
+
+namespace {
+
+template <typename dest_t, typename src_t>
+static inline dest_t
+safe_downcast(src_t v)
+{
+  TORCH_CHECK(std::numeric_limits<dest_t>::min() <= v && v <= std::numeric_limits<dest_t>::max(),
+              "integer out of range");
+
+  return static_cast<dest_t>(v);
+}
+
+template <typename T>
+static inline T pooling_output_shape_pad_lr(
+    T inputSize, T kernelSize, T pad_l, T pad_r, T stride, T dilation,
+    bool ceil_mode) {
+  T outputSize = div_rtn<T>(
+      inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 +
+      (ceil_mode ? stride - 1 : 0), stride) + 1;
+  if (ceil_mode) {
+    // ensure that the last pooling starts inside the image
+    // needed to avoid problems in ceil mode
+    if ((outputSize - 1) * stride >= inputSize + pad_l) {
+      --outputSize;
+    }
+  }
+  return outputSize;
+}
+
+template <typename T>
+static inline T pooling_output_shape(
+    T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) {
+  TORCH_CHECK(stride != 0, "stride should not be zero");
+  TORCH_CHECK(pad >= 0,
+              "pad must be non-negative, but got pad: ", pad);
+  TORCH_CHECK(pad <= kernelSize / 2,
+              "pad should be at most half of kernel size, but got pad=",
+              pad, " and kernel_size=", kernelSize);
+  return pooling_output_shape_pad_lr(
+      inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode);
+}
+
+inline std::pair<int64_t, int64_t> pooling_same_mode_padding_lr(
+    int64_t inputSize, int64_t kernelSize, int64_t stride, int64_t dilation) {
+  // NOTE: with strides, the output shape is ceil(inputSize/stride)
+  auto total_padding = dilation * (kernelSize - 1);
+
+  // Prefer symmetric padding if possible
+  if (stride > 2 && (total_padding % 2 == 1)) {
+    // The floor in the output size calculation gives us a little wiggle room
+    auto wiggle_room = inputSize % stride - 1;
+    if (wiggle_room > 0) {
+      --total_padding;
+    }
+  }
+
+  auto left = total_padding / 2;
+  return {left, total_padding - left};
+}
+
+
+// AveragePool2d/DilatedMaxPool2d (forward)
+static inline void
+pool2d_shape_check(
+    const Tensor& input,
+    int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
+    int64_t nInputPlane,
+    int64_t inputHeight, int64_t inputWidth,
+    int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
+{
+  const int64_t ndim = input.ndimension();
+  const int64_t nOutputPlane = nInputPlane;
+
+  TORCH_CHECK(kW > 0 && kH > 0,
+              "kernel size should be greater than zero, but got ",
+              "kH: ", kH, " kW: ", kW);
+  TORCH_CHECK(dW > 0 && dH > 0,
+              "stride should be greater than zero, but got "
+              "dH: ", dH, " dW: ", dW);
+  TORCH_CHECK(dilationH > 0 && dilationW > 0,
+              "dilation should be greater than zero, but got ",
+              "dilationH: ", dilationH, " dilationW: ", dilationW);
+
+  bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
+  if (memory_format == at::MemoryFormat::ChannelsLast) {
+    // Expect tensor in NHWC format and allow 0-dim only for N.
+    TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0),
+                "Expected 4D (batch mode) tensor expected for input with channels_last layout"
+                " with optional 0 dim batch size for input, but got: ", input.sizes());
+  } else {
+    TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) ||
+                (ndim == 4 && valid_dims && input.size(3) != 0),
+                "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got:",
+                input.sizes());
+  }
+
+  TORCH_CHECK(kW/2 >= padW && kH/2 >= padH,
+              "pad should be smaller than or equal to half of kernel size, but got ",
+              "padW = ", padW, ", padH = ", padH, ", kW = ", kW, ", kH = ", kH);
+
+  TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1,
+              "Given input size: (",
+              nInputPlane, "x", inputHeight, "x", inputWidth, "). ",
+              "Calculated output size: (",
+              nOutputPlane, "x", outputHeight, "x", outputWidth, "). 
", + "Output size is too small"); +} + +// DilatedMaxPool2d (backward) +static inline void +max_pool2d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + const Tensor& indices, + int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW, + int64_t nInputPlane, + int64_t inputHeight, int64_t inputWidth, + int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format) +{ + pool2d_shape_check( + input, + kH, kW, dH, dW, padH, padW, dilationH, dilationW, + nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, memory_format); + + const int64_t ndim = input.ndimension(); + const int64_t nOutputPlane = nInputPlane; + + check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane); + check_dim_size(gradOutput, ndim, ndim-2, outputHeight); + check_dim_size(gradOutput, ndim, ndim-1, outputWidth); + + check_dim_size(indices, ndim, ndim-3, nOutputPlane); + check_dim_size(indices, ndim, ndim-2, outputHeight); + check_dim_size(indices, ndim, ndim-1, outputWidth); +} + +// AveragePool2d (backward) +static inline void +avg_pool2d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + int64_t /*nbatch*/, + int kH, int kW, int dH, int dW, int padH, int padW, + int64_t nInputPlane, + int64_t inputHeight, int64_t inputWidth, + int64_t outputHeight, int64_t outputWidth, + MemoryFormat memory_format) +{ + pool2d_shape_check( + input, + kH, kW, dH, dW, padH, padW, 1, 1, + nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, + memory_format); + + const int64_t ndim = input.ndimension(); + const int64_t nOutputPlane = nInputPlane; + + check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane); + check_dim_size(gradOutput, ndim, ndim-2, outputHeight); + check_dim_size(gradOutput, ndim, ndim-1, outputWidth); +} + +// AveragePool3d/DilatedMaxPool3d (forward) +static inline void +pool3d_shape_check( + const Tensor& input, + int64_t nslices, + int kT, int kH, int kW, + int dT, int dH, int dW, + int pT, int pH, int pW, + int dilationT, int dilationH, int dilationW, + int64_t itime, int64_t iheight, int64_t iwidth, + int64_t otime, int64_t oheight, int64_t owidth, + const char *fn_name, + bool check_input_size=false) +{ + const int64_t ndim = input.ndimension(); + + TORCH_CHECK(kT > 0 && kW > 0 && kH > 0, + "kernel size should be greater than zero, but got ", + "kT: ", kT, " kH: ", kH, " kW: ", kW); + TORCH_CHECK(dT > 0 && dW > 0 && dH > 0, + "stride should be greater than zero, but got ", + "dT: ", dT, " dH: ", dH, " dW: ", dW); + TORCH_CHECK(dilationT > 0 && dilationW > 0 && dilationH > 0, + "dilation should be greater than zero, but got ", + "dilationT: ", dilationT, " dilationH: ", dilationH, " dilationW: ", dilationW); + + TORCH_CHECK(ndim == 4 || ndim == 5, + fn_name, ": Expected 4D or 5D tensor for input, but got: ", input.sizes()); + + for (const auto i : c10::irange(ndim)) { + if (ndim == 5 && i == 0) { + // size of batch-dim can be 0. 
+ continue; + } + TORCH_CHECK( + input.size(i) > 0, + fn_name, + ": Expected input's non-batch dimensions to have positive length," + " but input has a shape of ", + input.sizes(), + " and non-batch dimension ", + input.size(i), + " has length zero!") + } + + if (check_input_size) { // AveragePool3d + TORCH_CHECK(itime >= kT && iheight >= kH && iwidth >= kW, + "input image ", "(T: ", itime, " H: ", iheight, " W: ", iwidth, ") smaller than ", + "kernel size ", "(kT: ", kT, " kH: ", kH, " kW: ", kW, ")"); + } + + TORCH_CHECK(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH, + "pad should be smaller than or equal to half of kernel size, but got " + "kT: ", kT, " kW: ", kW, " kH: ", kH, " padT: ", pT, " padW: ", pW, " padH: ", pH); + + TORCH_CHECK(otime >= 1 && owidth >= 1 && oheight >= 1, + "Given input size: (", + nslices,"x", itime, "x", iheight, "x", iwidth, "). ", + "Calculated output size: (", + nslices, "x", otime, "x", oheight, "x", owidth, "). ", + "Output size is too small"); +} + +static inline void +max_pool3d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + const Tensor& indices, + int64_t nslices, + int kT, int kH, int kW, + int dT, int dH, int dW, + int pT, int pH, int pW, + int dilationT, int dilationH, int dilationW, + int64_t itime, int64_t iheight, int64_t iwidth, + int64_t otime, int64_t oheight, int64_t owidth, + const char* fn_name) +{ + const int64_t ndim = input.ndimension(); + + pool3d_shape_check( + input, + nslices, + kT, kH, kW, + dT, dH, dW, + pT, pH, pW, + dilationT, dilationH, dilationW, + itime, iheight, iwidth, + otime, oheight, owidth, fn_name); + + check_dim_size(gradOutput, ndim, ndim-4, nslices); + check_dim_size(gradOutput, ndim, ndim-3, otime); + check_dim_size(gradOutput, ndim, ndim-2, oheight); + check_dim_size(gradOutput, ndim, ndim-1, owidth); + + check_dim_size(indices, ndim, ndim-4, nslices); + check_dim_size(indices, ndim, ndim-3, otime); + check_dim_size(indices, ndim, ndim-2, oheight); + check_dim_size(indices, ndim, ndim-1, owidth); +} + +static inline void +avg_pool3d_backward_shape_check( + const Tensor& input, + const Tensor& gradOutput, + int64_t nslices, + int kT, int kH, int kW, + int dT, int dH, int dW, + int pT, int pH, int pW, + int64_t itime, int64_t iheight, int64_t iwidth, + int64_t otime, int64_t oheight, int64_t owidth, + const char *fn_name) +{ + const int64_t ndim = input.ndimension(); + + pool3d_shape_check( + input, + nslices, + kT, kH, kW, + dT, dH, dW, + pT, pH, pW, + 1, 1, 1, + itime, iheight, iwidth, + otime, oheight, owidth, + fn_name, true); + + check_dim_size(gradOutput, ndim, ndim-4, nslices); + check_dim_size(gradOutput, ndim, ndim-3, otime); + check_dim_size(gradOutput, ndim, ndim-2, oheight); + check_dim_size(gradOutput, ndim, ndim-1, owidth); +} + +} // namespace + +} // at::native +} // at diff --git a/voice_bridge/torch/include/ATen/native/Pow.h b/voice_bridge/torch/include/ATen/native/Pow.h new file mode 100644 index 0000000000000000000000000000000000000000..068482ee300c73d7bd185482f85d109e400e9cc8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Pow.h @@ -0,0 +1,69 @@ +#pragma once + +#include + +namespace c10 { +class Scalar; +} + +namespace at { + +struct TensorIterator; +struct TensorIteratorBase; + +namespace native { + +#if defined(__CUDACC__) || defined(__HIPCC__) +#define HOST_DEVICE __host__ __device__ +#else +#define HOST_DEVICE +#endif + +// integral power in pytorch allows for negative exponents, giving truncated integral results. +// e.g. 
since 2**-1==0.5, the truncated integral result is zero. 1**negative_exponent is the
+// only non-zero result ((-1)**negative_exponent likewise gives ±1, handled below).
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
+static inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
+  T result = 1;
+  while (b) {
+    if (b & 1) {
+      result *= a;
+    }
+    b /= 2;
+    a *= a;
+  }
+  return result;
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, T>::type* = nullptr>
+static inline HOST_DEVICE T powi(T a, T b) {
+  return powi_impl(a, b);
+}
+
+template <typename T,
+          typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, T>::type* = nullptr>
+static inline HOST_DEVICE T powi(T a, T b) {
+  if ( b < 0 ) {
+    if ( a == 1 ) {
+      return 1;
+    } else if ( a == -1 ) {
+      auto negative = (-b) % static_cast<T>(2);
+      return negative ? -1 : 1;
+    } else {
+      return 0;
+    }
+  }
+  return powi_impl(a, b);
+}
+
+using pow_tensor_tensor_fn = void (*)(TensorIteratorBase&);
+using pow_tensor_scalar_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
+
+DECLARE_DISPATCH(pow_tensor_tensor_fn, pow_tensor_tensor_stub);
+DECLARE_DISPATCH(pow_tensor_scalar_fn, pow_tensor_scalar_stub);
+
+} // namespace native
+
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/native/RNN.h b/voice_bridge/torch/include/ATen/native/RNN.h
new file mode 100644
index 0000000000000000000000000000000000000000..2bdb9becf4fa529b91d974c9f2d26e697e42af74
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/RNN.h
@@ -0,0 +1,52 @@
+#pragma once
+
+#include
+#include
+
+namespace at { namespace native {
+
+using lstm_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool, bool);
+using rnn_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool, bool);
+using lstm_packed_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool);
+using rnn_packed_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool);
+
+DECLARE_DISPATCH(lstm_fn, lstm_cudnn_stub);
+DECLARE_DISPATCH(lstm_fn, lstm_miopen_stub);
+DECLARE_DISPATCH(rnn_fn, gru_cudnn_stub);
+DECLARE_DISPATCH(rnn_fn, gru_miopen_stub);
+DECLARE_DISPATCH(rnn_fn, rnn_tanh_cudnn_stub);
+DECLARE_DISPATCH(rnn_fn, rnn_tanh_miopen_stub);
+DECLARE_DISPATCH(rnn_fn, rnn_relu_cudnn_stub);
+DECLARE_DISPATCH(rnn_fn, rnn_relu_miopen_stub);
+DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_cudnn_stub);
+DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_miopen_stub);
+DECLARE_DISPATCH(rnn_packed_fn, gru_packed_cudnn_stub);
+DECLARE_DISPATCH(rnn_packed_fn, gru_packed_miopen_stub);
+DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_cudnn_stub);
+DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_miopen_stub);
+DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_cudnn_stub);
+DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_miopen_stub);
+
+inline void check_attributes(const Tensor& input, const TensorList& params, const TensorList& hiddens, bool check_dtype=false) {
+  auto input_device = input.device();
+  auto input_dtype = input.scalar_type();
+
+  auto check_tensors = [&](const std::string& name, const Tensor& t) {
+    if (!t.defined()) return;
+    auto t_device = t.device();
+    TORCH_CHECK(input_device == t_device,
+                "Input and ", name, " tensors are not at the same device, found input tensor at ",
+                input_device, " and ", name, " tensor at ", t_device);
+    if (check_dtype) {
+      auto t_dtype = t.scalar_type();
+      TORCH_CHECK(input_dtype == t_dtype,
+                  "Input and ", name, "
tensors are not the same dtype, found input tensor with ", + input_dtype, " and ", name, " tensor with ", t_dtype); + } + }; + + for (auto h : hiddens) check_tensors("hidden", h); + for (auto p : params) check_tensors("parameter", p); +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/RangeFactories.h b/voice_bridge/torch/include/ATen/native/RangeFactories.h new file mode 100644 index 0000000000000000000000000000000000000000..df3b43856e0980841cace5500a3e009e1501c8a0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/RangeFactories.h @@ -0,0 +1,12 @@ +#include +#include + +namespace at { +struct TensorIterator; + +namespace native { + +DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, const Scalar&), arange_stub); +DECLARE_DISPATCH(void(*)(TensorIterator&, const Scalar&, const Scalar&, int64_t), linspace_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/ReduceAllOps.h b/voice_bridge/torch/include/ATen/native/ReduceAllOps.h new file mode 100644 index 0000000000000000000000000000000000000000..ec591e79efac50d2752b406dfe0200f6ecfd0a37 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/ReduceAllOps.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace at { +class Tensor; +} + +namespace at { namespace native { + +using reduce_all_fn = void (*)(Tensor & result, const Tensor & self); +using reduce_min_max_fn = void (*)(Tensor & max_result, Tensor & min_result, const Tensor & self); +DECLARE_DISPATCH(reduce_all_fn, min_all_stub); +DECLARE_DISPATCH(reduce_all_fn, max_all_stub); + +}} diff --git a/voice_bridge/torch/include/ATen/native/ReduceOps.h b/voice_bridge/torch/include/ATen/native/ReduceOps.h new file mode 100644 index 0000000000000000000000000000000000000000..c14033de634d04f194d3f458a677b0c95ee3cd92 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/ReduceOps.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include + +namespace c10 { +class Scalar; +} + +namespace at { +struct TensorIterator; +class Tensor; +} + +namespace at { namespace native { + +using reduce_fn = void(*)(TensorIterator &); + +DECLARE_DISPATCH(reduce_fn, sum_stub); +DECLARE_DISPATCH(reduce_fn, nansum_stub); +DECLARE_DISPATCH(reduce_fn, prod_stub); +DECLARE_DISPATCH(reduce_fn, mean_stub); +DECLARE_DISPATCH(reduce_fn, and_stub); +DECLARE_DISPATCH(reduce_fn, or_stub); +DECLARE_DISPATCH(reduce_fn, min_values_stub); +DECLARE_DISPATCH(reduce_fn, max_values_stub); +DECLARE_DISPATCH(reduce_fn, argmax_stub); +DECLARE_DISPATCH(reduce_fn, argmin_stub); + +using reduce_std_var_function = + void (*)(TensorIterator&, int64_t correction, bool take_sqrt); +DECLARE_DISPATCH(reduce_std_var_function, std_var_stub); + +using reduce_norm_fn = + void (*)(Tensor&, const Tensor&, const c10::Scalar&, c10::optional); +DECLARE_DISPATCH(reduce_norm_fn, norm_kernel); + +using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&); +DECLARE_DISPATCH(reduce_fn_flag, norm_stub); + +using structured_cum_fn = void (*)(const Tensor&, const Tensor&, int64_t); +using cum_fn = void (*)(Tensor&, const Tensor&, int64_t); +DECLARE_DISPATCH(structured_cum_fn, cumsum_stub); +DECLARE_DISPATCH(structured_cum_fn, cumprod_stub); +DECLARE_DISPATCH(cum_fn, logcumsumexp_stub); + +DECLARE_DISPATCH(void (*)(const Tensor&, int64_t, bool, Tensor&, Tensor&), aminmax_stub); +DECLARE_DISPATCH(void (*)(const Tensor&, Tensor&, Tensor&), aminmax_allreduce_stub); + +// Used in cuda/Normalization.cu +TORCH_API std::tuple var_mean_out( + Tensor &result1, 
Tensor &result2, const Tensor &self, IntArrayRef dim,
+    int64_t correction, bool keepdim);
+
+}} // namespace at::native
diff --git a/voice_bridge/torch/include/ATen/native/ReduceOpsUtils.h b/voice_bridge/torch/include/ATen/native/ReduceOpsUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..9db9802ea788b6fc9d4b64734916ae0922ef97e3
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/ReduceOpsUtils.h
@@ -0,0 +1,423 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include
+#else
+#include
+#include
+#endif
+
+namespace at { namespace native {
+
+// Maximum and minimum possible scalar values, including infinities
+template <typename scalar_t>
+constexpr scalar_t upper_bound() {
+  using lim = std::numeric_limits<scalar_t>;
+  return lim::has_infinity ? lim::infinity() : lim::max();
+}
+
+template <typename scalar_t>
+constexpr scalar_t lower_bound() {
+  using lim = std::numeric_limits<scalar_t>;
+  return lim::has_infinity ? -lim::infinity() : lim::lowest();
+}
+
+static inline Tensor restride_dim(
+  const Tensor& src, int64_t dim,
+  IntArrayRef replacement_shape
+) {
+  auto strides = ensure_nonempty_vec(src.strides().vec());
+  strides[dim] = 0;
+  return src.as_strided(replacement_shape, strides);
+}
+
+inline void _dimreduce_setup(const Tensor &result, const Tensor &self,
+                             int64_t dim) {
+  IntArrayRef self_sizes = self.sizes();
+  std::vector<int64_t> result_sizes;
+  result_sizes.insert(result_sizes.end(), self_sizes.begin(), self_sizes.end());
+  result_sizes[dim] = 1;
+  result.resize_(result_sizes);
+}
+
+inline bool _dimreduce_return_trivial(const Tensor &result, const Tensor &self,
+                                      const Scalar& ident, int64_t dim, bool keepdim) {
+  if (self.numel() == 1 && self.ndimension() == 0) {
+    result.resize_({});
+    result.fill_(self);
+    return true;
+  }
+  // Return identity
+  if (self.numel() == 0) {
+    _dimreduce_setup(result, self, dim);
+    result.fill_(ident);
+    if (!keepdim) result.squeeze_(dim);
+    return true;
+  }
+  return false;
+}
+
+inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self,
+                                               int64_t /*dim*/, bool /*keepdim*/, const char* /*fn_name*/) {
+  if (self.numel() == 1 && self.ndimension() == 0) {
+    result.resize_({});
+    result.fill_(self);
+    return true;
+  }
+
+  return false;
+}
+
+inline c10::optional<Tensor> _allreduce_return_trivial(
+    const Tensor& self,
+    const Scalar& ident) {
+  // Return identity
+  if (self.numel() == 0) {
+    return at::scalar_tensor(ident, self.options());
+  }
+  return c10::nullopt;
+}
+
+#define OPTION_TYPE_EQUALITY_CHECK(option, out, self) \
+{ \
+  TORCH_CHECK( \
+      out.option() == self.option(), \
+      "expected ", #option, " ", \
+      self.option(), \
+      " but found ", out.option()) \
+}
+
+static inline void check_scalar_type_device_layout_equal(const Tensor& out, const Tensor& self) {
+  OPTION_TYPE_EQUALITY_CHECK(scalar_type, out, self);
+  OPTION_TYPE_EQUALITY_CHECK(device, out.options(), self.options());
+  OPTION_TYPE_EQUALITY_CHECK(layout, out.options(), self.options());
+}
+
+static inline Tensor integer_upcast(const Tensor& self, optional<ScalarType> dtype) {
+  ScalarType scalarType = self.scalar_type();
+  ScalarType upcast_scalarType = dtype.value_or(at::isIntegralType(scalarType, /*includeBool=*/true) ? ScalarType::Long : scalarType);
+  return self.toType(upcast_scalarType);
+}
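+// [Editor's aside; illustration, not part of the upstream header.] integer_upcast
+// is why reductions over integral (and bool) tensors default to int64 results
+// unless an explicit dtype is given, e.g.:
+//
+//   auto t = at::ones({4}, at::kInt);
+//   t.sum().scalar_type();          // kLong: upcast applied
+//   t.sum(at::kInt).scalar_type();  // kInt: explicit dtype wins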
+using DimMask = TensorIterator::DimMask;
+
+static DimVector make_dim_vector(OptionalIntArrayRef opt_dims, int64_t ndim) {
+  if (opt_dims.has_value()) {
+    return DimVector(opt_dims.value());
+  } else {
+    std::vector<int64_t> all_dims(ndim);
+    std::iota(all_dims.begin(), all_dims.end(), 0);
+    return DimVector(all_dims);
+  }
+}
+
+static DimMask make_dim_mask(OptionalIntArrayRef opt_dims, int64_t ndim) {
+  DimMask mask;
+  if (opt_dims.has_value()) {
+    auto dims = opt_dims.value();
+    if (dims.empty()) {
+      mask = DimMask().flip();
+    } else {
+      mask = at::dim_list_to_bitset(dims, ndim);
+    }
+  } else {
+    mask = DimMask().flip();
+  }
+  return mask;
+}
+
+inline DimVector shape_from_dim_mask(const Tensor& self, DimMask mask, bool keepdim) {
+  auto shape = DimVector(self.sizes());
+  for (int dim = shape.size() - 1; dim >= 0; dim--) {
+    if (mask[dim]) {
+      if (keepdim) {
+        shape[dim] = 1;
+      } else {
+        shape.erase(shape.begin() + dim);
+      }
+    }
+  }
+  return shape;
+}
+
+static void resize_reduction_result(
+    Tensor& result, const Tensor& self, DimMask mask, bool keepdim,
+    ScalarType /*dtype*/)
+{
+  auto shape = shape_from_dim_mask(self, mask, keepdim);
+  TORCH_CHECK(result.defined(), "Cannot create a new tensor inside a reduction op. You likely tried to call an operator with an out argument but the out argument was an undefined tensor.");
+  at::native::resize_output(result, shape);
+}
+
+inline Tensor create_reduction_result(
+    const Tensor& self, at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype
+) {
+  DimMask mask = make_dim_mask(dim, self.dim());
+  auto shape = shape_from_dim_mask(self, mask, keepdim);
+  return at::empty(shape, self.options().dtype(dtype));
+}
+
+static Tensor review_reduce_result(const Tensor& result, int ndim, DimMask mask, bool keepdim) {
+  if (keepdim) {
+    return result;
+  }
+  auto shape = DimVector(result.sizes());
+  auto stride = DimVector(result.strides());
+  for (const auto dim : c10::irange(ndim)) {
+    if (mask[dim]) {
+      shape.insert(shape.begin() + dim, 1);
+      stride.insert(stride.begin() + dim, 0);
+    }
+  }
+  return result.as_strided(shape, stride);
+}
+
+static TensorIterator make_reduction(
+    const char* name, Tensor& result, const Tensor& self,
+    at::OptionalIntArrayRef dim_opt,
+    bool keepdim, ScalarType in_dtype, ScalarType out_dtype) {
+  // check that result type and dtype match if provided
+  TORCH_CHECK(
+      !result.defined() || result.scalar_type() == out_dtype,
+      name, ": provided dtype must match dtype of result. Got ",
+      toString(result.scalar_type()),
+      " and ",
+      toString(out_dtype),
+      ".");
+  // dim={} performs an all-reduce, same as dim=None
+  IntArrayRef dim = dim_opt.value_or(IntArrayRef{});
+  int64_t ndim = self.dim();
+  auto mask = make_dim_mask(dim, ndim);
+  resize_reduction_result(result, self, mask, keepdim, out_dtype);
+  auto viewed_result = review_reduce_result(result, ndim, mask, keepdim);
+  namedinference::propagate_names_for_reduction(result, self, dim, keepdim);
+  if (self.scalar_type() == in_dtype) {
+    return TensorIterator::reduce_op(viewed_result, self);
+  }
+  return TensorIterator::reduce_op(viewed_result, self.to(in_dtype));
+}
+
+static C10_UNUSED TensorIterator make_reduction(
+    const char* name, Tensor& result, const Tensor& self,
+    at::OptionalIntArrayRef dim, bool keepdim, ScalarType out_dtype) {
+  // special case for type promotion in mixed precision, improves computational
+  // efficiency.
+  // We do
+ // not generalize this to common mismatched input/output types to avoid cross + // product of templated kernel launches. + const bool gpu_lowp_to_f32 = ( + self.is_cuda() && (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && out_dtype == kFloat); + auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type() + : self.is_complex() ? c10::toComplexType(out_dtype) + : out_dtype; + return make_reduction(name, result, self, dim, keepdim, in_dtype, out_dtype); +} + +static TensorIterator make_reduction( + const char* name, Tensor& result1, Tensor& result2, const Tensor& self, + at::OptionalIntArrayRef dim_opt, bool keepdim, ScalarType dtype1, + ScalarType dtype2) { + // check that result type and dtype match if provided + TORCH_CHECK( + (!result1.defined() || result1.scalar_type() == dtype1) && (!result2.defined() || result2.scalar_type() == dtype2), + name, ": provided dtype must match dtype of result. Got ", + toString(result1.scalar_type()), toString(result2.scalar_type()), + " and ", + toString(dtype1), toString(dtype2), + "."); + + // dim={} performs an all-reduce, same as dim=None + auto dim = dim_opt.value_or(IntArrayRef{}); + int64_t ndim = self.dim(); + DimMask mask = make_dim_mask(dim, ndim); + resize_reduction_result(result1, self, mask, keepdim, dtype1); + auto viewed_result1 = review_reduce_result(result1, ndim, mask, keepdim); + + resize_reduction_result(result2, self, mask, keepdim, dtype2); + auto viewed_result2 = review_reduce_result(result2, ndim, mask, keepdim); + + namedinference::propagate_names_for_reduction(result1, self, dim, keepdim); + namedinference::propagate_names_for_reduction(result2, self, dim, keepdim); + + // special case for type promotion in mixed precision, improves computational + // efficiency. + // We don't generalize this to common mismatched input/output types to avoid cross + // product of templated kernel launches. + if (self.scalar_type() == dtype1 || + (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) { + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self); + } + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1)); +} + +static C10_UNUSED TensorIterator make_reduction( + const char* name, Tensor& result1, Tensor& result2, const Tensor& self, + at::OptionalIntArrayRef dim, bool keepdim, ScalarType dtype) { + return make_reduction(name, result1, result2, self, dim, keepdim, dtype, dtype); +} + +static void zero_numel_check_dims(const Tensor& self, const int64_t dim, const char *fn_name) { + if (self.ndimension() == 0) { + TORCH_CHECK_INDEX(dim == 0 || dim == -1, fn_name, + ": Expected reduction dim -1 or 0 for scalar but got ", dim); + } + else { + TORCH_CHECK_INDEX(self.size(dim) != 0, fn_name, + ": Expected reduction dim ", dim, " to have non-zero size."); + } +} + +static void zero_numel_check_dims(const Tensor& self, const IntArrayRef dim, const char *fn_name) { + TORCH_CHECK( + !dim.empty(), + fn_name, ": Expected reduction dim to be specified for input.numel() == 0. 
", + "Specify the reduction dim with the 'dim' argument."); + for (const int64_t d : dim) { + zero_numel_check_dims(self, d, fn_name); + } +} + +static std::vector get_zero_numel_tensor_size( + const Tensor& self, + const int64_t dim, + const bool keepdim, + const char* fn_name) { + TORCH_INTERNAL_ASSERT(self.numel() == 0, fn_name, ": Expected self.numel() == 0."); + zero_numel_check_dims(self, dim, fn_name); + std::vector sizes; + if (keepdim) { + sizes = self.sizes().vec(); + sizes[dim] = 1; + } + else { + for (const auto d : c10::irange(self.dim())) { + if (d != dim) { + sizes.push_back(self.sizes()[d]); + } + } + } + return sizes; +} + +// Resize the result tensor and indices when result.numel() == 0 depending on values of +// dim and keepdim for returning tensors containing reduction results. +// This function should be called when you are reducing a zero-numel tensor and want to +// resize the output and return it. This function exists for resizing zero-numel +// tensors when the size of the reduction dimension is non-zero. +static C10_UNUSED void zero_numel_tensor_resize(Tensor& result, Tensor& result_indices, + const Tensor& self, const int64_t dim, + const bool keepdim, const char *fn_name) { + auto sizes = get_zero_numel_tensor_size(self, dim, keepdim, fn_name); + at::native::resize_output(result, sizes); + at::native::resize_output(result_indices, sizes); +} + +} // native + +namespace meta { + +static C10_UNUSED DimVector get_reduction_shape( + const Tensor& self, + IntArrayRef dims, + bool keepdim) { + auto mask = native::make_dim_mask(dims, self.dim()); + return native::shape_from_dim_mask(self, mask, keepdim); +} + +static void resize_reduction( + impl::MetaBase& meta, + const Tensor& self, + OptionalIntArrayRef opt_dims, + bool keepdim, + ScalarType out_dtype) { + DimVector dims_ = at::native::make_dim_vector(opt_dims, self.dim()); + maybe_wrap_dims(dims_, self.dim()); + auto shape = get_reduction_shape(self, dims_, keepdim); + meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype)); + namedinference::propagate_names_for_reduction( + meta.maybe_get_output(), self, dims_, keepdim); +} + +static void resize_reduction_with_indices( + impl::MetaBase& meta, + const Tensor& self, + IntArrayRef dims, + bool keepdim, + ScalarType out_dtype) { + DimVector dims_(dims); + maybe_wrap_dims(dims_, self.dim()); + auto shape = get_reduction_shape(self, dims_, keepdim); + meta.set_output_raw_strided(0, shape, {}, self.options().dtype(out_dtype)); + meta.set_output_raw_strided(1, shape, {}, self.options().dtype(kLong)); + namedinference::propagate_names_for_reduction( + meta.maybe_get_output(0), self, dims_, keepdim); + namedinference::propagate_names_for_reduction( + meta.maybe_get_output(1), self, dims_, keepdim); +} + +static TensorIterator make_reduction( + const Tensor& self, + const Tensor& result, + OptionalIntArrayRef opt_dims, + bool keepdim, + ScalarType in_dtype) { + int64_t ndim = self.dim(); + auto mask = at::native::make_dim_mask(opt_dims, ndim); + auto viewed_result = + at::native::review_reduce_result(result, ndim, mask, keepdim); + if (self.scalar_type() == in_dtype) { + return TensorIterator::reduce_op(viewed_result, self); + } + return TensorIterator::reduce_op(viewed_result, self.to(in_dtype)); +} + +static TensorIterator make_reduction( + const Tensor& self, + const Tensor& result1, + const Tensor& result2, + IntArrayRef dims, + bool keepdim, + ScalarType dtype1, + ScalarType /*dtype2*/) { + int64_t ndim = self.dim(); + auto mask = 
at::native::make_dim_mask(dims, ndim); + auto viewed_result1 = at::native::review_reduce_result(result1, ndim, mask, keepdim); + auto viewed_result2 = at::native::review_reduce_result(result2, ndim, mask, keepdim); + // special case for type promotion in mixed precision, improves computational efficiency. + // We don't generalize this to common mismatched input/output types to avoid cross product + // of templated kernel launches. + if (self.scalar_type() == dtype1 || + (self.is_cuda() && self.scalar_type() == kHalf && dtype1 == kFloat)) { + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self); + } + return TensorIterator::reduce_op(viewed_result1, viewed_result2, self.to(dtype1)); +} + +static C10_UNUSED TensorIterator make_reduction_from_out_ty( + const Tensor& self, + const Tensor& result, + OptionalIntArrayRef opt_dims, + bool keepdim, + ScalarType out_dtype) { + // special case for type promotion in mixed precision, improves computational + // efficiency. + // not generalize this to common mismatched input/output types to avoid cross + // product of templated kernel launches. + const bool gpu_lowp_to_f32 = + (self.is_cuda() && + (self.scalar_type() == kHalf || self.scalar_type() == kBFloat16) && + out_dtype == kFloat); + auto in_dtype = gpu_lowp_to_f32 ? self.scalar_type() : out_dtype; + return make_reduction(self, result, opt_dims, keepdim, in_dtype); +} + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/Repeat.h b/voice_bridge/torch/include/ATen/native/Repeat.h new file mode 100644 index 0000000000000000000000000000000000000000..dadbfb0c2374bb2071de1906c0c293826a05227e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Repeat.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#endif + +namespace at { +namespace native { + +template < + typename index_t, + void compute(index_t*, int64_t*, index_t*, int64_t, int64_t)> +static inline Tensor repeat_interleave_common( + const Tensor& repeats, + c10::optional output_size) { + TORCH_CHECK( + repeats.dim() == 1, "repeat_interleave only accept 1D vector as repeat"); + TORCH_CHECK( + repeats.scalar_type() == at::kLong || repeats.scalar_type() == at::kInt, + "repeats has to be Long or Int tensor"); + if (repeats.size(0) == 0) { + return at::empty_like(repeats, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + } + Tensor repeats_ = repeats.contiguous(); + Tensor cumsum = repeats.cumsum(0); + int64_t total; + if (output_size.has_value()) { + total = output_size.value(); + } else { + total = cumsum[-1].item(); + TORCH_CHECK( + (repeats >= 0).all().item(), "repeats can not be negative"); + } + + Tensor result = at::empty({total}, repeats.options()); + index_t* repeat_ptr = repeats_.data_ptr(); + int64_t* cumsum_ptr = cumsum.data_ptr(); + index_t* result_ptr = result.data_ptr(); + compute(repeat_ptr, cumsum_ptr, result_ptr, repeats.size(0), total); + return result; +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/Resize.h b/voice_bridge/torch/include/ATen/native/Resize.h new file mode 100644 index 0000000000000000000000000000000000000000..0bed4232695a461aedaa249b8b50252414567d7e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Resize.h @@ -0,0 +1,187 @@ +#pragma once + +#include +#include +#include +#include + +#include + + +namespace at { namespace native { + +// TODO: make all operations that resize given outputs use this function +// for consistency and 
maintainability. +// Some operations like `cat` might not be able to make the use of +// resize_output directly. For more details to understand how it works in `cat`, +// see https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362 +// Resizes outputs +// Functions accepting output tensors, like with the "out" kwarg, should +// call this function to handle resizing their output tensor. +// Issues a warning if the output tensor has one or more elements and +// needs resizing +// NOTE: In the future the warning will become an error +// Returns a bool saying whether or not the resize actually happened or not +TORCH_API bool resize_output(const Tensor& output, IntArrayRef shape); + +// Utility for resize_output +// Returns a bool saying resize should happen or not and +// raises a warning if resizing for one or more elements +TORCH_API bool resize_output_check(const Tensor& output, IntArrayRef shape); + +TORCH_API void resize_bytes_cpu(StorageImpl* storage, size_t size_bytes); + +static inline void maybe_resize_storage_cpu(TensorImpl* self, size_t new_size_bytes) { + // It does not make sense to try to resize a storage + // to hold 0 elements, and this can break + // if storage_offset is positive but + // new_size is 0, so just bail in that case + // (same comment is in cuda/Resize.h) + if (self->numel() == 0) { + return; + } + + const Storage& storage = self->unsafe_storage(); + if (!storage) { + auto new_storage = c10::make_intrusive( + StorageImpl::use_byte_size_t(), + new_size_bytes, + c10::GetCPUAllocator(), + true); + self->set_storage_keep_dtype(std::move(new_storage)); + } else if (new_size_bytes > storage.nbytes()) { + resize_bytes_cpu(storage.unsafeGetStorageImpl(), new_size_bytes); + } +} + +inline TensorImpl* resize_impl_cpu_( + TensorImpl* self, + IntArrayRef size, + at::OptionalIntArrayRef stride, + bool resize_storage = true) { + if (self->sizes() == size && (!stride || self->strides() == stride.value())) { + return self; + } + + const auto itemsize = self->dtype().itemsize(); + const auto storage_offset = self->storage_offset(); + size_t storage_size = 1; + if (stride) { + self->set_sizes_and_strides(size, *stride); + storage_size = at::detail::computeStorageNbytes( + size, *stride, itemsize, storage_offset); + } else { + self->set_sizes_contiguous(size); + storage_size = at::detail::computeStorageNbytesContiguous( + size, itemsize, storage_offset); + } + + if (resize_storage) { + maybe_resize_storage_cpu(self, storage_size); + } + + return self; +} + +template +T maybe_convert_symint(c10::SymInt) = delete; + +template <> +inline c10::SymInt maybe_convert_symint(c10::SymInt x) { return x; } + +template <> +inline int64_t maybe_convert_symint(c10::SymInt x) { return x.expect_int(); } + +template +static inline void checkInBoundsForStorage( + ArrayRef size, + ArrayRef stride, + T storage_offset, + const caffe2::TypeMeta data_type, + const Storage& new_storage) { + T storage_size_bytes = + at::detail::computeStorageNbytes(size, stride, data_type.itemsize()); + T storage_offset_bytes = storage_offset * data_type.itemsize(); + if (storage_size_bytes == 0) { + // NB: (a tensor with arbitrary 0 dims)'s storage can have any numel. 
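+// [Editor's aside; illustration, not part of the upstream header.] Worked example
+// of the bounds check below: size={2, 3}, stride={3, 1}, storage_offset=2, float
+// (itemsize 4). computeStorageNbytes needs (1 + (2-1)*3 + (3-1)*1) * 4 = 24 bytes,
+// plus 2 * 4 = 8 offset bytes, so the new storage must hold at least 32 bytes for
+// the TORCH_CHECK below to pass.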
+ return; + } + T new_storage_size_bytes = maybe_convert_symint(new_storage.sym_nbytes()); + TORCH_CHECK( + storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes, + "setStorage: sizes ", + size, + ", strides ", + stride, + "," + " storage offset ", + storage_offset, + ", and itemsize ", + data_type.itemsize(), + " requiring a storage size of ", + storage_size_bytes + storage_offset_bytes, + " are out of bounds for storage of size ", + new_storage_size_bytes); +} + +template +static inline void checkSetStorage(Tensor& result, Storage storage, T storage_offset, + ArrayRef size, ArrayRef stride) { + // FIXME: stride should be optional + if (stride.data()) { + TORCH_CHECK(size.size() == stride.size(), "unequal size length (", size.size(), + ") and stride length (", stride.size(), ")"); + } + +#ifdef DEBUG + TORCH_CHECK(size.size() <= INT_MAX, "size length (", size.size(), ") greater than INT_MAX"); +#endif + + // storage: note this can't be replaced with result.set_(storage) as the semantics of that + // function is to set the tensor size to be equal to the size of the storage. + if (!result.storage().is_alias_of(storage)) { + // Caffe2 might have tensors whose storages are null, but we + // don't allow it in PyTorch. + TORCH_INTERNAL_ASSERT(storage); + TORCH_INTERNAL_ASSERT(result.storage()); + + // We used to allow this, but this breaks device caching. + // Let's put an actual error message for this one. + TORCH_CHECK(result.storage().device() == storage.device(), + "Attempted to set the storage of a tensor on device \"", result.storage().device(), + "\" to a storage on different device \"", storage.device(), + "\". This is no longer allowed; the devices must match."); + result.unsafeGetTensorImpl()->set_storage_keep_dtype(storage); + } + + // storageOffset + TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset); +} + +/** + * Set self's sizes, strides, and storage_offset. + * (size, stride, storage_offset) must be in bounds for self's storage. 
+ */ +template +inline void setStrided( + const Tensor& self, + ArrayRef size, + ArrayRef stride, + T storage_offset) { + TORCH_CHECK(size.size() == stride.size(), "mismatch in length of strides and shape"); + for (auto val : stride) { + TORCH_CHECK(val >= 0, + "as_strided: Negative strides are not supported at the moment, " + "got strides: ", stride); + } + + auto* self_ = self.unsafeGetTensorImpl(); + checkInBoundsForStorage( + size, stride, storage_offset, self_->dtype(), self_->storage()); + + /* storage offset */ + TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset); + self_->set_sizes_and_strides(size, stride, c10::make_optional(storage_offset)); +} + +}} diff --git a/voice_bridge/torch/include/ATen/native/ResizeCommon.h b/voice_bridge/torch/include/ATen/native/ResizeCommon.h new file mode 100644 index 0000000000000000000000000000000000000000..1de4d74b3af686a7b564af285fec84614b409b1c --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/ResizeCommon.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include +#include + +namespace at { namespace native { + +template +inline T storage_size_for(ArrayRef size, ArrayRef stride) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(size.size() == stride.size(), + "storage_size_for(size, stride) requires that size and stride ", + "have the same size as a precondition."); + T storage_size = 1; + for (const auto dim : c10::irange(size.size())) { + if (size[dim] == 0) { + storage_size = 0; + break; + } + storage_size += (size[dim] - 1) * stride[dim]; + } + return storage_size; +} + +inline const Tensor& resize_named_tensor_( + const Tensor& self, + IntArrayRef size, + c10::optional optional_memory_format) { + TORCH_INTERNAL_ASSERT(self.has_names()); + TORCH_CHECK( + self.sizes() == size, + "Cannot resize named tensor with resize_ or resize_as_ (tried to resize " + "Tensor", + self.names(), + " with size ", + self.sizes(), + " to ", + size, + "). This may be caused by passing a named tensor ", + "as an `out=` argument; please ensure that the sizes are the same. "); + TORCH_CHECK( + !optional_memory_format.has_value(), + "Unsupported memory format for named tensor resize ", + optional_memory_format.value()); + return self; +} +}} diff --git a/voice_bridge/torch/include/ATen/native/ScatterGatherChecks.h b/voice_bridge/torch/include/ATen/native/ScatterGatherChecks.h new file mode 100644 index 0000000000000000000000000000000000000000..92e1edeb5fe0293c705acbd5938e5cdbd7335285 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/ScatterGatherChecks.h @@ -0,0 +1,128 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { namespace native { + +namespace { + +// checks whether index.dtype == int64 +// and self.dtype == src.dtype if src is a Tensor +static void scatter_gather_dtype_check( + const std::string& method_name, + const Tensor& self, + const Tensor& index, + const c10::optional& src_opt = c10::nullopt +) { + if (index.numel() != 0) { + TORCH_CHECK( + index.scalar_type() == at::ScalarType::Long, + method_name, "(): Expected dtype int64 for index" + ); + } + + if (src_opt.has_value()) { + auto src = src_opt.value(); + TORCH_CHECK( + self.scalar_type() == src.scalar_type(), + method_name, "(): Expected self.dtype to be equal to src.dtype" + ); + } +} + +// Used for `gather`-like methods +// Note: self means the input tensor here +// Test: +// 1. index.size(d) <= self.size(d) for all d != dim +// 2. 
index.dim() == self.dim() +static C10_UNUSED void gather_shape_check(const Tensor& self, int64_t dim, + const Tensor& index +) { + auto self_dims = ensure_nonempty_dim(self.dim()); + TORCH_CHECK(self_dims == ensure_nonempty_dim(index.dim()), + "Index tensor must have the same number of dimensions as input tensor" + ); + + for (const auto i : c10::irange(self_dims)) { + if (i != dim) { + TORCH_CHECK( + ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i), + "Size does not match at dimension ", i, + " expected index ", index.sizes(), + " to be smaller than self ", self.sizes(), + " apart from dimension ", dim + ); + } + } +} + +// Used for `scatter` and `scatter_add` +// Tests: +// 1. index.size(d) <= self.size(d) for all d != dim +// 2. index.size(d) <= src.size(d) for all d if src is a Tensor +// 3. index.dim() == self.dim() == src.dim() +static C10_UNUSED void scatter_shape_check( + const Tensor& self, int64_t dim, const Tensor& index, + const c10::optional& src_opt = c10::nullopt +) { + if (index.numel() == 0) return; + TORCH_CHECK( + ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()), + "Index tensor must have the same number of dimensions as self tensor" + ); + + bool is_wrong_shape = false; + int64_t self_dims = ensure_nonempty_dim(self.dim()); + + // Check: index.size(d) <= self.size(d) for all d != dim + for (const auto d : c10::irange(self_dims)) { + int64_t index_d_size = ensure_nonempty_size(index, d); + if (d == dim) continue; + if (index_d_size > ensure_nonempty_size(self, d)) { + is_wrong_shape = true; + break; + } + } + + // Check: index.size(d) <= src.size(d) for all d if src is Tensor + if (!is_wrong_shape && src_opt.has_value()) { + auto src = src_opt.value(); + for (const auto d : c10::irange(self_dims)) { + int64_t index_d_size = ensure_nonempty_size(index, d); + if (index_d_size > ensure_nonempty_size(src, d)) { + is_wrong_shape = true; + break; + } + } + } + + if (src_opt.has_value()) { + auto src = src_opt.value(); + + TORCH_CHECK( + ensure_nonempty_dim(src.dim()) == ensure_nonempty_dim(index.dim()), + "Index tensor must have the same number of dimensions as src tensor" + ); + + TORCH_CHECK(!is_wrong_shape, + "Expected index ", index.sizes(), + " to be smaller than self ", self.sizes(), + " apart from dimension ", dim, + " and to be smaller size than src ", src.sizes() + ); + } + else { + TORCH_CHECK(!is_wrong_shape, + "Expected index ", index.sizes(), + " to be smaller than self ", self.sizes(), + " apart from dimension ", dim + ); + } +} + +} // anonymous namespace + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/SegmentReduce.h b/voice_bridge/torch/include/ATen/native/SegmentReduce.h new file mode 100644 index 0000000000000000000000000000000000000000..7fb1512fd4c28a2968c259f572b1c67c52d33bb6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/SegmentReduce.h @@ -0,0 +1,51 @@ +#pragma once + +#include +#include +#include + +namespace at { +class Tensor; + +namespace native { + +enum SegmentReductionType { MAX, MEAN, MIN, SUM, PROD}; + +using segment_reduce_lengths_fn = Tensor (*)( + SegmentReductionType, + const Tensor&, + const Tensor&, + int64_t, + const c10::optional&); +DECLARE_DISPATCH(segment_reduce_lengths_fn, _segment_reduce_lengths_stub); + +using segment_reduce_offsets_fn = Tensor (*)( + SegmentReductionType, + const Tensor&, + const Tensor&, + int64_t, + const c10::optional&); +DECLARE_DISPATCH(segment_reduce_offsets_fn, _segment_reduce_offsets_stub); + +using 
segment_reduce_lengths_backward_fn = Tensor (*)( + const Tensor&, + const Tensor&, + const Tensor&, + SegmentReductionType, + const Tensor&, + int64_t, + const c10::optional&); +DECLARE_DISPATCH(segment_reduce_lengths_backward_fn, _segment_reduce_lengths_backward_stub); + +using segment_reduce_offsets_backward_fn = Tensor (*)( + const Tensor&, + const Tensor&, + const Tensor&, + SegmentReductionType, + const Tensor&, + int64_t, + const c10::optional&); +DECLARE_DISPATCH(segment_reduce_offsets_backward_fn, _segment_reduce_offsets_backward_stub); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/SharedReduceOps.h b/voice_bridge/torch/include/ATen/native/SharedReduceOps.h new file mode 100644 index 0000000000000000000000000000000000000000..0519bfa57e61c8e6667457c1007ee11b468da748 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/SharedReduceOps.h @@ -0,0 +1,543 @@ +#pragma once +// Please note that this file is +// used across both CPU and GPU. + +#include +#include +#include +#include +#include +#if defined(__CUDACC__) +#include +#include +#elif defined(__HIPCC__) +#include +#include +#endif +#if defined(__CUDACC__) || defined(__HIPCC__) +#include +#else +#include +#define device_sqrt std::sqrt +#endif +#if defined(__CUDACC__) || defined(__HIPCC__) +template +inline C10_DEVICE scalar_t max_propagate_nan(scalar_t a, scalar_t b) { +#if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + scalar_t max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b)); +#else + scalar_t max = at::_isnan(b) ? b : std::max(a, b); +#endif + return max; +} +template +inline C10_DEVICE scalar_t min_propagate_nan(scalar_t a, scalar_t b) { +#if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + scalar_t min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b)); +#else + scalar_t min = at::_isnan(b) ? 
b : std::min(a, b); +#endif + return min; +} +#define MAX(X, Y) max_propagate_nan(X,Y) +#define MIN(X, Y) min_propagate_nan(X,Y) +#else +#include +#define MAX(X, Y) max_impl(X,Y) +#define MIN(X, Y) min_impl(X,Y) +#endif + +// ROCM hcc doesn't work well with using std:: in kernel functions +#if defined(__CUDA_ARCH__) +#include +#define compat_pow c10::cuda::compat::pow +#elif defined(__HIPCC__) +#include +#define compat_pow c10::hip::compat::pow +#else +#define compat_pow std::pow +#endif + +namespace at { namespace native { + +namespace detail { + +#if defined(__CUDACC__) || defined(__HIPCC__) +template using pair = thrust::pair; +#else +template using pair = std::pair; +#endif + +} // namespace detail + +template +struct WelfordData { + scalar_t mean; + scalar_t m2; + index_t n; + combine_t nf; + + C10_HOST_DEVICE WelfordData() : mean(0), m2(0), n(0), nf(0) {} + + C10_HOST_DEVICE WelfordData( + scalar_t mean, + scalar_t m2, + index_t n, + combine_t nf) + : mean(mean), m2(m2), n(n), nf(nf) {} +}; + + +template +struct WelfordOps { + index_t correction; + bool take_sqrt; + public: + using acc_t = WelfordData; + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const { + acc_scalar_t delta = data - acc.mean; + // using acc.nf(combine_t) here, as acc.n(index_t) would still be converted + // accumulation in reduce is done through index_T + acc_scalar_t new_mean = acc.mean + delta / (acc.nf + 1); + acc_scalar_t new_delta = data - new_mean; + return { + new_mean, + acc.m2 + delta * new_delta, + acc.n + 1, + combine_t(acc.n + 1), // accumulate for combine_t uses index_t + }; + } + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + if (a.nf == 0) { + return b; + } + if (b.nf == 0) { + return a; + } + acc_scalar_t delta = b.mean - a.mean; + combine_t new_count = a.nf + b.nf; + acc_scalar_t nb_over_n = b.nf / new_count; + return { + a.mean + delta * nb_over_n, + a.m2 + b.m2 + delta * delta * a.nf * nb_over_n, + // setting acc.n as -1 since acc.n might not be able to represent the count + // correctly within its range, setting it to -1 to avoid confusion + -1, + new_count + }; + } + inline C10_DEVICE res_t project(acc_t acc) const __ubsan_ignore_float_divide_by_zero__ { + const auto mean = static_cast(acc.mean); + const combine_t divisor = acc.nf > correction ? acc.nf - correction : 0; + const auto var = acc.m2 / divisor; + res_t results(take_sqrt ? 
device_sqrt(var) : var, mean); + return results; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline __device__ acc_t warp_shfl_down(acc_t acc, int offset) const { + return { + WARP_SHFL_DOWN(acc.mean, offset) + , WARP_SHFL_DOWN(acc.m2, offset) + , WARP_SHFL_DOWN(acc.n, offset) + , WARP_SHFL_DOWN(acc.nf, offset) + }; + } +#endif + C10_HOST_DEVICE WelfordOps(index_t correction, bool take_sqrt) + : correction(correction), take_sqrt(take_sqrt) {} +}; + +template +struct MeanOps { + factor_t factor; + + inline C10_DEVICE acc_t reduce(acc_t a, acc_t b, int64_t /*idx*/) const { + return combine(a, b); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return a + b; + } + + inline C10_DEVICE acc_t project(acc_t a) const { + return a * factor; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const { + return WARP_SHFL_DOWN(data, offset); + } +#endif + + MeanOps(factor_t factor): factor(factor) { + } +}; + +// This accumulator template is used to calculate the minimum absolute value of +// a set of numbers. +// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated +// value. These types differ for complex number input support. +template +struct AbsMinOps { + + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const { + return MIN(acc, static_cast(std::abs(data))); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return MIN(a, b); + } + + inline C10_DEVICE acc_t project(acc_t a) const { + return a; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const { + return WARP_SHFL_DOWN(acc, offset); + } +#endif +}; + +// This accumulator template is used to calculate the maximum absolute value of +// a set of numbers. +// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated +// value. These types differ for complex number input support. +template +struct AbsMaxOps { + + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const { + return MAX(acc, static_cast(std::abs(data))); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return MAX(a, b); + } + + inline C10_DEVICE acc_t project(acc_t a) const { + return a; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const { + return WARP_SHFL_DOWN(acc, offset); + } +#endif +}; + +// This accumulator template is used to calculate the norm of the absolute value +// of a set of numbers. +// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated +// value. These types differ for complex number input support. 
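+// [Editor's aside; illustration, not part of the upstream header.] Worked example
+// of WelfordOps::combine above, merging chunk a = {1, 2} with chunk b = {3, 4, 5}:
+//   a: (mean 1.5, m2 0.5, nf 2)   b: (mean 4, m2 2, nf 3)
+//   delta = 2.5, new_count = 5, nb_over_n = 3/5 = 0.6
+//   mean = 1.5 + 2.5 * 0.6 = 3.0
+//   m2   = 0.5 + 2 + 2.5^2 * 2 * 0.6 = 10.0
+// which matches the mean and the sum of squared deviations of {1, ..., 5};
+// with correction = 1, project() then yields var = 10 / (5 - 1) = 2.5.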
+template +struct NormOps { + acc_t norm_; + + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const { + return acc + compat_pow(static_cast(std::abs(data)), norm_); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return a + b; + } + + inline C10_DEVICE acc_t project(acc_t a) const { + return compat_pow(a, static_cast(1.0) / norm_); + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const { + return WARP_SHFL_DOWN(acc, offset); + } +#endif + + NormOps(acc_t norm_): norm_(norm_) { + } +}; + +// This accumulator template is used to calculate the order zero norm of the +// absolute value of a set of numbers. +// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated +// value. These types differ for complex number input support. +template +struct NormZeroOps { + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const { + return acc + (data == static_cast(0) ? static_cast(0) : static_cast(1)); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return a + b; + } + + inline C10_DEVICE acc_t project(acc_t a) const { + return a; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const { + return WARP_SHFL_DOWN(acc, offset); + } +#endif +}; + +// This accumulator template is used to calculate the order one norm of the +// absolute value of a set of numbers. +// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated +// value. These types differ for complex number input support. +template +struct NormOneOps { + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const { + return acc + static_cast(std::abs(data)); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return a + b; + } + + inline C10_DEVICE acc_t project(acc_t a) const { + return a; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const { + return WARP_SHFL_DOWN(acc, offset); + } +#endif +}; + + +template +struct AbsSwitch {}; + +template +inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch) { + return static_cast(data); +} + +template +inline C10_DEVICE acc_t abs_if_complex(std::complex data, AbsSwitch) { + return static_cast(std::abs(data)); +} + +template +inline C10_DEVICE acc_t abs_if_complex(c10::complex data, AbsSwitch) { + return static_cast(std::abs(data)); +} + +// This accumulator template is used to calculate the order two norm of the +// absolute value of a set of numbers. +// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated +// value. These types differ for complex number input support. 
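All of these accumulators implement the same protocol the reduction drivers expect: `reduce` folds one element into a partial accumulator, `combine` merges two partials (so independent chunks can be reduced in parallel), and `project` maps the final accumulator to the result. A host-side sketch of that contract with a `NormOps`-style functor (plain C++; the two-chunk driver is hypothetical, not an ATen API):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// NormOps-style functor: accumulate |x|^p, then apply pow(., 1/p) once at the end.
struct PNorm {
  double p;
  double reduce(double acc, double x) const { return acc + std::pow(std::abs(x), p); }
  double combine(double a, double b) const { return a + b; }
  double project(double a) const { return std::pow(a, 1.0 / p); }
};

// Generic driver: reduce each chunk independently, combine, then project.
template <typename Ops>
double run(const Ops& ops, const std::vector<double>& v) {
  double acc0 = 0, acc1 = 0;
  size_t half = v.size() / 2;
  for (size_t i = 0; i < half; i++)        acc0 = ops.reduce(acc0, v[i]);
  for (size_t i = half; i < v.size(); i++) acc1 = ops.reduce(acc1, v[i]);
  return ops.project(ops.combine(acc0, acc1));
}

int main() {
  std::vector<double> v{3, -4};
  std::printf("L2 norm = %f\n", run(PNorm{2.0}, v)); // 5.0
}
```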
+template +struct NormTwoOps { + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const { + acc_t data_ = abs_if_complex(data, AbsSwitch()); + return acc + data_ * data_; + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return a + b; + } + + inline C10_DEVICE acc_t project(acc_t a) const { + return device_sqrt(a); + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const { + return WARP_SHFL_DOWN(acc, offset); + } +#endif +}; + +template +struct NanSumOps { + inline C10_DEVICE acc_t reduce(acc_t a, data_t b, int64_t /*idx*/) const { + return a + (at::_isnan(b) ? acc_t{0.} : acc_t{b}); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + return a + b; + } + + inline C10_DEVICE data_t project(acc_t a) const { + return data_t{a}; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const { + return WARP_SHFL_DOWN(data, offset); + } +#endif +}; + +namespace detail { + +template +struct LessOrNan { + C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const { + // If (a == b), then choose the one with lower idx, else min(a, b) + if (at::_isnan(a)) { + if (at::_isnan(b)) { + return idx_a < idx_b; + } + return true; + } + return (a == b) ? idx_a < idx_b : (a < b); + } +}; + +template +struct GreaterOrNan { + C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const { + // If (a == b), then choose the one with lower idx, else max(a, b) + if (at::_isnan(a)) { + if (at::_isnan(b)) { + return idx_a < idx_b; + } + return true; + } + return (a == b) ? idx_a < idx_b : (a > b); + } +}; + +template +struct MinMaxReductionOps { + using scalar_t = typename binary_function_traits::arg1_t; + using index_t = int64_t; + using arg_t = detail::pair; + + static C10_DEVICE arg_t project(arg_t arg) { + return arg; + } + + static C10_DEVICE arg_t reduce(arg_t arg, scalar_t val, int64_t idx) { + return comp_t{}(arg.first, val, arg.second, idx) ? arg : arg_t(val, idx); + } + + static C10_DEVICE arg_t combine(arg_t a, arg_t b) { + return comp_t{}(a.first, b.first, a.second, b.second) ? 
a : b; + } + + static C10_DEVICE arg_t translate_idx(arg_t a, int64_t base_idx) { + return {a.first, a.second + base_idx}; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + static C10_DEVICE arg_t warp_shfl_down(arg_t arg, int offset) { + return arg_t(WARP_SHFL_DOWN(arg.first, offset), + WARP_SHFL_DOWN(arg.second, offset)); + } +#endif +}; + +template +struct ArgReductionOps : public MinMaxReductionOps { + using typename MinMaxReductionOps::scalar_t; + using typename MinMaxReductionOps::index_t; + using typename MinMaxReductionOps::arg_t; + + static C10_DEVICE index_t project(arg_t arg) { + return arg.second; + } +}; + +} // namespace detail + +template +struct ArgMaxOps : + public detail::ArgReductionOps> { +}; + +template +struct ArgMinOps : + public detail::ArgReductionOps> { +}; + +template +struct MinOps : + public detail::MinMaxReductionOps> { +}; + +template +struct MaxOps : + public detail::MinMaxReductionOps> { +}; + +template +struct MinMaxOps { + using acc_t = detail::pair; + inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const { + return combine(acc, {data, data}); + } + + inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const { + auto min_val = (at::_isnan(a.first) || a.first < b.first) ? a.first : b.first; + auto max_val = (at::_isnan(a.second) || a.second > b.second) ? a.second : b.second; + + return {min_val, max_val}; + } + + inline C10_DEVICE acc_t project(acc_t acc) const { + return acc; + } + + static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) { + return acc; + } + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const { + return { + WARP_SHFL_DOWN(acc.first, offset), WARP_SHFL_DOWN(acc.second, offset) + }; + } +#endif +}; + +}} // namespace at::native + +#undef MAX +#undef MIN diff --git a/voice_bridge/torch/include/ATen/native/SobolEngineOpsUtils.h b/voice_bridge/torch/include/ATen/native/SobolEngineOpsUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..d3d7a362f2e8721959f72014d752279ee807c71b --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/SobolEngineOpsUtils.h @@ -0,0 +1,51 @@ +/// This file contains some tensor-agnostic operations to be used in the +/// core functions of the `SobolEngine` +#include + +namespace at { +namespace native { +namespace sobol_utils { + +/// Function to return the minimum of number of bits to represent the integer `n` +inline int64_t bit_length(const int64_t n) { + int64_t nbits, nloc; + for (nloc = n, nbits = 0; nloc > 0; nloc /= 2, nbits++); + return nbits; +} + +/// Function to get the position of the rightmost zero in the bit representation of an integer +/// This value is the zero-indexed position +inline int64_t rightmost_zero(const int64_t n) { + int64_t z, i; + for (z = n, i = 0; z % 2 == 1; z /= 2, i++); + return i; +} + +/// Function to get a subsequence of bits in the representation of an integer starting from +/// `pos` and of length `length` +inline int64_t bitsubseq(const int64_t n, const int64_t pos, const int64_t length) { + return (n >> pos) & ((1 << length) - 1); +} + +/// Function to perform the inner product between a batched square matrix and a power of 2 vector +inline at::Tensor cdot_pow2(const at::Tensor& bmat) { + at::Tensor inter = at::arange(bmat.size(-1) - 1, -1, -1, bmat.options()); + inter = at::pow(2, inter).expand_as(bmat); + return at::mul(inter, bmat).sum(-1); +} + +/// All definitions below this point are data. 
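The three bit helpers defined just above drive the Sobol recursion: `bit_length(n)` is the number of bits needed to represent `n`, `rightmost_zero(n)` is the index of the lowest zero bit (equivalently, the number of trailing one bits, which selects the next direction number in the Gray-code update), and `bitsubseq` extracts a bit field. A standalone check of the expected values (plain C++, a copy of the logic rather than the ATen functions themselves):

```cpp
#include <cassert>
#include <cstdint>

int64_t bit_length(int64_t n) {
  int64_t bits = 0;
  for (; n > 0; n /= 2) bits++;
  return bits;
}

// Index of the lowest zero bit == number of trailing one bits.
int64_t rightmost_zero(int64_t n) {
  int64_t i = 0;
  for (; n % 2 == 1; n /= 2) i++;
  return i;
}

int64_t bitsubseq(int64_t n, int64_t pos, int64_t length) {
  return (n >> pos) & ((int64_t(1) << length) - 1);
}

int main() {
  assert(bit_length(6) == 3);           // 6 == 0b110
  assert(rightmost_zero(0b0111) == 3);  // three trailing ones
  assert(rightmost_zero(0b0110) == 0);  // lowest bit is already zero
  assert(bitsubseq(0b110101, 2, 3) == 0b101);
}
```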
These are constant, and should not be modified +/// without notice + +constexpr int64_t MAXDIM = 21201; +constexpr int64_t MAXDEG = 18; +constexpr int64_t MAXBIT = 30; +constexpr int64_t LARGEST_NUMBER = 1 << MAXBIT; +constexpr float RECIPD = 1.0 / LARGEST_NUMBER; + +extern const int64_t poly[MAXDIM]; +extern const int64_t initsobolstate[MAXDIM][MAXDEG]; + +} // namespace sobol_utils +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/Sorting.h b/voice_bridge/torch/include/ATen/native/Sorting.h new file mode 100644 index 0000000000000000000000000000000000000000..627ee452115015c7157018392debf9bc33166b29 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Sorting.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include + +namespace at { +class TensorBase; +} + +namespace at { +namespace native { + +enum class QUANTILE_INTERPOLATION_MODE : uint8_t { + LINEAR, + LOWER, + HIGHER, + MIDPOINT, + NEAREST +}; + +using sort_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, bool, bool); +using topk_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, int64_t, bool, bool); + +DECLARE_DISPATCH(sort_fn, sort_stub); +DECLARE_DISPATCH(topk_fn, topk_stub); + +void _fill_indices(const TensorBase &indices, int64_t dim); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/SortingUtils.h b/voice_bridge/torch/include/ATen/native/SortingUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..f6065927eba445785442d911d5dafb413668bd6d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/SortingUtils.h @@ -0,0 +1,90 @@ +#pragma once + +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at { +namespace native { + +// ensure we get good values and indices for kthvalue, mode +// this will always be with the reducing dim as 1-d +inline void _reduction_with_indices_allocate_or_resize_output( + Tensor& values, + Tensor& indices, + const Tensor& self, + int64_t dim_, + bool keepdim) { + int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true); + auto result_sizes = self.sizes().vec(); + if (result_sizes.size() > 0) { + result_sizes[dim] = 1; + } + if (values.defined()) { + TORCH_CHECK( + self.options().type_equal(values.options()), + "output values must be of same type as input"); + if (!keepdim && values.dim() == self.dim() - 1) { + // unsqueeze to preserve passed in noncontiguous tensor in resize + values.unsqueeze_(dim); + } + resize_output(values, result_sizes); + } else { + values = at::empty(result_sizes, self.options()); + } + if (indices.defined()) { + TORCH_CHECK( + indices.dtype() == kLong, "output indices must be of scalar type Long"); + TORCH_CHECK( + indices.device() == self.device(), + "output indices must be on same device as input"); + if (!keepdim && indices.dim() == self.dim() - 1) { + // unsqueeze to preserve passed in noncontiguous tensor in resize + indices.unsqueeze_(dim); + } + resize_output(indices, result_sizes); + } else { + indices = at::empty(result_sizes, self.options().dtype(kLong)); + } +} + +// ensure we get good values and indices for topk +inline void _allocate_or_resize_output_with_indices( + Tensor& values, + Tensor& indices, + const Tensor& self, + int64_t dim_, + int64_t k) { + int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true); + auto result_sizes = self.sizes().vec(); + if (result_sizes.size() > 0) { + result_sizes[dim] = k; 
+ } + if (values.defined()) { + TORCH_CHECK( + self.options().type_equal(values.options()), + "output values must be of same type as input"); + values.resize_(result_sizes); + } else { + values = at::empty(result_sizes, self.options()); + } + if (indices.defined()) { + TORCH_CHECK( + indices.dtype() == kLong, "output indices must be of scalar type Long"); + TORCH_CHECK( + indices.device() == self.device(), + "output indices must be on same device as input"); + indices.resize_(result_sizes); + } else { + indices = at::empty(result_sizes, self.options().dtype(kLong)); + } +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/SpectralOpsUtils.h b/voice_bridge/torch/include/ATen/native/SpectralOpsUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..bd38257d12755963c27e6998c808df0aa758d506 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/SpectralOpsUtils.h @@ -0,0 +1,80 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { namespace native { + +// Normalization types used in _fft_with_size +enum class fft_norm_mode { + none, // No normalization + by_root_n, // Divide by sqrt(signal_size) + by_n, // Divide by signal_size +}; + +// NOTE [ Fourier Transform Conjugate Symmetry ] +// +// The real-to-complex Fourier transform satisfies conjugate symmetry. That is, +// assuming X is the transformed K-dimensional signal, we have +// +// X[i_1, ..., i_K] = X[j_1, ..., j_K]*, +// +// where j_k = (N_k - i_k) mod N_k, N_k being the signal size at dim k, and +// * is the conjugate operator. +// +// Therefore, in such cases, FFT libraries return only roughly half of the +// values to avoid redundancy: +// +// X[:, :, ..., :floor(N / 2) + 1] +// +// This is also the assumption in cuFFT and MKL. In ATen SpectralOps, such a +// halved signal will also be returned by default (flag onesided=True). +// The following infer_ft_real_to_complex_onesided_size function calculates the +// onesided size from the twosided size. +// +// Note that this loses some information about the signal size at the last +// dimension. E.g., both 11 and 10 map to 6. Hence, the following +// infer_ft_complex_to_real_onesided_size function takes in an optional parameter +// to infer the twosided size from a given onesided size.
+// +// cuFFT doc: http://docs.nvidia.com/cuda/cufft/index.html#multi-dimensional +// MKL doc: https://software.intel.com/en-us/mkl-developer-reference-c-dfti-complex-storage-dfti-real-storage-dfti-conjugate-even-storage#CONJUGATE_EVEN_STORAGE + +inline int64_t infer_ft_real_to_complex_onesided_size(int64_t real_size) { + return (real_size / 2) + 1; +} + +inline int64_t infer_ft_complex_to_real_onesided_size(int64_t complex_size, + int64_t expected_size=-1) { + int64_t base = (complex_size - 1) * 2; + if (expected_size < 0) { + return base + 1; + } else if (base == expected_size) { + return base; + } else if (base + 1 == expected_size) { + return base + 1; + } else { + std::ostringstream ss; + ss << "expected real signal size " << expected_size << " is incompatible " + << "with onesided complex frequency size " << complex_size; + AT_ERROR(ss.str()); + } +} + +using fft_fill_with_conjugate_symmetry_fn = + void (*)(ScalarType dtype, IntArrayRef mirror_dims, IntArrayRef half_sizes, + IntArrayRef in_strides, const void* in_data, + IntArrayRef out_strides, void* out_data); +DECLARE_DISPATCH(fft_fill_with_conjugate_symmetry_fn, fft_fill_with_conjugate_symmetry_stub); + +// In real-to-complex transform, cuFFT and MKL only fill half of the values +// due to conjugate symmetry. This function fills in the other half of the full +// fft by using the Hermitian symmetry in the signal. +// self should be the shape of the full signal and dims.back() should be the +// one-sided dimension. +// See NOTE [ Fourier Transform Conjugate Symmetry ] +TORCH_API void _fft_fill_with_conjugate_symmetry_(const Tensor& self, IntArrayRef dims); + +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/StridedRandomAccessor.h b/voice_bridge/torch/include/ATen/native/StridedRandomAccessor.h new file mode 100644 index 0000000000000000000000000000000000000000..bb7b2155cd39a82767b446d8e1a00cd1dde7ebf1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/StridedRandomAccessor.h @@ -0,0 +1,301 @@ +#pragma once + +namespace at { namespace native { + +// (Const)StridedRandomAccessor is a +// (const) random access iterator defined over +// a strided array. + +// The traits below are to introduce __restrict__ +// modifier on different platforms. 
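The two `infer_ft_*` helpers above are simple but asymmetric: `real_size -> real_size / 2 + 1` is lossy (10 and 11 both map to 6), so the inverse takes the expected real size as a hint and validates it against the two candidates. A quick standalone illustration (plain C++, mirroring the logic rather than calling ATen):

```cpp
#include <cassert>
#include <cstdint>
#include <stdexcept>

int64_t onesided(int64_t real_size) { return real_size / 2 + 1; }

// Invert onesided(); `expected` disambiguates between 2*(c-1) and 2*(c-1)+1.
int64_t twosided(int64_t complex_size, int64_t expected = -1) {
  int64_t base = (complex_size - 1) * 2;
  if (expected < 0) return base + 1;  // no hint: default to the odd candidate
  if (expected == base || expected == base + 1) return expected;
  throw std::invalid_argument("incompatible onesided size");
}

int main() {
  assert(onesided(10) == 6 && onesided(11) == 6); // both sizes land in one bucket
  assert(twosided(6, 10) == 10);
  assert(twosided(6, 11) == 11);
  assert(twosided(6) == 11);
}
```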
+ +template +struct DefaultPtrTraits { + using PtrType = T*; +}; + +#if (defined(_WIN32) || defined(_WIN64)) +#define RESTRICT __restrict +#else +#define RESTRICT __restrict__ +#endif + +template +struct RestrictPtrTraits { + using PtrType = T* RESTRICT; +}; + +template < + typename T, + typename index_t = int64_t, + template class PtrTraits = DefaultPtrTraits +> +class ConstStridedRandomAccessor { +public: + using difference_type = index_t; + using value_type = const T; + using pointer = const typename PtrTraits::PtrType; + using reference = const value_type&; + using iterator_category = std::random_access_iterator_tag; + + using PtrType = typename PtrTraits::PtrType; + using index_type = index_t; + + // Constructors { + C10_HOST_DEVICE + ConstStridedRandomAccessor(PtrType ptr, index_t stride) + : ptr{ptr}, stride{stride} + {} + + C10_HOST_DEVICE + explicit ConstStridedRandomAccessor(PtrType ptr) + : ptr{ptr}, stride{static_cast(1)} + {} + + C10_HOST_DEVICE + ConstStridedRandomAccessor() + : ptr{nullptr}, stride{static_cast(1)} + {} + // } + + // Pointer-like operations { + C10_HOST_DEVICE + reference operator*() const { + return *ptr; + } + + C10_HOST_DEVICE + const value_type* operator->() const { + return reinterpret_cast(ptr); + } + + C10_HOST_DEVICE + reference operator[](index_t idx) const { + return ptr[idx * stride]; + } + // } + + // Prefix/postfix increment/decrement { + C10_HOST_DEVICE + ConstStridedRandomAccessor& operator++() { + ptr += stride; + return *this; + } + + C10_HOST_DEVICE + ConstStridedRandomAccessor operator++(int) { + ConstStridedRandomAccessor copy(*this); + ++*this; + return copy; + } + + C10_HOST_DEVICE + ConstStridedRandomAccessor& operator--() { + ptr -= stride; + return *this; + } + + C10_HOST_DEVICE + ConstStridedRandomAccessor operator--(int) { + ConstStridedRandomAccessor copy(*this); + --*this; + return copy; + } + // } + + // Arithmetic operations { + C10_HOST_DEVICE + ConstStridedRandomAccessor& operator+=(index_t offset) { + ptr += offset * stride; + return *this; + } + + C10_HOST_DEVICE + ConstStridedRandomAccessor operator+(index_t offset) const { + return ConstStridedRandomAccessor(ptr + offset * stride, stride); + } + + C10_HOST_DEVICE + friend ConstStridedRandomAccessor operator+( + index_t offset, + const ConstStridedRandomAccessor& accessor + ) { + return accessor + offset; + } + + C10_HOST_DEVICE + ConstStridedRandomAccessor& operator-=(index_t offset) { + ptr -= offset * stride; + return *this; + } + + C10_HOST_DEVICE + ConstStridedRandomAccessor operator-(index_t offset) const { + return ConstStridedRandomAccessor(ptr - offset * stride, stride); + } + + // Note that this operator is well-defined when `this` and `other` + // represent the same sequences, i.e. when + // 1. this.stride == other.stride, + // 2. |other - this| / this.stride is an Integer. 
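The point of this iterator family is to make a strided slice of memory, such as one column of a row-major matrix, usable with standard algorithms that need random access. A minimal usage sketch with a simplified accessor (plain C++; the real class adds C10_HOST_DEVICE and the pointer-trait machinery), iterating a sequence for which the precondition above holds (equal strides, integer difference):

```cpp
#include <cstdio>
#include <numeric>

template <typename T>
class StridedAccessor {
 public:
  StridedAccessor(T* ptr, long stride) : ptr_(ptr), stride_(stride) {}
  T& operator[](long i) const { return ptr_[i * stride_]; }
  T& operator*() const { return *ptr_; }
  StridedAccessor& operator++() { ptr_ += stride_; return *this; }
  bool operator!=(const StridedAccessor& o) const { return ptr_ != o.ptr_; }
 private:
  T* ptr_;
  long stride_;
};

int main() {
  // A 3x4 row-major matrix; column 1 is a stride-4 sequence starting at &m[1].
  int m[12];
  std::iota(m, m + 12, 0);
  StridedAccessor<int> col(m + 1, 4), end(m + 1 + 3 * 4, 4);
  for (; col != end; ++col) std::printf("%d ", *col); // prints: 1 5 9
  std::printf("\n");
}
```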
+ C10_HOST_DEVICE + difference_type operator-(const ConstStridedRandomAccessor& other) const { + return (ptr - other.ptr) / stride; + } + // } + + // Comparison operators { + C10_HOST_DEVICE + bool operator==(const ConstStridedRandomAccessor& other) const { + return (ptr == other.ptr) && (stride == other.stride); + } + + C10_HOST_DEVICE + bool operator!=(const ConstStridedRandomAccessor& other) const { + return !(*this == other); + } + + C10_HOST_DEVICE + bool operator<(const ConstStridedRandomAccessor& other) const { + return ptr < other.ptr; + } + + C10_HOST_DEVICE + bool operator<=(const ConstStridedRandomAccessor& other) const { + return (*this < other) || (*this == other); + } + + C10_HOST_DEVICE + bool operator>(const ConstStridedRandomAccessor& other) const { + return !(*this <= other); + } + + C10_HOST_DEVICE + bool operator>=(const ConstStridedRandomAccessor& other) const { + return !(*this < other); + } + // } + +protected: + PtrType ptr; + index_t stride; +}; + +template < + typename T, + typename index_t = int64_t, + template class PtrTraits = DefaultPtrTraits +> +class StridedRandomAccessor + : public ConstStridedRandomAccessor { +public: + using difference_type = index_t; + using value_type = T; + using pointer = typename PtrTraits::PtrType; + using reference = value_type&; + + using BaseType = ConstStridedRandomAccessor; + using PtrType = typename PtrTraits::PtrType; + + // Constructors { + C10_HOST_DEVICE + StridedRandomAccessor(PtrType ptr, index_t stride) + : BaseType(ptr, stride) + {} + + C10_HOST_DEVICE + explicit StridedRandomAccessor(PtrType ptr) + : BaseType(ptr) + {} + + C10_HOST_DEVICE + StridedRandomAccessor() + : BaseType() + {} + // } + + // Pointer-like operations { + C10_HOST_DEVICE + reference operator*() const { + return *this->ptr; + } + + C10_HOST_DEVICE + value_type* operator->() const { + return reinterpret_cast(this->ptr); + } + + C10_HOST_DEVICE + reference operator[](index_t idx) const { + return this->ptr[idx * this->stride]; + } + // } + + // Prefix/postfix increment/decrement { + C10_HOST_DEVICE + StridedRandomAccessor& operator++() { + this->ptr += this->stride; + return *this; + } + + C10_HOST_DEVICE + StridedRandomAccessor operator++(int) { + StridedRandomAccessor copy(*this); + ++*this; + return copy; + } + + C10_HOST_DEVICE + StridedRandomAccessor& operator--() { + this->ptr -= this->stride; + return *this; + } + + C10_HOST_DEVICE + StridedRandomAccessor operator--(int) { + StridedRandomAccessor copy(*this); + --*this; + return copy; + } + // } + + // Arithmetic operations { + C10_HOST_DEVICE + StridedRandomAccessor& operator+=(index_t offset) { + this->ptr += offset * this->stride; + return *this; + } + + C10_HOST_DEVICE + StridedRandomAccessor operator+(index_t offset) const { + return StridedRandomAccessor(this->ptr + offset * this->stride, this->stride); + } + + C10_HOST_DEVICE + friend StridedRandomAccessor operator+( + index_t offset, + const StridedRandomAccessor& accessor + ) { + return accessor + offset; + } + + C10_HOST_DEVICE + StridedRandomAccessor& operator-=(index_t offset) { + this->ptr -= offset * this->stride; + return *this; + } + + C10_HOST_DEVICE + StridedRandomAccessor operator-(index_t offset) const { + return StridedRandomAccessor(this->ptr - offset * this->stride, this->stride); + } + + // Note that here we call BaseType::operator- version + C10_HOST_DEVICE + difference_type operator-(const BaseType& other) const { + return (static_cast(*this) - other); + } + // } +}; + +}} // namespace at::native diff --git 
a/voice_bridge/torch/include/ATen/native/TensorAdvancedIndexing.h b/voice_bridge/torch/include/ATen/native/TensorAdvancedIndexing.h new file mode 100644 index 0000000000000000000000000000000000000000..4db4c395209738fcf99382604f37f09d6a49436c --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorAdvancedIndexing.h @@ -0,0 +1,42 @@ +#pragma once + +// Indexing tensors by tensors + +#include +#include +#include + +namespace at { +struct TensorIterator; +} + +namespace at { namespace native { + +enum class SCATTER_GATHER_OP: uint8_t {REDUCE_ADD, REDUCE_MULTIPLY, REDUCE_MAXIMUM, REDUCE_MINIMUM, REDUCE_MEAN}; + +using index_put_with_sort_fn = void(*)(Tensor &, const c10::List> &, const Tensor &, bool accumulate, bool unsafe); +using index_put_with_sort_quantized_fn = void(*)(Tensor& self, const c10::List>& indices, const Tensor& value, double scale, int zero_point, bool unsafe); +using gather_fn = void (*)(const Tensor & result, const Tensor & self, int64_t dim, const Tensor & index); +using scatter_fn = void(*)(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src); +using scatter_fill_fn = void(*)(const Tensor& self, int64_t dim, const Tensor& index, const Scalar& src); +using scatter_add_fn = void(*)(const Tensor& self, int64_t dim, const Tensor& index, const Tensor& src); +using scatter_reduce_fn = void(*)(const Tensor& self, const int64_t dim, const Tensor& index, + const Tensor& src, const SCATTER_GATHER_OP& reduce); +using scatter_scalar_reduce_fn = void(*)(const Tensor& self, const int64_t dim, const Tensor& index, + const Scalar& value, const SCATTER_GATHER_OP& reduce); +using scatter_reduce_two_fn = void(*)(const Tensor& self, const int64_t dim, const Tensor& index, + const Tensor& src, const SCATTER_GATHER_OP& reduce); + +DECLARE_DISPATCH(index_put_with_sort_fn, index_put_with_sort_stub); +DECLARE_DISPATCH(index_put_with_sort_quantized_fn, index_put_with_sort_quantized_stub); +DECLARE_DISPATCH(gather_fn, gather_stub); +DECLARE_DISPATCH(scatter_fn, scatter_stub); +DECLARE_DISPATCH(scatter_fill_fn, scatter_fill_stub); +DECLARE_DISPATCH(scatter_add_fn, scatter_add_stub); +DECLARE_DISPATCH(scatter_reduce_fn, scatter_reduce_stub); +DECLARE_DISPATCH(scatter_scalar_reduce_fn, scatter_scalar_reduce_stub); +DECLARE_DISPATCH(scatter_reduce_two_fn, scatter_reduce_two_stub); + +TORCH_API Tensor& index_out(Tensor& result, const Tensor & self, const c10::List>& indices); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/TensorAdvancedIndexingUtils.h b/voice_bridge/torch/include/ATen/native/TensorAdvancedIndexingUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..8ffff8b6e912caf3fc0059030f7f6925fb7a30c1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorAdvancedIndexingUtils.h @@ -0,0 +1,89 @@ +#pragma once +#include +#include +#include + +namespace at { +namespace native { +namespace { +static std::string shapes_as_str(TensorList tensors) { + std::ostringstream os; + bool first = true; + for (auto& tensor : tensors) { + if (tensor.defined()) { + if (!first) { + os << ", "; + } + os << tensor.sizes(); + first = false; + } + } + return os.str(); +} +} // anonymous namespace + +static std::tuple canDispatchToMaskedFill(const Tensor& self, const torch::List>& indices, +const Tensor& value){ + if (!(value.numel() ==1 && value.device().is_cpu())){ + return std::make_tuple(false,Tensor()); + } + int64_t num_ind = 0; + Tensor mask; + auto self_device = self.device(); + for (const c10::optional i: indices) { 
+ if (!i.has_value() || !(*i).defined()){ + num_ind++; + } else { + Tensor index = std::move(*i); + if ((index.scalar_type() != kByte && index.scalar_type() != kBool) || + index.device() != self_device || mask.defined()){ + return std::make_tuple(false, Tensor()); + } else { + mask = index; + for (const auto j : c10::irange(index.dim())) { + int64_t srcIdx = num_ind + j; + TORCH_CHECK_INDEX(index.size(j) == self.size(srcIdx), "The shape of the mask ", index.sizes(), " at index ", j, + " does not match the shape of the indexed tensor ", self.sizes(), " at index ", srcIdx); + } + num_ind += mask.ndimension(); + } + } + } + for (const auto i : c10::irange(num_ind, self.ndimension())) { + (void)i; //Suppress unused variable warning + mask = mask.unsqueeze(-1); + } + return std::make_tuple(true, mask); +} + +static AdvancedIndex make_info(Tensor self, IOptTensorListRef orig) { + checkIndexTensorTypes(orig); + // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors + auto indices = expandTensors(self, orig); + // next broadcast all index tensors together + try { + indices = expand_outplace(indices); + } catch (std::exception& e) { + TORCH_CHECK_INDEX(false, "shape mismatch: indexing tensors could not be broadcast together" + " with shapes ", shapes_as_str(indices)); + } + // add missing null Tensors so that it matches self.dim() + while (indices.size() < (size_t)self.dim()) { + indices.emplace_back(); + } + // if the non-null indices are not all adjacent, transpose self and indices + // together so that they're adjacent at the front + if (!hasContiguousSubspace(indices)) { + std::tie(self, indices) = transposeToFront(self, indices); + } + // Ensure indices are on the same device as self + for (auto & indice : indices) { + if (indice.defined() && indice.device() != self.device()) { + indice = indice.to(self.device()); + } + } + return AdvancedIndex(self, indices); +} + +} // at +} // native diff --git a/voice_bridge/torch/include/ATen/native/TensorCompare.h b/voice_bridge/torch/include/ATen/native/TensorCompare.h new file mode 100644 index 0000000000000000000000000000000000000000..f35cd68d4806cb4575cc6e301e4ce6c8bb6ce214 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorCompare.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +namespace c10 { +class Scalar; +} + +namespace at { +class Tensor; +struct TensorIterator; +struct TensorIteratorBase; +} + +namespace at { namespace native { + +using reduce_minmax_fn = + void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool); +using structured_reduce_minmax_fn = + void (*)(const Tensor&, const Tensor&, const Tensor&, int64_t, bool); + +DECLARE_DISPATCH(structured_reduce_minmax_fn, max_stub); +DECLARE_DISPATCH(structured_reduce_minmax_fn, min_stub); + +using where_fn = void (*)(TensorIterator &); +DECLARE_DISPATCH(where_fn, where_kernel); + +using is_infinity_op_fn = void (*)(TensorIteratorBase &); +DECLARE_DISPATCH(is_infinity_op_fn, isposinf_stub); +DECLARE_DISPATCH(is_infinity_op_fn, isneginf_stub); + +using mode_fn = void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool); +DECLARE_DISPATCH(mode_fn, mode_stub); + +using clamp_tensor_fn = void (*)(TensorIteratorBase &); +DECLARE_DISPATCH(clamp_tensor_fn, clamp_stub); + +namespace detail { + enum class ClampLimits {Min, Max, MinMax}; +} + +DECLARE_DISPATCH(void (*)(TensorIteratorBase &, const c10::Scalar&, const c10::Scalar&), clamp_scalar_stub); +DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_min_scalar_stub); +DECLARE_DISPATCH(void 
(*)(TensorIteratorBase &, c10::Scalar), clamp_max_scalar_stub); + +using isin_default_fn = void (*)(const Tensor&, const Tensor&, bool, const Tensor&); +DECLARE_DISPATCH(isin_default_fn, isin_default_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/TensorConversions.h b/voice_bridge/torch/include/ATen/native/TensorConversions.h new file mode 100644 index 0000000000000000000000000000000000000000..8ec21a75dcac165e9d98e18b49cd24b721c52a6e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorConversions.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace at { + class Tensor; +namespace native { +bool to_will_alias( + const Tensor& self, + c10::optional dtype, + c10::optional layout, + c10::optional device, + bool copy, + c10::optional optional_memory_format); + +Tensor to_meta(const Tensor& tensor); +c10::optional to_meta(const c10::optional& tensor); +std::vector to_meta(at::ITensorListRef t_list); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/TensorDimApply.h b/voice_bridge/torch/include/ATen/native/TensorDimApply.h new file mode 100644 index 0000000000000000000000000000000000000000..ad9ca857eeab8c44d6c9e543f46b35c6a84d20d8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorDimApply.h @@ -0,0 +1,53 @@ +#include +#include + +namespace at { + namespace native { + //input tensors are non-zero dim and non-empty + template + void tensor_dim_apply3(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim, Function func) { + int ndims = self.dim(); + int tensor_dim_apply_has_finished = 0; + std::vector counter(ndims, 0); + T1* self_data = self.data_ptr(); + T1* values_data = values.data_ptr(); + T2* indices_data = indices.data_ptr(); + int64_t self_stride = self.stride(dim); + int64_t values_stride = values.stride(dim); + int64_t indices_stride = indices.stride(dim); + int self_dim_size = self.size(dim); + + while(!tensor_dim_apply_has_finished) { + func(self_data, values_data, indices_data, self_dim_size, self_stride, values_stride, indices_stride); + if(ndims == 1) + break; + for (const auto dim_i : c10::irange(ndims)) { + if(dim_i == dim) { + if(dim_i == (ndims - 1)) { + tensor_dim_apply_has_finished = 1; + break; + } + continue; + } + counter[dim_i]++; + self_data += self.stride(dim_i); + values_data += values.stride(dim_i); + indices_data += indices.stride(dim_i); + + if(counter[dim_i] == self.size(dim_i)) { + if(dim_i == ndims-1) { + tensor_dim_apply_has_finished = 1; + break; + } else { + self_data -= counter[dim_i]*self.stride(dim_i); + values_data -= counter[dim_i]*values.stride(dim_i); + indices_data -= counter[dim_i]*indices.stride(dim_i); + counter[dim_i] = 0; + } + } else { + break; + } + } + } + } +}} diff --git a/voice_bridge/torch/include/ATen/native/TensorFactories.h b/voice_bridge/torch/include/ATen/native/TensorFactories.h new file mode 100644 index 0000000000000000000000000000000000000000..35e058df4b3ab79a1ec07732ae0391c966e2f4b2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorFactories.h @@ -0,0 +1,123 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at { namespace native { +// Different combinations of row, col, and offset can lead to two cases: +// +// Case 1 - Trapezoid (Triangle as a special case): row + offset <= col +// Example A: offset > 0 +// 1 1 0 0 0 +// 1 1 1 0 0 +// 1 1 1 1 0 +// 
Example B: offset <= 0 +// 0 0 0 +// 1 0 0 +// 1 1 0 +// In this case, we calculate the number of elements in the first row and +// last row of the tril respectively, and then compute the tril size. +// +// Case 2 - Trapezoid + Rectangle: row + offset > col +// Example: +// 1 1 0 +// 1 1 1 +// 1 1 1 +// In this case, we first calculate the size of top trapezoid, and then +// calculate the size of the bottom rectangle. +inline int64_t get_tril_size(int64_t row, int64_t col, int64_t offset) { + // If either dimension is 0 then the there is no tril + if (row == 0 || col == 0) { + return 0; + } + // number of elements in the first row of the tril + auto m_first_row = offset > 0 ? + std::min(col, 1 + offset) : // upper bounded by col + row + offset > 0; // either 0 or 1 + // number of elements in the last row of the tril, bounded by [0, col] + auto m_last_row = std::max(0, std::min(col, row + offset)); + // number of rows, bounded by [0, row] + auto n_row_all = std::max(0, std::min(row, row + offset)); + auto n_row_trapezoid = (m_last_row - m_first_row + 1); + + // calculate # of elements in the top trapezoid + auto tril_size = (m_first_row + m_last_row) * n_row_trapezoid >> 1; + + // calculate # of elements in the bottom rectangle if there is any + auto diff_row = n_row_all - n_row_trapezoid; + if (diff_row > 0) { + tril_size += diff_row * col; + } + + return tril_size; +} + +inline void check_args( + int64_t row, int64_t col, c10::optional layout_opt) { + TORCH_CHECK(row >= 0, "row must be non-negative, got", row); + TORCH_CHECK(col >= 0, "col must be non-negative, got", col); + if (layout_opt.has_value()) { + TORCH_CHECK( + *layout_opt == at::kStrided, + "only support layout=torch.strided, got", + *layout_opt) + } +} + +using at::check_size_nonnegative; + +// assumes maximum value in created tensor is n-1 (e.g., torch.randperm(n)) +inline void check_supported_max_int_with_precision(int64_t n, const Tensor& tensor) { + // match defined() to behavior of checks below + TORCH_CHECK(at::scalar_tensor(n>0?n-1:n, tensor.options()).defined(), + "n is too large for result tensor type: '", tensor.toString(), "'"); + + // Ensure sufficient precision for floating point representation. 
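The closed form in `get_tril_size` sums an arithmetic series over the trapezoid rows and then adds the full rectangle beneath it, avoiding materializing the mask. A brute-force cross-check of the formula (plain C++, a standalone copy of the logic above; element (i, j) belongs to the tril iff j <= i + offset):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

int64_t get_tril_size(int64_t row, int64_t col, int64_t offset) {
  if (row == 0 || col == 0) return 0;
  auto m_first_row = offset > 0 ? std::min<int64_t>(col, 1 + offset)
                                : row + offset > 0;          // 0 or 1
  auto m_last_row = std::max<int64_t>(0, std::min<int64_t>(col, row + offset));
  auto n_row_all = std::max<int64_t>(0, std::min<int64_t>(row, row + offset));
  auto n_row_trapezoid = m_last_row - m_first_row + 1;
  // arithmetic series over the trapezoid rows
  auto tril_size = (m_first_row + m_last_row) * n_row_trapezoid >> 1;
  // plus the full rectangle below, if any
  if (auto diff_row = n_row_all - n_row_trapezoid; diff_row > 0)
    tril_size += diff_row * col;
  return tril_size;
}

int64_t brute_force(int64_t row, int64_t col, int64_t offset) {
  int64_t count = 0;
  for (int64_t i = 0; i < row; i++)
    for (int64_t j = 0; j < col; j++)
      if (j <= i + offset) count++;
  return count;
}

int main() {
  for (int64_t r = 0; r <= 6; r++)
    for (int64_t c = 0; c <= 6; c++)
      for (int64_t off = -7; off <= 7; off++)
        assert(get_tril_size(r, c, off) == brute_force(r, c, off));
}
```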
+ switch (tensor.scalar_type()) { + case at::ScalarType::Half: + TORCH_CHECK(n <= (int64_t(1) << 11) + 1, "n cannot be greater than 2049 for Half type."); + break; + case at::ScalarType::Float: + TORCH_CHECK(n <= (int64_t(1) << 24) + 1, "n cannot be greater than 2^24+1 for Float type."); + break; + case at::ScalarType::Double: // Unlikely to happen, but doesn't hurt to check + TORCH_CHECK(n <= (int64_t(1) << 53) + 1, "n cannot be greater than 2^53+1 for Double type."); + break; + default: + break; + } +} + +// The ZeroTensor allocator ignores whatever allocation is requested and always +// gives you nullptr +struct ZeroTensorAllocator final : public at::Allocator { + ZeroTensorAllocator(at::Device device) : device_(device) {}; + ~ZeroTensorAllocator() override = default; + static void deleter(void* const pointer) { + TORCH_INTERNAL_ASSERT(!pointer); + } + DataPtr allocate(const size_t /*nbytes*/) const override { + return {nullptr, nullptr, &deleter, device_}; + } + DeleterFnPtr raw_deleter() const override { + return deleter; + } + at::Device device_; +}; + +using binary_fn = void (*)(TensorIterator&); + +DECLARE_DISPATCH(binary_fn, complex_stub); +DECLARE_DISPATCH(binary_fn, polar_stub); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/TensorIterator.h b/voice_bridge/torch/include/ATen/native/TensorIterator.h new file mode 100644 index 0000000000000000000000000000000000000000..e55d2a58d709926a24467a0056323096e0890fa9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorIterator.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/voice_bridge/torch/include/ATen/native/TensorIteratorDynamicCasting.h b/voice_bridge/torch/include/ATen/native/TensorIteratorDynamicCasting.h new file mode 100644 index 0000000000000000000000000000000000000000..7cf57791230c899a16daf1114ba35702cb5fbdeb --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorIteratorDynamicCasting.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + + +// This file includes utilties for dynamic_casting done by TensorIterator, see CUDALoops.cuh and Loops.h. + +// dynamic_casting handles when the types expected by the iterator do not match the types of the arguments +// to the function that is being called. +// On CUDA, the cast is currently pushed down into the kernel (for performance reasons). +// On CPU, there is currently an internal assert that a dynamic_cast is not needed. + +namespace at { namespace native { + +// `needs_dynamic_casting` compares the types expected by iterator +// (i.e. dtypes of the operands) with the actual type of the arguments +// (and returns) of func_t +template::arity> +struct needs_dynamic_casting { + static bool check(TensorIteratorBase& iter) { + using traits = function_traits; + using cpp_type = typename traits::template arg::type; + using cpp_map = c10::CppTypeToScalarType; + + if (iter.input_dtype(nargs-1) != cpp_map::value) { + return true; + } + return needs_dynamic_casting::check(iter); + } +}; + +template +struct needs_dynamic_casting { + static bool check(TensorIteratorBase& iter) { + using traits = function_traits; + using cpp_type = typename traits::result_type; + + // we could assert output numbers are correct here, but checks + // (including arity) are currently pushed outside of this struct. 
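The recursion above walks a function's parameter list at compile time, comparing each static C++ argument type against the iterator's runtime dtypes, with the zero-arg specialization checking the return type against the output dtype. The same shape in miniature (plain C++; a hypothetical `DType` enum stands in for ScalarType, and a fold expression replaces the template recursion):

```cpp
#include <cassert>
#include <vector>

enum class DType { Float, Double, Int };

template <typename T> struct dtype_of;
template <> struct dtype_of<float>  { static constexpr DType value = DType::Float; };
template <> struct dtype_of<double> { static constexpr DType value = DType::Double; };
template <> struct dtype_of<int>    { static constexpr DType value = DType::Int; };

// dtypes[0] is the output dtype, dtypes[1..] the input dtypes, mirroring
// TensorIterator's dtype(0) / input_dtype(i) layout.
template <typename Ret, typename... Args>
bool needs_cast(Ret (*)(Args...), const std::vector<DType>& dtypes) {
  if (dtypes[0] != dtype_of<Ret>::value) return true;  // result-type check
  size_t i = 1;
  bool mismatch = false;
  // Fold over the argument pack: any static/runtime disagreement means the
  // loop would have to insert a cast.
  ((mismatch |= dtypes[i++] != dtype_of<Args>::value), ...);
  return mismatch;
}

float add(float a, float b) { return a + b; }

int main() {
  assert(!needs_cast(add, {DType::Float, DType::Float, DType::Float}));
  assert(needs_cast(add, {DType::Float, DType::Double, DType::Float}));
}
```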
+ return c10::guts::if_constexpr::value>([]() { + return false; + }, /* else */ [&](auto _) { + // decltype(_) is used to delay computation + using delayed_type = typename decltype(_)::template type_identity; + return iter.dtype(0) != c10::CppTypeToScalarType::value; + }); + } +}; + +}} //namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/TensorProperties.h b/voice_bridge/torch/include/ATen/native/TensorProperties.h new file mode 100644 index 0000000000000000000000000000000000000000..fe6e8395c178e991d5a7a41e69933bafcd028796 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorProperties.h @@ -0,0 +1,12 @@ +#pragma once + +// See NOTE: [Tensor vs. TensorBase] +namespace at { +class TensorBase; +} + +namespace at { namespace native { + +TORCH_API bool cudnn_is_acceptable(const TensorBase& self); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/TensorShape.h b/voice_bridge/torch/include/ATen/native/TensorShape.h new file mode 100644 index 0000000000000000000000000000000000000000..21d0ba78261ec66f52ebad9486b0d4f6e9a3b44d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorShape.h @@ -0,0 +1,63 @@ +#pragma once +#include +#include +#include + +namespace at { +namespace native { +inline bool cat_should_skip_tensor(const Tensor& t) { + return t.numel() == 0 && t.dim() == 1; +} + + // Check to see if the shape of tensors is compatible + // for being concatenated along a given dimension. +inline void check_cat_shape_except_dim(const Tensor & first, const Tensor & second, int64_t dimension, int64_t index) { + int64_t first_dims = first.dim(); + int64_t second_dims = second.dim(); + TORCH_CHECK(first_dims == second_dims, "Tensors must have same number of dimensions: got ", + first_dims, " and ", second_dims); + for (const auto dim : c10::irange(first_dims)) { + if (dim == dimension) { + continue; + } + int64_t first_dim_size = first.sizes()[dim]; + int64_t second_dim_size = second.sizes()[dim]; + TORCH_CHECK(first_dim_size == second_dim_size, "Sizes of tensors must match except in dimension ", + dimension, ". Expected size ", static_cast(first_dim_size), " but got size ", static_cast(second_dim_size), " for tensor number ", index, " in the list."); + } + } + +inline void check_cat_no_zero_dim(const MaterializedITensorListRef& tensors) { + int64_t i = 0; + for(const Tensor& t : tensors) { + TORCH_CHECK(t.dim() > 0, + "zero-dimensional tensor (at position ", i, ") cannot be concatenated"); + i++; + } +} + +inline int64_t get_num_splits(const Tensor& self, int64_t split_size, int64_t dim) { + TORCH_CHECK(self.dim() != 0, "split expects at least a 1-dimensional tensor"); + TORCH_CHECK(split_size >= 0, "split expects split_size be non-negative, but got split_size=", split_size); + int64_t dim_size = self.size(dim); + TORCH_CHECK(split_size > 0 || dim_size == 0, + "split_size can only be 0 if dimension size is 0, " + "but got dimension size of ", dim_size); + // if split_size is 0 and dimension size is 0, there is 1 split. + int64_t num_splits = 1; + if (split_size != 0) { + // ensuring num_splits is at least 1 makes consistent the case where split_size > dim_size + // (returns a single split). We might want to error here, but keep it for BC. 
+ num_splits = std::max((dim_size + split_size - 1) / split_size, 1); + } + return num_splits; +} + +/// +/// For more information, see +/// https://pytorch.org/docs/master/generated/torch.Tensor.unfold.html#torch.Tensor.unfold +/// + +Tensor unfold(const Tensor& self, int64_t dimension, int64_t size, int64_t step); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/TensorTransformations.h b/voice_bridge/torch/include/ATen/native/TensorTransformations.h new file mode 100644 index 0000000000000000000000000000000000000000..4909ebe84bb03e73cf841a5a103aa9ac0bae0fe5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TensorTransformations.h @@ -0,0 +1,31 @@ +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +#include + +namespace at { +namespace native { + +static inline Tensor roll_common(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) { + TORCH_CHECK(shifts.size() > 0, "`shifts` required"); + if (dims.size() == 0 && shifts.size() == 1) { + auto flattened = self.contiguous().view(self.numel()); + return roll(flattened, shifts[0], 0).view(self.sizes()); + } + TORCH_CHECK( + shifts.size() == dims.size(), + "shifts and dimensions must align. shifts: ", shifts.size(), ", dims:", dims.size() + ); + AT_ASSERT(dims.size() > 1); + auto tail_shifts = shifts.slice(1); + auto tail_dims = dims.slice(1); + auto first_dim_rolled = roll(self, shifts[0], dims[0]); + return at::roll(first_dim_rolled, tail_shifts, tail_dims); +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/TopKImpl.h b/voice_bridge/torch/include/ATen/native/TopKImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..69d5c70236b872da3d46a79af29759b57810937e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TopKImpl.h @@ -0,0 +1,95 @@ +#pragma once +#include +#include + +namespace at { +namespace native { + +#ifdef CPU_CAPABILITY +inline namespace CPU_CAPABILITY { +#else +inline namespace DEFAULT { +#endif + +// Core topk loop, shared between CPU and QuantizedCPU +template +void topk_impl_loop( + const int64_t mode_values_stride, + const int64_t mode_indices_stride, + const int64_t tmp_values_stride, + const int64_t k, + const int64_t dim_size, + const bool largest, + const bool sorted, + char** data, const int64_t* strides, const int64_t n) { + + using elem_t = std::pair; + std::vector queue(dim_size); + for (const auto i : c10::irange(n)) { + TensorAccessor mode_values( + reinterpret_cast(data[0] + i * strides[0]), + &k, &mode_values_stride); + TensorAccessor mode_indices( + reinterpret_cast(data[1] + i * strides[1]), + &k, &mode_indices_stride); + TensorAccessor tmp_values( + reinterpret_cast(data[2] + i * strides[2]), + &dim_size, &tmp_values_stride); + + auto n = dim_size; + auto use_partial_sort = k * 64 <= n; + + for (const auto j : c10::irange(n)) { + queue[j].first = tmp_values[j]; + queue[j].second = j; + } + + // we want nan to be sorted as top for numpy compatibility + if (use_partial_sort) { + if (largest) { + std::partial_sort(queue.begin(), queue.begin() + k, queue.end(), + [](const elem_t& x, const elem_t& y) -> bool { + return ((_isnan(x.first) && !_isnan(y.first)) || (x.first > y.first)); + }); + } else { + std::partial_sort(queue.begin(), queue.begin() + k, queue.end(), + [](const elem_t& x, const elem_t& y) -> bool { + return ((!_isnan(x.first) && _isnan(y.first)) || (x.first < y.first)); + }); + } + } else { + if (largest) { + std::nth_element(queue.begin(), queue.begin() + k - 1, 
queue.end(), + [](const elem_t& x, const elem_t& y) -> bool { + return ((_isnan(x.first) && !_isnan(y.first)) || (x.first > y.first)); + }); + if (sorted) { + std::sort(queue.begin(), queue.begin() + k - 1, + [](const elem_t& x, const elem_t& y) -> bool { + return ((_isnan(x.first) && !_isnan(y.first)) || (x.first > y.first)); + }); + } + } else { + std::nth_element(queue.begin(), queue.begin() + k -1, queue.end(), + [](const elem_t& x, const elem_t& y) -> bool { + return ((!_isnan(x.first) && _isnan(y.first)) || (x.first < y.first)); + }); + if (sorted) { + std::sort(queue.begin(), queue.begin() + k -1, + [](const elem_t& x, const elem_t& y) -> bool { + return ((!_isnan(x.first) && _isnan(y.first)) || (x.first < y.first)); + }); + } + } + } + + for (const auto j : c10::irange(k)) { + mode_values[j] = queue[j].first; + mode_indices[j] = queue[j].second; + } + } +} + +} // namespace CPU_CAPABILITY +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/TransposeType.h b/voice_bridge/torch/include/ATen/native/TransposeType.h new file mode 100644 index 0000000000000000000000000000000000000000..8956bbc5bf928c6c717617c6f2a5a63baebb2b08 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TransposeType.h @@ -0,0 +1,24 @@ +#pragma once +#include + +namespace at { +namespace native { + +// Used as an interface between the different BLAS-like libraries +enum class TransposeType { + NoTranspose, + Transpose, + ConjTranspose, +}; + +// Transforms TransposeType into the BLAS / LAPACK format +static inline char to_blas(TransposeType trans) { + switch (trans) { + case TransposeType::Transpose: return 'T'; + case TransposeType::NoTranspose: return 'N'; + case TransposeType::ConjTranspose: return 'C'; + } + TORCH_INTERNAL_ASSERT(false, "Invalid transpose type"); +} + +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/TriangularOpsUtils.h b/voice_bridge/torch/include/ATen/native/TriangularOpsUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..c5bce42ed3fd76084913fd1f6b07a531eaaba619 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TriangularOpsUtils.h @@ -0,0 +1,59 @@ +#include +#include + +namespace at { +namespace native { + +/* + * Given batches of matrices with arbitrary batch dim, + * computes the number of batches for Triu and Tril. This ignores stride 0 dimension + */ +static inline int64_t batchCountTrilTriu(const Tensor& batched_matrices) { + int64_t result = 1; + for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) { + if (batched_matrices.stride(i) != 0) { + result *= batched_matrices.size(i); + } + } + return result; +} + +/* Checks a necessary property for the triu and tril implementations, hence the name. + * Here batch contiguity is checked for tensors with greater than 4 dimensions. 
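The top-k loop above picks between two strategies: `std::partial_sort` when k is small relative to n (the `k * 64 <= n` heuristic), and `std::nth_element` followed by a sort of the prefix otherwise, with comparators that rank NaN above everything for NumPy compatibility. A standalone sketch of the same idea (plain C++, not the ATen kernel itself):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Rank NaN above every number so it surfaces in a "largest" top-k.
bool greater_nan_first(double x, double y) {
  return (std::isnan(x) && !std::isnan(y)) || x > y;
}

std::vector<double> topk(std::vector<double> v, size_t k) {
  if (k * 64 <= v.size()) {
    // Small k: partial_sort selects and orders the prefix in one pass.
    std::partial_sort(v.begin(), v.begin() + k, v.end(), greater_nan_first);
  } else {
    // Large k: nth_element places element k-1 exactly; sorting [0, k-1)
    // then yields a fully ordered prefix.
    std::nth_element(v.begin(), v.begin() + k - 1, v.end(), greater_nan_first);
    std::sort(v.begin(), v.begin() + k - 1, greater_nan_first);
  }
  v.resize(k);
  return v;
}

int main() {
  std::vector<double> v{3, NAN, 1, 7, 5};
  for (double x : topk(v, 2)) std::printf("%f ", x); // nan 7
  std::printf("\n");
}
```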
+ * Contiguous tensors and tensors with less than 3 dimensions pass this check + */ +static inline std::tuple checkTrilTriuBatchContiguous(const Tensor& tensor, bool allow_zero_stride) { + // Complete contiguity is the most desired property, which is why + // we return true if the tensor is contiguous + if (tensor.is_contiguous()) { + auto default_strides_for_size = batched_matrix_contiguous_strides(tensor.sizes()); + if (tensor.strides() == default_strides_for_size) { + return std::make_tuple(true, tensor); + } else { + return std::make_tuple(false, tensor.as_strided(tensor.sizes(), default_strides_for_size)); + } + } + + int64_t dims = tensor.dim(); + + // Tensors with dimension less than 4 are handled by default + if (allow_zero_stride && dims <= 3) { + return std::make_tuple(true, tensor); + } + + int64_t expected_stride = tensor.size(-1) * tensor.size(-2); + for (int64_t i = dims - 3; i >= 0; i--) { + // Skip trivial dimension; + if (allow_zero_stride && i == 0 && (tensor.stride(i) == 0 || tensor.size(i) == 1)) { + continue; + } + if (expected_stride != tensor.stride(i)) { + return std::make_tuple(false, tensor.contiguous()); + } + expected_stride *= tensor.size(i); + } + return std::make_tuple(true, tensor); +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/TypeProperties.h b/voice_bridge/torch/include/ATen/native/TypeProperties.h new file mode 100644 index 0000000000000000000000000000000000000000..b0f18c59488268dc0b3bf1d5f6b8f2454b8759ca --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/TypeProperties.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include + +namespace at { namespace native { + +struct ResultTypeState { + c10::ScalarType dimResult = ScalarType::Undefined; + c10::ScalarType wrappedResult = ScalarType::Undefined; + c10::ScalarType zeroResult = ScalarType::Undefined; +}; + +TORCH_API ResultTypeState update_result_type_state(const Tensor& tensor, const ResultTypeState& in_state); +TORCH_API ResultTypeState update_result_type_state(const Scalar& scalar, const ResultTypeState& in_state); +TORCH_API ScalarType result_type(const ResultTypeState& state); + +TORCH_API ScalarType result_type(ITensorListRef tensors); + +}} diff --git a/voice_bridge/torch/include/ATen/native/UnaryOps.h b/voice_bridge/torch/include/ATen/native/UnaryOps.h new file mode 100644 index 0000000000000000000000000000000000000000..103e522fa35db61f7182961d45e1a39313bcbc47 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/UnaryOps.h @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +namespace at { +class Tensor; +class TensorBase; +struct TensorIteratorBase; +} + +namespace at { namespace native { + +using unary_fn = void(*)(TensorIteratorBase&); +using unary_fn_with_scalar = void(*)(TensorIteratorBase&, const Scalar& a); + +DECLARE_DISPATCH(unary_fn, abs_stub); +DECLARE_DISPATCH(unary_fn, angle_stub); +DECLARE_DISPATCH(unary_fn, conj_physical_stub); +DECLARE_DISPATCH(unary_fn, acos_stub); +DECLARE_DISPATCH(unary_fn, acosh_stub); +DECLARE_DISPATCH(unary_fn, asinh_stub); +DECLARE_DISPATCH(unary_fn, atanh_stub); +DECLARE_DISPATCH(unary_fn, asin_stub); +DECLARE_DISPATCH(unary_fn, atan_stub); +DECLARE_DISPATCH(unary_fn, bitwise_not_stub); +DECLARE_DISPATCH(unary_fn, logical_not_stub); +DECLARE_DISPATCH(unary_fn, ceil_stub); +DECLARE_DISPATCH(unary_fn, cos_stub); +DECLARE_DISPATCH(unary_fn, cosh_stub); +DECLARE_DISPATCH(unary_fn, digamma_stub); +DECLARE_DISPATCH(unary_fn, special_entr_stub); +DECLARE_DISPATCH(unary_fn, 
special_erfcx_stub); +DECLARE_DISPATCH(unary_fn, erf_stub); +DECLARE_DISPATCH(unary_fn, erfc_stub); +DECLARE_DISPATCH(unary_fn, erfinv_stub); +DECLARE_DISPATCH(unary_fn, exp_stub); +DECLARE_DISPATCH(unary_fn, exp2_stub); +DECLARE_DISPATCH(unary_fn, expm1_stub); +DECLARE_DISPATCH(unary_fn, floor_stub); +DECLARE_DISPATCH(unary_fn, frac_stub); +DECLARE_DISPATCH(unary_fn, frexp_stub); +DECLARE_DISPATCH(unary_fn, i0_stub); +DECLARE_DISPATCH(unary_fn, special_i0e_stub); +DECLARE_DISPATCH(unary_fn, special_i1_stub); +DECLARE_DISPATCH(unary_fn, special_i1e_stub); +DECLARE_DISPATCH(unary_fn, log_stub); +DECLARE_DISPATCH(unary_fn, log10_stub); +DECLARE_DISPATCH(unary_fn, log1p_stub); +DECLARE_DISPATCH(unary_fn, log2_stub); +DECLARE_DISPATCH(unary_fn, special_ndtri_stub); +DECLARE_DISPATCH(unary_fn, special_log_ndtr_stub); +DECLARE_DISPATCH(unary_fn, neg_stub); + +DECLARE_DISPATCH(unary_fn, reciprocal_stub); +DECLARE_DISPATCH(unary_fn, round_stub); +DECLARE_DISPATCH(unary_fn, rsqrt_stub); +DECLARE_DISPATCH(unary_fn, sigmoid_stub); +DECLARE_DISPATCH(unary_fn_with_scalar, logit_stub); +DECLARE_DISPATCH(unary_fn, sign_stub); +DECLARE_DISPATCH(unary_fn, signbit_stub); +DECLARE_DISPATCH(unary_fn, sgn_stub); +DECLARE_DISPATCH(unary_fn, sin_stub); +DECLARE_DISPATCH(unary_fn, sinc_stub); +DECLARE_DISPATCH(unary_fn, sinh_stub); +DECLARE_DISPATCH(unary_fn, sqrt_stub); +DECLARE_DISPATCH(unary_fn, tan_stub); +DECLARE_DISPATCH(unary_fn, tanh_stub); +DECLARE_DISPATCH(unary_fn, trigamma_stub); +DECLARE_DISPATCH(unary_fn, trunc_stub); +DECLARE_DISPATCH(unary_fn, lgamma_stub); +DECLARE_DISPATCH(unary_fn, special_airy_ai_stub); +DECLARE_DISPATCH(unary_fn, special_bessel_j0_stub); +DECLARE_DISPATCH(unary_fn, special_bessel_j1_stub); +DECLARE_DISPATCH(unary_fn, special_bessel_y0_stub); +DECLARE_DISPATCH(unary_fn, special_bessel_y1_stub); +DECLARE_DISPATCH(unary_fn, special_modified_bessel_i0_stub); +DECLARE_DISPATCH(unary_fn, special_modified_bessel_i1_stub); +DECLARE_DISPATCH(unary_fn, special_modified_bessel_k0_stub); +DECLARE_DISPATCH(unary_fn, special_modified_bessel_k1_stub); +DECLARE_DISPATCH(unary_fn, special_scaled_modified_bessel_k0_stub); +DECLARE_DISPATCH(unary_fn, special_scaled_modified_bessel_k1_stub); +DECLARE_DISPATCH(unary_fn, special_spherical_bessel_j0_stub); + +// NB: these are actually defined in Distribution +DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, c10::optional), bernoulli_tensor_stub); +DECLARE_DISPATCH(void(*)(const TensorBase&, const double, c10::optional), bernoulli_scalar_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional), cauchy_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional), exponential_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional), geometric_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional), log_normal_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional), uniform_stub); +DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, c10::optional), normal_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, c10::optional), random_from_to_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional), random_full_64_bits_range_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional), random_stub); + +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t, const double), kaiser_window_stub); 
+DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t), polygamma_stub); +DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const Scalar& a, const Scalar& b), clamp_stub); +DECLARE_DISPATCH( + void (*)(Tensor&, const Tensor&, int64_t, c10::optional), + multinomial_with_replacement_stub); +DECLARE_DISPATCH( + void (*)( + TensorIteratorBase&, + c10::optional, + c10::optional, + c10::optional), + nan_to_num_stub); +DECLARE_DISPATCH(void (*)(TensorIteratorBase&, int64_t), round_decimals_stub); + +// Missing unary functions +// digamma +// lgamma +// erfinv +// clone +// contiguous +// zero +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/Unfold2d.h b/voice_bridge/torch/include/ATen/native/Unfold2d.h new file mode 100644 index 0000000000000000000000000000000000000000..2ea27e0cadedf2ae8abf01d2b706b391020815a0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Unfold2d.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +namespace at { namespace native { + +using unfold2d_fn = void (*)( + ScalarType dtype, + void *finput, + void *input, + int64_t kH, + int64_t kW, + int64_t dH, + int64_t dW, + int64_t padH, + int64_t padW, + int64_t n_input_plane, + int64_t input_height, + int64_t input_width, + int64_t output_height, + int64_t output_width, + bool is_channels_last +); + +DECLARE_DISPATCH(unfold2d_fn, unfolded2d_copy_stub); +DECLARE_DISPATCH(unfold2d_fn, unfolded2d_acc_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/Unfold3d.h b/voice_bridge/torch/include/ATen/native/Unfold3d.h new file mode 100644 index 0000000000000000000000000000000000000000..51eb89f9b810fa8199ec4562c4e1450f127212a0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/Unfold3d.h @@ -0,0 +1,51 @@ +#pragma once + +#include + +namespace at { +namespace native { + +void Unfold3dCopyCPU( + ScalarType dtype, + const void *src, + int64_t C, + int64_t X_D, + int64_t X_H, + int64_t X_W, + int64_t Y_D, + int64_t Y_H, + int64_t Y_W, + int64_t kernel_d, + int64_t kernel_h, + int64_t kernel_w, + int64_t stride_d, + int64_t stride_h, + int64_t stride_w, + int64_t pad_d, + int64_t pad_h, + int64_t pad_w, + void* dst); + +void Unfold3dAccCPU( + ScalarType dtype, + const void *src, + int64_t C, + int64_t X_D, + int64_t X_H, + int64_t X_W, + int64_t Y_D, + int64_t Y_H, + int64_t Y_W, + int64_t kernel_d, + int64_t kernel_h, + int64_t kernel_w, + int64_t stride_d, + int64_t stride_h, + int64_t stride_w, + int64_t pad_d, + int64_t pad_h, + int64_t pad_w, + void *dst); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/UnfoldBackward.h b/voice_bridge/torch/include/ATen/native/UnfoldBackward.h new file mode 100644 index 0000000000000000000000000000000000000000..1f6c8fa1b289cf70babb14bbba14076369547637 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/UnfoldBackward.h @@ -0,0 +1,186 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at { namespace native { + +using unfold_backward_fn = void (*)( + Tensor& grad_in, + const Tensor& grad, + int64_t dim, + int64_t size, + int64_t step +); + +DECLARE_DISPATCH(unfold_backward_fn, unfold_backward_stub); + +namespace { + +// Note on naming: it is unconventional. +// grad_in does not mean that it is a gradient wrt to input, +// grad_in/grad_out is just an input/output of unfold_backward kernel. 
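So, in this kernel's terms, grad_in is the unfolded gradient with the fold dimension last (shape ..., n_folds, size) and grad_out is the buffer being accumulated into. In 1D the relationship is easy to state: output position j receives every grad_in[i][f] with i * step + f == j. A naive reference version of that scatter-add (plain C++, independent of the TensorIterator machinery built below), handy for checking the iterator-based kernels:

```cpp
#include <cstdio>
#include <vector>

// Naive 1D unfold_backward: scatter-add each fold back to its source index.
// grad_in has n_folds rows of length `size`; fold i starts at i * step.
std::vector<double> unfold_backward_1d(
    const std::vector<std::vector<double>>& grad_in,
    int out_len, int size, int step) {
  std::vector<double> grad_out(out_len, 0.0);
  for (int i = 0; i < (int)grad_in.size(); i++)
    for (int f = 0; f < size; f++)
      grad_out[i * step + f] += grad_in[i][f];
  return grad_out;
}

int main() {
  // x.unfold(0, /*size=*/3, /*step=*/2) on a length-5 input yields 2 folds:
  // [x0 x1 x2] and [x2 x3 x4]; x2 appears in both, so its gradient is summed.
  auto g = unfold_backward_1d({{1, 1, 1}, {1, 1, 1}}, 5, 3, 2);
  for (double v : g) std::printf("%g ", v); // 1 1 2 1 1
  std::printf("\n");
}
```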
+ +static C10_UNUSED TensorIterator _make_unfold_backward_iter_over_grad_out( + Tensor& grad_out, + const Tensor& grad_in, + int64_t dim, + int64_t size, + int64_t step +) { + dim = maybe_wrap_dim(dim, grad_out.dim()); + // last dim stores the folds + + auto grad_out_dim_size = ensure_nonempty_size(grad_out, dim); + auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim); + // dictates the number of elements to iterate over + // in dimension `dim` + auto iter_dim_size = std::min( + grad_out_dim_size, + (grad_in_dim_size - 1) * step + size + ); + + /* prepare grad_out for TensorIterator { */ + auto grad_out_strides = ensure_nonempty_vec(grad_out.strides().vec()); + auto grad_out_sizes = ensure_nonempty_vec(grad_out.sizes().vec()); + grad_out_sizes[dim] = iter_dim_size; + auto grad_out_restrided = grad_out.as_strided( + grad_out_sizes, grad_out_strides + ); + /* } */ + + /* prepare grad_in for TensorIterator { */ + auto grad_in_strides = ensure_nonempty_vec(grad_in.strides().vec()); + auto grad_in_sizes = ensure_nonempty_vec(grad_in.sizes().vec()); + + // set strides for dim to 0 + // and size to 1 because + // this dimension is indexed inside the kernel + grad_in_strides[dim] = 0; + grad_in_sizes[dim] = 1; + + grad_in_strides.pop_back(); + grad_in_sizes.pop_back(); + + auto grad_in_restrided = grad_in.squeeze(-1).as_strided( + grad_in_sizes, grad_in_strides + ); + /* } */ + + // During the TensorIterator iteration we have to know + // i_dim in grad_out[i_1,...,i_dim,...i_n], + // idx_dim stores this information + /* prepare idx_dim for TensorIterator { */ + auto idx_dim = at::arange( + 0, iter_dim_size, grad_in.options().dtype(at::kLong) + ); + + auto grad_out_dim = ensure_nonempty_dim(grad_out.dim()); + + auto idx_dim_strides = std::vector(grad_out_dim, 0); + auto idx_dim_sizes = std::vector(grad_out_dim, 1); + + idx_dim_strides[dim] = 1; + idx_dim_sizes[dim] = iter_dim_size; + + // idx_dim size will broadcast over determined by grad_out sizes in TensorIterator + auto idx_dim_restrided = idx_dim.as_strided(idx_dim_sizes, idx_dim_strides); + /* } */ + + auto iter = TensorIteratorConfig() + .set_check_mem_overlap(false) + .check_all_same_dtype(false) + .resize_outputs(false) + .add_owned_output(grad_out_restrided) + .add_owned_input(grad_in_restrided) + .add_owned_input(idx_dim_restrided) + .build(); + + return iter; +} + +static C10_UNUSED TensorIterator _make_unfold_backward_iter_over_grad_in( + Tensor& grad_out, + const Tensor& grad_in, + int64_t dim, + int64_t /*size*/, + int64_t /*step*/ +) { + dim = maybe_wrap_dim(dim, grad_out.dim()); + // last dim stores the folds + auto last_dim = maybe_wrap_dim(-1, grad_in.dim()); + + auto grad_in_dim = ensure_nonempty_dim(grad_in.dim()); + auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim); + auto grad_in_last_dim_size = ensure_nonempty_size(grad_in, last_dim); + + /* prepare grad_out for TensorIterator { */ + auto grad_out_restrided = grad_out.unsqueeze(-1); + + auto grad_out_strides = ensure_nonempty_vec(grad_out_restrided.strides().vec()); + auto grad_out_sizes = ensure_nonempty_vec(grad_out_restrided.sizes().vec()); + + grad_out_strides[dim] = 0; + grad_out_strides[last_dim] = 0; + + grad_out_sizes[dim] = grad_in_dim_size; + grad_out_sizes[last_dim] = grad_in_last_dim_size; + + grad_out_restrided = grad_out_restrided.as_strided(grad_out_sizes, grad_out_strides); + /* } */ + + // for each element grad_out[i_1,...,i_dim,...,i_last_dim] + // we have to know i_dim and i_last_dim. 
+ // This information is stored in Tensors + // idx_dim and idx_last_dim + /* prepare idx_dim and idx_last_dim for TensorIterator { */ + auto idx_dim = at::arange( + 0, grad_in_dim_size, grad_in.options().dtype(at::kLong) + ); + + auto idx_dim_strides = std::vector(grad_in_dim, 0); + auto idx_dim_sizes = std::vector(grad_in_dim, 1); + + idx_dim_strides[dim] = 1; + idx_dim_sizes[dim] = grad_in_dim_size; + + auto idx_dim_restrided = idx_dim.as_strided(idx_dim_sizes, idx_dim_strides); + + auto idx_last_dim = at::arange( + 0, grad_in_last_dim_size, grad_in.options().dtype(at::kLong) + ); + + auto idx_last_dim_strides = std::vector(grad_in_dim, 0); + auto idx_last_dim_sizes = std::vector(grad_in_dim, 1); + + idx_last_dim_strides[last_dim] = 1; + idx_last_dim_sizes[last_dim] = grad_in_last_dim_size; + + auto idx_last_dim_restrided = idx_last_dim.as_strided(idx_last_dim_sizes, idx_last_dim_strides); + /* } */ + + auto iter = TensorIteratorConfig() + .set_check_mem_overlap(false) + .check_all_same_dtype(false) + .resize_outputs(false) + .add_owned_output(grad_out_restrided) + .add_owned_input(grad_in) + .add_owned_input(idx_dim_restrided) + .add_owned_input(idx_last_dim_restrided) + .build(); + + return iter; +} + +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/UpSample.h b/voice_bridge/torch/include/ATen/native/UpSample.h new file mode 100644 index 0000000000000000000000000000000000000000..f3dd836444d1312f178ba966b458200d9c165e80 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/UpSample.h @@ -0,0 +1,463 @@ +#pragma once + +#include + +#include +#include +#include +#include + +/** + * Note [compute_scales_value] + * Note [area_pixel_compute_scale] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Interpolate with scale_factor can have different behaviors + * depending on the value of recompute_scale_factor: + * + * - With recompute_scale_factor = True (current default behavior): + * the scale_factor, when provided by the user, are used to calculate + * the output size. The input size and the computed output_size + * are then used to infer new values for the scales which are + * used in the interpolation. Because floating-point math is not exact, + * this may be a different value from the user-supplied scales. + * + * - With recompute_scale_factor = False (which will be the default + * behavior starting 1.5.0): + * the behavior follows opencv logic, and the scales provided by + * the user are the ones used in the interpolation calculations. + * + * If the scales are not provided or if they are provided but + * recompute_scale_factor is set to True (default behavior), the scales + * are computed from the input and the output size; + * + * + * When the scales are inferred from the input and output sizes, + * we view each pixel as an area, idx + 0.5 as its center index. + * Here is an example formula in 1D case. + * if align_corners: center of two corner pixel areas are preserved, + * (0.5, 0.5) -> (0.5, 0.5), + * (input_size - 0.5, 0.5) -> (output_size - 0.5) + * scale = (input_size - 0.5 - 0.5) / (output_size - 0.5 - 0.5) + * src_index + 0.5 - 0.5 = scale * (dst_index + 0.5 - 0.5) + * if not align_corners: the whole range is scaled accordingly + * scale = input_size / output_size + * src_idx + 0.5 = scale * (dst_index + 0.5) + */ + +namespace at { +namespace native { + +namespace upsample { + +TORCH_API c10::SmallVector compute_output_size( + c10::IntArrayRef input_size, // Full input tensor size. 
+ at::OptionalIntArrayRef output_size, + c10::optional> scale_factors); + +inline c10::optional get_scale_value(c10::optional> scales, int idx) { + if (!scales) { + return nullopt; + } + return scales->at(idx); +} + +} // namespace upsample + +using scale_t = c10::optional; +using upsampling_nearest1d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_w); +using _upsampling_nearest_exact1d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_w); +using upsampling_nearest2d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_h, scale_t scales_w); +using _upsampling_nearest_exact2d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_h, scale_t scales_w); +using upsampling_nearest3d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_d, scale_t scales_h, scale_t scales_w); +using _upsampling_nearest_exact3d = void(*)(const Tensor& output, const Tensor& input, scale_t scales_d, scale_t scales_h, scale_t scales_w); +using upsampling_linear1d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_w); +using upsampling_bilinear2d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w); +using _upsampling_bilinear2d_aa = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w); +using upsampling_trilinear3d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_d, scale_t scales_h, scale_t scales_w); +using upsampling_bicubic2d = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w); +using _upsampling_bicubic2d_aa = void(*)(const Tensor& output, const Tensor& input, bool align_corners, scale_t scales_h, scale_t scales_w); +DECLARE_DISPATCH(upsampling_nearest1d, upsample_nearest1d_kernel); +DECLARE_DISPATCH(_upsampling_nearest_exact1d, _upsample_nearest_exact1d_kernel); +DECLARE_DISPATCH(upsampling_nearest2d, upsample_nearest2d_kernel); +DECLARE_DISPATCH(_upsampling_nearest_exact2d, _upsample_nearest_exact2d_kernel); +DECLARE_DISPATCH(upsampling_nearest3d, upsample_nearest3d_kernel); +DECLARE_DISPATCH(_upsampling_nearest_exact3d, _upsample_nearest_exact3d_kernel); +DECLARE_DISPATCH(upsampling_nearest1d, upsample_nearest1d_backward_kernel); +DECLARE_DISPATCH(_upsampling_nearest_exact1d, _upsample_nearest_exact1d_backward_kernel); +DECLARE_DISPATCH(upsampling_nearest2d, upsample_nearest2d_backward_kernel); +DECLARE_DISPATCH(_upsampling_nearest_exact2d, _upsample_nearest_exact2d_backward_kernel); +DECLARE_DISPATCH(upsampling_nearest3d, upsample_nearest3d_backward_kernel); +DECLARE_DISPATCH(_upsampling_nearest_exact3d, _upsample_nearest_exact3d_backward_kernel); +DECLARE_DISPATCH(upsampling_linear1d, upsample_linear1d_kernel); +DECLARE_DISPATCH(upsampling_bilinear2d, upsample_bilinear2d_kernel); +DECLARE_DISPATCH(_upsampling_bilinear2d_aa, _upsample_bilinear2d_aa_kernel); +DECLARE_DISPATCH(upsampling_trilinear3d, upsample_trilinear3d_kernel); +DECLARE_DISPATCH(upsampling_linear1d, upsample_linear1d_backward_kernel); +DECLARE_DISPATCH(upsampling_bilinear2d, upsample_bilinear2d_backward_kernel); +DECLARE_DISPATCH(_upsampling_bilinear2d_aa, _upsample_bilinear2d_aa_backward_kernel); +DECLARE_DISPATCH(upsampling_trilinear3d, upsample_trilinear3d_backward_kernel); +DECLARE_DISPATCH(upsampling_bicubic2d, upsample_bicubic2d_kernel); +DECLARE_DISPATCH(_upsampling_bicubic2d_aa, _upsample_bicubic2d_aa_kernel); 
+DECLARE_DISPATCH(_upsampling_bicubic2d_aa, _upsample_bicubic2d_aa_backward_kernel); + +static C10_UNUSED std::array upsample_1d_common_check(IntArrayRef input_size, IntArrayRef output_size) { + TORCH_CHECK( + output_size.size() == 1, + "It is expected output_size equals to 1, but got size ", + output_size.size()); + + TORCH_CHECK( + input_size.size() == 3, + "It is expected input_size equals to 3, but got size ", + input_size.size()); + + int64_t output_width = output_size[0]; + + int64_t nbatch = input_size[0]; + int64_t channels = input_size[1]; + int64_t input_width = input_size[2]; + + TORCH_CHECK( + input_width > 0 && output_width > 0, + "Input and output sizes should be greater than 0, but got input (W: ", + input_width, + ") and output (W: ", + output_width, + ")"); + + return {nbatch, channels, output_width}; +} + +static C10_UNUSED std::array upsample_2d_common_check(IntArrayRef input_size, IntArrayRef output_size) { + TORCH_CHECK( + output_size.size() == 2, + "It is expected output_size equals to 2, but got size ", + output_size.size()); + + TORCH_CHECK( + input_size.size() == 4, + "It is expected input_size equals to 4, but got size ", + input_size.size()); + + int64_t output_height = output_size[0]; + int64_t output_width = output_size[1]; + + int64_t nbatch = input_size[0]; + int64_t channels = input_size[1]; + int64_t input_height = input_size[2]; + int64_t input_width = input_size[3]; + + TORCH_CHECK( + input_height > 0 && input_width > 0 && output_height > 0 && + output_width > 0, + "Input and output sizes should be greater than 0," + " but got input (H: ", + input_height, + ", W: ", + input_width, + ") output (H: ", + output_height, + ", W: ", + output_width, + ")"); + + return {nbatch, channels, output_height, output_width}; +} + +static C10_UNUSED +std::array upsample_3d_common_check(IntArrayRef input_size, IntArrayRef output_size) { + TORCH_CHECK( + output_size.size() == 3, + "It is expected output_size equals to 3, but got size ", + output_size.size()); + + TORCH_CHECK( + input_size.size() == 5, + "It is expected input_size equals to 5, but got size ", + input_size.size()); + + int64_t output_depth = output_size[0]; + int64_t output_height = output_size[1]; + int64_t output_width = output_size[2]; + + int64_t nbatch = input_size[0]; + int64_t channels = input_size[1]; + int64_t input_depth = input_size[2]; + int64_t input_height = input_size[3]; + int64_t input_width = input_size[4]; + + TORCH_CHECK( + input_depth > 0 && input_height > 0 && input_width > 0 && + output_depth > 0 && output_height > 0 && output_width > 0, + "Input and output sizes should be greater than 0, but got input (D: ", + input_depth, + ", H: ", + input_height, + ", W: ", + input_width, + ") output (D: ", + output_depth, + ", H: ", + output_height, + ", W: ", + output_width, + ")"); + + + return {nbatch, channels, output_depth, output_height, output_width}; +} + +static inline void upsample_2d_shape_check( + const Tensor& input, + const Tensor& grad_output, + int64_t nbatch, + int64_t nchannels, + int64_t input_height, + int64_t input_width, + int64_t output_height, + int64_t output_width) { + TORCH_CHECK( + input_height > 0 && input_width > 0 && output_height > 0 && + output_width > 0, + "Input and output sizes should be greater than 0," + " but got input (H: ", + input_height, + ", W: ", + input_width, + ") output (H: ", + output_height, + ", W: ", + output_width, + ")"); + + if (input.defined()) { + // Allow for empty batch size but not other dimensions + TORCH_CHECK( + (input.numel() != 0 || + 
(input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0)
+      ) &&
+      input.dim() == 4,
+      "Non-empty 4D data tensor expected but got a tensor with sizes ",
+      input.sizes());
+  } else if (grad_output.defined()) {
+    check_dim_size(grad_output, 4, 0, nbatch);
+    check_dim_size(grad_output, 4, 1, nchannels);
+    check_dim_size(grad_output, 4, 2, output_height);
+    check_dim_size(grad_output, 4, 3, output_width);
+  }
+}
+
+template <typename scalar_t>
+static inline scalar_t compute_scales_value(
+    const c10::optional<double> scale,
+    int64_t input_size,
+    int64_t output_size) {
+  // see Note [compute_scales_value]
+  // FIXME: remove magic > 0 after we ensure no models were serialized with -1 defaults.
+  return (scale.has_value() && scale.value() > 0.)
+      ? static_cast<scalar_t>(1.0 / scale.value())
+      : (static_cast<scalar_t>(input_size) / output_size);
+}
+
+template <typename scalar_t>
+static inline scalar_t area_pixel_compute_scale(
+    int64_t input_size,
+    int64_t output_size,
+    bool align_corners,
+    const c10::optional<double> scale) {
+  // see Note [area_pixel_compute_scale]
+  if (align_corners) {
+    if (output_size > 1) {
+      return static_cast<scalar_t>(input_size - 1) / (output_size - 1);
+    } else {
+      return static_cast<scalar_t>(0);
+    }
+  } else {
+    return compute_scales_value<scalar_t>(scale, input_size, output_size);
+  }
+}
+
+template <typename scalar_t>
+static inline scalar_t area_pixel_compute_source_index(
+    scalar_t scale,
+    int64_t dst_index,
+    bool align_corners,
+    bool cubic) {
+  if (align_corners) {
+    return scale * dst_index;
+  } else {
+    scalar_t src_idx = scale * (dst_index + static_cast<scalar_t>(0.5)) -
+        static_cast<scalar_t>(0.5);
+    // [Note] Follow OpenCV resize logic:
+    // We allow negative src_idx here and later will use
+    //   dx = src_idx - floorf(src_idx)
+    // to compute the "distance" (which affects the weights).
+    // For linear modes, the weight distribution doesn't matter
+    // for negative indices as they use 2 pixels to interpolate.
+    // For example, for [-1, 0] both use the pixel 0 value, so it
+    // doesn't matter whether we bound src_idx to 0 or not.
+    // TODO: Our current linear mode impls use unbound indices
+    // where we should and then remove this cubic flag.
+    // This matters in cubic mode, as we might need [-1, 0, 1, 2]
+    // to interpolate and the weights can be affected.
+    return (!cubic && src_idx < static_cast<scalar_t>(0)) ? scalar_t(0)
+                                                          : src_idx;
+  }
+}
+
+static inline int64_t nearest_neighbor_compute_source_index(
+    const float scale,
+    int64_t dst_index,
+    int64_t input_size) {
+  // Index computation matching OpenCV INTER_NEAREST,
+  // which is buggy and kept for BC
+  const int64_t src_index =
+      std::min(static_cast<int64_t>(floorf(dst_index * scale)), input_size - 1);
+  return src_index;
+}
+
+static inline int64_t nearest_neighbor_exact_compute_source_index(
+    const float scale,
+    int64_t dst_index,
+    int64_t input_size) {
+  // index_f32 = (output_index + 0.5) * scale - 0.5
+  // input_index = round(index_f32)
+  // Same as Pillow and Scikit-Image/Scipy ndi.zoom
+  const int64_t src_index =
+      std::min(static_cast<int64_t>(floorf((dst_index + 0.5) * scale)), input_size - 1);
+  return src_index;
+}
+
+static inline int64_t nearest_idx(
+    int64_t output_index,
+    int64_t input_size,
+    int64_t output_size,
+    c10::optional<double> scales) {
+  // This method specifically handles the cases output_size == input_size and
+  // output_size == 2 * input_size, which we would like to get rid of.
+  // We keep this method for BC and consider it deprecated.
+ // See nearest_exact_idx as replacement + if (output_size == input_size) { + // scale_factor = 1, simply copy + return output_index; + } else if (output_size == 2 * input_size) { + // scale_factor = 2, shift input index + return output_index >> 1; + } else { + float scale = compute_scales_value(scales, input_size, output_size); + return nearest_neighbor_compute_source_index(scale, output_index, input_size); + } +} + +static inline int64_t nearest_exact_idx( + int64_t output_index, + int64_t input_size, + int64_t output_size, + c10::optional scales) { + float scale = compute_scales_value(scales, input_size, output_size); + return nearest_neighbor_exact_compute_source_index(scale, output_index, input_size); +} + +// Define a typedef to dispatch to nearest_idx or nearest_exact_idx +typedef int64_t (*nearest_idx_fn_t)(int64_t, int64_t, int64_t, c10::optional); + +template +static scalar_t upsample_get_value_bounded( + scalar_t* data, + int64_t width, + int64_t height, + int64_t x, + int64_t y) { + int64_t access_x = std::max(std::min(x, width - 1), static_cast(0)); + int64_t access_y = std::max(std::min(y, height - 1), static_cast(0)); + return data[access_y * width + access_x]; +} + +template +static void upsample_increment_value_bounded( + scalar_t* data, + int64_t width, + int64_t height, + int64_t x, + int64_t y, + scalar_t value) { + int64_t access_x = std::max(std::min(x, width - 1), static_cast(0)); + int64_t access_y = std::max(std::min(y, height - 1), static_cast(0)); + data[access_y * width + access_x] += value; +} + +// Based on +// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm +template +static inline scalar_t cubic_convolution1(scalar_t x, scalar_t A) { + return ((A + 2) * x - (A + 3)) * x * x + 1; +} + +template +static inline scalar_t cubic_convolution2(scalar_t x, scalar_t A) { + return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; +} + +template +static inline void get_cubic_upsample_coefficients( + scalar_t coeffs[4], + scalar_t t) { + scalar_t A = -0.75; + + scalar_t x1 = t; + coeffs[0] = cubic_convolution2(x1 + 1.0, A); + coeffs[1] = cubic_convolution1(x1, A); + + // opposite coefficients + scalar_t x2 = 1.0 - t; + coeffs[2] = cubic_convolution1(x2, A); + coeffs[3] = cubic_convolution2(x2 + 1.0, A); +} + +template +static inline scalar_t cubic_interp1d( + scalar_t x0, + scalar_t x1, + scalar_t x2, + scalar_t x3, + scalar_t t) { + scalar_t coeffs[4]; + get_cubic_upsample_coefficients(coeffs, t); + + return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; +} + +template +static inline void compute_source_index_and_lambda( + int64_t& input_index0, + int64_t& input_index1, + scalar_t& lambda0, + scalar_t& lambda1, + scalar_t ratio, + int64_t output_index, + int64_t input_size, + int64_t output_size, + bool align_corners) { + if (output_size == input_size) { + // scale_factor = 1, simply copy + input_index0 = output_index; + input_index1 = output_index; + lambda0 = static_cast(1); + lambda1 = static_cast(0); + } else { + using accscalar_t = at::acc_type; + const accscalar_t real_input_index = + area_pixel_compute_source_index( + ratio, output_index, align_corners, /*cubic=*/false); + input_index0 = static_cast(real_input_index); + int64_t offset = (input_index0 < input_size - 1) ? 1 : 0; + input_index1 = input_index0 + offset; + lambda1 = real_input_index - input_index0; + lambda0 = static_cast(1.) 
- lambda1; + } +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/batch_norm.h b/voice_bridge/torch/include/ATen/native/batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..b729dfe199b0d7943515ae3614093d0c1e975358 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/batch_norm.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +namespace at { + +namespace native { + +using batch_norm_fn = void (*)(Tensor&, const Tensor&, const Tensor&, + const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double); +using batch_norm_collect_stats_fn = void (*)(Tensor&, Tensor&, const Tensor&); +using batch_norm_backward_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, + const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double); + +DECLARE_DISPATCH(batch_norm_fn, batch_norm_cpu_stub); +DECLARE_DISPATCH(batch_norm_collect_stats_fn, batch_norm_cpu_collect_stats_stub); +DECLARE_DISPATCH(batch_norm_backward_fn, batch_norm_cpu_backward_stub); + +// TensorAccessor when it is defined to work around undefined... +template +static TensorAccessor conditional_accessor_1d(const Tensor& t) { + if (! t.defined()) { + return TensorAccessor(nullptr, nullptr, nullptr); + } + return t.accessor(); +} + +template +static scalar_t* conditional_data_ptr(const Tensor& t) { + return t.defined() ? t.contiguous().data_ptr() + : nullptr; +} + +} // namespace native + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cpu/AtomicAddFloat.h b/voice_bridge/torch/include/ATen/native/cpu/AtomicAddFloat.h new file mode 100644 index 0000000000000000000000000000000000000000..5b24ee4821c45baab25f37a3bfa3399eff8a1716 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/AtomicAddFloat.h @@ -0,0 +1,37 @@ +#ifndef ATOMIC_ADD_FLOAT +#define ATOMIC_ADD_FLOAT + +#if (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)) +#include +#else +#define _mm_pause() +#endif + +#include + +static inline void cpu_atomic_add_float(float* dst, float fvalue) +{ + typedef union { + unsigned intV; + float floatV; + } uf32_t; + + uf32_t new_value, old_value; + std::atomic* dst_intV = (std::atomic*)(dst); + + old_value.floatV = *dst; + new_value.floatV = old_value.floatV + fvalue; + + unsigned* old_intV = (unsigned*)(&old_value.intV); + while (!std::atomic_compare_exchange_strong(dst_intV, old_intV, new_value.intV)) { +#ifdef __aarch64__ + __asm__ __volatile__("yield;" : : : "memory"); +#else + _mm_pause(); +#endif + old_value.floatV = *dst; + new_value.floatV = old_value.floatV + fvalue; + } +} + +#endif diff --git a/voice_bridge/torch/include/ATen/native/cpu/CatKernel.h b/voice_bridge/torch/include/ATen/native/cpu/CatKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..aedb4aec4f574700ab1060dd17d0c5dcd9846f79 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/CatKernel.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +#include + +namespace at { namespace native { + +using cat_serial_fn = void(*)(const Tensor &, const MaterializedITensorListRef&, int64_t); +DECLARE_DISPATCH(cat_serial_fn, cat_serial_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cpu/ChannelShuffleKernel.h b/voice_bridge/torch/include/ATen/native/cpu/ChannelShuffleKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..10e592cf59eb751bbd556597905b4c4279229eaa --- /dev/null +++ 
b/voice_bridge/torch/include/ATen/native/cpu/ChannelShuffleKernel.h @@ -0,0 +1,14 @@ +#pragma once +#include +#include + +namespace at { +class TensorBase; +} + +namespace at { namespace native { + +using channel_shuffle_fn = void(*)(TensorBase&, const TensorBase&, int64_t); +DECLARE_DISPATCH(channel_shuffle_fn, channel_shuffle_kernel); + +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/cpu/CopyKernel.h b/voice_bridge/torch/include/ATen/native/cpu/CopyKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..9d2affd6101ab9d838789e6ab674a011c0490e3d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/CopyKernel.h @@ -0,0 +1,12 @@ +#pragma once + +namespace at { +struct TensorIteratorBase; + +namespace native { +inline namespace CPU_CAPABILITY { + +void direct_copy_kernel(TensorIteratorBase &iter); +void copy_kernel(TensorIterator& iter, bool /*non_blocking*/); + +}}} // namespace at::native::CPU_CAPABILITY diff --git a/voice_bridge/torch/include/ATen/native/cpu/DepthwiseConvKernel.h b/voice_bridge/torch/include/ATen/native/cpu/DepthwiseConvKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..56956b443386d33027c3faa72eb4be9a76eab8af --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/DepthwiseConvKernel.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +/* + Depthwise 3x3 Winograd convolution operator +*/ + +namespace at { +class Tensor; + +namespace native { + +using convolution_depthwise3x3_winograd_fn = + Tensor (*)(const Tensor &, const Tensor &, const Tensor &,IntArrayRef, IntArrayRef, int64_t); + +DECLARE_DISPATCH(convolution_depthwise3x3_winograd_fn, convolution_depthwise3x3_winograd_stub); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cpu/DistributionTemplates.h b/voice_bridge/torch/include/ATen/native/cpu/DistributionTemplates.h new file mode 100644 index 0000000000000000000000000000000000000000..37c799803eaf2e75f3960431b24aaee74c49efbe --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/DistributionTemplates.h @@ -0,0 +1,364 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CPU_CAPABILITY_AVX2 +#include +#include +#endif + + +namespace at { +namespace native { +namespace templates { +namespace cpu { +namespace { + +// ==================================================== Random ======================================================== + +template +void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG generator) { + AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_from_to_kernel_cpu", [&] { + std::lock_guard lock(generator->mutex_); + cpu_serial_kernel(iter, [range, base, generator]() -> scalar_t { + uniform_int_from_to_distribution random(range, base); + return random(generator); + }); + }); +} + +// This is the special kernel to handle single specific case: +// from(inclusive) = std::numeric_limits::lowest() +// to(exclusive) = None (= std::numeric_limits::max() + 1) +template +void random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG generator) { + AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel_cpu", [&] { + std::lock_guard lock(generator->mutex_); + if (std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value) { + cpu_serial_kernel(iter, [generator]() -> scalar_t { + 
uniform_int_full_range_distribution random; + return random(generator); + }); + } else { + TORCH_CHECK(false, "random_full_64_bits_range_kernel_cpu handles only int64, double, float and bfloat16"); + } + }); +} + +template +struct RandomFromToKernel { + void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen) { + random_from_to_kernel(iter, range, base, check_generator(gen)); + } + void operator()(TensorIteratorBase& iter, c10::optional gen) { + random_full_64_bits_range_kernel(iter, check_generator(gen)); + } +}; + +template +void random_kernel(TensorIteratorBase& iter, RNG generator) { + std::lock_guard lock(generator->mutex_); + AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "random_kernel_cpu", [&] { + cpu_serial_kernel(iter, [generator]() -> scalar_t { + uniform_int_distribution random; + return random(generator); + }); + }); +} + +template +struct RandomKernel { + void operator()(TensorIteratorBase& iter, c10::optional gen) { + random_kernel(iter, check_generator(gen)); + } +}; + +// ==================================================== Normal ======================================================== + +#ifdef CPU_CAPABILITY_AVX2 +static void normal_fill_16_AVX2(float *data, + const __m256* two_pi, + const __m256* one, + const __m256* minus_two, + const __m256* mean, + const __m256* std_v) { + const __m256 u1 = _mm256_sub_ps(*one, _mm256_loadu_ps(data)); + const __m256 u2 = _mm256_loadu_ps(data + 8); + // sincos256_ps and log256_ps are from avx_mathfun.h + const __m256 radius = _mm256_sqrt_ps(_mm256_mul_ps(*minus_two, log256_ps(u1))); + const __m256 theta = _mm256_mul_ps(*two_pi, u2); + __m256 sintheta, costheta; + sincos256_ps(theta, &sintheta, &costheta); + const __m256 n1 = _mm256_mul_ps(radius, costheta); + const __m256 n2 = _mm256_mul_ps(radius, sintheta); + _mm256_storeu_ps(data, _mm256_fmadd_ps(n1, *std_v, *mean)); + _mm256_storeu_ps(data + 8, _mm256_fmadd_ps(n2, *std_v, *mean)); +} + +template +void normal_fill_AVX2(const TensorBase &self, const float mean, const float std, RNG generator) { + float *data = self.data_ptr(); + auto size = self.numel(); + std::lock_guard lock(generator->mutex_); + for (const auto i : c10::irange(size)) { + at::uniform_real_distribution uniform(0, 1); + data[i] = uniform(generator); + } + const __m256 two_pi = _mm256_set1_ps(2.0f * c10::pi); + const __m256 one = _mm256_set1_ps(1.0f); + const __m256 minus_two = _mm256_set1_ps(-2.0f); + const __m256 mean_v = _mm256_set1_ps(mean); + const __m256 std_v = _mm256_set1_ps(std); + + for (int64_t i = 0; i < size - 15; i += 16) { + normal_fill_16_AVX2(data + i, &two_pi, &one, &minus_two, &mean_v, &std_v); + } + + if (size % 16 != 0) { + // Recompute the last 16 values. + data = data + size - 16; + for (const auto i : c10::irange(16)) { + at::uniform_real_distribution uniform(0, 1); + data[i] = uniform(generator); + } + normal_fill_16_AVX2(data, &two_pi, &one, &minus_two, &mean_v, &std_v); + } +} +#endif + +template +static void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t std) { + for (const auto j : c10::irange(8)) { + const scalar_t u1 = 1 - data[j]; // [0, 1) -> (0, 1] for log. 
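+    // Box-Muller transform: the two uniforms u1 in (0, 1] and u2 in [0, 1)
+    // prepared here are mapped to two independent normals, radius * cos(theta)
+    // and radius * sin(theta), then scaled by std and shifted by mean.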
+ const scalar_t u2 = data[j + 8]; + const scalar_t radius = std::sqrt(-2 * std::log(u1)); + const scalar_t theta = 2.0f * c10::pi * u2; + data[j] = radius * std::cos(theta) * std + mean; + data[j + 8] = radius * std::sin(theta) * std + mean; + } +} + +template +void normal_fill(const TensorBase &self, const scalar_t mean, const scalar_t std, RNG generator) { + scalar_t *data = self.data_ptr(); + auto size = self.numel(); + std::lock_guard lock(generator->mutex_); + for (const auto i : c10::irange(size)) { + at::uniform_real_distribution uniform(0, 1); + data[i] = uniform(generator); + } + + for (int64_t i = 0; i < size - 15; i += 16) { + normal_fill_16(data + i, mean, std); + } + if (size % 16 != 0) { + // Recompute the last 16 values. + data = data + size - 16; + for (const auto i : c10::irange(16)) { + at::uniform_real_distribution uniform(0, 1); + data[i] = uniform(generator); + } + normal_fill_16(data, mean, std); + } +} + +template +void normal_kernel(const TensorBase &self, double mean, double std, RNG generator) { + auto size = self.numel(); + if (self.scalar_type() == ScalarType::Float && size >= 16 && self.is_contiguous()) { +#ifdef CPU_CAPABILITY_AVX2 + normal_fill_AVX2(self, static_cast(mean), static_cast(std), generator); +#else + normal_fill(self, static_cast(mean), static_cast(std), generator); +#endif + } else { + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, self.scalar_type(), "normal_kernel_cpu", [&] { + if (size >= 16 && self.is_contiguous()) { + normal_fill(self, static_cast(mean), static_cast(std), generator); + } else { + auto iter = TensorIterator::borrowing_nullary_op(self); + std::lock_guard lock(generator->mutex_); + cpu_serial_kernel(iter, [mean, std, generator]() -> scalar_t { + at::normal_distribution normal(mean, std); + return static_cast(normal(generator)); + }); + } + }); + } +} + +template +struct NormalKernel { + void operator()(Tensor& self, double mean, double std, c10::optional gen) { + normal_kernel(self, mean, std, check_generator(gen)); + } +}; + +// ==================================================== Uniform ======================================================= + +template +void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG generator) { + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "uniform_kernel_cpu", [&]() { + std::lock_guard lock(generator->mutex_); + auto from = static_cast(from_); + auto to = static_cast(to_); + at::uniform_real_distribution uniform(from, to); + cpu_serial_kernel(iter, [&uniform, generator]() -> scalar_t { + return static_cast(uniform(generator)); + }); + }); +} + +template +struct UniformKernel { + void operator()(TensorIteratorBase& iter, double from, double to, c10::optional gen) { + uniform_kernel(iter, from, to, check_generator(gen)); + } +}; + +// ==================================================== Cauchy ======================================================== + +template +void cauchy_kernel(TensorIteratorBase& iter, double median, double sigma, RNG generator) { + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "cauchy_cpu", [&]() { + std::lock_guard lock(generator->mutex_); + at::cauchy_distribution cauchy(median, sigma); + cpu_serial_kernel(iter, [&cauchy, generator]() -> scalar_t { + return static_cast(cauchy(generator)); + }); + }); +} + +template +struct CauchyKernel { + void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) { + cauchy_kernel(iter, median, sigma, check_generator(gen)); + } +}; + +// 
================================================== LogNormal ======================================================= + +template +void log_normal_kernel(TensorIteratorBase& iter, double mean, double std, RNG generator) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cpu", [&]() { + std::lock_guard lock(generator->mutex_); + at::lognormal_distribution logNormal(mean, std); + cpu_serial_kernel(iter, [&logNormal, generator]() -> scalar_t { + return static_cast(logNormal(generator)); + }); + }); +} + +template +struct LogNormalKernel { + void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional gen) { + log_normal_kernel(iter, mean, std, check_generator(gen)); + } +}; + +// =================================================== Geometric ====================================================== + +template +void geometric_kernel(TensorIteratorBase& iter, double p, RNG generator) { + AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cpu", [&]() { + std::lock_guard lock(generator->mutex_); + at::geometric_distribution geometric(p); + cpu_serial_kernel(iter, [&geometric, generator]() -> scalar_t { + return static_cast(geometric(generator)); + }); + }); +} + +template +struct GeometricKernel { + void operator()(TensorIteratorBase& iter, double p, c10::optional gen) { + geometric_kernel(iter, p, check_generator(gen)); + } +}; + +// ================================================== Exponential ===================================================== + +template +void exponential_kernel(TensorIteratorBase& iter, double lambda, RNG generator) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cpu", [&]() { + std::lock_guard lock(generator->mutex_); + at::exponential_distribution exponential(lambda); + cpu_serial_kernel(iter, [&exponential, generator]() -> scalar_t { + return static_cast(exponential(generator)); + }); + }); +} + +template +struct ExponentialKernel { + void operator()(TensorIteratorBase& iter, double lambda, c10::optional gen) { + exponential_kernel(iter, lambda, check_generator(gen)); + } +}; + +// ================================================== Bernoulli ======================================================= + +template +void bernoulli_kernel(const TensorBase &self, const TensorBase &p_, RNG generator) { + AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "bernoulli_tensor_cpu_self_", [&] { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(generator->mutex_); + using self_t = scalar_t; + auto p_cpu = p_.to(kCPU); + auto p = expand_inplace(self, p_cpu); + auto iter = TensorIteratorConfig() + .add_output(self) + .add_input(*p) + .check_all_same_dtype(false) + .build(); + if (p->scalar_type() == kDouble) { + cpu_serial_kernel(iter, [&](const double p_val) -> self_t { + at::bernoulli_distribution bernoulli(p_val); + return static_cast(bernoulli(generator)); + }); + } else { + AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::BFloat16, p->scalar_type(), "bernoulli_tensor_cpu_p_", [&] { + using p_t = scalar_t; + cpu_serial_kernel(iter, [&](const p_t p_val) -> self_t { + at::bernoulli_distribution bernoulli(p_val); + return static_cast(bernoulli(generator)); + }); + }); + } + }); +} + +template +void bernoulli_kernel(const TensorBase &self, double p, RNG generator) { + AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, 
at::ScalarType::BFloat16, self.scalar_type(), "bernoulli_scalar_cpu_", [&] { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(generator->mutex_); + auto iter = TensorIterator::borrowing_nullary_op(self); + cpu_serial_kernel(iter, [p, generator]() -> scalar_t { + at::bernoulli_distribution bernoulli(p); + return static_cast(bernoulli(generator)); + }); + }); +} + +template +struct BernoulliKernel { + void operator()(const TensorBase &self, double p, c10::optional gen) { + bernoulli_kernel(self, p, check_generator(gen)); + } + void operator()(const TensorBase &self, const TensorBase &p_, c10::optional gen) { + bernoulli_kernel(self, p_, check_generator(gen)); + } +}; + +}}}}} diff --git a/voice_bridge/torch/include/ATen/native/cpu/GridSamplerKernel.h b/voice_bridge/torch/include/ATen/native/cpu/GridSamplerKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..b1830fcd3911ec871ee9f1728f2cfcbf1c625031 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/GridSamplerKernel.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +#include +#include + +namespace at { +class TensorBase; +} + +namespace at { namespace native { + +using forward_2d_fn = void (*) ( + const TensorBase &output, + const TensorBase &input, + const TensorBase &grid, + int64_t interpolation_mode, + int64_t padding_mode, + bool align_corners); +using backward_2d_fn = void (*) ( + const TensorBase &grad_input, + const TensorBase &grad_grid, + const TensorBase &grad_output, + const TensorBase &input, + const TensorBase &grid, + int64_t interpolation_mode, + int64_t padding_mode, + bool align_corners, + std::array output_mask); +DECLARE_DISPATCH(forward_2d_fn, grid_sampler_2d_cpu_kernel); +DECLARE_DISPATCH(backward_2d_fn, grid_sampler_2d_backward_cpu_kernel); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cpu/IndexKernelUtils.h b/voice_bridge/torch/include/ATen/native/cpu/IndexKernelUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..9e46b4fec2bbc62188c24c99b876935863ca8b74 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/IndexKernelUtils.h @@ -0,0 +1,94 @@ +#pragma once +#include +#include + +namespace at { +namespace native { + +namespace { +static bool is_constant_index(int ntensor, const int64_t* strides) { + AT_ASSERT(ntensor >= 3); + for (const auto arg : c10::irange(2, ntensor)) { + if (strides[arg] != 0) { + return false; + } + } + return true; +} + + +struct Indexer { + Indexer(int64_t num_indexers, char** indexers, const int64_t* indexer_strides, + IntArrayRef original_sizes, IntArrayRef original_strides) + : num_indexers(num_indexers) + , indexers(indexers) + , indexer_strides(indexer_strides) + , original_strides(original_strides.data()) + , original_sizes(original_sizes.data()) { + AT_ASSERT(static_cast(original_strides.size()) == num_indexers); + AT_ASSERT(static_cast(original_sizes.size()) == num_indexers); + } + + int64_t num_indexers; + char** indexers; + const int64_t* indexer_strides; + const int64_t* original_strides; + const int64_t* original_sizes; + + int64_t get(int64_t idx) { + int64_t offset = 0; + for (const auto j : c10::irange(num_indexers)) { + int64_t value = *(int64_t*)&indexers[j][idx * indexer_strides[j]]; + int64_t size = original_sizes[j]; + TORCH_CHECK_INDEX(value >= -size && value < size, + "index ", value, " is out of bounds for dimension ", j, " with size ", size); + if (value < 0) { + value += size; + } + offset += value * original_strides[j]; + } + return 
offset;
+  }
+};
+} // anonymous namespace
+
+template <typename scalar_t, typename func_t>
+void cpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride,
+                      const func_t& f, bool serial_execution=false)
+{
+  int ntensor = iter.ntensors();
+  // When launching the parallel version, use a relatively small grain size,
+  // less than at::internal::GRAIN_SIZE, so that the available threads get a
+  // more balanced workload and better cache locality. The grain size here
+  // was chosen by op benchmarks to amortize the thread launch overhead.
+  const int index_parallel_grain_size = 3000;
+  auto loop = [&](char** data, const int64_t* strides, int64_t n) {
+    auto indexer = Indexer(ntensor - 2, &data[2], &strides[2], index_size, index_stride);
+    char* dst = data[0];
+    char* src = data[1];
+    if (is_constant_index(ntensor, strides)) {
+      // specialization for when every element uses the same index
+      int64_t offset = indexer.get(0);
+      if (strides[0] == sizeof(scalar_t) && strides[1] == sizeof(scalar_t)) {
+        for (const auto i : c10::irange(n)) {
+          f(dst + strides[0] * i, src + strides[1] * i, offset);
+        }
+      } else {
+        for (const auto i : c10::irange(n)) {
+          f(dst + strides[0] * i, src + strides[1] * i, offset);
+        }
+      }
+    } else {
+      for (const auto i : c10::irange(n)) {
+        int64_t offset = indexer.get(i);
+        f(dst + strides[0] * i, src + strides[1] * i, offset);
+      }
+    }
+  };
+  if (serial_execution) {
+    iter.serial_for_each(loop, {0, iter.numel()});
+  } else {
+    iter.for_each(loop, index_parallel_grain_size);
+  }
+}
+} // namespace native
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/native/cpu/Intrinsics.h b/voice_bridge/torch/include/ATen/native/cpu/Intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..f3b35328f1882729a9158eaed7eb2abf77097484 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/Intrinsics.h @@ -0,0 +1,33 @@
+#pragma once
+
+#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
+/* Clang-compatible compiler, targeting x86/x86-64 */
+#include <x86intrin.h>
+#elif defined(_MSC_VER)
+/* Microsoft C/C++-compatible compiler */
+#include <intrin.h>
+#if _MSC_VER <= 1900
+#define _mm256_extract_epi64(X, Y) (((uint64_t*)&X)[Y])
+#endif
+#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+/* GCC-compatible compiler, targeting x86/x86-64 */
+#include <x86intrin.h>
+#elif defined(__GNUC__) && defined(__ARM_NEON__)
+/* GCC-compatible compiler, targeting ARM with NEON */
+#include <arm_neon.h>
+#elif defined(__GNUC__) && defined(__IWMMXT__)
+/* GCC-compatible compiler, targeting ARM with WMMX */
+#include <mmintrin.h>
+#elif (defined(__GNUC__) || defined(__xlC__)) && \
+    (defined(__VEC__) || defined(__ALTIVEC__))
+/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
+#include <altivec.h>
+/* We need to undef those tokens defined by <altivec.h> to avoid conflicts
+   with the C++ types.
=> Can still use __bool/__vector */ +#undef bool +#undef vector +#undef pixel +#elif defined(__GNUC__) && defined(__SPE__) +/* GCC-compatible compiler, targeting PowerPC with SPE */ +#include +#endif diff --git a/voice_bridge/torch/include/ATen/native/cpu/IsContiguous.h b/voice_bridge/torch/include/ATen/native/cpu/IsContiguous.h new file mode 100644 index 0000000000000000000000000000000000000000..192177cc9bcfb0988171fb68554ab56a7120ed4c --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/IsContiguous.h @@ -0,0 +1,62 @@ +#pragma once + +namespace at { namespace native { inline namespace CPU_CAPABILITY { + +// n: number of function arguments (arity) +// traits: function_traits (see FunctionTraits.h) +// s: index of scalar argument or -1 +template +struct IsContiguous { + static bool eval(const int64_t* strides) { + using type = typename traits::template arg::type; + return strides[stride_index] == (s == n ? 0 : sizeof(type)) && + IsContiguous::eval(strides); + } +}; + +// will be called when there is an output exists +template +struct IsContiguous<0, 0, traits, s> { + static bool eval(const int64_t* strides) { + return strides[0] == sizeof(typename traits::result_type); + } +}; + +// will be called when there is no output +template +struct IsContiguous<0, -1, traits, s> { + static bool eval(const int64_t* /*strides*/) { + return true; + } +}; + +// output and all inputs are contiguous +template ::value>::type* = nullptr> +static inline bool is_contiguous(const int64_t* strides) { + return IsContiguous::eval(strides); +} + +template ::value>::type* = nullptr> +static inline bool is_contiguous(const int64_t* strides) { + return IsContiguous::eval(strides); +} + +// input at `s` is scalar (stride 0); output and other inputs are contiguous +// NB: output is typically at strides[0] so first input corresponds to s=1 +template ::value>::type* = nullptr> +static inline bool is_contiguous_scalar(const int64_t* strides) { + static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds"); + return IsContiguous::eval(strides); +} + +template ::value>::type* = nullptr> +static inline bool is_contiguous_scalar(const int64_t* strides) { + static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds"); + return IsContiguous::eval(strides); +} + +}}} diff --git a/voice_bridge/torch/include/ATen/native/cpu/Loops.h b/voice_bridge/torch/include/ATen/native/cpu/Loops.h new file mode 100644 index 0000000000000000000000000000000000000000..8e76cca50f01a35651471d024f211b620af66b12 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/Loops.h @@ -0,0 +1,392 @@ +#pragma once + +// This file provides two functions to help write elementwise kernels: +// +// cpu_kernel(TensorIterator iter, ) +// cpu_kernel_vec(TensorIterator iter, , ) +// +// Both functions may generate vectorized code. The cpu_kernel implementation +// relies on the compiler's auto-vectorization. The cpu_kernel_vec +// implementation uses x86 SIMD intrinsics when available. These functions +// are only intended to be used in the ATen/native/cpu subdirectory, since files +// in other directories are not compiled with AVX/AVX2 enabled. See README.md +// for more details. 
+// +// For example, to write a multiplication kernel for float: +// +// cpu_kernel(iter, [](float a, float b) { return a * b; }); +// +// Or you may write: +// +// cpu_kernel_vec(iter, +// [](float a, float b) { return a * b; }, +// [](Vectorized a, Vectorized b) { return a * b; }); +// +// See BinaryOpsKernel.cpp for the complete implementation +// +// + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { inline namespace CPU_CAPABILITY { + +using namespace vec; + +template +typename traits::ArgsTuple +dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, + std::index_sequence) { + return std::make_tuple( + c10::load::type>( + data[INDEX] + i * strides[INDEX])...); +} + +template +typename traits::ArgsTuple +dereference(char* C10_RESTRICT data[], const int64_t* strides, int64_t i) { + using Indices = std::make_index_sequence; + return dereference_impl(data, strides, i, Indices{}); +} + +template +typename traits::ArgsTuple +dereference_vec_impl(char* C10_RESTRICT data[], + const typename traits::result_type& opt_scalar, + size_t S, + int64_t i, + std::index_sequence) { + using Vec = typename traits::result_type; + using scalar_t = typename Vec::value_type; + return std::make_tuple( + S == INDEX + 1 ? + opt_scalar : + Vec::loadu(data[INDEX] + i * sizeof(scalar_t))...); +} + +template +typename traits::ArgsTuple +dereference_vec(char* C10_RESTRICT data[], const typename traits::result_type& opt_scalar, size_t S, int64_t i) { + using Indices = std::make_index_sequence; + return dereference_vec_impl(data, opt_scalar, S, i, Indices{}); +} + +template ::result_type>::value>::type* = nullptr> +static inline void +execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) { + using traits = function_traits; + using result_type = typename traits::result_type; + for (; i < n; i++) { + result_type* out_ptr = (result_type*)(data[0] + i * strides[0]); + *out_ptr = c10::guts::apply(std::forward(op), dereference( + &data[1], + &strides[1], + i)); + } +} + +template ::result_type>::value>::type* = nullptr> +static inline void +execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) { + using traits = function_traits; + for (; i < n; i++) { + c10::guts::apply(std::forward(op), dereference( + &data[0], + &strides[0], + i)); + } +} + +// Basic loop operation (one output, N inputs). May be auto-vectorized +// by the compiler. Supports inputs and outputs of different types. +template +static inline void +basic_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) { + using traits = function_traits; + constexpr int ntensors = traits::arity + 1; + + // Copying strides to temporary array helps auto vectorization in older GCC + // versions. 
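+// (A further note on the copy below: a local fixed-size array can also help
+// the compiler prove that the stride values do not alias the data pointers
+// inside the hot loop, and suspected aliasing is a common reason for
+// auto-vectorization to bail out.)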
+  int64_t strides[ntensors];
+  for (const auto arg : c10::irange(ntensors)) {
+    strides[arg] = strides_[arg];
+  }
+
+  execute_op(data, strides, i, n, std::forward<func_t>(op));
+}
+
+// the recursive variadic template for iterating over the returned tuple
+template <typename T, int N>
+struct TupleOutput {
+  static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
+                     const T &tuple) {
+    TupleOutput<T, N - 1>::handle(data, strides, i, tuple);
+
+    auto output = std::get<N - 1>(tuple);
+    using output_type = decltype(output);
+    output_type * out_ptr = (output_type *)(data[N - 1] + i * strides[N - 1]);
+    *out_ptr = output;
+  }
+};
+
+// Base case for the above recursive template
+template <typename T>
+struct TupleOutput<T, 1> {
+  static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
+                     const T &tuple) {
+    auto output = std::get<0>(tuple);
+    using output_type = decltype(output);
+    output_type* out_ptr = (output_type *)(data[0] + i * strides[0]);
+    *out_ptr = output;
+  }
+};
+
+template <typename... Args>
+void handle_tuple_outputs(char* C10_RESTRICT data[],
+                          const int64_t* strides,
+                          int64_t i,
+                          const std::tuple<Args...> &tuple) {
+  TupleOutput<decltype(tuple), sizeof...(Args)>::handle(data, strides, i, tuple);
+}
+
+// Loop operation for `cpu_kernel_multiple_outputs`.
+// 1. Use `c10::guts::apply` to make dynamic method invocation
+//    for the lambda passed in `cpu_kernel_multiple_outputs`.
+// 2. Iterate over the members of the returned tuple, and store each member
+//    into its corresponding output via `handle_tuple_outputs`.
+template <typename func_t>
+static inline void
+multiple_outputs_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) {
+  using traits = function_traits<func_t>;
+
+  using result_type = typename traits::result_type;
+  constexpr int num_outputs = std::tuple_size<result_type>::value;
+  constexpr int ntensors = traits::arity + num_outputs;
+
+  // Copying strides to temporary array helps auto vectorization in older GCC
+  // versions.
+  int64_t strides[ntensors];
+  for (const auto arg : c10::irange(ntensors)) {
+    strides[arg] = strides_[arg];
+  }
+
+  for (; i < n; i++) {
+    auto output = c10::guts::apply(op, dereference<traits>(
+      &data[num_outputs],
+      &strides[num_outputs],
+      i));
+    handle_tuple_outputs(data, strides, i, output);
+  }
+}
+
+// Explicitly vectorized loop implementation. All inputs and outputs must be
+// the same type and contiguous with one exception: a single input may be
+// a scalar (stride 0). Its position is indicated by the argument `S`. If `S`
+// is 0, then there are no scalar inputs.
+template <typename func_t, typename vec_func_t>
+static inline void
+vectorized_loop(char** C10_RESTRICT data_, int64_t n, int64_t S, func_t&& op, vec_func_t&& vop) {
+  using traits = function_traits<vec_func_t>;
+  using scalar_t = typename function_traits<func_t>::result_type;
+  using Vec = Vectorized<scalar_t>;
+  constexpr int ntensors = traits::arity + 1;
+
+  char* C10_RESTRICT data[ntensors];
+  for (const auto arg : c10::irange(ntensors)) {
+    data[arg] = data_[arg];
+  }
+
+  Vec opt_scalar = Vec(S > 0 ?
*(scalar_t*)data[S] : scalar_t(0)); + int64_t i = 0; + for (; i <= n - 2 * Vec::size(); i += 2 * Vec::size()) { + auto args1 = dereference_vec(&data[1], opt_scalar, S, i); + auto args2 = dereference_vec(&data[1], opt_scalar, S, i + Vec::size()); + auto out1 = c10::guts::apply(std::forward(vop), std::move(args1)); + auto out2 = c10::guts::apply(std::forward(vop), std::move(args2)); + out1.store(data[0] + i * sizeof(scalar_t)); + out2.store(data[0] + (i + Vec::size()) * sizeof(scalar_t)); + } + if (i < n) { + int64_t strides[ntensors]; + for (const auto arg : c10::irange(ntensors)) { + strides[arg] = (S > 0 && arg == S) ? 0 : sizeof(scalar_t); + } + basic_loop(data, strides, i, n, std::forward(op)); + } +} + + +template +static inline void unroll_contiguous_scalar_checks( + const int64_t* /*strides*/, + std::index_sequence<>, + cb_t&& cb) { + cb(0); +} + +template +static inline void unroll_contiguous_scalar_checks( + const int64_t* strides, + std::index_sequence, + cb_t&& cb) { + if (is_contiguous_scalar(strides)) { + cb(INDEX0 + 1); + } else { + unroll_contiguous_scalar_checks(strides, std::index_sequence{}, std::forward(cb)); + } +} + +template +struct VectorizedLoop2d { + op_t op; + vop_t vop; + + using traits = function_traits; + static constexpr int ntensors = traits::arity + 1; + using data_t = std::array; + + VectorizedLoop2d(const op_t &op, const vop_t &vop): + op(op), vop(vop) {} + + static void advance(data_t &data, const int64_t *outer_strides) { + for (const auto arg : c10::irange(data.size())) { + data[arg] += outer_strides[arg]; + } + } + + void operator()(char** base, const int64_t *strides, int64_t size0, int64_t size1) { + data_t data; + std::copy_n(base, ntensors, data.data()); + const int64_t *outer_strides = &strides[ntensors]; + + if (is_contiguous(strides)) { + for (const auto i C10_UNUSED : c10::irange(size1)) { + vectorized_loop(data.data(), size0, 0, op, vop); + advance(data, outer_strides); + } + } else { + using Indices = std::make_index_sequence; + unroll_contiguous_scalar_checks(strides, Indices{}, [&](size_t idx) { + if (idx) { + for (const auto i C10_UNUSED : c10::irange(size1)) { + vectorized_loop(data.data(), size0, idx, op, vop); + advance(data, outer_strides); + } + } else { + for (const auto i C10_UNUSED : c10::irange(size1)) { + basic_loop(data.data(), strides, 0, size0, op); + advance(data, outer_strides); + } + } + }); + } + } +}; + +template +VectorizedLoop2d make_vectorized_loop2d( + const op_t &op, const vop_t &vop) { + return VectorizedLoop2d(op, vop); +} + +template +void cpu_kernel(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) { + using traits = function_traits; + // this could be extended to work with void return types + TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity); + TORCH_INTERNAL_ASSERT(iter.noutputs() == 1); + // dynamic casting not currently supported on CPU + TORCH_INTERNAL_ASSERT(!needs_dynamic_casting::check(iter)); + + iter.for_each([&](char** data, const int64_t* strides, int64_t n) { + // basic loop can handle 1d slices with arbitrary strides, and 1d slices is all that + // iter.for_each is ever sending to the loop lambda + basic_loop(data, strides, 0, n, std::forward(op)); + }, grain_size); + iter.cast_outputs(); +} + +// This function helps write elementwise kernels that requires multiple outputs. +// It follows the similar structure of cpu_kernel. +// Instead of `basic_loop` function, a new `multiple_outputs_loop` function is +// manipulated to handle multiple return values. 
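+// As a usage sketch (hypothetical call site, not from this file): with a
+// TensorIteratorBase `iter` configured with two float outputs and one float
+// input, the lambda returns one tuple element per output:
+//
+//   cpu_kernel_multiple_outputs(iter, [](float a) -> std::tuple<float, float> {
+//     return std::make_tuple(a + 1.0f, a - 1.0f);
+//   });
+//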
+// For now the `needs_dynamic_casting` check is not added, as the lambda (`func_t`)
+// passed to `multiple_outputs_loop` returns `std::tuple` instead of `scalar_t`.
+// The `gpu_kernel_multiple_outputs` is also implemented without this check;
+// we could extend `needs_dynamic_casting` to support both `std::tuple` and
+// `thrust::tuple` in the future.
+template <typename func_t>
+void cpu_kernel_multiple_outputs(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) {
+  using traits = function_traits<func_t>;
+  TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
+
+  iter.for_each([&](char** data, const int64_t* strides, int64_t n) {
+    multiple_outputs_loop(data, strides, 0, n, std::forward<func_t>(op));
+  }, grain_size);
+  iter.cast_outputs();
+}
+
+template <bool check_dynamic_cast=true, typename func_t, typename vec_func_t>
+void cpu_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, int64_t grain_size = at::internal::GRAIN_SIZE) {
+  using traits = function_traits<func_t>;
+  // this could be extended to work with void return types
+  TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
+  TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
+  // dynamic casting not currently supported on CPU, but some kernels (like Fill)
+  // explicitly dynamic_cast, so we give the opt-out of checking.
+  c10::guts::if_constexpr<check_dynamic_cast>([&] {
+    TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
+  });
+
+  iter.for_each(make_vectorized_loop2d(op, vop), grain_size);
+  iter.cast_outputs();
+}
+
+template <typename func_t>
+void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op, const Range& range) {
+  using traits = function_traits<func_t>;
+  constexpr bool result_void = std::is_void<typename traits::result_type>::value;
+  TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity &&
+                        ((result_void && iter.noutputs() == 0) || (!result_void && iter.noutputs() == 1)));
+  // dynamic casting not currently supported on CPU
+  TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
+
+  iter.serial_for_each([&](char** data, const int64_t* strides, int64_t n) {
+    basic_loop(data, strides, 0, n, std::forward<func_t>(op));
+  }, range);
+  iter.cast_outputs();
+}
+
+template <typename func_t>
+void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op) {
+  cpu_serial_kernel(iter, op, {0, iter.numel()});
+}
+
+template <typename func_t, typename vec_func_t>
+void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, const Range& range) {
+  using traits = function_traits<func_t>;
+  // this could be extended to work with void return types
+  TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
+  TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
+  // dynamic casting not currently supported on CPU
+  TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
+
+  iter.serial_for_each(make_vectorized_loop2d(op, vop), range);
+  iter.cast_outputs();
+}
+
+template <typename func_t, typename vec_func_t>
+void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop) {
+  cpu_serial_kernel_vec(iter, op, vop, {0, iter.numel()});
+}
+
+}}} // namespace at::native::<anonymous>
diff --git a/voice_bridge/torch/include/ATen/native/cpu/MaxUnpoolKernel.h b/voice_bridge/torch/include/ATen/native/cpu/MaxUnpoolKernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c6507909ca4aa7e49fbaa420e407b211023b1b7
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/cpu/MaxUnpoolKernel.h
@@ -0,0 +1,14 @@
+#pragma once
+#include <ATen/native/DispatchStub.h>
+
+namespace at {
+class Tensor;
+
+namespace native {
+
+using max_unpooling_fn = void(*)(Tensor&, const Tensor&, const Tensor&);
+
+DECLARE_DISPATCH(max_unpooling_fn, max_unpool2d_kernel);
+DECLARE_DISPATCH(max_unpooling_fn, max_unpool3d_kernel);
+
+}} // at::native
diff --git
a/voice_bridge/torch/include/ATen/native/cpu/PixelShuffleKernel.h b/voice_bridge/torch/include/ATen/native/cpu/PixelShuffleKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..c015e674a24c597aae9475995612a93271c5ce72 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/PixelShuffleKernel.h @@ -0,0 +1,14 @@ +#pragma once +#include + +namespace at { +class TensorBase; +} + +namespace at { namespace native { + +using pixel_shuffle_fn = void(*)(TensorBase&, const TensorBase&, int64_t); +DECLARE_DISPATCH(pixel_shuffle_fn, pixel_shuffle_kernel); +DECLARE_DISPATCH(pixel_shuffle_fn, pixel_unshuffle_kernel); + +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/cpu/Reduce.h b/voice_bridge/torch/include/ATen/native/cpu/Reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..fdb1c0d1a0fce1bd3e938e97f0fafcf3dbd4f558 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/Reduce.h @@ -0,0 +1,313 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace at { namespace native { inline namespace CPU_CAPABILITY { + +using namespace vec; + +#define VEC_LOOP_HEADER(func_t, data) \ + using scalar_t = typename function_traits::result_type; \ + using Vec = Vectorized; \ + char* out_ptr = data[0]; \ + (void) out_ptr; + +// reduction that is contiguous over the input in dim 0 +template +static inline bool is_contiguous_reduction(const int64_t* strides) { + return strides[0] == 0 && + strides[1] == sizeof(typename traits::arg2_t); +} + +// reduction that is contiguous over the input in dim 1 +template +static inline bool is_outer_reduction(const int64_t* strides) { + return strides[0] == 0 && + strides[2] == sizeof(typename traits::result_type) && + strides[3] == sizeof(typename traits::arg2_t); +} + +template +static inline void vectorized_reduction(char** data, int64_t n, int64_t stride, + func_t op, vec_func_t vop, bool reduce) { + VEC_LOOP_HEADER(func_t, data) + const char* in1_ptr = data[1]; + Vec acc[4]; + for (const auto j : c10::irange(4)) { + acc[j] = Vec::loadu(in1_ptr + j * Vec::size() * sizeof(scalar_t)); + } + for (const auto i : c10::irange(1, n)) { + const char* ptr = in1_ptr + stride * i; + acc[0] = vop(acc[0], Vec::loadu(ptr + (0 * Vec::size() * sizeof(scalar_t)))); + acc[1] = vop(acc[1], Vec::loadu(ptr + (1 * Vec::size() * sizeof(scalar_t)))); + acc[2] = vop(acc[2], Vec::loadu(ptr + (2 * Vec::size() * sizeof(scalar_t)))); + acc[3] = vop(acc[3], Vec::loadu(ptr + (3 * Vec::size() * sizeof(scalar_t)))); + } + if (reduce) { + scalar_t buffer[Vec::size()]; + acc[0] = vop(vop(acc[0], acc[1]), vop(acc[2], acc[3])); + acc[0].store(buffer); + for (const auto j : c10::irange(1, Vec::size())) { + buffer[0] = op(buffer[0], buffer[j]); + } + auto dst = (scalar_t*)out_ptr; + *dst = op(*dst, buffer[0]); + } else { + for (const auto j : c10::irange(4)) { + auto dst = out_ptr + j * Vec::size() * sizeof(scalar_t); + acc[j] = vop(acc[j], Vec::loadu(dst)); + acc[j].store(dst); + } + } +} + +template +static inline void UNARY_OUTER_LOOP(char* data[2], const int64_t strides[2], int64_t n, F f) { + for (const auto j C10_UNUSED : c10::irange(n)) { + f(); + data[0] += strides[0]; + data[1] += strides[1]; + } +} + +// computes the reduction out = op(out, in) +template +static inline void vectorized_inner_reduction(char** data, int64_t n, func_t op, vec_func_t vop) { + VEC_LOOP_HEADER(func_t, data) + int64_t vector_stride = 4 * Vec::size() * sizeof(scalar_t); + int64_t count = n / (4 * Vec::size()); + if 
(count > 0) { + vectorized_reduction(data, count, vector_stride, op, vop, /*reduce=*/true); + } + char* ptrs[3] = { data[0], data[0], data[1] }; + int64_t strides[] = { 0, 0, sizeof(scalar_t) }; + basic_loop(ptrs, strides, count * 4 * Vec::size(), n, op); +} + +// computes the reduction out = op(out, in) +template +static inline void vectorized_outer_reduction(char** data, int64_t inner_stride, int64_t size0, int64_t size1, func_t op, vec_func_t vop) { + VEC_LOOP_HEADER(func_t, data) + + // reduce down each column of 4 * Vec::size() elements (128 or 256 bytes) +#if defined(CPU_CAPABILITY_AVX512) + int64_t outer_stride[2] = { 256, 256 }; +#else + int64_t outer_stride[2] = { 128, 128 }; +#endif + UNARY_OUTER_LOOP(data, outer_stride, size1 / (4 * Vec::size()), [&] { + vectorized_reduction(data, size0, inner_stride, op, vop, /*reduce=*/false); + }); + + // reduce down the remaining columns + int64_t step[] = { sizeof(scalar_t), sizeof(scalar_t) }; + int64_t remaining = size1 % (4 * Vec::size()); + UNARY_OUTER_LOOP(data, step, remaining, [&] { + char* ptrs[3] = { data[0], data[0], data[1] }; + int64_t strides[] = { 0, 0, inner_stride }; + basic_loop(ptrs, strides, 0, size0, op); + }); +} + +template +static void set_result(const int index, const res_t result, const TensorIteratorBase &iter, const int num_outputs) { + // static_assert(std::is_same::value, "data types must match"); + if (index < num_outputs) { + char *out = (char *) iter.data_ptr(index); + *(res_t *) out = result; + } +} + +template +static void set_results(const res_t result, const TensorIteratorBase &iter, const int num_outputs) { + AT_ASSERT(num_outputs == 1); + set_result(0, result, iter, num_outputs); +} + +template +static inline typename std::enable_if::type +for_each_in_tuple(const std::tuple& /*t*/, const TensorIteratorBase& /*iter*/, const int /*num_outputs*/) { + return i; +} + +template +static inline typename std::enable_if::type +for_each_in_tuple(const std::tuple& t, const TensorIteratorBase &iter, const int num_outputs) { + if (i < (size_t)num_outputs) { + set_result(i, std::get(t), iter, num_outputs); + return for_each_in_tuple(t, iter, num_outputs); + } + return i; +} + +template +static void set_results(const std::tuple& result, const TensorIteratorBase &iter, const int num_outputs) { + AT_ASSERT(num_outputs >= 1); + std::size_t result_size = for_each_in_tuple(result, iter, num_outputs); + AT_ASSERT((size_t)num_outputs == result_size); +} + +template +struct all_same : guts::conjunction< + std::is_same... +> {}; + +// data_t is the input/output data type. +// acc_t is a type that contains all the necessary data +// to continue reducing. +// index_t is a one-dimensional index +// +// ops_t is such that &ops_t::reduce, &ops_t::combine, and &ops_t::project exist and satisfy +// the following. +// reduce: (acc_t, data_t, index_t) -> acc_t adds one data point to the accumulated value. +// combine: (acc_t, acc_t) -> acc_t combines two accumulated values into one. +// project: acc_t -> out_t finishes the reduction, getting the required output. +// +// Additionally, acc_t must be default-constructible: +// acc_t {} is an identity for combine, +// and project(acc_t {}) is the value of the operation on zero elements. +// +// The point of `combine` is to support parallelization - +// the idea is to one sequence of `reduce` calls per thread of execution, +// and then to combine them at the end with `combine`. 
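+// As a minimal sketch (illustrative only; `SumSqOps` is an assumption, not part
+// of this diff), an ops_t computing a sum of squares over float data could be:
+//
+//   struct SumSqOps {
+//     using acc_t = double;
+//     acc_t reduce(acc_t acc, float data, int64_t /*idx*/) const {
+//       return acc + static_cast<acc_t>(data) * data;
+//     }
+//     acc_t combine(acc_t a, acc_t b) const { return a + b; }
+//     float project(acc_t acc) const { return static_cast<float>(acc); }
+//     acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) const { return acc; }
+//   };
+//
+// Here acc_t{} == 0.0 is the identity for combine, and project(acc_t{}) is the
+// result on zero elements, as required above.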
+// +// If there is more than one output element, +// our parallelization strategy is to use one thread for each of them, +// which means that `combine` will never be called. +// +// If, on the other hand, there is only one, then we split the input into +// into several pieces, reduce each separately, and then combine them. + +template +void binary_kernel_reduce(TensorIteratorBase& iter, ops_t ops, init_t init) { + using rf_t = decltype(&ops_t::reduce); + using cf_t = decltype(&ops_t::combine); + using pf_t = decltype(&ops_t::project); + using r_traits = binary_function_traits; + using c_traits = binary_function_traits; + using p_traits = unary_function_traits; + using acc_t = typename p_traits::arg1_t; + using data_t = typename r_traits::arg2_t; + static_assert( + all_same< + acc_t, + init_t, + typename r_traits::arg1_t, + typename r_traits::result_type, + typename c_traits::arg1_t, + typename c_traits::arg2_t, + typename c_traits::result_type>::value, + "all accumulate types must match"); + static_assert( + std::is_default_constructible::value, + "the accumulate type must be default-constructible" + ); + const int num_outputs = iter.noutputs(); + iter.foreach_reduced_elt([&ops, &init, num_outputs](TensorIteratorBase &sub_iter) { + auto reduction_body = [&ops, &sub_iter, num_outputs](acc_t acc, int64_t begin, int64_t end) -> acc_t { + int ntensors = sub_iter.ntensors(); + sub_iter.serial_for_each([&acc, &ops, num_outputs, ntensors, begin](char** data, const int64_t* strides, int64_t size) { + AT_ASSERT(ntensors - num_outputs == 1); + char *in = data[ntensors - 1]; + int64_t stride = strides[ntensors - 1]; + for (const auto i : c10::irange(size)) { + acc = ops.reduce(acc, c10::load(in), begin + i); + in += stride; + } + }, {begin, end}); + return ops.translate_idx(acc, sub_iter.view_offsets()[0]); + }; + acc_t total_acc = init; + auto numel = sub_iter.numel(); + if (numel < at::internal::GRAIN_SIZE || at::get_num_threads() == 1 || + at::in_parallel_region()) { + total_acc = reduction_body(total_acc, 0, numel); + } else { + int max_threads = at::get_num_threads(); + AT_ASSERT(max_threads > 0); + static_assert( + !std::is_same::value, + "Concurrently modifying different references into std::vector is UB." 
+ ); + std::vector buffer((unsigned)max_threads, init); + at::parallel_for(0, numel, internal::GRAIN_SIZE, + [&](int64_t begin, int64_t end) { + auto& acc = buffer[at::get_thread_num()]; + acc = reduction_body(acc, begin, end); + } + ); + for (const auto i : c10::irange(max_threads)) { + total_acc = ops.combine(total_acc, buffer[i]); + } + } + set_results(ops.project(total_acc), sub_iter, num_outputs); + }); +} + +template +void binary_kernel_reduce_vec(TensorIteratorBase& iter, func_t op, vec_func_t vop, double ident = 0) { + using traits = binary_function_traits; + static_assert( + all_same< + typename traits::result_type, + typename traits::arg1_t, + typename traits::arg2_t>::value, + "all types must match"); + + iter.output_base().fill_(ident); + iter.parallel_reduce([&](char** data, const int64_t* strides, int64_t size0, int64_t size1) { + int64_t outer_strides[] = { strides[2], strides[3] }; + if (is_contiguous_reduction(strides)) { + // input is contiguous in dim 0, output is reduced in dim 0 + UNARY_OUTER_LOOP(data, outer_strides, size1, [&] { + vectorized_inner_reduction(data, size0, op, vop); + }); + } else if (is_outer_reduction(strides)) { + // input and output are contiguous in dim 1 + int64_t inner_stride = strides[1]; // stride of input in dim 0 + vectorized_outer_reduction(data, inner_stride, size0, size1, op, vop); + } else { + UNARY_OUTER_LOOP(data, outer_strides, size1, [&] { + char* ptrs[3] = { data[0], data[0], data[1] }; + int64_t inner_strides[3] = { strides[0], strides[0], strides[1] }; + basic_loop(ptrs, inner_strides, 0, size0, op); + }); + } + }); +} + +// when reduction is on most inner dimension (dim 0 in TensorIterator) +// and input has contiguous most inner dimension, `binary_kernel_reduce_lastdim` +// can be used. +static inline bool is_reduce_lastdim(TensorIteratorBase& iter) { + return iter.num_reduce_dims() == 1 && iter.is_dim_reduced(0) + && iter.ninputs() == 1 && iter.strides(1)[0] == iter.element_size(1); +} + +template +void binary_kernel_reduce_lastdim(TensorIteratorBase& iter, reduce_func_t reduce_op) { + auto shape = iter.shape(); + int64_t dim_size = shape[0]; + int64_t grain_size = std::max((int64_t) 1, at::internal::GRAIN_SIZE / dim_size); + TensorIterator sub_iter(iter); + // create sub iterator to parallel on all non-reduce-dims + sub_iter.narrow(0, 0, 1); + auto loop = [&](char** data, const int64_t* strides, int64_t size) { + char* out = data[0]; + char* in = data[1]; + for (int64_t i = 0; i < size; ++i) { + reduce_op(out, in, dim_size); + out += strides[0]; + in += strides[1]; + } + }; + sub_iter.for_each(loop, grain_size); +} + +}}} // namespace at::native:: diff --git a/voice_bridge/torch/include/ATen/native/cpu/SerialStackImpl.h b/voice_bridge/torch/include/ATen/native/cpu/SerialStackImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..ddd914d310d532840bed989897a33a45fb0d4b55 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/SerialStackImpl.h @@ -0,0 +1,144 @@ +// Copyright 2004-present Facebook. All Rights Reserved. +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { namespace detail { + +struct InputMeta { + void* data_ptr; + int64_t inner_size; + + InputMeta(const Tensor& t, int64_t dim, int64_t inner) + : data_ptr(t.data_ptr()), inner_size(t.sizes()[dim] * inner) {} +}; + +// This kernel is used by two TensorList types: +// 1. stack_serial_kernel uses at::ArrayRef +// 2. 
Static runtime calls this kernel directly (csrc/jit/runtime/static/ops.cpp) with
+//    ProcessedNodeInputWrapper.
+// When making changes, make sure that they are compatible with both types!
+template <typename scalar_t, typename TensorListType>
+void stack_serial_kernel_impl(Tensor& result, TensorListType tensors, int64_t dim) {
+  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+      dim >= 0 && dim <= result.dim(),
+      "dim out of range in stack_serial_kernel_impl");
+  int64_t outer =
+      result.numel() / (result.sizes()[dim] * result.strides()[dim]);
+  scalar_t* result_data = result.data_ptr<scalar_t>();
+  int64_t ninputs = tensors.size();
+  std::vector<InputMeta> inputs;
+  inputs.reserve(ninputs);
+  for (const auto& tensor : tensors) {
+    inputs.emplace_back(tensor, dim, tensor.strides()[dim]);
+  }
+
+  using Vec = vec::Vectorized<scalar_t>;
+  scalar_t* result_ptr = result_data;
+  for (const auto i : c10::irange(outer)) {
+    for (const auto j : c10::irange(ninputs)) {
+      int64_t local_inner = inputs[j].inner_size;
+      scalar_t* input_ptr = (scalar_t*)(inputs[j].data_ptr) + i * local_inner;
+
+      if (local_inner < Vec::size()) {
+        for (const auto k : c10::irange(local_inner)) {
+          result_ptr[k] = input_ptr[k];
+        }
+      } else {
+        vec::map(
+            [](Vec x) { return x; }, result_ptr, input_ptr, local_inner);
+      }
+      result_ptr += local_inner;
+    }
+  }
+}
+
+// Checks to see whether native stack can be invoked under these conditions:
+// - result and input tensors are contiguous
+// - only one thread is used
+// - no type promotion has to occur
+// - tensors dtype is Double or Float
+template <typename TensorListType>
+bool can_use_native_serial_stack_impl(Tensor& result, TensorListType tensors, int64_t dim) {
+  TORCH_CHECK(tensors.size() > 0, "expected a non-empty list of Tensors");
+  const Tensor& first_tensor = tensors[0];
+  // stack dimension should be in range [0, firstTensor.dim())
+  // dim == firstTensor.dim() is a valid input, but it is handled by the default
+  // code path that uses unsqueeze
+  if (dim >= first_tensor.dim()) return false;
+  // Native stack doesn't apply if any input tensor would be skipped
+  // (an empty, 1-dimensional tensor is skipped by stack).
+  if (first_tensor.numel() == 0 && first_tensor.dim() == 1) return false;
+  // there should be no type promotion
+  if (result.dtype() != first_tensor.dtype()) return false;
+
+  auto first_tensor_mem_format = first_tensor.suggest_memory_format();
+  ScalarType dtype = first_tensor.scalar_type();
+
+  if (!result.is_contiguous(first_tensor_mem_format)) {
+    return false;
+  }
+
+  // fast path only works for Double and Float
+  if (dtype != ScalarType::Double && dtype != ScalarType::Float) {
+    return false;
+  }
+
+  // check remainder of inputs
+  auto const &first_tensor_shape = first_tensor.sizes();
+  for (const auto i : c10::irange(1, tensors.size())) {
+    auto const &tensor = tensors[i];
+    TORCH_CHECK(tensors[i].sizes() == first_tensor.sizes(),
+      "stack expects each tensor to be equal size, but got ", first_tensor_shape,
+      " at entry 0 and ", tensor.sizes(), " at entry ", i);
+
+    // every tensor must be contiguous
+    // tensor sizes and strides must be the same
+    // there should be no type promotion
+    if (!tensor.is_contiguous(first_tensor_mem_format) ||
+        tensor.strides() != first_tensor.strides() ||
+        tensor.dtype() != dtype) {
+      return false;
+    }
+  }
+
+  // fast native stack should only be used when it is not worth using multiple threads
+  // or there is only one thread. Note that we aren't checking result.numel() here because
+  // it may not have been resized and we want to defer that cost till later.
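+  // As a rough, illustrative calculation (assuming the default
+  // at::internal::GRAIN_SIZE of 32768): stacking eight 1,000-element float
+  // tensors gives numel_in_stack = 8,000, so the serial fast path applies;
+  // eight 100,000-element tensors take it only when at::get_num_threads() == 1.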
+  int64_t numel_in_stack = first_tensor.numel() * tensors.size();
+  return numel_in_stack < at::internal::GRAIN_SIZE || at::get_num_threads() == 1;
+}
+
+template <typename TensorListType, bool should_skip_overlap_check>
+struct CanUseNativeSerialStack;
+
+template <typename TensorListType>
+struct CanUseNativeSerialStack<TensorListType, false> {
+  static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
+    // Inputs cannot alias the output tensor
+    for (const auto i : c10::irange(tensors.size())) {
+      auto lap = at::get_overlap_status(result, tensors[i]);
+      TORCH_CHECK(lap != at::MemOverlapStatus::Partial &&
+          lap != at::MemOverlapStatus::Full, 0,
+          "unsupported operation: the input tensors cannot refer to any of the "
+          "output memory locations. Found overlap in input tensor ", i);
+    }
+
+    return can_use_native_serial_stack_impl(result, tensors, dim);
+  }
+};
+
+template <typename TensorListType>
+struct CanUseNativeSerialStack<TensorListType, true> {
+  static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
+    return can_use_native_serial_stack_impl(result, tensors, dim);
+  }
+};
+
+}}} // namespace at::native::detail
diff --git a/voice_bridge/torch/include/ATen/native/cpu/SoftmaxKernel.h b/voice_bridge/torch/include/ATen/native/cpu/SoftmaxKernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee9fac647ad6241c97e28a7af6f091d5d613bc3a
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/cpu/SoftmaxKernel.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <ATen/native/DispatchStub.h>
+#include <cstdint>
+
+namespace at {
+class Tensor;
+
+namespace native {
+
+using forward_fn = void (*)(const Tensor&, const Tensor&);
+using backward_fn = void(*)(const Tensor &, const Tensor &, const Tensor&);
+
+DECLARE_DISPATCH(forward_fn, softmax_lastdim_kernel);
+DECLARE_DISPATCH(forward_fn, log_softmax_lastdim_kernel);
+DECLARE_DISPATCH(backward_fn, softmax_backward_lastdim_kernel);
+DECLARE_DISPATCH(backward_fn, log_softmax_backward_lastdim_kernel);
+
+using forward_fn_with_dim = void(*)(const Tensor &, const Tensor &, const int64_t);
+using backward_fn_with_dim =
+    void (*)(const Tensor&, const Tensor&, const Tensor&, const int64_t);
+
+DECLARE_DISPATCH(forward_fn_with_dim, softmax_kernel);
+DECLARE_DISPATCH(forward_fn_with_dim, log_softmax_kernel);
+DECLARE_DISPATCH(backward_fn_with_dim, softmax_backward_kernel);
+DECLARE_DISPATCH(backward_fn_with_dim, log_softmax_backward_kernel);
+}
+}
diff --git a/voice_bridge/torch/include/ATen/native/cpu/StackKernel.h b/voice_bridge/torch/include/ATen/native/cpu/StackKernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e9a45e4dd12baf48be5fe72b0abda2915ef38f1
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/cpu/StackKernel.h
@@ -0,0 +1,12 @@
+// Copyright 2004-present Facebook. All Rights Reserved.
+#pragma once + +#include +#include + +namespace at { namespace native { + +using stack_serial_fn = void(*)(Tensor &, TensorList, int64_t); +DECLARE_DISPATCH(stack_serial_fn, stack_serial_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cpu/WeightNormKernel.h b/voice_bridge/torch/include/ATen/native/cpu/WeightNormKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..6e1f3ec3b029177763568e01f63d7d1467483ccb --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/WeightNormKernel.h @@ -0,0 +1,20 @@ +#pragma once +#include +#include + +namespace at { +class TensorBase; +} + +namespace at { namespace native { + +using weight_norm_fn = void(*)( + TensorBase&, TensorBase&, const TensorBase&, const TensorBase&, int64_t); +using weight_norm_backward_fn = void(*)( + TensorBase&, TensorBase&, const TensorBase&, const TensorBase&, + const TensorBase&, const TensorBase&, int64_t); + +DECLARE_DISPATCH(weight_norm_fn, weight_norm_stub); +DECLARE_DISPATCH(weight_norm_backward_fn, weight_norm_backward_stub); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cpu/avx_mathfun.h b/voice_bridge/torch/include/ATen/native/cpu/avx_mathfun.h new file mode 100644 index 0000000000000000000000000000000000000000..080cd833d3a109f4625dc589b3c2076cbfa6cb8e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/avx_mathfun.h @@ -0,0 +1,522 @@ +#pragma once +/* + AVX implementation of sin, cos, sincos, exp and log + + Based on "sse_mathfun.h", by Julien Pommier + http://gruntthepeon.free.fr/ssemath/ + + Copyright (C) 2012 Giovanni Garberoglio + Interdisciplinary Laboratory for Computational Science (LISC) + Fondazione Bruno Kessler and University of Trento + via Sommarive, 18 + I-38123 Trento (Italy) + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + (this is the zlib license) +*/ + +#include + +/* The original source of this file has been modified. */ +#if defined(CPU_CAPABILITY_AVX2) + +#if defined(__GNUC__) +# define ALIGN32_BEG __attribute__((aligned(32))) +#elif defined(_WIN32) +# define ALIGN32_BEG __declspec(align(32)) +#endif + +typedef __m256 v8sf; // vector of 8 float (avx2) +typedef __m256i v8si; // vector of 8 int (avx2) + +/* declare some AVX constants -- why can't I figure a better way to do that? 
*/ +#define _PS256_CONST(Name, Val) \ + static const ALIGN32_BEG float _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val } +#define _PI32_CONST256(Name, Val) \ + static const ALIGN32_BEG int _pi32_256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val } +#define _PS256_CONST_TYPE(Name, Type, Val) \ + static const ALIGN32_BEG Type _ps256_##Name[8] = { Val, Val, Val, Val, Val, Val, Val, Val } + +_PS256_CONST(1 , 1.0f); +_PS256_CONST(0p5, 0.5f); +/* the smallest non denormalized float number */ +_PS256_CONST_TYPE(min_norm_pos, int, 0x00800000); +_PS256_CONST_TYPE(mant_mask, int, 0x7f800000); +_PS256_CONST_TYPE(inv_mant_mask, int, ~0x7f800000); + +_PS256_CONST_TYPE(sign_mask, int, (int)0x80000000); +_PS256_CONST_TYPE(inv_sign_mask, int, ~0x80000000); + +_PI32_CONST256(0, 0); +_PI32_CONST256(1, 1); +_PI32_CONST256(inv1, ~1); +_PI32_CONST256(2, 2); +_PI32_CONST256(4, 4); +_PI32_CONST256(0x7f, 0x7f); + +_PS256_CONST(cephes_SQRTHF, 0.707106781186547524); +_PS256_CONST(cephes_log_p0, 7.0376836292E-2); +_PS256_CONST(cephes_log_p1, - 1.1514610310E-1); +_PS256_CONST(cephes_log_p2, 1.1676998740E-1); +_PS256_CONST(cephes_log_p3, - 1.2420140846E-1); +_PS256_CONST(cephes_log_p4, + 1.4249322787E-1); +_PS256_CONST(cephes_log_p5, - 1.6668057665E-1); +_PS256_CONST(cephes_log_p6, + 2.0000714765E-1); +_PS256_CONST(cephes_log_p7, - 2.4999993993E-1); +_PS256_CONST(cephes_log_p8, + 3.3333331174E-1); +_PS256_CONST(cephes_log_q1, -2.12194440e-4); +_PS256_CONST(cephes_log_q2, 0.693359375); + + +/* natural logarithm computed for 8 simultaneous float + return NaN for x <= 0 +*/ +inline v8sf log256_ps(v8sf x) { + v8si imm0; + v8sf one = *(v8sf*)_ps256_1; + + //v8sf invalid_mask = _mm256_cmple_ps(x, _mm256_setzero_ps()); + v8sf invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_LE_OS); + + x = _mm256_max_ps(x, *(v8sf*)_ps256_min_norm_pos); /* cut off denormalized stuff */ + + // can be done with AVX2 + imm0 = _mm256_srli_epi32(_mm256_castps_si256(x), 23); + + /* keep only the fractional part */ + x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_mant_mask); + x = _mm256_or_ps(x, *(v8sf*)_ps256_0p5); + + // this is again another AVX2 instruction + imm0 = _mm256_sub_epi32(imm0, *(v8si*)_pi32_256_0x7f); + v8sf e = _mm256_cvtepi32_ps(imm0); + + e = _mm256_add_ps(e, one); + + /* part2: + if( x < SQRTHF ) { + e -= 1; + x = x + x - 1.0; + } else { x = x - 1.0; } + */ + //v8sf mask = _mm256_cmplt_ps(x, *(v8sf*)_ps256_cephes_SQRTHF); + v8sf mask = _mm256_cmp_ps(x, *(v8sf*)_ps256_cephes_SQRTHF, _CMP_LT_OS); + v8sf tmp = _mm256_and_ps(x, mask); + x = _mm256_sub_ps(x, one); + e = _mm256_sub_ps(e, _mm256_and_ps(one, mask)); + x = _mm256_add_ps(x, tmp); + + v8sf z = _mm256_mul_ps(x,x); + + v8sf y = *(v8sf*)_ps256_cephes_log_p0; + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p1); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p2); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p3); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p4); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p5); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p6); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p7); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_log_p8); + y = _mm256_mul_ps(y, x); + + y = _mm256_mul_ps(y, z); + + tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q1); + y = _mm256_add_ps(y, tmp); + + + tmp = 
_mm256_mul_ps(z, *(v8sf*)_ps256_0p5); + y = _mm256_sub_ps(y, tmp); + + tmp = _mm256_mul_ps(e, *(v8sf*)_ps256_cephes_log_q2); + x = _mm256_add_ps(x, y); + x = _mm256_add_ps(x, tmp); + x = _mm256_or_ps(x, invalid_mask); // negative arg will be NAN + return x; +} + +_PS256_CONST(exp_hi, 88.3762626647949f); +_PS256_CONST(exp_lo, -88.3762626647949f); + +_PS256_CONST(cephes_LOG2EF, 1.44269504088896341); +_PS256_CONST(cephes_exp_C1, 0.693359375); +_PS256_CONST(cephes_exp_C2, -2.12194440e-4); + +_PS256_CONST(cephes_exp_p0, 1.9875691500E-4); +_PS256_CONST(cephes_exp_p1, 1.3981999507E-3); +_PS256_CONST(cephes_exp_p2, 8.3334519073E-3); +_PS256_CONST(cephes_exp_p3, 4.1665795894E-2); +_PS256_CONST(cephes_exp_p4, 1.6666665459E-1); +_PS256_CONST(cephes_exp_p5, 5.0000001201E-1); + +inline v8sf exp256_ps(v8sf x) { + v8sf tmp = _mm256_setzero_ps(), fx; + v8si imm0; + v8sf one = *(v8sf*)_ps256_1; + + x = _mm256_min_ps(x, *(v8sf*)_ps256_exp_hi); + x = _mm256_max_ps(x, *(v8sf*)_ps256_exp_lo); + + /* express exp(x) as exp(g + n*log(2)) */ + fx = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_LOG2EF); + fx = _mm256_add_ps(fx, *(v8sf*)_ps256_0p5); + + /* how to perform a floorf with SSE: just below */ + //imm0 = _mm256_cvttps_epi32(fx); + //tmp = _mm256_cvtepi32_ps(imm0); + + tmp = _mm256_floor_ps(fx); + + /* if greater, subtract 1 */ + //v8sf mask = _mm256_cmpgt_ps(tmp, fx); + v8sf mask = _mm256_cmp_ps(tmp, fx, _CMP_GT_OS); + mask = _mm256_and_ps(mask, one); + fx = _mm256_sub_ps(tmp, mask); + + tmp = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C1); + v8sf z = _mm256_mul_ps(fx, *(v8sf*)_ps256_cephes_exp_C2); + x = _mm256_sub_ps(x, tmp); + x = _mm256_sub_ps(x, z); + + z = _mm256_mul_ps(x,x); + + v8sf y = *(v8sf*)_ps256_cephes_exp_p0; + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p1); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p2); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p3); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p4); + y = _mm256_mul_ps(y, x); + y = _mm256_add_ps(y, *(v8sf*)_ps256_cephes_exp_p5); + y = _mm256_mul_ps(y, z); + y = _mm256_add_ps(y, x); + y = _mm256_add_ps(y, one); + + /* build 2^n */ + imm0 = _mm256_cvttps_epi32(fx); + // another two AVX2 instructions + imm0 = _mm256_add_epi32(imm0, *(v8si*)_pi32_256_0x7f); + imm0 = _mm256_slli_epi32(imm0, 23); + v8sf pow2n = _mm256_castsi256_ps(imm0); + y = _mm256_mul_ps(y, pow2n); + return y; +} + +_PS256_CONST(minus_cephes_DP1, -0.78515625); +_PS256_CONST(minus_cephes_DP2, -2.4187564849853515625e-4); +_PS256_CONST(minus_cephes_DP3, -3.77489497744594108e-8); +_PS256_CONST(sincof_p0, -1.9515295891E-4); +_PS256_CONST(sincof_p1, 8.3321608736E-3); +_PS256_CONST(sincof_p2, -1.6666654611E-1); +_PS256_CONST(coscof_p0, 2.443315711809948E-005); +_PS256_CONST(coscof_p1, -1.388731625493765E-003); +_PS256_CONST(coscof_p2, 4.166664568298827E-002); +_PS256_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI + + +/* evaluation of 8 sines at onces using AVX intrisics + + The code is the exact rewriting of the cephes sinf function. + Precision is excellent as long as x < 8192 (I did not bother to + take into account the special handling they have for greater values + -- it does not return garbage for arguments over 8192, though, but + the extra precision is missing). + + Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the + surprising but correct result. 
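+   As a small illustrative sketch (an addition for clarity, not part of the
+   original comment), the helper is used like any other packed AVX primitive:
+
+     v8sf x = _mm256_set1_ps(0.5f);   // eight lanes holding 0.5f
+     v8sf s = sin256_ps(x);           // eight lanes of sinf(0.5f)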
+ +*/ +inline v8sf sin256_ps(v8sf x) { // any x + v8sf xmm1, xmm2 = _mm256_setzero_ps(), xmm3, sign_bit, y; + v8si imm0, imm2; + + sign_bit = x; + /* take the absolute value */ + x = _mm256_and_ps(x, *(v8sf*)_ps256_inv_sign_mask); + /* extract the sign bit (upper one) */ + sign_bit = _mm256_and_ps(sign_bit, *(v8sf*)_ps256_sign_mask); + + /* scale by 4/Pi */ + y = _mm256_mul_ps(x, *(v8sf*)_ps256_cephes_FOPI); + + /* + Here we start a series of integer operations, which are in the + realm of AVX2. + If we don't have AVX, let's perform them using SSE2 directives + */ + + /* store the integer part of y in mm0 */ + imm2 = _mm256_cvttps_epi32(y); + /* j=(j+1) & (~1) (see the cephes sources) */ + // another two AVX2 instruction + imm2 = _mm256_add_epi32(imm2, *(v8si*)_pi32_256_1); + imm2 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_inv1); + y = _mm256_cvtepi32_ps(imm2); + + /* get the swap sign flag */ + imm0 = _mm256_and_si256(imm2, *(v8si*)_pi32_256_4); + imm0 = _mm256_slli_epi32(imm0, 29); + /* get the polynom selection mask + there is one polynom for 0 <= x <= Pi/4 + and another one for Pi/4 + +namespace at { namespace native { + +inline ScalarType first_type() { + return ScalarType::Undefined; +} + +template +inline ScalarType first_type(const Tensor& arg, const Args&... parameters) { + return arg.defined() ? arg.scalar_type() : first_type(parameters...); +} + +template +inline bool is_mixed_type(const Tensor& input, const Args&... parameters) { + const auto parameter_type = first_type(parameters...); + return ((parameter_type != ScalarType::Undefined) && + (parameter_type != input.scalar_type())); +} + +// currently on CPU, mixed data type is only supported +// when input is 'BFloat16' and parameters are 'Float' +inline void check_mixed_data_type(const Tensor& input) { + TORCH_CHECK(input.scalar_type() == ScalarType::BFloat16, + "mixed dtype (CPU): expect input to have scalar type of BFloat16"); +} + +template +inline void check_mixed_data_type(const Tensor& input, const Tensor& parameter, const Args&... parameters) { + TORCH_CHECK(!parameter.defined() || parameter.scalar_type() == ScalarType::Float, + "mixed dtype (CPU): expect parameter to have scalar type of Float"); + check_mixed_data_type(input, parameters...); +} + +inline ScalarType param_scalar_type(const Tensor& t, bool is_mixed_type) { + return is_mixed_type ? ScalarType::Float : t.scalar_type(); +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cpu/moments_utils.h b/voice_bridge/torch/include/ATen/native/cpu/moments_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..18e68996190466b54009493bf00125062bc96d1e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/moments_utils.h @@ -0,0 +1,155 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace at { +namespace native { +namespace utils { + +constexpr int64_t kChunkSize = 16; + +template +void AddMoments( + int64_t m0_add, + const T& m1_add, + const T& m2_add, + int64_t& m0, + T& m1, + T& m2) { + const int64_t n = m0 + m0_add; + const T c = n == 0 ? 
static_cast(0) : static_cast(m0_add) / static_cast(n); + const T delta = m1_add - m1; + m1 += c * delta; + m2 += m2_add + delta * delta * c * static_cast(m0); + m0 = n; +} + +template +C10_ALWAYS_INLINE void AddMomentsVec( + int64_t m0_add, + const vec::Vectorized& m1_add, + const vec::Vectorized& m2_add, + int64_t& m0, + vec::Vectorized& m1, + vec::Vectorized& m2) { + using Vec = vec::Vectorized; + const int64_t n = m0 + m0_add; + const T c = n == 0 ? static_cast(0) : static_cast(m0_add) / static_cast(n); + const Vec c_vec(c); + const Vec delta = m1_add - m1; + m1 += c_vec * delta; + m2 += m2_add + delta * delta * c_vec * Vec(static_cast(m0)); + m0 = n; +} + +// Compute rowwise moments by Welford algorithm and cascade sum to improve +// numerical stability. +// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance +// https://en.wikipedia.org/wiki/Pairwise_summation +template +std::pair RowwiseMomentsImpl(const T* X, int64_t N, int64_t ddof = 0) { + using Vec = vec::Vectorized; + + constexpr int64_t kVecSize = Vec::size(); + const int64_t n = N / kVecSize; + const int64_t m = divup(n, kChunkSize); + const int64_t depth = CeilLog2(m); + + const Vec kZeroVec(T(0)); + c10::SmallVector m0_stk(depth, 0); + c10::SmallVector m1_stk(depth, kZeroVec); + c10::SmallVector m2_stk(depth, kZeroVec); + + for (const auto i : c10::irange(m)) { + const T* X_ptr = X + i * kChunkSize * kVecSize; + const int64_t m0 = std::min(kChunkSize, n - i * kChunkSize); + static std::array c_vecs = ([]() { + std::array result; + for (const auto i : c10::irange(kChunkSize)) { + result[i] = Vec(T(1) / static_cast(i + 1)); + } + return result; + })(); + Vec m1_vec(0); + Vec m2_vec(0); + for (const auto j : c10::irange(m0)) { + const Vec x_vec = Vec::loadu(X_ptr + j * kVecSize); + const Vec delta_vec = x_vec - m1_vec; + m1_vec += delta_vec * c_vecs[j]; + m2_vec += delta_vec * (x_vec - m1_vec); + } + AddMomentsVec(m0, m1_vec, m2_vec, m0_stk[0], m1_stk[0], m2_stk[0]); + int64_t mask = i + 1; + for (int64_t j = 1; j < depth && (mask & 1) == 0; ++j) { + AddMomentsVec( + m0_stk[j - 1], + m1_stk[j - 1], + m2_stk[j - 1], + m0_stk[j], + m1_stk[j], + m2_stk[j]); + m0_stk[j - 1] = 0; + m1_stk[j - 1] = kZeroVec; + m2_stk[j - 1] = kZeroVec; + mask >>= 1; + } + } + for (const auto i : c10::irange(1, depth)) { + AddMomentsVec( + m0_stk[i], m1_stk[i], m2_stk[i], m0_stk[0], m1_stk[0], m2_stk[0]); + } + + std::array m1_arr{}; + std::array m2_arr{}; + m1_stk[0].store(m1_arr.data()); + m2_stk[0].store(m2_arr.data()); + + int64_t m0 = 0; + T m1 = 0; + T m2 = 0; + for (int64_t i = n * kVecSize; i < N; ++i) { + const T delta = X[i] - m1; + ++m0; + m1 += delta / static_cast(m0); + m2 += delta * (X[i] - m1); + } + for (const auto i : c10::irange(kVecSize)) { + AddMoments(n, m1_arr[i], m2_arr[i], m0, m1, m2); + } + + return std::make_pair(m1, m2 / static_cast(N - ddof)); +} + +template +std::pair RowwiseMoments(const T* X, int64_t N, int64_t ddof = 0) { + using Vec = vec::Vectorized; + constexpr int64_t kVecSize = Vec::size(); + const int64_t n = N / kVecSize; + const int64_t m = divup(n, kChunkSize); + const int64_t depth = CeilLog2(m); + if (depth <= 4) { + return RowwiseMomentsImpl(X, N, ddof); + } else if (depth <= 8) { + return RowwiseMomentsImpl(X, N, ddof); + } else if (depth <= 16) { + return RowwiseMomentsImpl(X, N, ddof); + } else if (depth <= 32) { + return RowwiseMomentsImpl(X, N, ddof); + } else { + return RowwiseMomentsImpl(X, N, ddof); + } +} + +} // namespace utils +} // namespace native +} // namespace at diff --git 
a/voice_bridge/torch/include/ATen/native/cpu/utils.h b/voice_bridge/torch/include/ATen/native/cpu/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..5c607f06b3a5afece5b58097561b69836950b776 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/utils.h @@ -0,0 +1,101 @@ +#pragma once + +#include +#include + +#ifdef USE_FBGEMM +#include +#endif + +namespace at { +namespace native { + +inline namespace CPU_CAPABILITY { + +template +inline T data_index_init(T offset) { + return offset; +} + +template +inline T data_index_init(T offset, T& x, const T& X, Args&&... args) { + offset = data_index_init(offset, std::forward(args)...); + x = offset % X; + return offset / X; +} + +inline bool data_index_step() { + return true; +} + +template +inline bool data_index_step(T& x, const T& X, Args&&... args) { + if (data_index_step(std::forward(args)...)) { + x = ((x + 1) == X) ? 0 : (x + 1); + return x == 0; + } + return false; +} + +// Helper struct for bfloat16 vectorization +// Useful when you need float as immediate dtype or accumulate dtype +using namespace vec; +struct Vec2 { + Vectorized val0, val1; + Vec2(Vectorized v0, Vectorized v1) : val0(v0), val1(v1) {} + Vec2(float v) : val0(v), val1(v) {} + static Vec2 loadu(const BFloat16* ptr) { + Vectorized v0, v1; + std::tie(v0, v1) = convert_bfloat16_float(Vectorized::loadu(ptr)); + return {v0, v1}; + } + void store(BFloat16* ptr) const { + Vectorized val = convert_float_bfloat16(val0, val1); + val.store(ptr); + } +}; +inline Vec2 operator+(const Vec2& a, const Vec2& b) { return {a.val0 + b.val0, a.val1 + b.val1}; } +inline Vec2 operator*(const Vec2& a, const Vec2& b) { return {a.val0 * b.val0, a.val1 * b.val1}; } + +template struct VectorizedType { using type = Vectorized; }; +template <> struct VectorizedType { using type = Vec2; }; +template using VecType = typename VectorizedType::type; + +} // namespace + +namespace utils { + +template +T CeilLog2(const T& x) { + if (x <= 2) { + return 1; + } + // Last set bit is floor(log2(x)), floor + 1 is ceil + // except when x is an exact powers of 2, so subtract 1 first + return static_cast(llvm::findLastSet(static_cast(x) - 1)) + 1; +} + +// matrix transpose: +// src has shape of M by N, with leading dimension of ld_src +// dst has shape of N by M, with leading dimension of ld_dst +template +inline void transpose(int64_t M, int64_t N, const T* src, int64_t ld_src, T* dst, int64_t ld_dst) { + for (int64_t j = 0; j < N; j++) { + for (int64_t i = 0; i < M; i++) { + dst[j * ld_dst + i] = src[i * ld_src + j]; + } + } +} + +#ifdef USE_FBGEMM +template <> +inline void transpose(int64_t M, int64_t N, const float* src, int64_t ld_src, float* dst, int64_t ld_dst) { + TORCH_CHECK(fbgemm::fbgemmSupportedCPU(), "Your CPU does not support FBGEMM."); + fbgemm::transpose_simd(M, N, src, ld_src, dst, ld_dst); +} +#endif + +} // namespace utils + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cpu/zmath.h b/voice_bridge/torch/include/ATen/native/cpu/zmath.h new file mode 100644 index 0000000000000000000000000000000000000000..3f3971e6e76e2ec65de228d788c9cdeb1ed93cb3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cpu/zmath.h @@ -0,0 +1,251 @@ +#pragma once + +// Complex number math operations that act as no-ops for other dtypes. 
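+// For example (an illustrative sketch; the values below are assumptions, not
+// part of this diff):
+//   c10::complex<float> z{3.0f, 4.0f};
+//   float m = zabs<c10::complex<float>, float>(z);  // 5.0f, the modulus
+//   double r = zabs<double, double>(-2.0);          // -2.0, a genuine no-op for real dtypes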
+#include +#include +#include +#include + +namespace at { namespace native { +inline namespace CPU_CAPABILITY { + +template +inline VALUE_TYPE zabs (SCALAR_TYPE z) { + return z; +} + +template<> +inline c10::complex zabs > (c10::complex z) { + return c10::complex(std::abs(z)); +} + +template<> +inline float zabs , float> (c10::complex z) { + return std::abs(z); +} + +template<> +inline c10::complex zabs > (c10::complex z) { + return c10::complex(std::abs(z)); +} + +template<> +inline double zabs , double> (c10::complex z) { + return std::abs(z); +} + +// This overload corresponds to non-complex dtypes. +// The function is consistent with its NumPy equivalent +// for non-complex dtypes where `pi` is returned for +// negative real numbers and `0` is returned for 0 or positive +// real numbers. +// Note: `nan` is propagated. +template +inline VALUE_TYPE angle_impl (SCALAR_TYPE z) { + if (at::_isnan(z)) { + return z; + } + return z < 0 ? c10::pi : 0; +} + +template<> +inline c10::complex angle_impl > (c10::complex z) { + return c10::complex(std::arg(z), 0.0); +} + +template<> +inline float angle_impl , float> (c10::complex z) { + return std::arg(z); +} + +template<> +inline c10::complex angle_impl > (c10::complex z) { + return c10::complex(std::arg(z), 0.0); +} + +template<> +inline double angle_impl , double> (c10::complex z) { + return std::arg(z); +} + +template +constexpr VALUE_TYPE real_impl (SCALAR_TYPE z) { + return z; //No-Op +} + +template<> +constexpr c10::complex real_impl > (c10::complex z) { + return c10::complex(z.real(), 0.0); +} + +template<> +constexpr float real_impl , float> (c10::complex z) { + return z.real(); +} + +template<> +constexpr c10::complex real_impl > (c10::complex z) { + return c10::complex(z.real(), 0.0); +} + +template<> +constexpr double real_impl , double> (c10::complex z) { + return z.real(); +} + +template +constexpr VALUE_TYPE imag_impl (SCALAR_TYPE /*z*/) { + return 0; +} + +template<> +constexpr c10::complex imag_impl > (c10::complex z) { + return c10::complex(z.imag(), 0.0); +} + +template<> +constexpr float imag_impl , float> (c10::complex z) { + return z.imag(); +} + +template<> +constexpr c10::complex imag_impl > (c10::complex z) { + return c10::complex(z.imag(), 0.0); +} + +template<> +constexpr double imag_impl , double> (c10::complex z) { + return z.imag(); +} + +template +inline TYPE conj_impl (TYPE z) { + return z; //No-Op +} + +template<> +inline c10::complex conj_impl > (c10::complex z) { + return c10::complex{z.real(), -z.imag()}; +} + +template<> +inline c10::complex conj_impl > (c10::complex z) { + return c10::complex(z.real(), -z.imag()); +} + +template<> +inline c10::complex conj_impl > (c10::complex z) { + return c10::complex(z.real(), -z.imag()); +} + +template +inline TYPE ceil_impl (TYPE z) { + return std::ceil(z); +} + +template <> +inline c10::complex ceil_impl (c10::complex z) { + return c10::complex(std::ceil(z.real()), std::ceil(z.imag())); +} + +template <> +inline c10::complex ceil_impl (c10::complex z) { + return c10::complex(std::ceil(z.real()), std::ceil(z.imag())); +} + +template +inline c10::complex sgn_impl (c10::complex z) { + if (z == c10::complex(0, 0)) { + return c10::complex(0, 0); + } else { + return z / zabs(z); + } +} + +template +inline TYPE floor_impl (TYPE z) { + return std::floor(z); +} + +template <> +inline c10::complex floor_impl (c10::complex z) { + return c10::complex(std::floor(z.real()), std::floor(z.imag())); +} + +template <> +inline c10::complex floor_impl (c10::complex z) { + return 
c10::complex(std::floor(z.real()), std::floor(z.imag())); +} + +template +inline TYPE round_impl (TYPE z) { + return std::nearbyint(z); +} + +template <> +inline c10::complex round_impl (c10::complex z) { + return c10::complex(std::nearbyint(z.real()), std::nearbyint(z.imag())); +} + +template <> +inline c10::complex round_impl (c10::complex z) { + return c10::complex(std::nearbyint(z.real()), std::nearbyint(z.imag())); +} + +template +inline TYPE trunc_impl (TYPE z) { + return std::trunc(z); +} + +template <> +inline c10::complex trunc_impl (c10::complex z) { + return c10::complex(std::trunc(z.real()), std::trunc(z.imag())); +} + +template <> +inline c10::complex trunc_impl (c10::complex z) { + return c10::complex(std::trunc(z.real()), std::trunc(z.imag())); +} + +template ::value, int> = 0> +inline TYPE max_impl (TYPE a, TYPE b) { + if (_isnan(a) || _isnan(b)) { + return std::numeric_limits::quiet_NaN(); + } else { + return std::max(a, b); + } +} + +template ::value, int> = 0> +inline TYPE max_impl (TYPE a, TYPE b) { + if (_isnan(a)) { + return a; + } else if (_isnan(b)) { + return b; + } else { + return std::abs(a) > std::abs(b) ? a : b; + } +} + +template ::value, int> = 0> +inline TYPE min_impl (TYPE a, TYPE b) { + if (_isnan(a) || _isnan(b)) { + return std::numeric_limits::quiet_NaN(); + } else { + return std::min(a, b); + } +} + +template ::value, int> = 0> +inline TYPE min_impl (TYPE a, TYPE b) { + if (_isnan(a)) { + return a; + } else if (_isnan(b)) { + return b; + } else { + return std::abs(a) < std::abs(b) ? a : b; + } +} + +} // end namespace +}} //end at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Activation.h b/voice_bridge/torch/include/ATen/native/cuda/Activation.h new file mode 100644 index 0000000000000000000000000000000000000000..5fc52ff257ce1979d8d04d691ab3d686b2c3f15f --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Activation.h @@ -0,0 +1,31 @@ +#pragma once +#include +#include + +namespace at { +struct TensorIteratorBase; +class TensorBase; +} + +namespace at { namespace native { + +void launch_glu_backward_kernel(const TensorIteratorBase& iter, + int64_t gI_stride, int64_t I_stride); + +void launch_log_sigmoid_forward_kernel(TensorIteratorBase& iter); + +void launch_prelu_cuda_kernel_share_weights( + TensorIteratorBase &iter, const TensorBase &weight); +void launch_prelu_cuda_kernel_multi_weights( + const TensorBase &result, const TensorBase &input, const TensorBase &weight); + +void launch_prelu_cuda_backward_kernel_share_weights( + TensorIteratorBase &iter, const TensorBase &weight); +void launch_prelu_cuda_backward_kernel_multi_weights( + const TensorBase &input, const TensorBase &weight, const TensorBase &grad_out, + const TensorBase &input_grad, const TensorBase &weight_grad_collector); + +void GeluCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate); +void GeluBackwardCUDAKernelImpl(TensorIteratorBase& it, GeluType approximate); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/BinaryInternal.h b/voice_bridge/torch/include/ATen/native/cuda/BinaryInternal.h new file mode 100644 index 0000000000000000000000000000000000000000..e098d32b114d604f6d9a1b5160dbe87de52c4595 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/BinaryInternal.h @@ -0,0 +1,48 @@ +// DON'T include this except from Binary*.cu files. It should not leak into +// headers. 
+#pragma once +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace at { +namespace native { +namespace binary_internal { + +template +struct DivFunctor { + __device__ scalar_t operator()(scalar_t a, scalar_t b) const { + return a / b; + } +}; + +template +struct MulFunctor { + __device__ T operator()(T a, T b) const { + return a * b; + } +}; + +// Workaround for the error: '*' in boolean context, suggest '&&' instead +// [-Werror=int-in-bool-context] +template <> +struct MulFunctor { + __device__ bool operator()(bool a, bool b) const { + return a && b; + } +}; +void div_true_kernel_cuda(TensorIteratorBase& iter); +void div_trunc_kernel_cuda(TensorIteratorBase& iter); +} // namespace binary_internal +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/CUDAJitLoops.cuh b/voice_bridge/torch/include/ATen/native/cuda/CUDAJitLoops.cuh new file mode 100644 index 0000000000000000000000000000000000000000..830a3024a98390e29381952213d8469690bed44f --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/CUDAJitLoops.cuh @@ -0,0 +1,292 @@ +#pragma once +#include + +// Jiterator functions are guarded behind this macro +#if AT_USE_JITERATOR() + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace at { +namespace native { + +template +constexpr auto tuple_to_array_helper(Tuple& t, std::index_sequence seq) { + constexpr auto size = seq.size(); + (void)t; // warning : unused parameter when tuple is empty. + return std::array{static_cast(&std::get(t))...}; +} + +// Helper function convert tuple to std::array +// for passing the arguments to CUDA Kernel +// NOTE: We capture tuple by reference, +// so the pointers in returned array are only valid +// till tuple is alive. 
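+// A hypothetical usage sketch (illustrative only):
+//   std::tuple<int, float> extra(2, 0.5f);
+//   auto arr = tuple_to_array(extra);  // std::array<void*, 2>; arr[0] == &std::get<0>(extra)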
+template +constexpr auto tuple_to_array(std::tuple& extra_args) { + constexpr auto tuple_size = sizeof...(Args); + return tuple_to_array_helper(extra_args, std::make_index_sequence{}); +} + +struct JittedVecKernelCache { + // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements) + at::cuda::jit::NvrtcFunction vec1; + at::cuda::jit::NvrtcFunction vec2; + at::cuda::jit::NvrtcFunction vec4; +}; + +struct JittedKernelVariantCache { + JittedVecKernelCache vec; + at::cuda::jit::NvrtcFunction noncontiguous; + at::cuda::jit::NvrtcFunction dynamic_contiguous; + at::cuda::jit::NvrtcFunction dynamic_noncontiguous; +}; + +inline c10::SmallBuffer pack_kernel_args( + std::initializer_list args, + c10::ArrayRef extra_args) { + c10::SmallBuffer ret(args.size() + extra_args.size()); + std::copy(args.begin(), args.end(), ret.data()); + std::copy(extra_args.begin(), extra_args.end(), ret.data() + args.size()); + return ret; +} + +template +void launch_jitted_unrolled_kernel( + std::mutex &jiterator_mutex, + at::cuda::jit::NvrtcFunction &fn_cache, + const at::cuda::jit::KernelDescriptor &desc, + int64_t N, + array_t data, + inp_calc_t ic, + out_calc_t oc, + loader_t l, + storer_t s, + bool contiguous, + at::cuda::jit::BinaryFuncVariant scalar_pos, + void* scalar_val, + c10::ArrayRef extra_args) { + + TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits::max()); + //casting result to int is always safe, intermediate is int64 and won't overflow + const uint32_t grid = (N + block_work_size() - 1) / block_work_size(); + + if (!fn_cache.function) { + const std::lock_guard lock{jiterator_mutex}; + if (!fn_cache.function) { + constexpr bool dynamic_casting = !std::is_same() || + !std::is_same(); + auto code = at::cuda::jit::generate_code( + desc, contiguous, dynamic_casting, scalar_pos); + fn_cache = at::cuda::jit::jit_pwise_function(code, desc.name); + } + } + + auto args = pack_kernel_args({&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args); + at::cuda::jit::launch_jitted_pwise_function(fn_cache, args.data(), {grid, 1u, 1u}, + {num_threads(), 1u, 1u}); +} + +template +void launch_jitted_vectorized_kernel( + std::mutex &jiterator_mutex, JittedVecKernelCache &fn_cache, + const at::cuda::jit::KernelDescriptor &desc, int64_t N, array_t data, + at::cuda::jit::BinaryFuncVariant scalar_pos, + void *scalar_val, c10::ArrayRef extra_args) { + TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits::max()); + // N is still int64_t for the computation, but it's always safe to cast result to int + const uint32_t grid = (N + block_work_size() - 1) / block_work_size(); + const int vec_size = at::cuda::jit::can_vectorize_up_to( + desc, c10::ArrayRef(data.data, data.size())); + + // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements) + // fn_ptr is set to the appropriate function based on the vec size and GPU used + at::cuda::jit::NvrtcFunction* fn_ptr; + if (vec_size == 4) { + fn_ptr = &fn_cache.vec4; + } else if (vec_size == 2) { + fn_ptr = &fn_cache.vec2; + } else if (vec_size ==1) { + fn_ptr = &fn_cache.vec1; + } else { + TORCH_INTERNAL_ASSERT(false, "unexpected vec_size for jitter vectorized kernel"); + } + + bool vectorized = vec_size > 1; + + if (!fn_ptr->function) { + const std::lock_guard lock{jiterator_mutex}; + if (!fn_ptr->function) { // cache miss! 
+ + // Generates program + auto code = at::cuda::jit::generate_code( + desc, /*contiguous=*/true, /*dynamic_casting=*/false, + scalar_pos, vectorized, vec_size); + std::string kernel_name = vectorized ? desc.name + "_vectorized" + std::to_string(vec_size) : desc.name; + + // Acquires the program + *fn_ptr = at::cuda::jit::jit_pwise_function(code, kernel_name); + } + } + + if (vectorized) { + auto args = pack_kernel_args({&N, &data, scalar_val}, extra_args); + at::cuda::jit::launch_jitted_pwise_function( + *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u}); + } else { + auto ic = TrivialOffsetCalculator(); + auto oc = TrivialOffsetCalculator<1>(); + auto l = memory::LoadWithoutCast(); + auto s = memory::StoreWithoutCast(); + + auto args = pack_kernel_args( + {&N, &data, &ic, &oc, &l, &s, scalar_val}, extra_args); + at::cuda::jit::launch_jitted_pwise_function( + *fn_ptr, args.data(), {grid, 1u, 1u}, {num_threads(), 1u, 1u}); + } +} + +template +void jitted_gpu_kernel_generic( + std::mutex &jiterator_mutex, + JittedKernelVariantCache &cache, + const at::cuda::jit::KernelDescriptor &desc, + at::cuda::jit::BinaryFuncVariant scalar_pos, + c10::ArrayRef extra_args, + TensorIteratorBase& iter, + const bool dynamic_casting, + void *scalar_val) { + TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing()); + TORCH_INTERNAL_ASSERT(iter.ninputs() == arity); + TORCH_INTERNAL_ASSERT(iter.noutputs() == 1); + + constexpr int ntensors = arity + 1; + at::detail::Array data; + for (auto i : c10::irange(ntensors)) { + data[i] = (char*)iter.data_ptr(i); + } + + int64_t numel = iter.numel(); + bool contiguous = iter.is_contiguous(); + + // Decides which of 4 kernel types to launch + // Variations are: + // - Case 1: no dynamic casting and contiguous + // - Case 2: no dynamic casting and noncontiguous + // - Case 3: dynamic casting and contiguous + // - Case 4: dynamic casting and noncontiguous + // These cases align with the non-jitted CUDALoops.cuh cases in gpu_kernel_impl + + if (!dynamic_casting) { + if (contiguous) { + // Case 1: no dynamic casting and contiguous + launch_jitted_vectorized_kernel( + jiterator_mutex, cache.vec, desc, + numel, data, scalar_pos, scalar_val, extra_args); + return; + } + + // Case 2: no dynamic casting and noncontiguous + auto input_offset_calculator = make_input_offset_calculator(iter); + auto output_offset_calculator = make_output_offset_calculator(iter); + auto loader = memory::LoadWithoutCast(); + auto storer = memory::StoreWithoutCast(); + launch_jitted_unrolled_kernel( + jiterator_mutex, cache.noncontiguous, desc, numel, data, + input_offset_calculator, output_offset_calculator, loader, + storer, contiguous, scalar_pos, scalar_val, extra_args); + return; + } + + // Cases 3 and 4 are handled below + // Both require construction of a storer (this asserts 1 output) and one or more loaders + + // Creates store cast to output (the zeroth tensor in TensorIterator) + auto storer = memory::StoreWithCast<1>(iter); + + // Creates load casts from inputs (note offset indexing into the iterators 1...n tensors) + auto loader = memory::LoadWithCast(iter); + + if (contiguous) { + // Case 3: dynamic casting and contiguous + auto input_offset_calculator = TrivialOffsetCalculator(); + auto output_offset_calculator = TrivialOffsetCalculator<1>(); + launch_jitted_unrolled_kernel( + jiterator_mutex, cache.dynamic_contiguous, desc, numel, data, input_offset_calculator, + output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args); + return; + } + + // Case 4: 
dynamic casting and noncontiguous + auto input_offset_calculator = make_input_offset_calculator(iter); + auto output_offset_calculator = make_output_offset_calculator(iter); + launch_jitted_unrolled_kernel( + jiterator_mutex, cache.dynamic_noncontiguous, desc, numel, data, input_offset_calculator, + output_offset_calculator, loader, storer, contiguous, scalar_pos, scalar_val, extra_args); +} + +// NOTE: static to reduce chances of name collision. +template < + char const* name, + typename result_type, + typename f_inputs_type, + int arity, + at::cuda::jit::BinaryFuncVariant scalar_pos = + at::cuda::jit::BinaryFuncVariant::NoScalar, + typename... ExtraArgs> +static void jitted_gpu_kernel_impl( + TensorIteratorBase& iter, + const std::string &f, + const bool dynamic_casting, + at::opmath_type scalar_val, + std::tuple extra_args) { + + // TODO: Memory use can probably be optimized by re-using kernels across GPUs with + // the same compute capability + static std::mutex jiterator_mutex; + static std::vector device_caches(c10::cuda::device_count()); + + constexpr int nInputs = arity; + constexpr int nOutputs = 1; // TODO: Support more than 1 output + static const auto desc = at::cuda::jit::make_kernel_descriptor< + result_type, f_inputs_type, ExtraArgs...>(name, f, nInputs, nOutputs); + + auto &cache = device_caches[iter.device().index()]; + auto extra_args_array = tuple_to_array(extra_args); + return jitted_gpu_kernel_generic( + jiterator_mutex, + cache, + desc, + scalar_pos, + extra_args_array, + iter, + dynamic_casting, + &scalar_val + ); +} + +}} // at::native + +#endif // AT_USE_JITERATOR() diff --git a/voice_bridge/torch/include/ATen/native/cuda/CUDALoops.cuh b/voice_bridge/torch/include/ATen/native/cuda/CUDALoops.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a3378f7ee2b8b4ca21e715c53ce6c32b1c7db8c1 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/CUDALoops.cuh @@ -0,0 +1,247 @@ +#pragma once + +// This file provides two functions to help write GPU elementwise kernels: +// +// gpu_kernel(TensorIterator iter, ) +// gpu_kernel_with_scalars(TensorIterator iter, ) +// +// The gpu_kernel_with_scalars generates specializations that support a +// single scalar CPU argument, such as from `cuda_tensor + 5`. The CPU scalar +// is lifted to a kernel parameter instead of copying to device memory. +// This should be used in conjunction with TensorIterator::allow_cpu_scalars_, +// which is the default for TensorIterator::binary_op. Otherwise, all inputs +// and the output must be on the GPU. 
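Before the reciprocal and multiplication examples that follow, a hedged sketch of how a kernel author typically reaches this entry point. The op function and the fixed float dtype are illustrative assumptions (real kernels dispatch over dtypes first), but `TensorIterator::binary_op`, which enables `allow_cpu_scalars_` by default as noted above, is the real API:

```cpp
// Hypothetical caller wiring for gpu_kernel_with_scalars, assuming float
// inputs for brevity.
at::Tensor add_float_cuda(const at::Tensor& self, const at::Tensor& other) {
  at::Tensor out;
  auto iter = at::TensorIterator::binary_op(out, self, other);
  gpu_kernel_with_scalars(iter, [] GPU_LAMBDA (float a, float b) -> float {
    return a + b;  // `b` may arrive as a lifted CPU scalar parameter
  });
  return iter.output();
}
```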
+// +// For example, to write a reciprocal kernel for GPU float Tensors: +// +// gpu_kernel(iter, []GPU_LAMBDA(float a) { +// return 1.0f / a; +// }); +// +// To write a multiplication kernel for GPU float Tensors where one argument +// may be a CPU scalar: +// +// gpu_kernel_with_scalars(iter, []GPU_LAMBDA(float a, float b) { +// return a * b; +// }); +// +// See BinaryOpsKernel.cu for the complete implementation +// + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef __NVCC__ +#define ASSERT_HOST_DEVICE_LAMBDA(type) \ + static_assert(__nv_is_extended_host_device_lambda_closure_type(type), \ + #type " must be a __host__ __device__ lambda") +#else +#define ASSERT_HOST_DEVICE_LAMBDA(type) +#endif + + +namespace at { namespace native { + +template +C10_LAUNCH_BOUNDS_1(num_threads()) +__global__ void vectorized_elementwise_kernel(int N, func_t f, array_t data) { + using traits = function_traits; + int remaining = N - block_work_size() * blockIdx.x; + + if (remaining < block_work_size()) { // if this block handles the reminder, just do a naive unrolled loop + auto input_calc = TrivialOffsetCalculator(); + auto output_calc = TrivialOffsetCalculator<1>(); + auto loader = memory::LoadWithoutCast(); + auto storer = memory::StoreWithoutCast(); + auto policy = memory::policies::unroll( + data, remaining, input_calc, output_calc, loader, storer); + elementwise_kernel_helper(f, policy); + } else { // if this block has a full `block_work_size` data to handle, use vectorized memory access + elementwise_kernel_helper(f, memory::policies::vectorized(data)); + } +} + +template +C10_LAUNCH_BOUNDS_1(num_threads()) +__global__ void unrolled_elementwise_kernel(int N, func_t f, array_t data, + inp_calc_t ic, out_calc_t oc, loader_t l, storer_t s) +{ + int remaining = N - block_work_size() * blockIdx.x; + auto policy = memory::policies::unroll(data, remaining, ic, oc, l, s); + elementwise_kernel_helper(f, policy); +} + +// this function assume trivial 1d and no dynamic casting +template +static inline void launch_vectorized_kernel(int64_t N, const func_t& f, array_t data) { + TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits::max()); + using traits = function_traits; + int64_t grid = (N + block_work_size() - 1) / block_work_size(); + auto stream = at::cuda::getCurrentCUDAStream(); + int vec_size = memory::can_vectorize_up_to(data); + + switch (vec_size) { + case 4: + vectorized_elementwise_kernel<4, func_t, array_t><<>>(N, f, data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + break; + case 2: + vectorized_elementwise_kernel<2, func_t, array_t><<>>(N, f, data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + break; + case 1: { + auto input_calc = TrivialOffsetCalculator(); + auto output_calc = TrivialOffsetCalculator<1>(); + auto loader = memory::LoadWithoutCast(); + auto storer = memory::StoreWithoutCast(); + unrolled_elementwise_kernel<<>>(N, f, data, input_calc, output_calc, loader, storer); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + break; + } + default: + TORCH_INTERNAL_ASSERT(false, "Unexpected vectorization size"); + } +} + + +template +static inline void launch_unrolled_kernel(int64_t N, const func_t& f, array_t data, + inp_calc_t ic, out_calc_t oc, loader_t l, storer_t s) +{ + TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits::max()); + int64_t grid = (N + block_work_size() - 1) / block_work_size(); + auto stream = at::cuda::getCurrentCUDAStream(); + unrolled_elementwise_kernel<<>>(N, f, data, ic, oc, l, s); + 
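The `vec_size` switch in `launch_vectorized_kernel` above is driven by runtime pointer alignment. A minimal standalone sketch of that style of test (assumed semantics only; `memory::can_vectorize_up_to` is the real check):

```cpp
#include <cstdint>
#include <initializer_list>

// Widest element count per memory transaction that every operand pointer
// supports: 4 if all are 4*sizeof(T)-byte aligned, else 2, else 1.
template <typename T>
int vec_width_sketch(std::initializer_list<const void*> ptrs) {
  int width = 4;
  for (const void* p : ptrs) {
    auto addr = reinterpret_cast<std::uintptr_t>(p);
    while (width > 1 && addr % (width * sizeof(T)) != 0) {
      width /= 2;
    }
  }
  return width;  // e.g. a 16-byte-aligned float* allows float4 traffic
}
```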
C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +C10_LAUNCH_BOUNDS_2(nt, 4) +__global__ void elementwise_kernel(int N, func_t f) { + int tid = threadIdx.x; + int nv = nt * vt; + int idx = nv * blockIdx.x + tid; + #pragma unroll + for (int i = 0; i < vt; i++) { + if (idx < N) { + f(idx); + idx += nt; + } + } +} + +template +static void launch_legacy_kernel(int64_t N, const func_t& f) { + TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits::max()); + if (N == 0) { + return; + } + dim3 block(nt); + dim3 grid((N + block.x * vt - 1) / (block.x * vt)); + auto stream = at::cuda::getCurrentCUDAStream(); + elementwise_kernel<<>>(N, f); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +C10_HOST_DEVICE typename traits::result_type +invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i, + std::index_sequence) { + (void)strides; + (void)i; + return f(c10::load::type>(data[INDEX] + i * strides[INDEX])...); +} + +template > +C10_HOST_DEVICE typename traits::result_type +invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i) { + using Indices = std::make_index_sequence; + return invoke_impl(f, data, strides, i, Indices{}); +} + +template +C10_HOST_DEVICE typename traits::result_type +invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i, + std::index_sequence) { + (void)strides; + (void)i; + return f(c10::fetch_and_cast::type>(dtypes[I], data[I] + i * strides[I])...); +} + +template > +C10_HOST_DEVICE typename traits::result_type +invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i) { + using Indices = std::make_index_sequence; + return invoke_impl(f, data, strides, dtypes, i, Indices{}); +} + + +template +void gpu_kernel_impl(TensorIteratorBase& iter, const func_t& f) { + using traits = function_traits; + using arg0_t = typename traits::result_type; + constexpr int ntensors = traits::arity + 1; + + TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing()); + TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity); + TORCH_INTERNAL_ASSERT(iter.noutputs() == 1); + + at::detail::Array data; + for (int i = 0; i < ntensors; i++) { + data[i] = (char*)iter.data_ptr(i); + } + + int64_t numel = iter.numel(); + + bool contiguous = iter.is_contiguous(); + bool dynamic_casting = needs_dynamic_casting::check(iter); + + if (!dynamic_casting) { + if (contiguous) { + launch_vectorized_kernel(numel, f, data); + } else { + auto offset_calc = ::make_offset_calculator(iter); + constexpr int unroll_factor = sizeof(arg0_t) >= 4 ? 
2 : 4; + launch_legacy_kernel<128,unroll_factor>(numel, [=]GPU_LAMBDA(int idx) { + auto offsets = offset_calc.get(idx); + arg0_t* out = (arg0_t*)(data[0] + offsets[0]); + *out = invoke(f, &data.data[1], &offsets.data[1], 1); + }); + } + } else { + if (contiguous) { + auto loader = memory::LoadWithCast(iter); + auto storer = memory::StoreWithCast<1>(iter); + auto input_offset_calculator = TrivialOffsetCalculator(); + auto output_offset_calculator = TrivialOffsetCalculator<1>(); + launch_unrolled_kernel(numel, f, data, input_offset_calculator, output_offset_calculator, loader, storer); + } else { + at::detail::Array dtypes; + for (int i = 0; i < ntensors; i++) { + dtypes[i] = iter.dtype(i); + } + auto offset_calc = ::make_offset_calculator(iter); + launch_legacy_kernel<128, 4>(numel, [=]GPU_LAMBDA(int idx) { + auto offsets = offset_calc.get(idx); + void* out = data[0] + offsets[0]; + arg0_t result = invoke(f, &data.data[1], &offsets.data[1], &dtypes.data[1], 1); + c10::cast_and_store(dtypes[0], out, result); + }); + } + } +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/CompositeRandomAccessor.h b/voice_bridge/torch/include/ATen/native/cuda/CompositeRandomAccessor.h new file mode 100644 index 0000000000000000000000000000000000000000..d47a7fa776f1b681b26dc5ec8b4548604d359946 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/CompositeRandomAccessor.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include + +namespace at { namespace native { + +struct TupleInfoCPU { + template + using tuple = thrust::tuple; + + template + static constexpr auto tie(Types&... args) noexcept { + return thrust::tie(args...); + } +}; + +template +using CompositeRandomAccessorCPU = + CompositeRandomAccessor; + +template +void swap( + references_holder rh1, + references_holder rh2 +) { + return thrust::swap(rh1.data(), rh2.data()); +} + +template +auto get(references_holder rh) -> decltype(thrust::get(rh.data())) { + return thrust::get(rh.data()); +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Copy.h b/voice_bridge/torch/include/ATen/native/cuda/Copy.h new file mode 100644 index 0000000000000000000000000000000000000000..5639567d666686dd81ca5b4b032fb44f039eb782 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Copy.h @@ -0,0 +1,10 @@ +#pragma once + +namespace at { +struct TensorIteratorBase; + +namespace native { + +void direct_copy_kernel_cuda(TensorIteratorBase &iter); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/CuFFTPlanCache.h b/voice_bridge/torch/include/ATen/native/cuda/CuFFTPlanCache.h new file mode 100644 index 0000000000000000000000000000000000000000..9897fbeb51e7879f2326f4d87671a3b8ce4c9520 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/CuFFTPlanCache.h @@ -0,0 +1,532 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { namespace detail { + +// Enum representing the FFT type +enum class CuFFTTransformType : int8_t { + C2C, // Complex-to-complex + R2C, // Real-to-complex + C2R, // Complex-to-real +}; + +// This struct is used to let us easily compute hashes of the +// parameters. +// It will be the **key** to the plan cache. +struct CuFFTParams +{ + int64_t signal_ndim_; // between 1 and max_rank, i.e., 1 <= signal_ndim <= 3 + // These include additional batch dimension as well. 
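Because `CuFFTParams` is the cache key, it is hashed as raw bytes (via the `ParamsHash` functor named later in this file), which is why the constructor below zeroes the whole struct before filling it in. A self-contained sketch of that hashing style; FNV-1a is an assumption chosen for illustration, not necessarily the library's hash:

```cpp
#include <cstddef>
#include <cstdint>

// Bytewise FNV-1a over a trivially-copyable key. Uninitialized padding
// bytes would make equal keys hash differently, hence the memset.
template <typename Params>
struct BytewiseHashSketch {
  std::size_t operator()(const Params& params) const {
    const auto* bytes = reinterpret_cast<const std::uint8_t*>(&params);
    std::uint64_t h = 0xcbf29ce484222325ull;      // FNV offset basis
    for (std::size_t i = 0; i < sizeof(Params); ++i) {
      h = (h ^ bytes[i]) * 0x100000001b3ull;      // FNV prime
    }
    return static_cast<std::size_t>(h);
  }
};
```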
+ int64_t sizes_[max_rank + 1]; + int64_t input_strides_[max_rank + 1]; + int64_t output_strides_[max_rank + 1]; + CuFFTTransformType fft_type_; + ScalarType value_type_; + + CuFFTParams() = default; + + CuFFTParams(IntArrayRef in_strides, IntArrayRef out_strides, + IntArrayRef signal_sizes, CuFFTTransformType fft_type, ScalarType value_type) { + // Padding bits must be zeroed for hashing + memset(this, 0, sizeof(*this)); + signal_ndim_ = signal_sizes.size() - 1; + fft_type_ = fft_type; + value_type_ = value_type; + + TORCH_INTERNAL_ASSERT(in_strides.size() == signal_sizes.size()); + TORCH_INTERNAL_ASSERT(out_strides.size() == signal_sizes.size()); + TORCH_INTERNAL_ASSERT(1 <= signal_ndim_ && signal_ndim_ <= max_rank); + + std::copy(signal_sizes.cbegin(), signal_sizes.cend(), sizes_); + std::copy(in_strides.cbegin(), in_strides.cend(), input_strides_); + std::copy(out_strides.cbegin(), out_strides.cend(), output_strides_); + } +}; + +static_assert(std::is_trivial::value, ""); + +// Returns true if the transform type has complex input +inline bool cufft_complex_input(CuFFTTransformType type) { + switch (type) { + case CuFFTTransformType::C2C: + case CuFFTTransformType::C2R: + return true; + + case CuFFTTransformType::R2C: + return false; + } + TORCH_INTERNAL_ASSERT(false); +} + +// Returns true if the transform type has complex output +inline bool cufft_complex_output(CuFFTTransformType type) { + switch (type) { + case CuFFTTransformType::C2C: + case CuFFTTransformType::R2C: + return true; + + case CuFFTTransformType::C2R: + return false; + } + TORCH_INTERNAL_ASSERT(false); +} + +// Create transform type enum from bools representing if input and output are complex +inline CuFFTTransformType GetCuFFTTransformType(bool complex_input, bool complex_output) { + if (complex_input && complex_output) { + return CuFFTTransformType::C2C; + } else if (complex_input && !complex_output) { + return CuFFTTransformType::C2R; + } else if (!complex_input && complex_output) { + return CuFFTTransformType::R2C; + } + TORCH_INTERNAL_ASSERT(false, "Real to real FFTs are not supported"); +} + + +class CuFFTHandle { + ::cufftHandle handle_; +public: + + CuFFTHandle() { + CUFFT_CHECK(cufftCreate(&handle_)); + } + + ::cufftHandle & get() { return handle_; } + const ::cufftHandle & get() const { return handle_; } + + ~CuFFTHandle() { +// Not using fftDestroy() for rocFFT to work around double freeing of handles +#if !defined(USE_ROCM) + cufftDestroy(handle_); +#endif + } +}; + +__forceinline__ +static bool is_pow_of_two(int64_t x) { + return (x & (x - 1)) == 0; +} + +#if defined(USE_ROCM) + using cufft_size_type = int; +#else + using cufft_size_type = long long int; +#endif + +using CuFFTDimVector = c10::SmallVector; + +// Struct representing a tensor in CuFFT's data layout for planning transforms +// See NOTE [ cuFFT Embedded Strides ]. +struct CuFFTDataLayout { + CuFFTDimVector embed; + cufft_size_type stride, dist; + bool must_clone, simple; +}; + +// Returns a cufft embedding for a contiguous signal of the given size. +// e.g. if the input is cloned, this will be the resulting data layout +// See NOTE [ cuFFT Embedded Strides ]. 
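A quick sanity check of the `x & (x - 1)` trick used by `is_pow_of_two` above; note that zero would also pass the test, so it relies on callers passing positive sizes:

```cpp
static_assert((64 & 63) == 0, "powers of two clear to zero");
static_assert((96 & 95) != 0, "non-powers keep a bit set");
static_assert((0 & -1) == 0, "zero also passes the test");
```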
+inline CuFFTDataLayout cufft_simple_embed(IntArrayRef sizes, bool onesided) { + CuFFTDataLayout layout; + layout.simple = true; + layout.must_clone = false; + layout.embed.assign(sizes.cbegin() + 1, sizes.cend()); + if (onesided) { + layout.embed.back() = sizes.back() / 2 + 1; + } + layout.stride = 1; + layout.dist = 1; + for (const auto& len : layout.embed) { + layout.dist *= len; + } + return layout; +} + +// Convert strides to a CuFFT embedded representation. +// If strides cannot be embedded, returns a simple layout and sets must_clone flag +// See NOTE [ cuFFT Embedded Strides ]. +inline CuFFTDataLayout as_cufft_embed(IntArrayRef strides, IntArrayRef sizes, bool onesided) { + const auto signal_ndim = strides.size() - 1; + CuFFTDataLayout layout; + auto last_stride = strides[signal_ndim]; + layout.must_clone = (last_stride <= 0); + + const auto last_dim_size = onesided ? + sizes[signal_ndim] / 2 + 1 : sizes[signal_ndim]; + const auto signal_numel = c10::multiply_integers(sizes.slice(1, sizes.size() - 2)) * last_dim_size; + + // Zero stides are not allowed, even if the batch size is one. + // If that happens just set a dummy case + if (sizes[0] == 1) { + layout.dist = signal_numel; + } else if (strides[0] == 0) { + layout.must_clone = true; + } else { + layout.dist = strides[0]; + } + + // Calculate the embedding shape, or set must_clone if the strides cannot be embedded + layout.embed.resize(signal_ndim); + for (auto i = signal_ndim - 1; !layout.must_clone && i > 0; i--) { + auto stride = strides[i]; + if (sizes[i] == 1) { + layout.embed[i] = 1; + } else if (stride > 0 && stride % last_stride == 0) { + layout.embed[i] = stride / last_stride; + last_stride = stride; + } else { + layout.must_clone = true; + } + } + + if (layout.must_clone) { + // If the input needs to be cloned, assume it will be contiguous + layout = cufft_simple_embed(sizes, onesided); + layout.must_clone = true; + } else { + layout.embed[0] = sizes[1]; + layout.stride = strides[signal_ndim]; + // Determine if layout represents a simple embedding (contiguous data) + layout.simple = [&] { + for (const auto i : c10::irange(1, signal_ndim - 1)) { + if (layout.embed[i] != sizes[i + 1]) { + return false; + } + } + + return (layout.stride == 1 && layout.dist == signal_numel && + layout.embed.back() == last_dim_size); + }(); + } + return layout; +} + +// This class contains all the information needed to execute a cuFFT plan: +// 1. the plan +// 2. whether to clone input before executing the plan +// 3. the workspace size needed +// +// This class will be the **value** in the plan cache. +// It **owns** the raw plan via a unique_ptr. +class CuFFTConfig { +public: + + // Only move semantics is enought for this class. Although we already use + // unique_ptr for the plan, still remove copy constructor and assignment op so + // we don't accidentally copy and take perf hit. 
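A worked instance of the simple embedding computed by `cufft_simple_embed` above, for a contiguous one-sided R2C signal with `sizes = [batch, 64, 128]`:

```cpp
// embed copies sizes[1:] and halves the last dim for one-sided transforms.
constexpr long long embed1 = 64;            // sizes[1] copied as-is
constexpr long long embed2 = 128 / 2 + 1;   // last dim halved: 65
constexpr long long dist = embed1 * embed2; // product of the embed dims
static_assert(dist == 4160, "elements between consecutive batch signals");
// layout.stride == 1 because the innermost elements stay adjacent
```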
+ CuFFTConfig(const CuFFTConfig&) = delete; + CuFFTConfig& operator=(CuFFTConfig const&) = delete; + + explicit CuFFTConfig(const CuFFTParams& params): + CuFFTConfig( + IntArrayRef(params.input_strides_, params.signal_ndim_ + 1), + IntArrayRef(params.output_strides_, params.signal_ndim_ + 1), + IntArrayRef(params.sizes_, params.signal_ndim_ + 1), + params.fft_type_, + params.value_type_) {} + + // For complex types, strides are in units of 2 * element_size(dtype) + // sizes are for the full signal, including batch size and always two-sided + CuFFTConfig(IntArrayRef in_strides, IntArrayRef out_strides, + IntArrayRef sizes, CuFFTTransformType fft_type, ScalarType dtype): + fft_type_(fft_type), value_type_(dtype) { + + // signal sizes (excluding batch dim) + CuFFTDimVector signal_sizes(sizes.begin() + 1, sizes.end()); + + // input batch size + const int64_t batch = sizes[0]; + const int64_t signal_ndim = sizes.size() - 1; + + // Since cuFFT has limited non-unit stride support and various constraints, we + // use a flag to keep track throughout this function to see if we need to + // input = input.clone(); + +#if defined(USE_ROCM) + // clone input to avoid issues with hipfft clobering the input and failing tests + clone_input = true; +#else + clone_input = false; +#endif + + // For half, base strides on the real part of real-to-complex and + // complex-to-real transforms are not supported. Since our output is always + // contiguous, only need to check real-to-complex case. + if (dtype == ScalarType::Half) { + // cuFFT on half requires compute capability of at least SM_53 + auto dev_prop = at::cuda::getCurrentDeviceProperties(); + TORCH_CHECK(dev_prop->major >= 5 && !(dev_prop->major == 5 && dev_prop->minor < 3), + "cuFFT doesn't support signals of half type with compute " + "capability less than SM_53, but the device containing input half " + "tensor only has SM_", dev_prop->major, dev_prop->minor); + for (const auto i : c10::irange(signal_ndim)) { + TORCH_CHECK(is_pow_of_two(sizes[i + 1]), + "cuFFT only supports dimensions whose sizes are powers of two when" + " computing in half precision, but got a signal size of", + sizes.slice(1)); + } + clone_input |= in_strides.back() != 1; + } + + CuFFTDataLayout in_layout; + if (clone_input) { + in_layout = cufft_simple_embed(sizes, fft_type == CuFFTTransformType::C2R); + } else { + in_layout = as_cufft_embed(in_strides, sizes, fft_type == CuFFTTransformType::C2R); + } + auto out_layout = as_cufft_embed(out_strides, sizes, fft_type == CuFFTTransformType::R2C); + TORCH_INTERNAL_ASSERT(!out_layout.must_clone, "Out strides cannot be represented as CuFFT embedding"); + clone_input |= in_layout.must_clone; + + // Check if we can take advantage of simple data layout. + // + // See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu. 
+ + const bool simple_layout = in_layout.simple && out_layout.simple; + +#if defined(USE_ROCM) + hipfftType exec_type = [&]{ + if (dtype == kFloat) { + switch (fft_type) { + case CuFFTTransformType::C2C: return HIPFFT_C2C; + case CuFFTTransformType::R2C: return HIPFFT_R2C; + case CuFFTTransformType::C2R: return HIPFFT_C2R; + } + } else if (dtype == kDouble) { + switch (fft_type) { + case CuFFTTransformType::C2C: return HIPFFT_Z2Z; + case CuFFTTransformType::R2C: return HIPFFT_D2Z; + case CuFFTTransformType::C2R: return HIPFFT_Z2D; + } + } + TORCH_CHECK(false, "hipFFT doesn't support transforms of type: ", dtype); + }(); +#else + cudaDataType itype, otype, exec_type; + const auto complex_input = cufft_complex_input(fft_type); + const auto complex_output = cufft_complex_output(fft_type); + if (dtype == ScalarType::Float) { + itype = complex_input ? CUDA_C_32F : CUDA_R_32F; + otype = complex_output ? CUDA_C_32F : CUDA_R_32F; + exec_type = CUDA_C_32F; + } else if (dtype == ScalarType::Double) { + itype = complex_input ? CUDA_C_64F : CUDA_R_64F; + otype = complex_output ? CUDA_C_64F : CUDA_R_64F; + exec_type = CUDA_C_64F; + } else if (dtype == ScalarType::Half) { + itype = complex_input ? CUDA_C_16F : CUDA_R_16F; + otype = complex_output ? CUDA_C_16F : CUDA_R_16F; + exec_type = CUDA_C_16F; + } else { + TORCH_CHECK(false, "cuFFT doesn't support tensor of type: ", dtype); + } +#endif + + // disable auto allocation of workspace to use THC allocator + CUFFT_CHECK(cufftSetAutoAllocation(plan(), /* autoAllocate */ 0)); + + size_t ws_size_t; + + // make plan + if (simple_layout) { + // If with unit-stride, we tell cuFFT by setting inembed == onembed == NULL. + // In such case, cuFFT ignores istride, ostride, idist, and odist + // by assuming istride = ostride = 1. + // + // See NOTE [ cuFFT Embedded Strides ] in native/cuda/SpectralOps.cu. 
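With auto-allocation disabled above, the caller owns the workspace buffer. A hedged sketch of the glue this enables (the executor function is hypothetical and assumes the ATen headers; `cufftSetWorkArea` is the real cuFFT API, and `plan()`/`workspace_size()` are the accessors defined at the end of this class):

```cpp
// Attach an externally allocated workspace before executing the plan, so the
// buffer can come from PyTorch's caching CUDA allocator rather than a
// cudaMalloc hidden inside cuFFT.
void exec_with_external_workspace(const CuFFTConfig& config) {
  auto workspace = at::empty(
      {config.workspace_size()},
      at::TensorOptions().device(at::kCUDA).dtype(at::kByte));
  CUFFT_CHECK(cufftSetWorkArea(config.plan(), workspace.data_ptr()));
  // ... a cufftXtExec-style call on config.plan() would follow ...
}
```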
+#if defined(USE_ROCM) + CUFFT_CHECK(hipfftMakePlanMany(plan(), signal_ndim, signal_sizes.data(), + /* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1, + /* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1, + exec_type, batch, &ws_size_t)); +#else + CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(), + /* inembed */ nullptr, /* base_istride */ 1, /* idist */ 1, itype, + /* onembed */ nullptr, /* base_ostride */ 1, /* odist */ 1, otype, + batch, &ws_size_t, exec_type)); +#endif + } else { +#if defined(USE_ROCM) + CUFFT_CHECK(hipfftMakePlanMany(plan(), signal_ndim, signal_sizes.data(), + in_layout.embed.data(), in_layout.stride, in_layout.dist, + out_layout.embed.data(), out_layout.stride, out_layout.dist, + exec_type, batch, &ws_size_t)); +#else + CUFFT_CHECK(cufftXtMakePlanMany(plan(), signal_ndim, signal_sizes.data(), + in_layout.embed.data(), in_layout.stride, in_layout.dist, itype, + out_layout.embed.data(), out_layout.stride, out_layout.dist, otype, + batch, &ws_size_t, exec_type)); +#endif + } + ws_size = static_cast(ws_size_t); + } + + const cufftHandle &plan() const { return plan_ptr.get(); } + + CuFFTTransformType transform_type() const { return fft_type_; } + ScalarType data_type() const { return value_type_; } + bool should_clone_input() const { return clone_input; } + int64_t workspace_size() const { return ws_size; } + +private: + CuFFTHandle plan_ptr; + bool clone_input; + int64_t ws_size; + CuFFTTransformType fft_type_; + ScalarType value_type_; +}; + +#if (defined(CUDA_VERSION) && CUDA_VERSION < 10000) || defined(USE_ROCM) + // Note that the max plan number for CUDA version < 10 has to be 1023 + // due to a bug that fails on the 1024th plan + constexpr int64_t CUFFT_MAX_PLAN_NUM = 1023; + constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = CUFFT_MAX_PLAN_NUM; +#else + constexpr int64_t CUFFT_MAX_PLAN_NUM = std::numeric_limits::max(); + // The default max cache size chosen for CUDA version > 10 is arbitrary. + // This number puts a limit on how big of a plan cache should we maintain by + // default. Users can always configure it via cufft_set_plan_cache_max_size. + constexpr int64_t CUFFT_DEFAULT_CACHE_SIZE = 4096; +#endif +static_assert(0 <= CUFFT_MAX_PLAN_NUM && CUFFT_MAX_PLAN_NUM <= std::numeric_limits::max(), + "CUFFT_MAX_PLAN_NUM not in size_t range"); +static_assert(CUFFT_DEFAULT_CACHE_SIZE >= 0 && CUFFT_DEFAULT_CACHE_SIZE <= CUFFT_MAX_PLAN_NUM, + "CUFFT_DEFAULT_CACHE_SIZE not in [0, CUFFT_MAX_PLAN_NUM] range"); + +// This cache assumes that the mapping from key to value never changes. +// This is **NOT** thread-safe. Please use a mutex when using it **AND** the +// value returned from try_emplace_value. +// The contract of using this cache is that try_emplace_value should only be +// used when the max_size is positive. 
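A hedged usage sketch of the contract just described, written against the class defined below: hold the cache's own mutex for both the lookup and any use of the returned reference.

```cpp
int64_t plan_workspace_bytes(CuFFTParamsLRUCache& cache,
                             const CuFFTParams& key) {
  std::lock_guard<std::mutex> guard(cache.mutex);
  const CuFFTConfig& config = cache.lookup(key);  // hit: splice to LRU front;
                                                  // miss: build and cache plan
  return config.workspace_size();                 // consume while still locked
}
```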
+class CuFFTParamsLRUCache { +public: + using kv_t = typename std::pair; + using map_t = typename std::unordered_map, + typename std::list::iterator, + ParamsHash, + ParamsEqual>; + using map_kkv_iter_t = typename map_t::iterator; + + + CuFFTParamsLRUCache() : CuFFTParamsLRUCache(CUFFT_DEFAULT_CACHE_SIZE) {} + + CuFFTParamsLRUCache(int64_t max_size) { + _set_max_size(max_size); + } + + CuFFTParamsLRUCache(CuFFTParamsLRUCache&& other) noexcept : + _usage_list(std::move(other._usage_list)), + _cache_map(std::move(other._cache_map)), + _max_size(other._max_size) {} + + CuFFTParamsLRUCache& operator=(CuFFTParamsLRUCache&& other) noexcept { + _usage_list = std::move(other._usage_list); + _cache_map = std::move(other._cache_map); + _max_size = other._max_size; + return *this; + } + + // If key is in this cache, return the cached config. Otherwise, emplace the + // config in this cache and return it. + // Return const reference because CuFFTConfig shouldn't be tampered with once + // created. + const CuFFTConfig &lookup(CuFFTParams params) { + AT_ASSERT(_max_size > 0); + + map_kkv_iter_t map_it = _cache_map.find(params); + // Hit, put to list front + if (map_it != _cache_map.end()) { + _usage_list.splice(_usage_list.begin(), _usage_list, map_it->second); + return map_it->second->second; + } + + // Miss + // remove if needed + if (_usage_list.size() >= _max_size) { + auto last = _usage_list.end(); + last--; + _cache_map.erase(last->first); + _usage_list.pop_back(); + } + + // construct new plan at list front, then insert into _cache_map + _usage_list.emplace_front(std::piecewise_construct, + std::forward_as_tuple(params), + std::forward_as_tuple(params)); + auto kv_it = _usage_list.begin(); + _cache_map.emplace(std::piecewise_construct, + std::forward_as_tuple(kv_it->first), + std::forward_as_tuple(kv_it)); + return kv_it->second; + } + + void clear() { + _cache_map.clear(); + _usage_list.clear(); + } + + void resize(int64_t new_size) { + _set_max_size(new_size); + auto cur_size = _usage_list.size(); + if (cur_size > _max_size) { + auto delete_it = _usage_list.end(); + for (size_t i = 0; i < cur_size - _max_size; i++) { + delete_it--; + _cache_map.erase(delete_it->first); + } + _usage_list.erase(delete_it, _usage_list.end()); + } + } + + size_t size() const { return _cache_map.size(); } + + size_t max_size() const noexcept { return _max_size; } + + std::mutex mutex; + +private: + // Only sets size and does value check. Does not resize the data structures. + void _set_max_size(int64_t new_size) { + // We check that 0 <= new_size <= CUFFT_MAX_PLAN_NUM here. Since + // CUFFT_MAX_PLAN_NUM is of type size_t, we need to do non-negativity check + // first. + TORCH_CHECK(new_size >= 0, + "cuFFT plan cache size must be non-negative, but got ", new_size); + TORCH_CHECK(new_size <= CUFFT_MAX_PLAN_NUM, + "cuFFT plan cache size can not be larger than ", CUFFT_MAX_PLAN_NUM, ", but got ", new_size); + _max_size = static_cast(new_size); + } + + std::list _usage_list; + map_t _cache_map; + size_t _max_size; +}; + +// Since ATen is separated into CPU build and CUDA build, we need a way to call +// these functions only when CUDA is loaded. We use CUDA hooks for this purpose +// (at cuda/detail/CUDAHooks.cpp), and call the hooked functions from the actual +// native function counterparts (at native/SpectralOps.cpp), i.e., +// _cufft_get_plan_cache_max_size, _cufft_set_plan_cache_max_size +// _cufft_get_plan_cache_size, and _cufft_clear_plan_cache. 
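One more illustrative sketch against the cache above, showing the LRU eviction behavior of `lookup` and `resize` (illustrative only; building real plans needs a CUDA device and valid params):

```cpp
void lru_eviction_demo(CuFFTParamsLRUCache& cache,
                       const CuFFTParams& a, const CuFFTParams& b) {
  std::lock_guard<std::mutex> guard(cache.mutex);
  cache.resize(1);
  cache.lookup(a);  // miss: plan for `a` built and cached
  cache.lookup(b);  // miss: `a` is least recently used and gets evicted
  // cache.size() == 1 here; only `b`'s plan remains resident
}
```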
+int64_t cufft_get_plan_cache_max_size_impl(int64_t device_index); +void cufft_set_plan_cache_max_size_impl(int64_t device_index, int64_t max_size); +int64_t cufft_get_plan_cache_size_impl(int64_t device_index); +void cufft_clear_plan_cache_impl(int64_t device_index); + +}}} // namespace at::native::detail diff --git a/voice_bridge/torch/include/ATen/native/cuda/CuFFTUtils.h b/voice_bridge/torch/include/ATen/native/cuda/CuFFTUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..4b02f914d7e20ff914e248d203be3f9434bacb3b --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/CuFFTUtils.h @@ -0,0 +1,73 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +namespace at { namespace native { + +// This means that max dim is 3 + 2 = 5 with batch dimension and possible +// complex dimension +constexpr int max_rank = 3; + +static inline std::string _cudaGetErrorEnum(cufftResult error) +{ + switch (error) + { + case CUFFT_SUCCESS: + return "CUFFT_SUCCESS"; + case CUFFT_INVALID_PLAN: + return "CUFFT_INVALID_PLAN"; + case CUFFT_ALLOC_FAILED: + return "CUFFT_ALLOC_FAILED"; + case CUFFT_INVALID_TYPE: + return "CUFFT_INVALID_TYPE"; + case CUFFT_INVALID_VALUE: + return "CUFFT_INVALID_VALUE"; + case CUFFT_INTERNAL_ERROR: + return "CUFFT_INTERNAL_ERROR"; + case CUFFT_EXEC_FAILED: + return "CUFFT_EXEC_FAILED"; + case CUFFT_SETUP_FAILED: + return "CUFFT_SETUP_FAILED"; + case CUFFT_INVALID_SIZE: + return "CUFFT_INVALID_SIZE"; + case CUFFT_UNALIGNED_DATA: + return "CUFFT_UNALIGNED_DATA"; + case CUFFT_INCOMPLETE_PARAMETER_LIST: + return "CUFFT_INCOMPLETE_PARAMETER_LIST"; + case CUFFT_INVALID_DEVICE: + return "CUFFT_INVALID_DEVICE"; + case CUFFT_PARSE_ERROR: + return "CUFFT_PARSE_ERROR"; + case CUFFT_NO_WORKSPACE: + return "CUFFT_NO_WORKSPACE"; + case CUFFT_NOT_IMPLEMENTED: + return "CUFFT_NOT_IMPLEMENTED"; +#if !defined(USE_ROCM) + case CUFFT_LICENSE_ERROR: + return "CUFFT_LICENSE_ERROR"; +#endif + case CUFFT_NOT_SUPPORTED: + return "CUFFT_NOT_SUPPORTED"; + default: + std::ostringstream ss; + ss << "unknown error " << error; + return ss.str(); + } +} + +static inline void CUFFT_CHECK(cufftResult error) +{ + if (error != CUFFT_SUCCESS) { + std::ostringstream ss; + ss << "cuFFT error: " << _cudaGetErrorEnum(error); + AT_ERROR(ss.str()); + } +} + +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/DeviceSqrt.cuh b/voice_bridge/torch/include/ATen/native/cuda/DeviceSqrt.cuh new file mode 100644 index 0000000000000000000000000000000000000000..38a7804015be1822f4012f74319a459daeb5e885 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/DeviceSqrt.cuh @@ -0,0 +1,25 @@ +#pragma once + +namespace at { namespace native { +#if defined(USE_ROCM) +// take these out when ROCm implements std:: math functions +#include +template +static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val); + +template <> +__forceinline__ __device__ float device_sqrt(float val) { + return ::sqrtf(val); +} + +template <> +__forceinline__ __device__ double device_sqrt(double val) { + return ::sqrt(val); +} +#else +template +__forceinline__ __device__ double device_sqrt(scalar_t val) { + return std::sqrt(val); +} +#endif +}} diff --git a/voice_bridge/torch/include/ATen/native/cuda/DistributionTemplates.h b/voice_bridge/torch/include/ATen/native/cuda/DistributionTemplates.h new file mode 100644 index 0000000000000000000000000000000000000000..6a096b42f719189999bdc01767fcbcdfbb23c9ae --- /dev/null +++ 
b/voice_bridge/torch/include/ATen/native/cuda/DistributionTemplates.h @@ -0,0 +1,666 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +namespace { + +// launch bounds used for kernels utilizing TensorIterator +const uint32_t block_size_bound = 256; +const uint32_t grid_size_bound = 4; +// number of randoms given by distributions like curand_uniform4, curand_uniform2_double +// used in calculating philox offset. +const uint32_t curand4_engine_calls = 4; + +// utility function that calculates proper philox_offset +// for distributions utilizing TensorIterator. For distributions using +// TensorIterator, we are using a grid-stride loop with each +// thread yielding one element per thread. For the edge of the grid-stride +// loop, if the tensor size is large, the unroll loop will kick in and the float4 +// from curand4 will start getting utilized (for common tensor sizes, we end up +// using rand.x from each thread). Hence, the philox_offset is +// (number of elements per thread * number of engine calls), which makes +// sure that philox offset increment is not less than the number of randoms used +// in each thread. +std::tuple calc_execution_policy(int64_t total_elements) { + const uint64_t numel = static_cast(total_elements); + const uint32_t block_size = block_size_bound; + const uint32_t unroll = curand4_engine_calls; + dim3 dim_block(block_size); + dim3 grid((numel + block_size - 1) / block_size); + uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size; + grid.x = std::min( + static_cast(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm, + grid.x); + //number of times random will be generated per thread, to offset philox counter in thc random state + uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1) + * curand4_engine_calls; + return std::make_tuple(counter_offset, grid, dim_block); +} + +// grid stride loop kernel for distributions +template +C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound) +__global__ void distribution_elementwise_grid_stride_kernel(int numel, + PhiloxCudaState philox_args, + const dist_t dist_func, + const transform_t transform_func) { + auto seeds = at::cuda::philox::unpack(philox_args); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + curandStatePhilox4_32_10_t state; + curand_init(std::get<0>(seeds), + idx, + std::get<1>(seeds), + &state); + + int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) * + blockDim.x * gridDim.x * unroll_factor; + for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) { + auto rand = dist_func(&state); + #pragma unroll + for (int ii = 0; ii < unroll_factor; ii++) { + int li = linear_index + blockDim.x * gridDim.x * ii; + if (li < numel) { + transform_func(li, static_cast((&rand.x)[ii])); + } + } + __syncthreads(); + } +} + +/** + * distribution_nullary_kernel is analogous to gpu_kernel in + * ATen/native/cuda/Loops.cuh. Like gpu_kernel, it uses + * TensorIterator to launch a kernel. However, the differences are + * - it launches a grid-stride loop based kernel. The kernel is not + * generic like elementwise_kernel in Loops.cuh and is specialized + * for the distribution kernels here. 
+ * - For big size tensors, we can launch multiple kernels recursively + * (i.e. if (!iter.can_use_32bit_indexing())) and hence, the philox + * offset calculation is done in this function. + * + * FIXME: Can we specialize elementwise_kernel and launch_kernel in Loops.cuh + * to have grid-stride loop kernel and then use that to launch our distribution + * kernels? Note that we need a grid-stride loop kernel because, we found by testing + * that it achieves peak effective bandwidth. + */ +template +void distribution_nullary_kernel(at::TensorIteratorBase& iter, + RNG gen, + const dist_t& dist_func, + const transform_t transform_func) { + static_assert(unroll_factor >= 1, "unroll_factor must be >= 1."); + int64_t numel = iter.numel(); + if (numel == 0) { + return; + } + + auto execution_policy = calc_execution_policy(numel); + auto counter_offset = std::get<0>(execution_policy); + auto grid = std::get<1>(execution_policy); + auto block = std::get<2>(execution_policy); + PhiloxCudaState rng_engine_inputs; + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(gen->mutex_); + rng_engine_inputs = gen->philox_cuda_state(counter_offset); + } + + if (!iter.can_use_32bit_indexing()) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + distribution_nullary_kernel(sub_iter, + gen, dist_func, transform_func); + } + return; + } + + char* out_data = (char*)iter.data_ptr(0); + + auto stream = at::cuda::getCurrentCUDAStream(); + if (iter.is_trivial_1d()) { + auto strides = iter.get_inner_strides(); + int stride0 = strides[0]; + distribution_elementwise_grid_stride_kernel<<>>( + numel, + rng_engine_inputs, + dist_func, + [=]__device__(int idx, accscalar_t rand) { + scalar_t* out = (scalar_t*)&out_data[stride0 * idx]; + *out = transform_func(rand); + } + ); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + auto offset_calc = make_offset_calculator<1>(iter); + distribution_elementwise_grid_stride_kernel<<>>( + numel, + rng_engine_inputs, + dist_func, + [=]__device__(int idx, accscalar_t rand) { + auto offsets = offset_calc.get(idx); + scalar_t* out = (scalar_t*)&out_data[offsets[0]]; + *out = transform_func(rand); + } + ); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } +} + +// Binary kernel +template +__global__ void distribution_binary_elementwise_kernel( + int numel, + func_t f, + PhiloxCudaState philox_args, + typename function_traits::result_type *output_data, + const typename function_traits::template arg<1>::type *input_data_1, + const typename function_traits::template arg<2>::type *input_data_2, + inp_offset_calc_t inp_calc, + out_offset_calc_t out_calc) { + auto seeds = at::cuda::philox::unpack(philox_args); + + using input_t_1 = typename function_traits::template arg<1>::type; + using input_t_2 = typename function_traits::template arg<2>::type; + + input_t_1 inputs_1[thread_work_size()]; + input_t_2 inputs_2[thread_work_size()]; + + int base_index = block_work_size() * blockIdx.x; + int remaining = std::min(numel - base_index, block_work_size()); + + curandStatePhilox4_32_10_t state; + curand_init(std::get<0>(seeds), + blockIdx.x * blockDim.x + threadIdx.x, + std::get<1>(seeds), + &state); + + // load data into registers + int thread_idx = threadIdx.x; + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (thread_idx >= remaining) { + break; + } + int input_idx = thread_idx + base_index; + auto offsets = inp_calc.get(input_idx); + inputs_1[i] = input_data_1[offsets[0]]; + inputs_2[i] = input_data_2[offsets[1]]; + + thread_idx += num_threads(); + } + + // 
compute and store + thread_idx = threadIdx.x; + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (thread_idx >= remaining) { + break; + } + int input_idx = thread_idx + base_index; + auto offsets = out_calc.get(input_idx); + output_data[offsets[0]] = f(state, inputs_1[i], inputs_2[i]); + thread_idx += num_threads(); + } +} + +template +void distribution_binary_kernel(TensorIteratorBase &iter, PhiloxCudaState philox_args, const func_t &f) { + static_assert(std::is_same::template arg<0>::type, curandStatePhilox4_32_10_t&>::value, "the first argument of functor must be curandStatePhilox4_32_10_t"); + using input_t_1 = typename function_traits::template arg<1>::type; + using input_t_2 = typename function_traits::template arg<2>::type; + using output_t = typename function_traits::result_type; + + if (!iter.can_use_32bit_indexing()) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + distribution_binary_kernel(sub_iter, philox_args, f); + } + return; + } + + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(iter.can_use_32bit_indexing()); + + int64_t numel = iter.numel(); + if (numel == 0) { + return; + } + + output_t *output_data = static_cast(iter.data_ptr(0)); + const input_t_1 *input_data_1 = static_cast(iter.data_ptr(1)); + const input_t_2 *input_data_2 = static_cast(iter.data_ptr(2)); + + int64_t grid = (numel + block_work_size() - 1) / block_work_size(); + auto stream = at::cuda::getCurrentCUDAStream(); + + if (iter.is_contiguous()) { + distribution_binary_elementwise_kernel<<>>( + numel, f, philox_args, output_data, input_data_1, input_data_2, + TrivialOffsetCalculator<2>(), TrivialOffsetCalculator<1>()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + distribution_binary_elementwise_kernel<<>>( + numel, f, philox_args, output_data, input_data_1, input_data_2, + make_input_offset_calculator<2>(iter), make_output_offset_calculator(iter)); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } +} + +} // namespace +}} // namespace at::native + + +namespace at { +namespace native { +namespace templates { +namespace cuda { + +// ==================================================== Random ======================================================== + +template +void random_from_to_kernel(TensorIteratorBase& iter, uint64_t range, int64_t base, RNG gen) { + AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_from_to_kernel_cuda", [&] { + if (( + std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value) && range >= 1ULL << 32) + { + // define lambda to mod with range and add base + auto random_func = [range, base] __device__ (uint64_t rand) { + return transformation::uniform_int_from_to(rand, range, base); + }; + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 { + ulonglong2 ret; + uint4 rand_val = curand4(state); + ret.x = (static_cast(rand_val.x) << 32) | rand_val.y; + ret.y = (static_cast(rand_val.z) << 32) | rand_val.w; + return ret; + }, + random_func); + } else { + auto random_func = [range, base] __device__ (uint32_t rand) { + return transformation::uniform_int_from_to(rand, range, base); + }; + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) { + return curand4(state); + }, + random_func); + } + }); +} + +// This is the special kernel to handle single specific case: +// from(inclusive) = std::numeric_limits::lowest() +// to(exclusive) = None (= std::numeric_limits::max() + 1) +template +void 
random_full_64_bits_range_kernel(TensorIteratorBase& iter, RNG gen) { + AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel_cuda", [&] { + if (std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value) { + auto random_func = [] __device__ (uint64_t rand) { + return transformation::uniform_int_full_range(rand); + }; + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 { + ulonglong2 ret; + uint4 rand_val = curand4(state); + ret.x = (static_cast(rand_val.x) << 32) | rand_val.y; + ret.y = (static_cast(rand_val.z) << 32) | rand_val.w; + return ret; + }, + random_func); + } else { + TORCH_CHECK(false, "random_full_64_bits_range_kernel_cuda handles only int64, double, float and bfloat16"); + } + }); +} + +template +struct RandomFromToKernel { + void operator()(TensorIteratorBase& iter, uint64_t range, int64_t base, c10::optional gen) { + random_from_to_kernel(iter, range, base, check_generator(gen)); + } + void operator()(TensorIteratorBase& iter, c10::optional gen) { + random_full_64_bits_range_kernel(iter, check_generator(gen)); + } +}; + +template +void random_kernel(TensorIteratorBase& iter, RNG gen) { + AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "random_kernel_cuda", [&] { + if (std::is_same::value || std::is_same::value) { + auto random_func = [] __device__ (uint64_t rand) { + return transformation::uniform_int(rand); + }; + distribution_nullary_kernel(iter, gen, + [] __device__ (curandStatePhilox4_32_10_t* state) -> ulonglong2 { + ulonglong2 ret; + uint4 rand_val = curand4(state); + ret.x = (static_cast(rand_val.x) << 32) | rand_val.y; + ret.y = (static_cast(rand_val.z) << 32) | rand_val.w; + return ret; + }, + random_func); + } else { + auto random_func = [] __device__ (uint32_t rand) { + return transformation::uniform_int(rand); + }; + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) { + return curand4(state); + }, + random_func); + } + }); +} + +template +struct RandomKernel { + void operator()(TensorIteratorBase& iter, RNG gen) { + random_kernel(iter, gen); + } +}; + +// ==================================================================================================================== + +template +void uniform_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) { + if (std::is_same::value) { + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); }, + transform); + } else { + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); }, + transform); + } +} + +template +void normal_and_transform(TensorIteratorBase& iter, RNG gen, transform_t transform) { + if (std::is_same::value) { + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal2_double(state); }, + transform); + } else { + distribution_nullary_kernel(iter, + gen, + [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_normal4(state); }, + transform); + } +} + +// ==================================================== Normal ======================================================== + +template +void normal_kernel(const TensorBase &self, double mean_, double std_, RNG gen) { + auto iter = TensorIterator::borrowing_nullary_op(self); + 
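Stepping back to `calc_execution_policy` from earlier in this header: a worked instance of its `counter_offset` formula, assuming `block_size = 256`, `unroll = curand4_engine_calls = 4`, and an assumed device cap of 432 resident blocks (e.g. 8 blocks per SM on a 54-SM part):

```cpp
#include <cstdint>

constexpr std::uint64_t numel = 10'000'000;
constexpr std::uint64_t block_size = 256, grid_x = 432, unroll = 4;
// Each grid-stride pass covers block_size * grid_x * unroll elements.
constexpr std::uint64_t per_pass = block_size * grid_x * unroll;  // 442'368
constexpr std::uint64_t counter_offset =
    ((numel - 1) / per_pass + 1) * 4;  // 23 passes * curand4_engine_calls
static_assert(counter_offset == 92, "philox advances by 92 per thread");
```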
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "normal_kernel_cuda", [&] { + using accscalar_t = at::acc_type; + auto mean = static_cast(mean_); + auto std = static_cast(std_); + // define lambda to multiply std and add mean + auto normal_func = [mean, std] __device__ (accscalar_t rand) { + return static_cast(transformation::normal(rand, mean, std)); + }; + normal_and_transform(iter, gen, normal_func); + }); +} + +template +struct NormalKernel { + void operator()(const TensorBase &self, double mean, double std, c10::optional gen) { + normal_kernel(self, mean, std, check_generator(gen)); + } +}; + +// ==================================================== Uniform ======================================================== + +template +void uniform_kernel(TensorIteratorBase& iter, double from_, double to_, RNG gen) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_kernel_cuda", [&] { + auto from = static_cast(from_); + auto to = static_cast(to_); + using accscalar_t = at::acc_type; + auto range = static_cast(to-from); + // define lambda to reverse bounds, multiply 'range' and add 'from_' + auto uniform_func = [range, from] __device__ (accscalar_t rand) { + // reverse the bounds of curand4 from (0, 1] to [0, 1) + // Note that this method is from legacy THCTensorRandom and is likely to give + // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and + // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s. + // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706 + auto reverse_bound_rand = rand == static_cast(1.0) ? static_cast(0.0) : rand; + return static_cast(reverse_bound_rand * range + from); + }; + uniform_and_transform(iter, gen, uniform_func); + }); +} + +template +struct UniformKernel { + void operator()(TensorIteratorBase& iter, double from, double to, c10::optional gen) { + uniform_kernel(iter, from, to, check_generator(gen)); + } +}; + +// ================================================== LogNormal ======================================================= + +template +void log_normal_kernel(TensorIteratorBase& iter, double mean_, double std_, RNG gen) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal_cuda", [&] { + using accscalar_t = at::acc_type; + auto mean = static_cast(mean_); + auto std = static_cast(std_); + // define lambda for log_normal transformation + auto log_normal_func = [mean, std] __device__ (accscalar_t rand) { + return static_cast(transformation::log_normal(transformation::normal(rand, mean, std))); + }; + normal_and_transform(iter, gen, log_normal_func); + }); +} + +template +struct LogNormalKernel { + void operator()(TensorIteratorBase& iter, double mean, double std, c10::optional gen) { + log_normal_kernel(iter, mean, std, check_generator(gen)); + } +}; + +// =================================================== Geometric ====================================================== + +template +void geometric_kernel(TensorIteratorBase& iter, double p, RNG gen) { + AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_cuda", [&] { + using accscalar_t = at::DiscreteDistributionType::type; + // define lambda for geometric transformation + auto geometric_func = [p] __device__ (accscalar_t rand) { + return static_cast(transformation::geometric(rand, p)); + }; + uniform_and_transform(iter, gen, 
geometric_func); + }); +} + +template +struct GeometricKernel { + void operator()(TensorIteratorBase& iter, double p, c10::optional gen) { + geometric_kernel(iter, p, check_generator(gen)); + } +}; + +// ================================================== Exponential ===================================================== + +template +void exponential_kernel(TensorIteratorBase& iter, double lambda_, RNG gen) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_cuda", [&] { + using accscalar_t = at::acc_type; + auto lambda = static_cast(lambda_); + // define lambda for exponential transformation + auto exponential_func = [lambda] __device__ (accscalar_t rand) { + return static_cast(transformation::exponential(rand, lambda)); + }; + uniform_and_transform(iter, gen, exponential_func); + }); +} + +template +struct ExponentialKernel { + void operator()(TensorIteratorBase& iter, double lambda, c10::optional gen) { + exponential_kernel(iter, lambda, check_generator(gen)); + } +}; + +// ==================================================== Cauchy ======================================================== + +template +void cauchy_kernel(TensorIteratorBase& iter, double median_, double sigma_, RNG gen) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_cuda", [&] { + using accscalar_t = at::acc_type; + auto median = static_cast(median_); + auto sigma = static_cast(sigma_); + // define lambda for cauchy transformation + auto cauchy_func = [median, sigma] __device__ (accscalar_t rand) { + return static_cast(transformation::cauchy(rand, median, sigma)); + }; + uniform_and_transform(iter, gen, cauchy_func); + }); +} + +template +struct CauchyKernel { + void operator()(TensorIteratorBase& iter, double median, double sigma, c10::optional gen) { + cauchy_kernel(iter, median, sigma, check_generator(gen)); + } +}; + +// ==================================================== Bernoulli ===================================================== + +template +void bernoulli_tensor_cuda_kernel( + const TensorBase &ret, const at::TensorBase &p, + PhiloxCudaState philox_args) { + auto functor = [philox_args] __device__( + int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4, + const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) { + auto seeds = at::cuda::philox::unpack(philox_args); + curandStatePhilox4_32_10_t state; + curand_init(std::get<0>(seeds), + blockIdx.x * blockDim.x + threadIdx.x, + std::get<1>(seeds), + &state); + + // See Note [Register spilling in curand call for CUDA < 10] + float4 rand = curand_uniform4(&state); + switch (n) { + case 4: { + CUDA_KERNEL_ASSERT(0 <= p4 && p4 <= 1); + v4 = static_cast(rand.w <= p4); + // fallthrough + } + case 3: { + CUDA_KERNEL_ASSERT(0 <= p3 && p3 <= 1); + v3 = static_cast(rand.z <= p3); + // fallthrough + } + case 2: { + CUDA_KERNEL_ASSERT(0 <= p2 && p2 <= 1); + v2 = static_cast(rand.y <= p2); + // fallthrough + } + case 1: { + CUDA_KERNEL_ASSERT(0 <= p1 && p1 <= 1); + v1 = static_cast(rand.x <= p1); + } + } + }; + // The template argument `4` below indicates that we want to operate on four + // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details. 
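The switch in the bernoulli functor above relies on deliberate fallthrough so that a single `curand_uniform4` call can serve a tail of one to four elements. The same idiom in isolation, with a hypothetical helper name:

```cpp
// Consume only as many of the four generated floats as the tail needs.
__device__ void write_bernoulli_tail(float* dst, int n, float4 rand, float p) {
  switch (n) {  // n in [1, 4]
    case 4: dst[3] = static_cast<float>(rand.w <= p);  // fallthrough
    case 3: dst[2] = static_cast<float>(rand.z <= p);  // fallthrough
    case 2: dst[1] = static_cast<float>(rand.y <= p);  // fallthrough
    case 1: dst[0] = static_cast<float>(rand.x <= p);
  }
}
```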
+ at::cuda::CUDA_tensor_apply2(ret, p, functor); +} + +template +void bernoulli_kernel(const TensorBase &self, const TensorBase &p_, RNG gen) { + PhiloxCudaState rng_engine_inputs; + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(gen->mutex_); + rng_engine_inputs = gen->philox_cuda_state(10); + } + TORCH_CHECK(at::isFloatingType(p_.scalar_type()), "expected probabilities tensor to have floating type, got ", p_.scalar_type()); + // cast probabilities tensor to double for double `self` tensor, and to `float` for everything else + const auto p_type = self.dtype() == at::kDouble ? at::kDouble : at::kFloat; + auto p_cuda = p_.to(TensorOptions().device(self.device()).dtype(p_type)); + auto p = expand_inplace(self, p_cuda); + AT_DISPATCH_ALL_TYPES_AND3( + at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] { + if (std::is_same::value) { + return bernoulli_tensor_cuda_kernel(self, *p, rng_engine_inputs); + } else { + return bernoulli_tensor_cuda_kernel(self, *p, rng_engine_inputs); + } + }); +} + +template +void bernoulli_kernel(TensorIteratorBase& iter, double p, RNG gen) { + AT_DISPATCH_ALL_TYPES_AND3( + at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool, iter.dtype(), "bernoulli_scalar_cuda_", [&] { + using accscalar_t = at::DiscreteDistributionType::type; + // define lambda for bernoulli transformation + auto bernoulli_func = [p] __device__ (accscalar_t rand) { + return static_cast(transformation::bernoulli(rand, p)); + }; + uniform_and_transform(iter, gen, bernoulli_func); + }); +} + +template +struct BernoulliKernel { + void operator()(TensorIteratorBase& iter, double p, c10::optional gen) { + bernoulli_kernel(iter, p, check_generator(gen)); + } + void operator()(const TensorBase &self, const TensorBase &p_, c10::optional gen) { + bernoulli_kernel(self, p_, check_generator(gen)); + } +}; + +}}}} diff --git a/voice_bridge/torch/include/ATen/native/cuda/Distributions.h b/voice_bridge/torch/include/ATen/native/cuda/Distributions.h new file mode 100644 index 0000000000000000000000000000000000000000..1a34fdfdf31494faab439544578be8aaf950dc32 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Distributions.h @@ -0,0 +1,25 @@ +#pragma once + +namespace at { +struct CUDAGeneratorImpl; +struct TensorIteratorBase; +class TensorBase; + +namespace native { + +void launch_poisson_cuda_kernel( + const TensorBase &ret, const TensorBase &lambda, CUDAGeneratorImpl *gen); + +void launch_gamma_kernel( + const TensorBase &ret, const TensorBase &alpha, CUDAGeneratorImpl *gen); + +void launch_binomial_cuda_kernel( + TensorIteratorBase &iter, CUDAGeneratorImpl *gen); + +void launch_dirichlet_kernel(TensorIteratorBase &iter); + +void launch_standard_gamma_grad_kernel(TensorIteratorBase &iter); + +void launch_dirichlet_grad_kernel(TensorIteratorBase &iter); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh b/voice_bridge/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh new file mode 100644 index 0000000000000000000000000000000000000000..0d8d45c1defb90af4da7d2c39d914d3d88ddafc3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/EmbeddingBackwardKernel.cuh @@ -0,0 +1,22 @@ +#pragma once +#include +#include +#include +#include + +namespace at { +namespace native { + +Tensor embedding_backward_cuda_kernel( + const Tensor &grad, + const Tensor &orig_indices, + const Tensor &sorted_indices, + const 
Tensor &count, + int64_t num_weights, + int padding_idx = -1, + bool mode_mean = false, + const Tensor &offset2bag = Tensor(), + const Tensor &bag_size = Tensor(), + const Tensor &per_sample_weights = Tensor()); + +}} diff --git a/voice_bridge/torch/include/ATen/native/cuda/ForeachFunctors.cuh b/voice_bridge/torch/include/ATen/native/cuda/ForeachFunctors.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a72c33ac6960e868b22d589c708dc4fb9577ebe7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/ForeachFunctors.cuh @@ -0,0 +1,437 @@ +#pragma once +#include +#include +#include + +namespace at { namespace native { + +namespace { + +// Initializes args and checks if all args are aligned +template +__device__ bool init_args( + T** args, + TensorListMetadata& tl, + int chunk_idx, + int chunk_size, + int tensor_loc) { + bool all_aligned = true; + for (int i = 0; i < depth; i++) { + args[i] = (T*)tl.addresses[i][tensor_loc]; + args[i] += chunk_idx * chunk_size; + + if (!is_aligned(args[i])) { + all_aligned = false; + } + } + return all_aligned; +} + +// Initializes args and checks if all args are aligned +template +__device__ bool init_args( + T** args, + TensorListScalarListMetadata& tl, + int chunk_idx, + int chunk_size, + int tensor_loc) { + bool all_aligned = true; + for (int i = 0; i < depth; i++) { + args[i] = (T*)tl.addresses[i][tensor_loc]; + args[i] += chunk_idx * chunk_size; + + if (!is_aligned(args[i])) { + all_aligned = false; + } + } + return all_aligned; +} + +template +__device__ bool init_args( + T** args, + FusedOptimizerTensorListMetadata& tl, + int chunk_idx, + int chunk_size, + int tensor_loc) { + bool all_aligned = true; + for (int i = 0; i < depth; i++) { + args[i] = (T*)tl.addresses[i][tensor_loc]; + args[i] += chunk_idx * chunk_size; + + if (!is_aligned(args[i])) { + all_aligned = false; + } + } + return all_aligned; +} + +template +__device__ void load_args(T r_args[][kILP], T** args, int i_start, int chunk_size, int n) { +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + for (int r_index = 0; r_index < depth; r_index++) { + r_args[r_index][ii] = 0; + if(i < n && i < chunk_size) { + r_args[r_index][ii] = args[r_index][i]; + } + } + } +} + +template +__device__ void store_args(T* dst, T* src, int i_start, int chunk_size, int n) { +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if(i < n && i < chunk_size) + dst[i] = src[ii]; + } +} + +template +__device__ __forceinline__ void binary_op_scalar( + T r_args[][kILP], + T** args, + opmath_t scalar, + int n, + int chunk_size, + bool all_aligned, + Op op) { + // to make things simple, we put aligned case in a different code path + if(n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) { + for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { + // load + load_store(r_args[0], args[0], 0, i_start); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(op(static_cast(r_args[0][ii]), + static_cast(scalar))); + } + // store + load_store(args[res_arg_index], r_args[0], i_start, 0); + } + } + else { + for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { + // Regardless if depth is 1 (for inplace) or 2 (for out of place), r_args has depth 1 + load_args<1>(r_args, args, i_start, chunk_size, n); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = 
static_cast(op(static_cast(r_args[0][ii]), + static_cast(scalar))); + } + store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n); + } + } +} + +template +__device__ __forceinline__ void pointwise_op_scalar( + T r_args[][kILP], + T** args, + opmath_t scalar, + int n, + int chunk_size, + bool all_aligned, + Op op) { + // to make things simple, we put aligned case in a different code path + if(n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) { + for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { + // load + load_store(r_args[0], args[0], 0, i_start); + load_store(r_args[1], args[1], 0, i_start); + load_store(r_args[2], args[2], 0, i_start); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(static_cast(r_args[0][ii]) + + scalar * op(static_cast(r_args[1][ii]), + static_cast(r_args[2][ii]))); + } + // store + load_store(args[res_arg_index], r_args[0], i_start, 0); + } + } + else { + for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { + // Regardless if depth is 3 (for inplace) or 4 (for out of place), r_args has depth 3 + load_args<3>(r_args, args, i_start, chunk_size, n); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(static_cast(r_args[0][ii]) + + scalar * op(static_cast(r_args[1][ii]), + static_cast(r_args[2][ii]))); + } + store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n); + } + } +} + +// +// Binary Functors +// +template +struct BinaryOpScalarFunctor { + using opmath_t = at::opmath_type; + template __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListMetadata& tl, + Op op, + opmath_t scalar) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + n -= chunk_idx * chunk_size; + T r_args[r_args_depth][kILP]; + + binary_op_scalar(r_args, args, scalar, n, chunk_size, all_aligned, op); + } +}; + +template +struct BinaryOpScalarListFunctor { + using opmath_t = at::opmath_type; + template __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListScalarListMetadata& tl, + Op op) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + opmath_t scalar = tl.scalar_vals[tensor_loc]; + n -= chunk_idx * chunk_size; + T r_args[r_args_depth][kILP]; + + binary_op_scalar(r_args, args, scalar, n, chunk_size, all_aligned, op); + } +}; + +template +struct BinaryOpListAlphaFunctor { + using opmath_t = at::opmath_type; + template __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListMetadata& tl, + Op op, + opmath_t alpha) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + n -= chunk_idx * chunk_size; + T r_args[r_args_depth][kILP]; + + // to make things simple, we put aligned case in a different code path + if(n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) { + for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { + // load + 
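+      // In this aligned fast path each thread moves kILP contiguous elements
+      // per iteration with a single vectorized load_store, so iteration
+      // i_start covers elements [i_start * kILP, i_start * kILP + kILP);
+      // the unaligned branch below falls back to element-wise
+      // load_args/store_args with per-element bounds checks.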
load_store(r_args[0], args[0], 0, i_start); + load_store(r_args[1], args[1], 0, i_start); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(op(static_cast(r_args[0][ii]), + alpha * static_cast(r_args[1][ii]))); + } + // store + load_store(args[res_arg_index], r_args[0], i_start , 0); + } + } + else { + for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { + load_args(r_args, args, i_start, chunk_size, n); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(op(static_cast(r_args[0][ii]), + alpha * static_cast(r_args[1][ii]))); + } + store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n); + } + } + } +}; + +// +// Unary Functors +// + +template +struct ZeroFunctor { + __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListMetadata<1>& tl) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + n -= chunk_idx * chunk_size; + T r_args[r_args_depth][kILP]; + + // to make things simple, we put aligned case in a different code path + if(n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) { + for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = 0; + } + // store + load_store(args[0], r_args[0], i_start, 0); + } + } + else { + for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = 0; + } + store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n); + } + } + } +}; + +template +struct UnaryOpFunctor { + using opmath_t = at::opmath_type; + template __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListMetadata& tl, + Op op) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + n -= chunk_idx * chunk_size; + T r_args[r_args_depth][kILP]; + + // to make things simple, we put aligned case in a different code path + if(n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) { + for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { + // load + load_store(r_args[0], args[0], 0, i_start); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(op(static_cast(r_args[0][ii]))); + } + // store + load_store(args[res_arg_index], r_args[0], i_start, 0); + } + } + else { + for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { + load_args(r_args, args, i_start, chunk_size, n); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(op(static_cast(r_args[0][ii]))); + } + store_args(args[res_arg_index], r_args[0], i_start, chunk_size, n); + } + } + } +}; + +// +// Pointwise Functors +// + +template +struct PointwiseOpScalarFunctor { + using opmath_t = at::opmath_type; + template __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListMetadata& tl, + Op op, + opmath_t scalar) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = 
tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + n -= chunk_idx * chunk_size; + T r_args[r_args_depth][kILP]; + + pointwise_op_scalar(r_args, args, scalar, n, chunk_size, all_aligned, op); + } +}; + +template +struct PointwiseOpScalarListFunctor { + using opmath_t = at::opmath_type; + template __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListScalarListMetadata& tl, + Op op) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + opmath_t scalar = tl.scalar_vals[tensor_loc]; + n -= chunk_idx * chunk_size; + T r_args[r_args_depth][kILP]; + + pointwise_op_scalar(r_args, args, scalar, n, chunk_size, all_aligned, op); + } +}; + +template +struct PointwiseOpListFunctor { + using opmath_t = at::opmath_type; + template __device__ __forceinline__ void operator() ( + int chunk_size, + TensorListMetadata& tl, + Op op) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + T* args[depth]; + bool all_aligned = init_args(args, tl, chunk_idx, chunk_size, tensor_loc); + n -= chunk_idx * chunk_size; + T r_args[depth - 1][kILP]; + + // to make things simple, we put aligned case in a different code path + if(n % kILP == 0 && chunk_size % kILP == 0 && all_aligned) { + for(int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { + // load + load_store(r_args[0], args[0], 0, i_start); + load_store(r_args[1], args[1], 0, i_start); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(op(static_cast(r_args[0][ii]), + static_cast(r_args[1][ii]))); + } + // store + load_store(args[2], r_args[0], i_start , 0); + } + } + else { + for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { + load_args(r_args, args, i_start, chunk_size, n); +#pragma unroll + for(int ii = 0; ii < kILP; ii++) { + r_args[0][ii] = static_cast(op(static_cast(r_args[0][ii]), + static_cast(r_args[1][ii]))); + } + store_args(args[2], r_args[0], i_start, chunk_size, n); + } + } + } +}; + +} // namespace +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/GridSampler.cuh b/voice_bridge/torch/include/ATen/native/cuda/GridSampler.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a0e3b16c3a43ac28fc9091c80f0b2650526a551d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/GridSampler.cuh @@ -0,0 +1,321 @@ +#pragma once +#include +#include + +namespace at { namespace native { + +using detail::GridSamplerInterpolation; +using detail::GridSamplerPadding; + +// Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value, +// where we view each pixel as an area between (idx - 0.5) and (idx + 0.5). 
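+// As a concrete example, for size = 4:
+//   align_corners=true : -1 -> 0.0, 0 -> 1.5, +1 -> 3.0
+//   align_corners=false: -1 -> -0.5, 0 -> 1.5, +1 -> 3.5
+// The two conventions are spelled out below.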
+// if align_corners: -1 and +1 get sent to the centers of the corner pixels +// -1 --> 0 +// +1 --> (size - 1) +// scale_factor = (size - 1) / 2 +// if not align_corners: -1 and +1 get sent to the image edges +// -1 --> -0.5 +// +1 --> (size - 1) + 0.5 == size - 0.5 +// scale_factor = size / 2 +template +static __forceinline__ __device__ +scalar_t grid_sampler_unnormalize(scalar_t coord, int size, bool align_corners) { + if (align_corners) { + // unnormalize coord from [-1, 1] to [0, size - 1] + return ((coord + 1.f) / 2) * (size - 1); + } else { + // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] + return ((coord + 1.f) * size - 1) / 2; + } +} + +// grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize +// except that it also returns the `d output / d input` via pointer argument +// `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static __forceinline__ __device__ +scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int size, + bool align_corners, scalar_t *grad_in) { + if (align_corners) { + // unnormalize coord from [-1, 1] to [0, size - 1] + *grad_in = static_cast(size - 1) / 2; + return ((coord + 1.f) / 2) * (size - 1); + } else { + // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] + *grad_in = static_cast(size) / 2; + return ((coord + 1.f) * size - 1) / 2; + } +} + +// Clips coordinates to between 0 and clip_limit - 1 +template +static __forceinline__ __device__ +scalar_t clip_coordinates(scalar_t in, int clip_limit) { + return ::min(static_cast(clip_limit - 1), ::max(in, static_cast(0))); +} + +// clip_coordinates_set_grad works similarly to clip_coordinates except that +// it also returns the `d output / d input` via pointer argument `grad_in`. +// This is useful in the backward pass of grid_sampler. +template +static __forceinline__ __device__ +scalar_t clip_coordinates_set_grad(scalar_t in, int clip_limit, scalar_t *grad_in) { + // Note that it is important for the gradient calculation that borders + // are considered out of bounds. + if (in <= static_cast(0)) { + *grad_in = static_cast(0); + return static_cast(0); + } else { + scalar_t max = static_cast(clip_limit - 1); + if (in >= max) { + *grad_in = static_cast(0); + return max; + } else { + *grad_in = static_cast(1); + return in; + } + } +} + +// Reflects coordinates until they fall between low and high (inclusive). +// The bounds are passed as twice their value so that half-integer values +// can be represented as ints. +template +static __forceinline__ __device__ +scalar_t reflect_coordinates(scalar_t in, int twice_low, int twice_high) { + if (twice_low == twice_high) { + return static_cast(0); + } + scalar_t min = static_cast(twice_low) / 2; + scalar_t span = static_cast(twice_high - twice_low) / 2; + in = ::fabs(in - min); + // `fmod` returns same sign as `in`, which is positive after the `fabs` above. + scalar_t extra = ::fmod(in, span); + int flips = static_cast(::floor(in / span)); + if (flips % 2 == 0) { + return extra + min; + } else { + return span - extra + min; + } +} + +// reflect_coordinates_set_grad works similarly to reflect_coordinates except +// that it also returns the `d output / d input` via pointer argument +// `grad_in`. +// This is useful in the backward pass of grid_sampler. 
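+// A small worked example of the reflection logic: with twice_low = 0 and
+// twice_high = 6 (i.e. bounds [0, 3]), an input of 4 gives
+// extra = fmod(4, 3) = 1 and flips = 1, so the result is
+// span - extra + min = 3 - 1 + 0 = 2; the set_grad variant below additionally
+// reports `d output / d input` = -1 here, since one reflection flips the
+// direction of travel.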
+template +static __forceinline__ __device__ +scalar_t reflect_coordinates_set_grad(scalar_t in, int twice_low, int twice_high, + scalar_t *grad_in) { + if (twice_low == twice_high) { + *grad_in = static_cast(0); + return static_cast(0); + } + int grad_in_mult_; + scalar_t min = static_cast(twice_low) / 2; + scalar_t span = static_cast(twice_high - twice_low) / 2; + in = in - min; + if (in < static_cast(0)) { + grad_in_mult_ = -1; + in = -in; + } else { + grad_in_mult_ = 1; + } + // `fmod` returns same sign as `in`, which is positive after the `if` above. + scalar_t extra = ::fmod(in, span); + int flips = static_cast(::floor(in / span)); + if (flips % 2 == 0) { + *grad_in = static_cast(grad_in_mult_); + return extra + min; + } else { + *grad_in = static_cast(-grad_in_mult_); + return span - extra + min; + } +} + +template +static __forceinline__ __device__ +scalar_t safe_downgrade_to_int_range(scalar_t x){ + // -100.0 does not have special meaning. This is just to make sure + // it's not within_bounds_2d or within_bounds_3d, and does not cause + // undefined behavior. See #35506. + if (x > INT_MAX-1 || x < INT_MIN || !::isfinite(static_cast(x))) + return static_cast(-100.0); + return x; +} + +template +static __forceinline__ __device__ +scalar_t compute_coordinates(scalar_t coord, int size, + GridSamplerPadding padding_mode, + bool align_corners) { + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates(coord, 0, 2*(size - 1)); + } else { + coord = reflect_coordinates(coord, -1, 2*size - 1); + } + // clip coordinates to image borders + coord = clip_coordinates(coord, size); + } + + coord = safe_downgrade_to_int_range(coord); + return coord; +} + +// Computes the pixel source index value for a grid coordinate +template +static __forceinline__ __device__ +scalar_t grid_sampler_compute_source_index( + scalar_t coord, + int size, + GridSamplerPadding padding_mode, + bool align_corners) { + coord = grid_sampler_unnormalize(coord, size, align_corners); + coord = compute_coordinates(coord, size, padding_mode, align_corners); + return coord; +} + +// grid_sampler_compute_source_index_set_grad works similarly to +// grid_sampler_compute_source_index except that it also returns the +// `d output / d input` via pointer argument `grad_in`. +// This is useful in the backward pass of grid_sampler. 
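+// For instance, with Border padding the computation chains
+// unnormalize -> clip, and the returned gradient is the product
+// grad_unnormalize * grad_clip, so a coordinate clamped at a border
+// correctly reports a zero gradient.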
+template +static __forceinline__ __device__ +scalar_t grid_sampler_compute_source_index_set_grad( + scalar_t coord, + int size, + GridSamplerPadding padding_mode, + bool align_corners, + scalar_t *grad_in) { + scalar_t grad_clip, grad_refl; + coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in); + if (padding_mode == GridSamplerPadding::Border) { + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_clip; + } else if (padding_mode == GridSamplerPadding::Reflection) { + // reflect coordinates by image borders + if (align_corners) { + coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl); + } else { + coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl); + } + // clip coordinates to image borders + coord = clip_coordinates_set_grad(coord, size, &grad_clip); + *grad_in = (*grad_in) * grad_refl * grad_clip; + } + + coord = safe_downgrade_to_int_range(coord); + return coord; +} + +static __forceinline__ __device__ +bool within_bounds_2d(int h, int w, int H, int W) { + return h >= 0 && h < H && w >= 0 && w < W; +} + +static __forceinline__ __device__ +bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { + return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; +} + +template +static __forceinline__ __device__ +scalar_t get_value_bounded( + scalar_t *data, scalar_t x, scalar_t y, int W, int H, int sW, int sH, + GridSamplerPadding padding_mode, + bool align_corners) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int ix = static_cast(x); + int iy = static_cast(y); + + if (within_bounds_2d(iy, ix, H, W)) { + return data[iy * sH + ix * sW]; + } + return static_cast(0); +} + +template +static __forceinline__ __device__ +void safe_add_2d(scalar_t *data, int h, int w, + int sH, int sW, int H, int W, + scalar_t delta, + const index_t NC_offset, + const index_t memory_span) { + if (within_bounds_2d(h, w, H, W)) { + fastAtomicAdd(data, + NC_offset + h * sH + w * sW, + memory_span, + delta, + true); + } +} + +template +static __forceinline__ __device__ +void safe_add_3d(scalar_t *data, int d, int h, int w, + int sD, int sH, int sW, int D, int H, int W, + scalar_t delta, + const index_t NC_offset, + const index_t memory_span) { + if (within_bounds_3d(d, h, w, D, H, W)) { + fastAtomicAdd(data, + NC_offset + d * sD + h * sH + w * sW, + memory_span, + delta, + true); + } +} + +template +static __forceinline__ __device__ +void add_value_bounded( + scalar_t* data, scalar_t x, scalar_t y, int W, int H, int sW, int sH, + scalar_t delta, + GridSamplerPadding padding_mode, + bool align_corners, + const index_t NC_offset, + const index_t memory_span) { + + x = compute_coordinates(x, W, padding_mode, align_corners); + y = compute_coordinates(y, H, padding_mode, align_corners); + + int ix = static_cast(x); + int iy = static_cast(y); + + safe_add_2d(data, iy, ix, sH, sW, H, W, delta, NC_offset, memory_span); +} + +// Calculate the differential of the cubic convolution, i.e. 
`d coeff / d x` +template +static __forceinline__ __device__ +void get_cubic_coefficients_grad( + scalar_t coeffs[4], + scalar_t t) { + + // Must be the same as forward calculation in + // aten/src/ATen/native/cuda/UpSample.cuh:get_cubic_upsample_coefficients + scalar_t A = -0.75; + + scalar_t x; + x = -1 - t; // 1 < x = |-1 - tx| < 2 + coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A; + x = -t; // x = |0 - tx| <= 1 + coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 1 - t; // x = |1 - tx| <= 1 + coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x; + x = 2 - t; // 1 < x = |2 - tx| < 2 + coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A; +} + + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/GridSampler.h b/voice_bridge/torch/include/ATen/native/cuda/GridSampler.h new file mode 100644 index 0000000000000000000000000000000000000000..aace9c30b0a7e9d08de71c4baf1490d45ff6d36e --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/GridSampler.h @@ -0,0 +1,32 @@ +#pragma once +#include +#include + +namespace at { +class TensorBase; +} + +namespace at { +namespace native { + +void launch_grid_sampler_2d_forward_kernel( + const TensorBase &output, const TensorBase &input, const TensorBase &grid, + int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + +void launch_grid_sampler_3d_forward_kernel( + const TensorBase &output, const TensorBase &input, const TensorBase &grid, + int64_t interpolation_mode, int64_t padding_mode, bool align_corners); + +void launch_grid_sampler_2d_backward_kernel( + const TensorBase &grad_input, const TensorBase &grad_grid, + const TensorBase &grad_output, const TensorBase &input, + const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, + bool align_corners, std::array output_mask); + +void launch_grid_sampler_3d_backward_kernel( + const TensorBase &grad_input, const TensorBase &grad_grid, + const TensorBase &grad_output, const TensorBase &input, + const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, + bool align_corners, std::array output_mask); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/IndexKernel.h b/voice_bridge/torch/include/ATen/native/cuda/IndexKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..92aa9d340f7f38d9b1924767fab7284da874c3a4 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/IndexKernel.h @@ -0,0 +1,15 @@ +#pragma once +#include +#include + +namespace at { +struct TensorIteratorBase; +class TensorBase; +} + +namespace at { +namespace native { +void launch_masked_scatter_kernel( + const TensorBase &self, const TensorBase &mask, + const TensorBase &maskPrefixSum, const TensorBase &source); +}} diff --git a/voice_bridge/torch/include/ATen/native/cuda/JitLoops.cuh b/voice_bridge/torch/include/ATen/native/cuda/JitLoops.cuh new file mode 100644 index 0000000000000000000000000000000000000000..6f350c550ce93667bdf21b8b0c7d9798fdf5f15f --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/JitLoops.cuh @@ -0,0 +1,187 @@ +#pragma once + +#include + +#if AT_USE_JITERATOR() + +#include + +#include +#include +#include + +#include + +#include + +namespace at { +namespace native { + +/* Note [Jiterator] +The "jiterator" simply just-in-time compiles the same kernels that +Loops.cuh (and CUDALoops.cuh) usually build. This reduces build time, +build size, and initial CUDA context size. + +By default on non-Windows systems, it also caches compiled kernels in ~/.cache/torch/kernels. 
+This behavior is controlled with two environment variables:
+ - USE_PYTORCH_KERNEL_CACHE, if set to zero then this will disable all cache use
+ - PYTORCH_KERNEL_CACHE_PATH, if set specifies the folder to use for cached kernels
+
+The jiterator currently has some limitations, however. It cannot:
+ - handle math on complex datatypes
+ - handle kernels with scalar parameters
+
+These improvements will likely come soon.
+
+For examples of how to use the jiterator see the i1 and gcd kernel
+implementations, which pass jittable strings implementing their
+operations instead of the typical CUDA functors.
+
+To pass a runtime argument (similar to lambda captures in non-JIT kernels),
+we need to pass additional arguments to `jitted_gpu_kernel` by value.
+Currently only primitive C++ types used for computation are valid.
+The order of these extra arguments should be the same as the order in which
+they appear in the kernel's function signature. (Look at polygamma for an example.)
+
+NOTE: One big restriction is that these arguments must come after the
+arguments provided by TensorIterator. E.g., while capturing `n`, where
+`scalar_t x` and `scalar_t y` are provided by TensorIterator,
+* foo(scalar_t x, scalar_t y, int n) works!
+* foo(int n, scalar_t x, scalar_t y) doesn't work
+* foo(scalar_t x, int n, scalar_t y) doesn't work
+
+*/
+
+// Entrypoint for jitted GPU kernels.
+// Only handles elementwise unary and binary kernels with a
+// common dtype and a single output.
+// NOTE: this assumes the op's iterator has a common_dtype.
+// NOTE: We use std::tuple instead of a parameter pack
+//   for `extra_args` due to the following bug
+//   on older versions of clang:
+//   https://bugs.llvm.org/show_bug.cgi?id=23029
+template <
+    char const* name,
+    typename return_type,
+    typename f_inputs_type,
+    int arity,
+    typename... Args>
+void jitted_gpu_kernel(
+    TensorIteratorBase& iter,
+    const std::string& f,
+    at::cuda::jit::BinaryFuncVariant scalar_pos =
+        at::cuda::jit::BinaryFuncVariant::NoScalar,
+    at::opmath_type<f_inputs_type> scalar_val = 0,
+    std::tuple<Args...> extra_args = std::make_tuple()) {
+  // TODO: much of this preamble is common to both jitted_gpu_kernel and gpu_kernel.
+  // Maybe it could be refactored?
+  for (int arg = 0; arg < iter.ntensors(); arg++) {
+    TORCH_INTERNAL_ASSERT(
+      iter.device(arg).is_cuda(),
+      "argument ", arg, ": expected a CUDA device but found ", iter.device(arg));
+  }
+
+  if (iter.numel() == 0) {
+    return;
+  }
+
+  if (!iter.can_use_32bit_indexing()) {
+    for (auto& sub_iter : iter.with_32bit_indexing()) {
+      jitted_gpu_kernel<name, return_type, f_inputs_type, arity>(
+          sub_iter, f, scalar_pos, scalar_val, extra_args);
+    }
+
+    return;
+  }
+
+  // Computes if dynamic casting is needed
+  // Dynamic casting is needed if an input's dtype differs from the common dtype
+  // or if the result dtype differs from the output's dtype
+  // Note: this is intentionally divergent from calling needs_dynamic_casting,
+  // which is more general and inspects a lambda to determine if dynamic
+  // casting is needed.
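+  // For instance (roughly): if f_inputs_type is float but the iterator's
+  // tensors are Half, every iter.dtype(i) differs from kFloat, so the
+  // generated kernel casts Half values to float on load and casts the float
+  // result back to the output dtype on store.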
+ bool needs_dynamic_casting = false; + + // Checks output + const ScalarType return_scalar_type = c10::CppTypeToScalarType::value; + const auto dtype0 = iter.dtype(0); + if (dtype0 != return_scalar_type) { + needs_dynamic_casting = true; + } + + // Checks input(s) + const ScalarType inputs_scalar_type = c10::CppTypeToScalarType::value; + for (auto i = decltype(arity){1}; i < (arity + 1); ++i) { + const auto dtypei = iter.dtype(i); + if (dtypei != inputs_scalar_type) { + needs_dynamic_casting = true; + break; + } + } + if (scalar_pos == at::cuda::jit::BinaryFuncVariant::NoScalar) { + // NOTE: With `scalar_pos=NoScalar`,`scalar_val` is not used + // for computation in the generated code and hence we pass a dummy + // value of `0`. + jitted_gpu_kernel_impl< + /*name*/ name, + /*return_type=*/return_type, + /*f_inputs_type=*/f_inputs_type, + arity, + at::cuda::jit::BinaryFuncVariant::NoScalar>( + iter, f, needs_dynamic_casting, /*scalar_val=*/scalar_val, extra_args); + } else if (scalar_pos == at::cuda::jit::BinaryFuncVariant::RhsScalar) { + jitted_gpu_kernel_impl< + /*name*/ name, + /*return_type=*/return_type, + /*f_inputs_type=*/f_inputs_type, + arity, + at::cuda::jit::BinaryFuncVariant::RhsScalar>( + iter, + f, + needs_dynamic_casting, + scalar_val, + extra_args); + + } else { + jitted_gpu_kernel_impl< + /*name*/ name, + /*return_type=*/return_type, + /*f_inputs_type=*/f_inputs_type, + arity, + at::cuda::jit::BinaryFuncVariant::LhsScalar>( + iter, + f, + needs_dynamic_casting, + scalar_val, + extra_args); + } +} + +// TODO: support runtime state capture similar to `jitted_gpu_kernel`. +template +void opmath_jitted_gpu_kernel_with_scalars(TensorIteratorBase& iter, const std::string& f) { + TORCH_INTERNAL_ASSERT(iter.ntensors() == 3); + //currently jiterator only handles binary functions where both inputs are of the same type (f_inputs_type) + using opmath_t = at::opmath_type; + if (iter.is_cpu_scalar(1)) { + auto scalar_val = iter.scalar_value(1); + iter.remove_operand(1); + // TODO: When all kernels that use gpu_kernel_with_scalars are + // ported to structured, this device guard can be deleted. 
This + // works around incorrect device guard generation for pre-structured + // kernels device guards, but structured kernels do it right and + // we can assume the device is already set correctly + const OptionalDeviceGuard device_guard(iter.device(1)); + jitted_gpu_kernel(iter, f, at::cuda::jit::BinaryFuncVariant::LhsScalar, scalar_val); + } else if (iter.is_cpu_scalar(2)) { + auto scalar_val = iter.scalar_value(2); + iter.remove_operand(2); + jitted_gpu_kernel(iter, f, at::cuda::jit::BinaryFuncVariant::RhsScalar, scalar_val); + } else { + jitted_gpu_kernel(iter, f); + } +} + +}} // at::native + +#endif // AT_USE_JITERATOR() diff --git a/voice_bridge/torch/include/ATen/native/cuda/KernelUtils.cuh b/voice_bridge/torch/include/ATen/native/cuda/KernelUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..1e36e2db74d541b804e04aab9bc1c91828306571 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/KernelUtils.cuh @@ -0,0 +1,94 @@ +#pragma once +#include + +namespace at { +namespace native { + +__device__ __forceinline__ size_t +idx(const size_t nc, + const size_t height, + const size_t width, + const size_t h, + const size_t w) { + return (nc * height + h) * width + w; +} + +// for channels-last +__device__ __forceinline__ size_t +idx_cl( + const size_t n, const size_t h, const size_t w, const size_t c, + const size_t height, const size_t width, const size_t channel +) { + return ((n * height + h) * width + w) * channel + c; +} + +template < + typename scalar_t, + typename index_t, + typename std::enable_if::value>::type* = + nullptr> +__device__ __forceinline__ void fastSpecializedAtomicAdd( + scalar_t* tensor, + index_t index, + const index_t numel, + scalar_t value) { +#if ( \ + (defined(USE_ROCM)) || \ + (defined(CUDA_VERSION) && (CUDA_VERSION < 10000)) || \ + (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700))) + gpuAtomicAddNoReturn( + reinterpret_cast(tensor) + index, + static_cast(value)); +#else + // Accounts for the chance tensor falls on an odd 16 bit alignment (ie, not 32 bit aligned) + __half* target_addr = reinterpret_cast<__half*>(tensor + index); + bool low_byte = (reinterpret_cast(target_addr) % sizeof(__half2) == 0); + + if (low_byte && index < (numel - 1)) { + __half2 value2; + value2.x = value; + value2.y = __int2half_rz(0); + atomicAdd(reinterpret_cast<__half2*>(target_addr), value2); + + } else if (!low_byte && index > 0) { + __half2 value2; + value2.x = __int2half_rz(0); + value2.y = value; + atomicAdd(reinterpret_cast<__half2*>(target_addr - 1), value2); + + } else { + atomicAdd( + reinterpret_cast<__half*>(tensor) + index, static_cast<__half>(value)); + } +#endif +} + +template < + typename scalar_t, + typename index_t, + typename std::enable_if::value>::type* = + nullptr> +__device__ __forceinline__ void fastSpecializedAtomicAdd( + scalar_t* tensor, + index_t index, + const index_t numel, + scalar_t value) { + gpuAtomicAddNoReturn(tensor + index, value); +} + +template +__device__ __forceinline__ void fastAtomicAdd( + scalar_t* tensor, + index_t index, + const index_t numel, + scalar_t value, + bool fast_atomics) { + if (fast_atomics) { + fastSpecializedAtomicAdd(tensor, index, numel, value); + } else { + gpuAtomicAddNoReturn(tensor + index, value); + } +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/LaunchUtils.h b/voice_bridge/torch/include/ATen/native/cuda/LaunchUtils.h new file mode 100644 index 
0000000000000000000000000000000000000000..c9640b15b18c8a2d6d4f3dd92379701ae1ec5164 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/LaunchUtils.h @@ -0,0 +1,18 @@ +#pragma once +#include + +namespace at { +namespace native { + +// returns 2**floor(log2(n)) +static int lastPow2(unsigned int n) { + n |= (n >> 1); + n |= (n >> 2); + n |= (n >> 4); + n |= (n >> 8); + n |= (n >> 16); + return std::max(1, n - (n >> 1)); +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/Loops.cuh b/voice_bridge/torch/include/ATen/native/cuda/Loops.cuh new file mode 100644 index 0000000000000000000000000000000000000000..af1eeaa5349a9e9dbc1ff0e5086ac99ec4398563 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Loops.cuh @@ -0,0 +1,313 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include + + +namespace at { namespace native { + +template +static OffsetCalculator make_input_offset_calculator(const TensorIteratorBase& iter) { + // array size can not be 0, this happens when N == 0 + constexpr int array_size = std::max(N, 1); + TORCH_INTERNAL_ASSERT(N == iter.ntensors() - iter.noutputs()); + std::array strides; + int64_t element_sizes[array_size]; + for (int i = 0; i < N; i++) { + strides[i] = iter.strides(i + iter.noutputs()).data(); + element_sizes[i] = iter.element_size(i + iter.noutputs()); + } + return OffsetCalculator(iter.ndim(), iter.shape().data(), strides.data(), element_sizes); +} + +template +static OffsetCalculator make_output_offset_calculator(const TensorIteratorBase& iter) { + TORCH_INTERNAL_ASSERT(num_outputs == iter.noutputs()); + std::array strides; + int64_t element_sizes[num_outputs]; + for (int i = 0; i < num_outputs; i++) { + strides[i] = iter.strides(i).data(); + element_sizes[i] = iter.element_size(i); + } + return OffsetCalculator(iter.ndim(), iter.shape().data(), strides.data(), element_sizes); +} + +template +__device__ inline void elementwise_kernel_helper(func_t f, policy_t policy) { + using traits = function_traits; + using return_t = typename traits::result_type; + using args_t = typename traits::ArgsTuple; + + int idx = blockIdx.x; + + return_t results[thread_work_size()]; + args_t args[thread_work_size()]; + + // load + policy.load(args, idx); + + // compute + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (policy.check_inbounds(i)) { + results[i] = c10::guts::apply(f, args[i]); + } + } + + // store + policy.store(results, idx); +} + +}} // namespace at::native + +// Note: +// CUDA and ROCm get diverged in this PR: +// https://github.com/pytorch/pytorch/pull/32383 +// Because for some reason trying to enable vectorized +// memory access introduce regression on ROCm. 
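+// A minimal usage sketch (illustrative only, not part of this header):
+// gpu_kernel, defined below, applies an elementwise lambda across a
+// TensorIterator with one output and, here, two inputs:
+//
+//   gpu_kernel(iter, [] GPU_LAMBDA (float a, float b) -> float {
+//     return a + b;
+//   });
+//
+// The iterator supplies dtypes, broadcasting, and memory layout; the lambda
+// runs once per element.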
+ +#if !defined(USE_ROCM) + #include +#else + #include +#endif + +namespace at { namespace native { + +template +void gpu_kernel(TensorIteratorBase& iter, const func_t& f) { + + for (int arg = 0; arg < iter.ntensors(); arg++) { + TORCH_INTERNAL_ASSERT( + iter.device(arg).is_cuda(), + "argument ", arg, ": expected a CUDA device but found ", iter.device(arg)); + } + + if (iter.numel() == 0) { + return; + } + + if (!iter.can_use_32bit_indexing()) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + gpu_kernel(sub_iter, f); + } + return; + } + + gpu_kernel_impl(iter, f); +} + +template +struct AUnaryFunctor { + using traits = function_traits; + using opmath_arg1_t = typename traits::template arg<0>::type; + __device__ return_t operator()(arg2_t b) const { + return f(a, b); + } + // NB: scalar is stored in higher precision! + AUnaryFunctor(func_t f_, opmath_arg1_t a_): f(f_), a(a_) {} + private: + func_t f; + opmath_arg1_t a; +}; + +template +struct BUnaryFunctor { + using traits = function_traits; + using opmath_arg2_t = typename traits::template arg<1>::type; + __device__ return_t operator()(arg1_t a) const { + return f(a, b); + } + // NB: scalar is stored in higher precision! + BUnaryFunctor(func_t f_, opmath_arg2_t b_): f(f_), b(b_) {} + private: + func_t f; + opmath_arg2_t b; +}; + +// Though seemingly noop, this inserts casts from arg1_t to func_t's type +// (which may be higher precision), as well as casts to return_t +template +struct BinaryFunctor { + __device__ return_t operator()(arg1_t a, arg2_t b) const { + return f(a, b); + } + BinaryFunctor(func_t f_): f(f_) {} + private: + func_t f; +}; + +// Unlike gpu_kernel_with_scalars, this allows you to pass a func_t which +// accepts inputs at higher precision (typically opmath_t), but then +// ensure that we load from memory at the correct precision (scalar_t) +// to avoid expensive loads. For the whole sordid story see +// https://dev-discuss.pytorch.org/t/cuda-loops-case-study-code-generation-vs-templates/302 +template +void opmath_gpu_kernel_with_scalars(TensorIteratorBase& iter, const func_t& f) { + TORCH_INTERNAL_ASSERT(iter.ntensors() == 3); + + using traits = function_traits; + using opmath_arg1_t = typename traits::template arg<0>::type; + using opmath_arg2_t = typename traits::template arg<1>::type; + static_assert( + traits::arity == 2, + "gpu_kernel_with_scalars only supports two input arguments"); + + if (iter.is_cpu_scalar(1)) { + AUnaryFunctor af(f, iter.scalar_value(1)); + iter.remove_operand(1); + // TODO: When all kernels that use gpu_kernel_with_scalars are + // ported to structured, this device guard can be deleted. 
This + // works around incorrect device guard generation for pre-structured + // kernels device guards, but structured kernels do it right and + // we can assume the device is already set correctly + const OptionalDeviceGuard device_guard(iter.device(1)); + gpu_kernel(iter, af); + } else if (iter.is_cpu_scalar(2)) { + BUnaryFunctor bf(f, iter.scalar_value(2)); + iter.remove_operand(2); + gpu_kernel(iter, bf); + } else { + gpu_kernel(iter, BinaryFunctor(f)); + } +} + +template +void opmath_symmetric_gpu_kernel_with_scalars(TensorIteratorBase& iter, const func_t& f) { + // Use symmetric property of the functor to reduce number of kernels, + // requires f(a, b) == f(b, a) + TORCH_INTERNAL_ASSERT(iter.ntensors() == 3); + + using traits = function_traits; + using opmath_arg_t = typename traits::template arg<0>::type; + static_assert( + traits::arity == 2, + "gpu_kernel_with_scalars only supports two input arguments"); + static_assert(std::is_same::type>::value, + "f is not symmetric"); + + OptionalDeviceGuard device_guard; + opmath_arg_t scalar_val{}; + + if (iter.is_cpu_scalar(1)) { + scalar_val = iter.scalar_value(1); + iter.remove_operand(1); + + // TODO: When all kernels that use gpu_kernel_with_scalars are + // ported to structured, this device guard can be deleted. This + // works around incorrect device guard generation for pre-structured + // kernels device guards, but structured kernels do it right and + // we can assume the device is already set correctly + device_guard.reset_device(iter.device(1)); + } else if (iter.is_cpu_scalar(2)) { + scalar_val = iter.scalar_value(2); + iter.remove_operand(2); + } + + if (iter.ninputs() == 2) { + gpu_kernel(iter, BinaryFunctor(f)); + } else { + AUnaryFunctor unary_f(f, scalar_val); + gpu_kernel(iter, unary_f); + } +} + +// Legacy variant that assumes that func_t has the correct types +// that we expect to load from memory +template +void gpu_kernel_with_scalars(TensorIteratorBase& iter, const func_t& f) { + using traits = function_traits; + static_assert( + traits::arity == 2, + "gpu_kernel_with_scalars only supports two input arguments"); + using arg1_t = typename traits::template arg<0>::type; + using arg2_t = typename traits::template arg<1>::type; + using return_t = typename traits::result_type; + opmath_gpu_kernel_with_scalars(iter, f); +} + +namespace { // functions for `gpu_kernel_multiple_outputs`. + +// check the return type is `thrust::tuple`, not `std::tuple`. 
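+// e.g. is_tuple<thrust::tuple<float, float>>::value is true, while
+// is_tuple<std::tuple<float, float>>::value is false, so a functor passed to
+// gpu_kernel_multiple_outputs must package its results in a thrust::tuple.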
+template struct is_tuple: std::false_type {}; + +template struct is_tuple>: std::true_type {}; + +template +C10_LAUNCH_BOUNDS_1(num_threads()) +__global__ void unrolled_elementwise_kernel_for_multi_outputs(int N, func_t f, array_t data, inp_calc_t ic, out_calc_t oc) { + int remaining = N - block_work_size() * blockIdx.x; + elementwise_kernel_helper(f, memory::policies::multi_outputs_unroll(data, remaining, ic, oc)); +} + +template +static inline void launch_unrolled_kernel_for_multi_outputs(int64_t N, const func_t& f, array_t data, inp_calc_t ic, out_calc_t oc) { + TORCH_INTERNAL_ASSERT(N > 0 && N <= std::numeric_limits::max()); + int64_t grid = (N + block_work_size() - 1) / block_work_size(); + auto stream = at::cuda::getCurrentCUDAStream(); + unrolled_elementwise_kernel_for_multi_outputs<<>>(N, f, data, ic, oc); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +void gpu_kernel_multiple_outputs_impl(TensorIteratorBase& iter, const func_t& f) { + using traits = function_traits; + using output_t = typename traits::result_type; + static_assert(is_tuple::value, "f's return type must be `thrust::tuple`"); + constexpr int num_outputs = thrust::tuple_size::value; + constexpr int num_inputs = traits::arity; + constexpr int ntensors = num_outputs + num_inputs; + + TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing()); + TORCH_INTERNAL_ASSERT(iter.ntensors() == ntensors); + + at::detail::Array data; + for (int i = 0; i < ntensors; i++) { + data[i] = (char*)iter.data_ptr(i); + } + + int64_t numel = iter.numel(); + + if (iter.is_contiguous()) { + auto input_calc = TrivialOffsetCalculator(); + auto output_calc = TrivialOffsetCalculator(); + launch_unrolled_kernel_for_multi_outputs(numel, f, data, input_calc, output_calc); + } else { + auto input_calc = make_input_offset_calculator(iter); + auto output_calc = make_output_offset_calculator(iter); + launch_unrolled_kernel_for_multi_outputs(numel, f, data, input_calc, output_calc); + } +} +} // namespace + +template +void gpu_kernel_multiple_outputs(TensorIteratorBase& iter, const func_t& f) { + ASSERT_HOST_DEVICE_LAMBDA(func_t); + + for (int arg = 0; arg < iter.ntensors(); arg++) { + TORCH_INTERNAL_ASSERT(iter.device(arg).is_cuda()); + } + + if (iter.numel() == 0) { + return; + } + + if (!iter.can_use_32bit_indexing()) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + gpu_kernel_multiple_outputs(sub_iter, f); + } + return; + } + + gpu_kernel_multiple_outputs_impl(iter, f); +} + +}} //namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Math.cuh b/voice_bridge/torch/include/ATen/native/cuda/Math.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c2a4870d7f5dde361edc595cdda5b8a28165b33f --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Math.cuh @@ -0,0 +1,3361 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { +namespace native { +// See note [Jiterator] +// TODO: elaborate in this comment on the structure of math.cuh +#if AT_USE_JITERATOR() + +const auto ndtri_string = jiterator_stringify( + /* + * This function is derived from the implementation of the digamma function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + * + * Evaluates polynomial of degree N: + * + * 2 N + * y = C + C x + C x +...+ C x + * 0 1 2 N + * + * Coefficients are stored in reverse order: + * + * coef[0] = C , ..., coef[N] = C . 
+ * N 0 + */ + template + T polevl(const T x, const T A[], const int len) { + // NOTE: This `polevl` is different from other `polevl` + // implementation (in PyTorch) which expect the `len` to be + // `len(A) - 1` instead of `len(A)`. + T result = 0; + for (int i = 0; i < len; ++i) { + result = result * x + A[i]; + } + return result; + } + + /* + * This function is derived from the implementation of the i1e function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + * + * Computes the argument, x, for which the area under the Gaussian probability density function + * (integrated from minus infinity to x) is equal to y. + */ + template + T ndtri(T y0) { + + constexpr T zero = 0; + constexpr T one = 1; + + // Handles special cases + if (y0 == zero) { + return NEG_INFINITY; + } + if (y0 == one) { + return POS_INFINITY; + } + if (y0 < zero || y0 > one) { + return NAN; + } + + bool code = true; + T y = y0; + // Note: the constant 0.135... is equal to exp(-2) + if (y > one - T{0.13533528323661269189}) { + y = one - y; + code = false; + } + + if (y > T{0.13533528323661269189}) { + /* approximation for 0 <= |y - 0.5| <= 3/8 */ + static const T P0[5] = { + -5.99633501014107895267E1, + 9.80010754185999661536E1, + -5.66762857469070293439E1, + 1.39312609387279679503E1, + -1.23916583867381258016E0, + }; + + static const T Q0[9] = { + 1.00000000000000000000E0, + 1.95448858338141759834E0, + 4.67627912898881538453E0, + 8.63602421390890590575E1, + -2.25462687854119370527E2, + 2.00260212380060660359E2, + -8.20372256168333339912E1, + 1.59056225126211695515E1, + -1.18331621121330003142E0, + }; + + /* sqrt(2pi) */ + constexpr T s2pi = 2.50662827463100050242E0; + + y = y - T{0.5}; + const T y2 = y * y; + T x = y + y * (y2 * polevl(y2, P0, int{5}) / polevl(y2, Q0, int{9})); + return x * s2pi; + } + + T x = sqrt(T{-2.} * log(y)); + const T x0 = x - (log(x) / x); + + const T z = one / x; + T x1; + + /* y > exp(-32) = 1.2664165549e-14 */ + if (x < T{8.0}) { + /* Approximation for interval z = sqrt(-2 log y ) between 2 and 8 + * i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14. + */ + static const T P1[9] = { + 4.05544892305962419923E0, + 3.15251094599893866154E1, + 5.71628192246421288162E1, + 4.40805073893200834700E1, + 1.46849561928858024014E1, + 2.18663306850790267539E0, + -1.40256079171354495875E-1, + -3.50424626827848203418E-2, + -8.57456785154685413611E-4, + }; + + static const T Q1[9] = { + 1.00000000000000000000E0, + 1.57799883256466749731E1, + 4.53907635128879210584E1, + 4.13172038254672030440E1, + 1.50425385692907503408E1, + 2.50464946208309415979E0, + -1.42182922854787788574E-1, + -3.80806407691578277194E-2, + -9.33259480895457427372E-4, + }; + + x1 = z * polevl(z, P1, int{9}) / polevl(z, Q1, int{9}); + } else { + /* Approximation for interval z = sqrt(-2 log y ) between 8 and 64 + * i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890. 
+ */ + static const T P2[9] = { + 3.23774891776946035970E0, + 6.91522889068984211695E0, + 3.93881025292474443415E0, + 1.33303460815807542389E0, + 2.01485389549179081538E-1, + 1.23716634817820021358E-2, + 3.01581553508235416007E-4, + 2.65806974686737550832E-6, + 6.23974539184983293730E-9, + }; + + static const T Q2[9] = { + 1.00000000000000000000E0, + 6.02427039364742014255E0, + 3.67983563856160859403E0, + 1.37702099489081330271E0, + 2.16236993594496635890E-1, + 1.34204006088543189037E-2, + 3.28014464682127739104E-4, + 2.89247864745380683936E-6, + 6.79019408009981274425E-9, + }; + + x1 = z * polevl(z, P2, int{9}) / polevl(z, Q2, int{9}); + } + + x = x0 - x1; + return (!code) ? x : -x; + } +); // ndtri_string + +const auto log_ndtr_string = jiterator_stringify( + template + T log_ndtr(T x) { + constexpr T SQRT1_2{0.707106781186547524400844362104849039}; // 1/sqrt(2) + T t = x * SQRT1_2; + if (x < T{-1.0}) { + return log(erfcx(-t) / 2) - t * t; + } else { + return log1p(-erfc(t) / 2); + } + } +); // log_ndtr_string + +const auto gcd_string = jiterator_stringify( + template + T gcd(const T a_in, const T b_in) { + T a = abs(a_in); + T b = abs(b_in); + + while (a != T{0}) { + T c = a; + a = b % a; + b = c; + } + + return b; + } +); // gcd_string + +const auto lcm_string = jiterator_stringify( + template + T gcd(const T a_in, const T b_in) { + T a = abs(a_in); + T b = abs(b_in); + + while (a != T{0}) { + T c = a; + a = b % a; + b = c; + } + + return b; + } + + template + T lcm(const T a, const T b) { + T g = gcd(a, b); + return (g == T{0}) ? T{0} : abs(a / g * b); + } +); // lcm_string + +/* + * For licensing information, please refer to the the cpu implementation located in "ATen/native/Math.h". + */ +// [C++ Standard Reference: Gamma Function] https://en.cppreference.com/w/cpp/numeric/math/tgamma +const auto digamma_string = jiterator_stringify( + template + T digamma(T x) { + static const double PI_f64 = 3.14159265358979323846; + + // Short-circuits if x is +/- 0 and returns -/+ ∞ per the C++ standard + if (x == 0) { + return copysign(POS_INFINITY, -x); + } + + T result = 0; + if (x < 0) { + // Short-circuits if x is a negative integer and returns NaN + // per the C++ standard + const bool x_is_integer = (x == trunc(x)); + if (x_is_integer) { + return NAN; + } + + // Extracts the fractional part of x as r, since tan(pi * r) is more numerically + // accurate than tan(pi * x). While these operations are mathematically equivalent + // since both x and r are in radians and tan() has a periodicity of pi, in practice + // the computation of pi * x is a source of error (when |x| > 1). + double q, r; + r = modf(static_cast(x), &q); + result = - PI_f64 / tan(PI_f64 * r); + x = 1 - x; + } + + while (x < T{10}) { + result -= T{1} / x; + x += T{1}; + } + + if (x == T{10}) { + return result + T{2.25175258906672110764}; + } + + T y = 0; + if (x < T{1.0e17}) { + const T A[] = { + 8.33333333333333333333E-2, + -2.10927960927960927961E-2, + 7.57575757575757575758E-3, + -4.16666666666666666667E-3, + 3.96825396825396825397E-3, + -8.33333333333333333333E-3, + 8.33333333333333333333E-2, + }; + + + T z = T{1} / (x * x); + + T polevl_result = 0; + for (int i = 0; i <= 6; i++) { + polevl_result = polevl_result * z + A[i]; + } + y = z * polevl_result; + } + + return log(x) - (T{0.5} / x) - y + result; + } +); // digamma_string + +/* + * This function is derived from the implementation of the zeta function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. 
+ */ +const auto zeta_string = jiterator_stringify( + template + T zeta(T x, T q) { + const T MACHEP{1.11022302462515654042E-16}; + constexpr T zero{0}; + constexpr T half{0.5}; + constexpr T one{1}; + static const T A[] = { + 12.0, + -720.0, + 30240.0, + -1209600.0, + 47900160.0, + -1.8924375803183791606e9, /*1.307674368e12/691*/ + 7.47242496e10, + -2.950130727918164224e12, /*1.067062284288e16/3617*/ + 1.1646782814350067249e14, /*5.109094217170944e18/43867*/ + -4.5979787224074726105e15, /*8.028576626982912e20/174611*/ + 1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/ + -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/ + }; + + int i = 0; + T a, b, k, s, t, w; + + // Short-circuits x -> +infty + if (x == one) { + return POS_INFINITY; + } + + // Short-circuits x < 1 -> NaN + if (x < one) { + return NAN; + } + + // Short-circuits negative q integers map to +infty, + // negative q non-integers map to NaN + if (q <= zero) { + if (q == floor(q)) { + return POS_INFINITY; + } + if (x != floor(x)) { + return NAN; + } + } + + s = pow(q, -x); + a = q; + i = 0; + b = zero; + while ((i < 9) || (a <= T{9.0})) { + i += 1; + a += one; + b = pow(a, -x); + s += b; + if ((-MACHEP * s < b) && (b < MACHEP * s)) { + return s; + } + }; + + w = a; + s += b * w / (x - one); + s -= half * b; + a = one; + k = zero; + for (int i = 0; i < 12; i++) { + a *= x + k; + b /= w; + t = a * b / A[i]; + s = s + t; + t = fabs(t / s); + + if (t < MACHEP) { + return s; + } + + k += one; + a *= x + k; + b /= w; + k += one; + } + + return s; + } +); // zeta_string + +const auto trigamma_string = jiterator_stringify( + template + T trigamma(T x) { + const T PI{3.14159265358979323846}; + T sign = 1; + T result = 0; + + if (x < T{0.5}) { + sign = -1; + T sin_pi_x = sin(PI * x); + result -= (PI * PI) / (sin_pi_x * sin_pi_x); + x = 1 - x; + } + + for (int i = 0; i < 6; ++i) { + result += T{1} / (x * x); + x += 1; + } + + const T one{1}; + const T ixx = one / (x*x); + result += (one + one / (T{2}*x) + ixx * (one/T{6} - ixx * (one/T{30} - ixx * (one/T{42})))) / x; + return sign * result; +} +); // trigamma_string + +const auto lgamma_string = jiterator_stringify( + template + T lgamma_kernel(T a) { + return lgamma(a); + } +); // lgamma_string + +const auto polygamma_string = zeta_string + jiterator_stringify( + template + T polygamma(T x, int n) { + // already blocked if n <= 1 + const auto one = T{1}; + return ((n % 2) ? one : -one) * exp(lgamma(static_cast(n) + one)) * + zeta(static_cast(n + 1), x); + } +); // polygamma_string + +const auto exp2_string = jiterator_stringify( + template + T exp2_kernel(T a) { + return exp2(a); + } +); // exp2_string + +const auto erfc_string = jiterator_stringify( + template + T erfc_kernel(T a) { + return erfc(a); + } +); // erfc_string + +const auto erfinv_string = jiterator_stringify( + template + T erfinv_kernel(T a) { + return erfinv(a); + } +); // erfinv_string + +const auto entr_string = jiterator_stringify( + template + T entr(T a) { + if (a != a) { + return a; + } + + if (a > 0) { + return -a * log(a); + } + + if (a == 0) { + return 0; + } + + return NEG_INFINITY; + } +); // entr_string + +// NOTE: `kaiser_window_string` depends on `i0_string` +// for its implementation. 
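+// The `chbevl` helper repeated in the strings below is the Cephes Chebyshev
+// series evaluator (Clenshaw's recurrence): each step computes
+// b0 = x * b1 - b2 + array[i], and the final value is 0.5 * (b0 - b2).
+// As a tiny trace, with len = 2 it returns 0.5 * (x * array[0] + array[1]).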
+const auto i0_string = jiterator_stringify( + template + T chbevl(T x, const T array[], const int len) { + + T b0, b1, b2; + + b0 = array[0]; + b1 = 0; + + for (int i = 1; i < len; ++i) { + b2 = b1; + b1 = b0; + b0 = x * b1 - b2 + array[i]; + } + + return T{0.5} * (b0 - b2); + } + + template + T i0(T _x) { + T x = fabs(_x); + + if (x <= T{8.0}) { + /* Chebyshev coefficients for exp(-x) I0(x) + * in the interval [0,8]. + * + * lim(x->0){ exp(-x) I0(x) } = 1. + */ + static const T A[] = { + -4.41534164647933937950E-18, 3.33079451882223809783E-17, + -2.43127984654795469359E-16, 1.71539128555513303061E-15, + -1.16853328779934516808E-14, 7.67618549860493561688E-14, + -4.85644678311192946090E-13, 2.95505266312963983461E-12, + -1.72682629144155570723E-11, 9.67580903537323691224E-11, + -5.18979560163526290666E-10, 2.65982372468238665035E-9, + -1.30002500998624804212E-8, 6.04699502254191894932E-8, + -2.67079385394061173391E-7, 1.11738753912010371815E-6, + -4.41673835845875056359E-6, 1.64484480707288970893E-5, + -5.75419501008210370398E-5, 1.88502885095841655729E-4, + -5.76375574538582365885E-4, 1.63947561694133579842E-3, + -4.32430999505057594430E-3, 1.05464603945949983183E-2, + -2.37374148058994688156E-2, 4.93052842396707084878E-2, + -9.49010970480476444210E-2, 1.71620901522208775349E-1, + -3.04682672343198398683E-1, 6.76795274409476084995E-1}; + + T y = (x / T{2.0}) - T{2.0}; + return exp(x) * chbevl(y, A, int{30}); + } + + // Handles x > 8 case + /* Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + * in the inverted interval [8,infinity]. + * + * lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). + */ + const T B[] = { + -7.23318048787475395456E-18, -4.83050448594418207126E-18, + 4.46562142029675999901E-17, 3.46122286769746109310E-17, + -2.82762398051658348494E-16, -3.42548561967721913462E-16, + 1.77256013305652638360E-15, 3.81168066935262242075E-15, + -9.55484669882830764870E-15, -4.15056934728722208663E-14, + 1.54008621752140982691E-14, 3.85277838274214270114E-13, + 7.18012445138366623367E-13, -1.79417853150680611778E-12, + -1.32158118404477131188E-11, -3.14991652796324136454E-11, + 1.18891471078464383424E-11, 4.94060238822496958910E-10, + 3.39623202570838634515E-9, 2.26666899049817806459E-8, + 2.04891858946906374183E-7, 2.89137052083475648297E-6, + 6.88975834691682398426E-5, 3.36911647825569408990E-3, + 8.04490411014108831608E-1}; + + return (exp(x) * chbevl(T{32.0} / x - T{2.0}, B, int{25})) / sqrt(x); + } +); // i0_string + +const auto i1_string = jiterator_stringify( + template + T chbevl(const T x, const T array[], const int len) { + T b0, b1, b2; + + b0 = array[0]; + b1 = 0; + + for (int i = 1; i < len; ++i) { + b2 = b1; + b1 = b0; + b0 = x * b1 - b2 + array[i]; + } + + return T{0.5} * (b0 - b2); + } + + template + T i1(T _x) { + const T x = fabs(_x); + + if (x <= T{8.0}) { + // Chebyshev coefficients for exp(-x) i1(x) in the internal [0, 8] + // lim(x->0){ exp(-x) i1(x) / x } = 1/2 + static const T coefficients[] = { + 2.77791411276104639959E-18, -2.11142121435816608115E-17, + 1.55363195773620046921E-16, -1.10559694773538630805E-15, + 7.60068429473540693410E-15, -5.04218550472791168711E-14, + 3.22379336594557470981E-13, -1.98397439776494371520E-12, + 1.17361862988909016308E-11, -6.66348972350202774223E-11, + 3.62559028155211703701E-10, -1.88724975172282928790E-9, + 9.38153738649577178388E-9, -4.44505912879632808065E-8, + 2.00329475355213526229E-7, -8.56872026469545474066E-7, + 3.47025130813767847674E-6, -1.32731636560394358279E-5, + 4.78156510755005422638E-5, 
-1.61760815825896745588E-4, + 5.12285956168575772895E-4, -1.51357245063125314899E-3, + 4.15642294431288815669E-3, -1.05640848946261981558E-2, + 2.47264490306265168283E-2, -5.29459812080949914269E-2, + 1.02643658689847095384E-1, -1.76416518357834055153E-1, + 2.52587186443633654823E-1}; + const T y = x / T{2.0} - T{2.0}; + const T out = exp(x) * x * chbevl(y, coefficients, int{29}); + return (_x < T{0.0}) ? -out : out; + } + + // Chebyshev coefficients for exp(-x) sqrt(x) i1(x) + // in the inverted interval [8, infinity] + // lim(x->inf){ exp(-x) sqrt(x) i1(x) } = 1/sqrt(2pi) + static const T coefficients[] = { + 7.51729631084210481353E-18, 4.41434832307170791151E-18, + -4.65030536848935832153E-17, -3.20952592199342395980E-17, + 2.96262899764595013876E-16, 3.30820231092092828324E-16, + -1.88035477551078244854E-15, -3.81440307243700780478E-15, + 1.04202769841288027642E-14, 4.27244001671195135429E-14, + -2.10154184277266431302E-14, -4.08355111109219731823E-13, + -7.19855177624590851209E-13, 2.03562854414708950722E-12, + 1.41258074366137813316E-11, 3.25260358301548823856E-11, + -1.89749581235054123450E-11, -5.58974346219658380687E-10, + -3.83538038596423702205E-9, -2.63146884688951950684E-8, + -2.51223623787020892529E-7, -3.88256480887769039346E-6, + -1.10588938762623716291E-4, -9.76109749136146840777E-3, + 7.78576235018280120474E-1}; + const T out = (exp(x) * chbevl(T{32.} / x - T{2.}, coefficients, int{25})) / sqrt(x); + return (_x < T{0.}) ? -out : out; + } +); // i1_string + +const auto i1e_string = jiterator_stringify( + template + T chbevl(const T x, const T array[], const int len) { + T b0, b1, b2; + + b0 = array[0]; + b1 = 0; + + for (int i = 1; i < len; ++i) { + b2 = b1; + b1 = b0; + b0 = x * b1 - b2 + array[i]; + } + + return T{0.5} * (b0 - b2); + } + + // See double and float instantiations below + template + T i1e(T _x) { } + + // Double specialization (uses different coefficients than the float version) + template<> + double i1e(double _x) { + const double x = fabs(_x); + if (x <= double{8.}) { + // Chebyshev double coefficients for exp(-x) i1(x) in the interval [0,8]. + // Note: lim(x->0){ exp(-x) i1(x) / x } = 1/2. + static const double coefficients[] = { + 2.77791411276104639959E-18, -2.11142121435816608115E-17, + 1.55363195773620046921E-16, -1.10559694773538630805E-15, + 7.60068429473540693410E-15, -5.04218550472791168711E-14, + 3.22379336594557470981E-13, -1.98397439776494371520E-12, + 1.17361862988909016308E-11, -6.66348972350202774223E-11, + 3.62559028155211703701E-10, -1.88724975172282928790E-9, + 9.38153738649577178388E-9, -4.44505912879632808065E-8, + 2.00329475355213526229E-7, -8.56872026469545474066E-7, + 3.47025130813767847674E-6, -1.32731636560394358279E-5, + 4.78156510755005422638E-5, -1.61760815825896745588E-4, + 5.12285956168575772895E-4, -1.51357245063125314899E-3, + 4.15642294431288815669E-3, -1.05640848946261981558E-2, + 2.47264490306265168283E-2, -5.29459812080949914269E-2, + 1.02643658689847095384E-1, -1.76416518357834055153E-1, + 2.52587186443633654823E-1}; + const double y = x / double{2.} - double{2.}; + const double out = chbevl(y, coefficients, int{29}) * x; + return (_x < 0.) ? -out : out; + } + + // Chebyshev coefficients for exp(-x) sqrt(x) i1(x) + // in the inverted interval (8, infinity]. + // Note: lim(x->inf){ exp(-x) sqrt(x) i1(x) } = 1/sqrt(2pi). + // TODO: what's an "inverted interval"? Open on the left + // and closed on the right? 
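+    // Editor's note, answering the TODO above: "inverted" refers to the
+    // substitution y = 32/x - 2 used below, which maps x = 8 to y = 2 and
+    // x -> infinity to y = -2, so the Chebyshev fit runs in powers of 1/x.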
+ static const double coefficients[] = { + 7.51729631084210481353E-18, 4.41434832307170791151E-18, + -4.65030536848935832153E-17, -3.20952592199342395980E-17, + 2.96262899764595013876E-16, 3.30820231092092828324E-16, + -1.88035477551078244854E-15, -3.81440307243700780478E-15, + 1.04202769841288027642E-14, 4.27244001671195135429E-14, + -2.10154184277266431302E-14, -4.08355111109219731823E-13, + -7.19855177624590851209E-13, 2.03562854414708950722E-12, + 1.41258074366137813316E-11, 3.25260358301548823856E-11, + -1.89749581235054123450E-11, -5.58974346219658380687E-10, + -3.83538038596423702205E-9, -2.63146884688951950684E-8, + -2.51223623787020892529E-7, -3.88256480887769039346E-6, + -1.10588938762623716291E-4, -9.76109749136146840777E-3, + 7.78576235018280120474E-1}; + + const double out = chbevl(double{32.} / x - double{2.}, coefficients, int{25}) / sqrt(x); + return (_x < double{0.}) ? -out : out; + } + + // Float specialization (uses different coefficients than the double version) + template<> + float i1e(float _x) { + const float x = fabsf(_x); + if (x <= float{8.}) { + // Chebyshev double coefficients for exp(-x) i1(x) in the interval [0,8]. + // Note: lim(x->0){ exp(-x) i1(x) / x } = 1/2. + static const float coefficients[] = { + 9.38153738649577178388E-9f, + -4.44505912879632808065E-8f, + 2.00329475355213526229E-7f, + -8.56872026469545474066E-7f, + 3.47025130813767847674E-6f, + -1.32731636560394358279E-5f, + 4.78156510755005422638E-5f, + -1.61760815825896745588E-4f, + 5.12285956168575772895E-4f, + -1.51357245063125314899E-3f, + 4.15642294431288815669E-3f, + -1.05640848946261981558E-2f, + 2.47264490306265168283E-2f, + -5.29459812080949914269E-2f, + 1.02643658689847095384E-1f, + -1.76416518357834055153E-1f, + 2.52587186443633654823E-1f}; + const float y = x / float{2.} - float{2.}; + const float out = chbevl(y, coefficients, int{17}) * x; + return (_x < 0.) ? -out : out; + } + + // Chebyshev coefficients for exp(-x) sqrt(x) i1(x) + // in the inverted interval (8, infinity]. + // Note: lim(x->inf){ exp(-x) sqrt(x) i1(x) } = 1/sqrt(2pi). + // TODO: what's an "inverted interval"? Open on the left + // and closed on the right? + static const float coefficients[] = { + -3.83538038596423702205E-9f, + -2.63146884688951950684E-8f, + -2.51223623787020892529E-7f, + -3.88256480887769039346E-6f, + -1.10588938762623716291E-4f, + -9.76109749136146840777E-3f, + 7.78576235018280120474E-1f}; + + const float out = chbevl(float{32.} / x - float{2.}, coefficients, int{7}) / sqrt(x); + return (_x < float{0.}) ? 
-out : out; + } +); // i1e_string + +const auto kaiser_window_string = i0_string + jiterator_stringify( + template + T kaiser_window(T a, T inv_alpha, T beta, T inv_i0_beta) { + T x = a * inv_alpha - T{1}; + T y = max(T{0}, T{1} - x * x); + return i0(beta * sqrt(y)) * inv_i0_beta; + } +); // kaiser_window_string + +const auto sinc_string = jiterator_stringify( + template + T sinc(T a) { + if (a == T(0)) { + return T(1); + } else { + constexpr T pi = T(3.14159265358979323846L); + T product = pi * a; + return std::sin(product) / product; + } + } +); // sinc_string + +const auto erfcx_string = jiterator_stringify( + /* The next function is taken from http://ab-initio.mit.edu/Faddeev */ + + /* Copyright (c) 2012 Massachusetts Institute of Technology + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + + /* erfcx(x) = exp(x^2) erfc(x) function, for real x, written by + Steven G. Johnson, October 2012. + + This function combines a few different ideas. + + First, for x > 50, it uses a continued-fraction expansion (same as + for the Faddeeva function, but with algebraic simplifications for z=i*x). + + Second, for 0 <= x <= 50, it uses Chebyshev polynomial approximations, + but with two twists: + + a) It maps x to y = 4 / (4+x) in [0,1]. This simple transformation, + inspired by a similar transformation in the octave-forge/specfun + erfcx by Soren Hauberg, results in much faster Chebyshev convergence + than other simple transformations I have examined. + + b) Instead of using a single Chebyshev polynomial for the entire + [0,1] y interval, we break the interval up into 100 equal + subintervals, with a switch/lookup table, and use much lower + degree Chebyshev polynomials in each subinterval. This greatly + improves performance in my tests. + + For x < 0, we use the relationship erfcx(-x) = 2 exp(x^2) - erfc(x), + with the usual checks for overflow etcetera. + + Performance-wise, it seems to be substantially faster than either + the SLATEC DERFC function [or an erfcx function derived therefrom] + or Cody's CALERF function (from netlib.org/specfun), while + retaining near machine precision in accuracy. + */ + + /* Given y100 = 100 * y, where y = 4 / (4 + x) for x >= 0, compute erfc(x). + + Uses a look-up table of 100 different Chebyshev polynomials + for y intervals [0,0.01], [0.01,0.02], ...., [0.99,1], generated + with the help of Maple and a little shell script. 
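+     (Editor's example: x = 1 gives y = 4/(4 + 1) = 0.8 and y100 = 80, so
+     erfcx_y100 dispatches to case 80 and evaluates its polynomial at
+     t = 2*y100 - 161 = -1, the left endpoint of that subinterval's
+     Chebyshev variable t in [-1, 1].)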
This allows + the Chebyshev polynomials to be of significantly lower degree (about 1/4) + compared to fitting the whole [0,1] interval with a single polynomial. + */ + + // TODO: review if this is computing in double when given a float input + template + T erfcx_y100(T y100) { + switch (static_cast(y100)) { + case 0: { + T t = 2*y100 - 1; + return 0.70878032454106438663e-3 + (0.71234091047026302958e-3 + (0.35779077297597742384e-5 + (0.17403143962587937815e-7 + (0.81710660047307788845e-10 + (0.36885022360434957634e-12 + 0.15917038551111111111e-14 * t) * t) * t) * t) * t) * t; + } + case 1: { + T t = 2*y100 - 3; + return 0.21479143208285144230e-2 + (0.72686402367379996033e-3 + (0.36843175430938995552e-5 + (0.18071841272149201685e-7 + (0.85496449296040325555e-10 + (0.38852037518534291510e-12 + 0.16868473576888888889e-14 * t) * t) * t) * t) * t) * t; + } + case 2: { + T t = 2*y100 - 5; + return 0.36165255935630175090e-2 + (0.74182092323555510862e-3 + (0.37948319957528242260e-5 + (0.18771627021793087350e-7 + (0.89484715122415089123e-10 + (0.40935858517772440862e-12 + 0.17872061464888888889e-14 * t) * t) * t) * t) * t) * t; + } + case 3: { + T t = 2*y100 - 7; + return 0.51154983860031979264e-2 + (0.75722840734791660540e-3 + (0.39096425726735703941e-5 + (0.19504168704300468210e-7 + (0.93687503063178993915e-10 + (0.43143925959079664747e-12 + 0.18939926435555555556e-14 * t) * t) * t) * t) * t) * t; + } + case 4: { + T t = 2*y100 - 9; + return 0.66457513172673049824e-2 + (0.77310406054447454920e-3 + (0.40289510589399439385e-5 + (0.20271233238288381092e-7 + (0.98117631321709100264e-10 + (0.45484207406017752971e-12 + 0.20076352213333333333e-14 * t) * t) * t) * t) * t) * t; + } + case 5: { + T t = 2*y100 - 11; + return 0.82082389970241207883e-2 + (0.78946629611881710721e-3 + (0.41529701552622656574e-5 + (0.21074693344544655714e-7 + (0.10278874108587317989e-9 + (0.47965201390613339638e-12 + 0.21285907413333333333e-14 * t) * t) * t) * t) * t) * t; + } + case 6: { + T t = 2*y100 - 13; + return 0.98039537275352193165e-2 + (0.80633440108342840956e-3 + (0.42819241329736982942e-5 + (0.21916534346907168612e-7 + (0.10771535136565470914e-9 + (0.50595972623692822410e-12 + 0.22573462684444444444e-14 * t) * t) * t) * t) * t) * t; + } + case 7: { + T t = 2*y100 - 15; + return 0.11433927298290302370e-1 + (0.82372858383196561209e-3 + (0.44160495311765438816e-5 + (0.22798861426211986056e-7 + (0.11291291745879239736e-9 + (0.53386189365816880454e-12 + 0.23944209546666666667e-14 * t) * t) * t) * t) * t) * t; + } + case 8: { + T t = 2*y100 - 17; + return 0.13099232878814653979e-1 + (0.84167002467906968214e-3 + (0.45555958988457506002e-5 + (0.23723907357214175198e-7 + (0.11839789326602695603e-9 + (0.56346163067550237877e-12 + 0.25403679644444444444e-14 * t) * t) * t) * t) * t) * t; + } + case 9: { + T t = 2*y100 - 19; + return 0.14800987015587535621e-1 + (0.86018092946345943214e-3 + (0.47008265848816866105e-5 + (0.24694040760197315333e-7 + (0.12418779768752299093e-9 + (0.59486890370320261949e-12 + 0.26957764568888888889e-14 * t) * t) * t) * t) * t) * t; + } + case 10: { + T t = 2*y100 - 21; + return 0.16540351739394069380e-1 + (0.87928458641241463952e-3 + (0.48520195793001753903e-5 + (0.25711774900881709176e-7 + (0.13030128534230822419e-9 + (0.62820097586874779402e-12 + 0.28612737351111111111e-14 * t) * t) * t) * t) * t) * t; + } + case 11: { + T t = 2*y100 - 23; + return 0.18318536789842392647e-1 + (0.89900542647891721692e-3 + (0.50094684089553365810e-5 + (0.26779777074218070482e-7 + (0.13675822186304615566e-9 + 
(0.66358287745352705725e-12 + 0.30375273884444444444e-14 * t) * t) * t) * t) * t) * t; + } + case 12: { + T t = 2*y100 - 25; + return 0.20136801964214276775e-1 + (0.91936908737673676012e-3 + (0.51734830914104276820e-5 + (0.27900878609710432673e-7 + (0.14357976402809042257e-9 + (0.70114790311043728387e-12 + 0.32252476000000000000e-14 * t) * t) * t) * t) * t) * t; + } + case 13: { + T t = 2*y100 - 27; + return 0.21996459598282740954e-1 + (0.94040248155366777784e-3 + (0.53443911508041164739e-5 + (0.29078085538049374673e-7 + (0.15078844500329731137e-9 + (0.74103813647499204269e-12 + 0.34251892320000000000e-14 * t) * t) * t) * t) * t) * t; + } + case 14: { + T t = 2*y100 - 29; + return 0.23898877187226319502e-1 + (0.96213386835900177540e-3 + (0.55225386998049012752e-5 + (0.30314589961047687059e-7 + (0.15840826497296335264e-9 + (0.78340500472414454395e-12 + 0.36381553564444444445e-14 * t) * t) * t) * t) * t) * t; + } + case 15: { + T t = 2*y100 - 31; + return 0.25845480155298518485e-1 + (0.98459293067820123389e-3 + (0.57082915920051843672e-5 + (0.31613782169164830118e-7 + (0.16646478745529630813e-9 + (0.82840985928785407942e-12 + 0.38649975768888888890e-14 * t) * t) * t) * t) * t) * t; + } + case 16: { + T t = 2*y100 - 33; + return 0.27837754783474696598e-1 + (0.10078108563256892757e-2 + (0.59020366493792212221e-5 + (0.32979263553246520417e-7 + (0.17498524159268458073e-9 + (0.87622459124842525110e-12 + 0.41066206488888888890e-14 * t) * t) * t) * t) * t) * t; + } + case 17: { + T t = 2*y100 - 35; + return 0.29877251304899307550e-1 + (0.10318204245057349310e-2 + (0.61041829697162055093e-5 + (0.34414860359542720579e-7 + (0.18399863072934089607e-9 + (0.92703227366365046533e-12 + 0.43639844053333333334e-14 * t) * t) * t) * t) * t) * t; + } + case 18: { + T t = 2*y100 - 37; + return 0.31965587178596443475e-1 + (0.10566560976716574401e-2 + (0.63151633192414586770e-5 + (0.35924638339521924242e-7 + (0.19353584758781174038e-9 + (0.98102783859889264382e-12 + 0.46381060817777777779e-14 * t) * t) * t) * t) * t) * t; + } + case 19: { + T t = 2*y100 - 39; + return 0.34104450552588334840e-1 + (0.10823541191350532574e-2 + (0.65354356159553934436e-5 + (0.37512918348533521149e-7 + (0.20362979635817883229e-9 + (0.10384187833037282363e-11 + 0.49300625262222222221e-14 * t) * t) * t) * t) * t) * t; + } + case 20: { + T t = 2*y100 - 41; + return 0.36295603928292425716e-1 + (0.11089526167995268200e-2 + (0.67654845095518363577e-5 + (0.39184292949913591646e-7 + (0.21431552202133775150e-9 + (0.10994259106646731797e-11 + 0.52409949102222222221e-14 * t) * t) * t) * t) * t) * t; + } + case 21: { + T t = 2*y100 - 43; + return 0.38540888038840509795e-1 + (0.11364917134175420009e-2 + (0.70058230641246312003e-5 + (0.40943644083718586939e-7 + (0.22563034723692881631e-9 + (0.11642841011361992885e-11 + 0.55721092871111111110e-14 * t) * t) * t) * t) * t) * t; + } + case 22: { + T t = 2*y100 - 45; + return 0.40842225954785960651e-1 + (0.11650136437945673891e-2 + (0.72569945502343006619e-5 + (0.42796161861855042273e-7 + (0.23761401711005024162e-9 + (0.12332431172381557035e-11 + 0.59246802364444444445e-14 * t) * t) * t) * t) * t) * t; + } + case 23: { + T t = 2*y100 - 47; + return 0.43201627431540222422e-1 + (0.11945628793917272199e-2 + (0.75195743532849206263e-5 + (0.44747364553960993492e-7 + (0.25030885216472953674e-9 + (0.13065684400300476484e-11 + 0.63000532853333333334e-14 * t) * t) * t) * t) * t) * t; + } + case 24: { + T t = 2*y100 - 49; + return 0.45621193513810471438e-1 + (0.12251862608067529503e-2 + (0.77941720055551920319e-5 
+ (0.46803119830954460212e-7 + (0.26375990983978426273e-9 + (0.13845421370977119765e-11 + 0.66996477404444444445e-14 * t) * t) * t) * t) * t) * t; + } + case 25: { + T t = 2*y100 - 51; + return 0.48103121413299865517e-1 + (0.12569331386432195113e-2 + (0.80814333496367673980e-5 + (0.48969667335682018324e-7 + (0.27801515481905748484e-9 + (0.14674637611609884208e-11 + 0.71249589351111111110e-14 * t) * t) * t) * t) * t) * t; + } + case 26: { + T t = 2*y100 - 53; + return 0.50649709676983338501e-1 + (0.12898555233099055810e-2 + (0.83820428414568799654e-5 + (0.51253642652551838659e-7 + (0.29312563849675507232e-9 + (0.15556512782814827846e-11 + 0.75775607822222222221e-14 * t) * t) * t) * t) * t) * t; + } + case 27: { + T t = 2*y100 - 55; + return 0.53263363664388864181e-1 + (0.13240082443256975769e-2 + (0.86967260015007658418e-5 + (0.53662102750396795566e-7 + (0.30914568786634796807e-9 + (0.16494420240828493176e-11 + 0.80591079644444444445e-14 * t) * t) * t) * t) * t) * t; + } + case 28: { + T t = 2*y100 - 57; + return 0.55946601353500013794e-1 + (0.13594491197408190706e-2 + (0.90262520233016380987e-5 + (0.56202552975056695376e-7 + (0.32613310410503135996e-9 + (0.17491936862246367398e-11 + 0.85713381688888888890e-14 * t) * t) * t) * t) * t) * t; + } + case 29: { + T t = 2*y100 - 59; + return 0.58702059496154081813e-1 + (0.13962391363223647892e-2 + (0.93714365487312784270e-5 + (0.58882975670265286526e-7 + (0.34414937110591753387e-9 + (0.18552853109751857859e-11 + 0.91160736711111111110e-14 * t) * t) * t) * t) * t) * t; + } + case 30: { + T t = 2*y100 - 61; + return 0.61532500145144778048e-1 + (0.14344426411912015247e-2 + (0.97331446201016809696e-5 + (0.61711860507347175097e-7 + (0.36325987418295300221e-9 + (0.19681183310134518232e-11 + 0.96952238400000000000e-14 * t) * t) * t) * t) * t) * t; + } + case 31: { + T t = 2*y100 - 63; + return 0.64440817576653297993e-1 + (0.14741275456383131151e-2 + (0.10112293819576437838e-4 + (0.64698236605933246196e-7 + (0.38353412915303665586e-9 + (0.20881176114385120186e-11 + 0.10310784480000000000e-13 * t) * t) * t) * t) * t) * t; + } + case 32: { + T t = 2*y100 - 65; + return 0.67430045633130393282e-1 + (0.15153655418916540370e-2 + (0.10509857606888328667e-4 + (0.67851706529363332855e-7 + (0.40504602194811140006e-9 + (0.22157325110542534469e-11 + 0.10964842115555555556e-13 * t) * t) * t) * t) * t) * t; + } + case 33: { + T t = 2*y100 - 67; + return 0.70503365513338850709e-1 + (0.15582323336495709827e-2 + (0.10926868866865231089e-4 + (0.71182482239613507542e-7 + (0.42787405890153386710e-9 + (0.23514379522274416437e-11 + 0.11659571751111111111e-13 * t) * t) * t) * t) * t) * t; + } + case 34: { + T t = 2*y100 - 69; + return 0.73664114037944596353e-1 + (0.16028078812438820413e-2 + (0.11364423678778207991e-4 + (0.74701423097423182009e-7 + (0.45210162777476488324e-9 + (0.24957355004088569134e-11 + 0.12397238257777777778e-13 * t) * t) * t) * t) * t) * t; + } + case 35: { + T t = 2*y100 - 71; + return 0.76915792420819562379e-1 + (0.16491766623447889354e-2 + (0.11823685320041302169e-4 + (0.78420075993781544386e-7 + (0.47781726956916478925e-9 + (0.26491544403815724749e-11 + 0.13180196462222222222e-13 * t) * t) * t) * t) * t) * t; + } + case 36: { + T t = 2*y100 - 73; + return 0.80262075578094612819e-1 + (0.16974279491709504117e-2 + (0.12305888517309891674e-4 + (0.82350717698979042290e-7 + (0.50511496109857113929e-9 + (0.28122528497626897696e-11 + 0.14010889635555555556e-13 * t) * t) * t) * t) * t) * t; + } + case 37: { + T t = 2*y100 - 75; + return 
0.83706822008980357446e-1 + (0.17476561032212656962e-2 + (0.12812343958540763368e-4 + (0.86506399515036435592e-7 + (0.53409440823869467453e-9 + (0.29856186620887555043e-11 + 0.14891851591111111111e-13 * t) * t) * t) * t) * t) * t; + } + case 38: { + T t = 2*y100 - 77; + return 0.87254084284461718231e-1 + (0.17999608886001962327e-2 + (0.13344443080089492218e-4 + (0.90900994316429008631e-7 + (0.56486134972616465316e-9 + (0.31698707080033956934e-11 + 0.15825697795555555556e-13 * t) * t) * t) * t) * t) * t; + } + case 39: { + T t = 2*y100 - 79; + return 0.90908120182172748487e-1 + (0.18544478050657699758e-2 + (0.13903663143426120077e-4 + (0.95549246062549906177e-7 + (0.59752787125242054315e-9 + (0.33656597366099099413e-11 + 0.16815130613333333333e-13 * t) * t) * t) * t) * t) * t; + } + case 40: { + T t = 2*y100 - 81; + return 0.94673404508075481121e-1 + (0.19112284419887303347e-2 + (0.14491572616545004930e-4 + (0.10046682186333613697e-6 + (0.63221272959791000515e-9 + (0.35736693975589130818e-11 + 0.17862931591111111111e-13 * t) * t) * t) * t) * t) * t; + } + case 41: { + T t = 2*y100 - 83; + return 0.98554641648004456555e-1 + (0.19704208544725622126e-2 + (0.15109836875625443935e-4 + (0.10567036667675984067e-6 + (0.66904168640019354565e-9 + (0.37946171850824333014e-11 + 0.18971959040000000000e-13 * t) * t) * t) * t) * t) * t; + } + case 42: { + T t = 2*y100 - 85; + return 0.10255677889470089531e0 + (0.20321499629472857418e-2 + (0.15760224242962179564e-4 + (0.11117756071353507391e-6 + (0.70814785110097658502e-9 + (0.40292553276632563925e-11 + 0.20145143075555555556e-13 * t) * t) * t) * t) * t) * t; + } + case 43: { + T t = 2*y100 - 87; + return 0.10668502059865093318e0 + (0.20965479776148731610e-2 + (0.16444612377624983565e-4 + (0.11700717962026152749e-6 + (0.74967203250938418991e-9 + (0.42783716186085922176e-11 + 0.21385479360000000000e-13 * t) * t) * t) * t) * t) * t; + } + case 44: { + T t = 2*y100 - 89; + return 0.11094484319386444474e0 + (0.21637548491908170841e-2 + (0.17164995035719657111e-4 + (0.12317915750735938089e-6 + (0.79376309831499633734e-9 + (0.45427901763106353914e-11 + 0.22696025653333333333e-13 * t) * t) * t) * t) * t) * t; + } + case 45: { + T t = 2*y100 - 91; + return 0.11534201115268804714e0 + (0.22339187474546420375e-2 + (0.17923489217504226813e-4 + (0.12971465288245997681e-6 + (0.84057834180389073587e-9 + (0.48233721206418027227e-11 + 0.24079890062222222222e-13 * t) * t) * t) * t) * t) * t; + } + case 46: { + T t = 2*y100 - 93; + return 0.11988259392684094740e0 + (0.23071965691918689601e-2 + (0.18722342718958935446e-4 + (0.13663611754337957520e-6 + (0.89028385488493287005e-9 + (0.51210161569225846701e-11 + 0.25540227111111111111e-13 * t) * t) * t) * t) * t) * t; + } + case 47: { + T t = 2*y100 - 95; + return 0.12457298393509812907e0 + (0.23837544771809575380e-2 + (0.19563942105711612475e-4 + (0.14396736847739470782e-6 + (0.94305490646459247016e-9 + (0.54366590583134218096e-11 + 0.27080225920000000000e-13 * t) * t) * t) * t) * t) * t; + } + case 48: { + T t = 2*y100 - 97; + return 0.12941991566142438816e0 + (0.24637684719508859484e-2 + (0.20450821127475879816e-4 + (0.15173366280523906622e-6 + (0.99907632506389027739e-9 + (0.57712760311351625221e-11 + 0.28703099555555555556e-13 * t) * t) * t) * t) * t) * t; + } + case 49: { + T t = 2*y100 - 99; + return 0.13443048593088696613e0 + (0.25474249981080823877e-2 + (0.21385669591362915223e-4 + (0.15996177579900443030e-6 + (0.10585428844575134013e-8 + (0.61258809536787882989e-11 + 0.30412080142222222222e-13 * t) * t) * t) * t) * t) 
* t; + } + case 50: { + T t = 2*y100 - 101; + return 0.13961217543434561353e0 + (0.26349215871051761416e-2 + (0.22371342712572567744e-4 + (0.16868008199296822247e-6 + (0.11216596910444996246e-8 + (0.65015264753090890662e-11 + 0.32210394506666666666e-13 * t) * t) * t) * t) * t) * t; + } + case 51: { + T t = 2*y100 - 103; + return 0.14497287157673800690e0 + (0.27264675383982439814e-2 + (0.23410870961050950197e-4 + (0.17791863939526376477e-6 + (0.11886425714330958106e-8 + (0.68993039665054288034e-11 + 0.34101266222222222221e-13 * t) * t) * t) * t) * t) * t; + } + case 52: { + T t = 2*y100 - 105; + return 0.15052089272774618151e0 + (0.28222846410136238008e-2 + (0.24507470422713397006e-4 + (0.18770927679626136909e-6 + (0.12597184587583370712e-8 + (0.73203433049229821618e-11 + 0.36087889048888888890e-13 * t) * t) * t) * t) * t) * t; + } + case 53: { + T t = 2*y100 - 107; + return 0.15626501395774612325e0 + (0.29226079376196624949e-2 + (0.25664553693768450545e-4 + (0.19808568415654461964e-6 + (0.13351257759815557897e-8 + (0.77658124891046760667e-11 + 0.38173420035555555555e-13 * t) * t) * t) * t) * t) * t; + } + case 54: { + T t = 2*y100 - 109; + return 0.16221449434620737567e0 + (0.30276865332726475672e-2 + (0.26885741326534564336e-4 + (0.20908350604346384143e-6 + (0.14151148144240728728e-8 + (0.82369170665974313027e-11 + 0.40360957457777777779e-13 * t) * t) * t) * t) * t) * t; + } + case 55: { + T t = 2*y100 - 111; + return 0.16837910595412130659e0 + (0.31377844510793082301e-2 + (0.28174873844911175026e-4 + (0.22074043807045782387e-6 + (0.14999481055996090039e-8 + (0.87348993661930809254e-11 + 0.42653528977777777779e-13 * t) * t) * t) * t) * t) * t; + } + case 56: { + T t = 2*y100 - 113; + return 0.17476916455659369953e0 + (0.32531815370903068316e-2 + (0.29536024347344364074e-4 + (0.23309632627767074202e-6 + (0.15899007843582444846e-8 + (0.92610375235427359475e-11 + 0.45054073102222222221e-13 * t) * t) * t) * t) * t) * t; + } + case 57: { + T t = 2*y100 - 115; + return 0.18139556223643701364e0 + (0.33741744168096996041e-2 + (0.30973511714709500836e-4 + (0.24619326937592290996e-6 + (0.16852609412267750744e-8 + (0.98166442942854895573e-11 + 0.47565418097777777779e-13 * t) * t) * t) * t) * t) * t; + } + case 58: { + T t = 2*y100 - 117; + return 0.18826980194443664549e0 + (0.35010775057740317997e-2 + (0.32491914440014267480e-4 + (0.26007572375886319028e-6 + (0.17863299617388376116e-8 + (0.10403065638343878679e-10 + 0.50190265831111111110e-13 * t) * t) * t) * t) * t) * t; + } + case 59: { + T t = 2*y100 - 119; + return 0.19540403413693967350e0 + (0.36342240767211326315e-2 + (0.34096085096200907289e-4 + (0.27479061117017637474e-6 + (0.18934228504790032826e-8 + (0.11021679075323598664e-10 + 0.52931171733333333334e-13 * t) * t) * t) * t) * t) * t; + } + case 60: { + T t = 2*y100 - 121; + return 0.20281109560651886959e0 + (0.37739673859323597060e-2 + (0.35791165457592409054e-4 + (0.29038742889416172404e-6 + (0.20068685374849001770e-8 + (0.11673891799578381999e-10 + 0.55790523093333333334e-13 * t) * t) * t) * t) * t) * t; + } + case 61: { + T t = 2*y100 - 123; + return 0.21050455062669334978e0 + (0.39206818613925652425e-2 + (0.37582602289680101704e-4 + (0.30691836231886877385e-6 + (0.21270101645763677824e-8 + (0.12361138551062899455e-10 + 0.58770520160000000000e-13 * t) * t) * t) * t) * t) * t; + } + case 62: { + T t = 2*y100 - 125; + return 0.21849873453703332479e0 + (0.40747643554689586041e-2 + (0.39476163820986711501e-4 + (0.32443839970139918836e-6 + (0.22542053491518680200e-8 + 
(0.13084879235290858490e-10 + 0.61873153262222222221e-13 * t) * t) * t) * t) * t) * t; + } + case 63: { + T t = 2*y100 - 127; + return 0.22680879990043229327e0 + (0.42366354648628516935e-2 + (0.41477956909656896779e-4 + (0.34300544894502810002e-6 + (0.23888264229264067658e-8 + (0.13846596292818514601e-10 + 0.65100183751111111110e-13 * t) * t) * t) * t) * t) * t; + } + case 64: { + T t = 2*y100 - 129; + return 0.23545076536988703937e0 + (0.44067409206365170888e-2 + (0.43594444916224700881e-4 + (0.36268045617760415178e-6 + (0.25312606430853202748e-8 + (0.14647791812837903061e-10 + 0.68453122631111111110e-13 * t) * t) * t) * t) * t) * t; + } + case 65: { + T t = 2*y100 - 131; + return 0.24444156740777432838e0 + (0.45855530511605787178e-2 + (0.45832466292683085475e-4 + (0.38352752590033030472e-6 + (0.26819103733055603460e-8 + (0.15489984390884756993e-10 + 0.71933206364444444445e-13 * t) * t) * t) * t) * t) * t; + } + case 66: { + T t = 2*y100 - 133; + return 0.25379911500634264643e0 + (0.47735723208650032167e-2 + (0.48199253896534185372e-4 + (0.40561404245564732314e-6 + (0.28411932320871165585e-8 + (0.16374705736458320149e-10 + 0.75541379822222222221e-13 * t) * t) * t) * t) * t) * t; + } + case 67: { + T t = 2*y100 - 135; + return 0.26354234756393613032e0 + (0.49713289477083781266e-2 + (0.50702455036930367504e-4 + (0.42901079254268185722e-6 + (0.30095422058900481753e-8 + (0.17303497025347342498e-10 + 0.79278273368888888890e-13 * t) * t) * t) * t) * t) * t; + } + case 68: { + T t = 2*y100 - 137; + return 0.27369129607732343398e0 + (0.51793846023052643767e-2 + (0.53350152258326602629e-4 + (0.45379208848865015485e-6 + (0.31874057245814381257e-8 + (0.18277905010245111046e-10 + 0.83144182364444444445e-13 * t) * t) * t) * t) * t) * t; + } + case 69: { + T t = 2*y100 - 139; + return 0.28426714781640316172e0 + (0.53983341916695141966e-2 + (0.56150884865255810638e-4 + (0.48003589196494734238e-6 + (0.33752476967570796349e-8 + (0.19299477888083469086e-10 + 0.87139049137777777779e-13 * t) * t) * t) * t) * t) * t; + } + case 70: { + T t = 2*y100 - 141; + return 0.29529231465348519920e0 + (0.56288077305420795663e-2 + (0.59113671189913307427e-4 + (0.50782393781744840482e-6 + (0.35735475025851713168e-8 + (0.20369760937017070382e-10 + 0.91262442613333333334e-13 * t) * t) * t) * t) * t) * t; + } + case 71: { + T t = 2*y100 - 143; + return 0.30679050522528838613e0 + (0.58714723032745403331e-2 + (0.62248031602197686791e-4 + (0.53724185766200945789e-6 + (0.37827999418960232678e-8 + (0.21490291930444538307e-10 + 0.95513539182222222221e-13 * t) * t) * t) * t) * t) * t; + } + case 72: { + T t = 2*y100 - 145; + return 0.31878680111173319425e0 + (0.61270341192339103514e-2 + (0.65564012259707640976e-4 + (0.56837930287837738996e-6 + (0.40035151353392378882e-8 + (0.22662596341239294792e-10 + 0.99891109760000000000e-13 * t) * t) * t) * t) * t) * t; + } + case 73: { + T t = 2*y100 - 147; + return 0.33130773722152622027e0 + (0.63962406646798080903e-2 + (0.69072209592942396666e-4 + (0.60133006661885941812e-6 + (0.42362183765883466691e-8 + (0.23888182347073698382e-10 + 0.10439349811555555556e-12 * t) * t) * t) * t) * t) * t; + } + case 74: { + T t = 2*y100 - 149; + return 0.34438138658041336523e0 + (0.66798829540414007258e-2 + (0.72783795518603561144e-4 + (0.63619220443228800680e-6 + (0.44814499336514453364e-8 + (0.25168535651285475274e-10 + 0.10901861383111111111e-12 * t) * t) * t) * t) * t) * t; + } + case 75: { + T t = 2*y100 - 151; + return 0.35803744972380175583e0 + (0.69787978834882685031e-2 + (0.76710543371454822497e-4 
+ (0.67306815308917386747e-6 + (0.47397647975845228205e-8 + (0.26505114141143050509e-10 + 0.11376390933333333333e-12 * t) * t) * t) * t) * t) * t; + } + case 76: { + T t = 2*y100 - 153; + return 0.37230734890119724188e0 + (0.72938706896461381003e-2 + (0.80864854542670714092e-4 + (0.71206484718062688779e-6 + (0.50117323769745883805e-8 + (0.27899342394100074165e-10 + 0.11862637614222222222e-12 * t) * t) * t) * t) * t) * t; + } + case 77: { + T t = 2*y100 - 155; + return 0.38722432730555448223e0 + (0.76260375162549802745e-2 + (0.85259785810004603848e-4 + (0.75329383305171327677e-6 + (0.52979361368388119355e-8 + (0.29352606054164086709e-10 + 0.12360253370666666667e-12 * t) * t) * t) * t) * t) * t; + } + case 78: { + T t = 2*y100 - 157; + return 0.40282355354616940667e0 + (0.79762880915029728079e-2 + (0.89909077342438246452e-4 + (0.79687137961956194579e-6 + (0.55989731807360403195e-8 + (0.30866246101464869050e-10 + 0.12868841946666666667e-12 * t) * t) * t) * t) * t) * t; + } + case 79: { + T t = 2*y100 - 159; + return 0.41914223158913787649e0 + (0.83456685186950463538e-2 + (0.94827181359250161335e-4 + (0.84291858561783141014e-6 + (0.59154537751083485684e-8 + (0.32441553034347469291e-10 + 0.13387957943111111111e-12 * t) * t) * t) * t) * t) * t; + } + case 80: { + T t = 2*y100 - 161; + return 0.43621971639463786896e0 + (0.87352841828289495773e-2 + (0.10002929142066799966e-3 + (0.89156148280219880024e-6 + (0.62480008150788597147e-8 + (0.34079760983458878910e-10 + 0.13917107176888888889e-12 * t) * t) * t) * t) * t) * t; + } + case 81: { + T t = 2*y100 - 163; + return 0.45409763548534330981e0 + (0.91463027755548240654e-2 + (0.10553137232446167258e-3 + (0.94293113464638623798e-6 + (0.65972492312219959885e-8 + (0.35782041795476563662e-10 + 0.14455745872000000000e-12 * t) * t) * t) * t) * t) * t; + } + case 82: { + T t = 2*y100 - 165; + return 0.47282001668512331468e0 + (0.95799574408860463394e-2 + (0.11135019058000067469e-3 + (0.99716373005509038080e-6 + (0.69638453369956970347e-8 + (0.37549499088161345850e-10 + 0.15003280712888888889e-12 * t) * t) * t) * t) * t) * t; + } + case 83: { + T t = 2*y100 - 167; + return 0.49243342227179841649e0 + (0.10037550043909497071e-1 + (0.11750334542845234952e-3 + (0.10544006716188967172e-5 + (0.73484461168242224872e-8 + (0.39383162326435752965e-10 + 0.15559069118222222222e-12 * t) * t) * t) * t) * t) * t; + } + case 84: { + T t = 2*y100 - 169; + return 0.51298708979209258326e0 + (0.10520454564612427224e-1 + (0.12400930037494996655e-3 + (0.11147886579371265246e-5 + (0.77517184550568711454e-8 + (0.41283980931872622611e-10 + 0.16122419680000000000e-12 * t) * t) * t) * t) * t) * t; + } + case 85: { + T t = 2*y100 - 171; + return 0.53453307979101369843e0 + (0.11030120618800726938e-1 + (0.13088741519572269581e-3 + (0.11784797595374515432e-5 + (0.81743383063044825400e-8 + (0.43252818449517081051e-10 + 0.16692592640000000000e-12 * t) * t) * t) * t) * t) * t; + } + case 86: { + T t = 2*y100 - 173; + return 0.55712643071169299478e0 + (0.11568077107929735233e-1 + (0.13815797838036651289e-3 + (0.12456314879260904558e-5 + (0.86169898078969313597e-8 + (0.45290446811539652525e-10 + 0.17268801084444444444e-12 * t) * t) * t) * t) * t) * t; + } + case 87: { + T t = 2*y100 - 175; + return 0.58082532122519320968e0 + (0.12135935999503877077e-1 + (0.14584223996665838559e-3 + (0.13164068573095710742e-5 + (0.90803643355106020163e-8 + (0.47397540713124619155e-10 + 0.17850211608888888889e-12 * t) * t) * t) * t) * t) * t; + } + case 88: { + T t = 2*y100 - 177; + return 
0.60569124025293375554e0 + (0.12735396239525550361e-1 + (0.15396244472258863344e-3 + (0.13909744385382818253e-5 + (0.95651595032306228245e-8 + (0.49574672127669041550e-10 + 0.18435945564444444444e-12 * t) * t) * t) * t) * t) * t; + } + case 89: { + T t = 2*y100 - 179; + return 0.63178916494715716894e0 + (0.13368247798287030927e-1 + (0.16254186562762076141e-3 + (0.14695084048334056083e-5 + (0.10072078109604152350e-7 + (0.51822304995680707483e-10 + 0.19025081422222222222e-12 * t) * t) * t) * t) * t) * t; + } + case 90: { + T t = 2*y100 - 181; + return 0.65918774689725319200e0 + (0.14036375850601992063e-1 + (0.17160483760259706354e-3 + (0.15521885688723188371e-5 + (0.10601827031535280590e-7 + (0.54140790105837520499e-10 + 0.19616655146666666667e-12 * t) * t) * t) * t) * t) * t; + } + case 91: { + T t = 2*y100 - 183; + return 0.68795950683174433822e0 + (0.14741765091365869084e-1 + (0.18117679143520433835e-3 + (0.16392004108230585213e-5 + (0.11155116068018043001e-7 + (0.56530360194925690374e-10 + 0.20209663662222222222e-12 * t) * t) * t) * t) * t) * t; + } + case 92: { + T t = 2*y100 - 185; + return 0.71818103808729967036e0 + (0.15486504187117112279e-1 + (0.19128428784550923217e-3 + (0.17307350969359975848e-5 + (0.11732656736113607751e-7 + (0.58991125287563833603e-10 + 0.20803065333333333333e-12 * t) * t) * t) * t) * t) * t; + } + case 93: { + T t = 2*y100 - 187; + return 0.74993321911726254661e0 + (0.16272790364044783382e-1 + (0.20195505163377912645e-3 + (0.18269894883203346953e-5 + (0.12335161021630225535e-7 + (0.61523068312169087227e-10 + 0.21395783431111111111e-12 * t) * t) * t) * t) * t) * t; + } + case 94: { + T t = 2*y100 - 189; + return 0.78330143531283492729e0 + (0.17102934132652429240e-1 + (0.21321800585063327041e-3 + (0.19281661395543913713e-5 + (0.12963340087354341574e-7 + (0.64126040998066348872e-10 + 0.21986708942222222222e-12 * t) * t) * t) * t) * t) * t; + } + case 95: { + T t = 2*y100 - 191; + return 0.81837581041023811832e0 + (0.17979364149044223802e-1 + (0.22510330592753129006e-3 + (0.20344732868018175389e-5 + (0.13617902941839949718e-7 + (0.66799760083972474642e-10 + 0.22574701262222222222e-12 * t) * t) * t) * t) * t) * t; + } + case 96: { + T t = 2*y100 - 193; + return 0.85525144775685126237e0 + (0.18904632212547561026e-1 + (0.23764237370371255638e-3 + (0.21461248251306387979e-5 + (0.14299555071870523786e-7 + (0.69543803864694171934e-10 + 0.23158593688888888889e-12 * t) * t) * t) * t) * t) * t; + } + case 97: { + T t = 2*y100 - 195; + return 0.89402868170849933734e0 + (0.19881418399127202569e-1 + (0.25086793128395995798e-3 + (0.22633402747585233180e-5 + (0.15008997042116532283e-7 + (0.72357609075043941261e-10 + 0.23737194737777777778e-12 * t) * t) * t) * t) * t) * t; + } + case 98: { + T t = 2*y100 - 197; + return 0.93481333942870796363e0 + (0.20912536329780368893e-1 + (0.26481403465998477969e-3 + (0.23863447359754921676e-5 + (0.15746923065472184451e-7 + (0.75240468141720143653e-10 + 0.24309291271111111111e-12 * t) * t) * t) * t) * t) * t; + } + case 99: { + T t = 2*y100 - 199; + return 0.97771701335885035464e0 + (0.22000938572830479551e-1 + (0.27951610702682383001e-3 + (0.25153688325245314530e-5 + (0.16514019547822821453e-7 + (0.78191526829368231251e-10 + 0.24873652355555555556e-12 * t) * t) * t) * t) * t) * t; + } + } + + // we only get here if y = 1, i.e. |x| < 4*eps, in which case + // erfcx is within 1e-15 of 1.. 
+ return 1.; + } + + template + T erfcx(T x) { + // Short-circuits on NaN (returning NaN) + if (x != x) { + return x; + } + + if (x >= 0) { + if (x > T{50}) { // continued-fraction expansion is faster + const T ispi = 0.56418958354775628694807945156; // 1 / sqrt(pi) + + if (x > T{5e7}) { // 1-term expansion, important to avoid overflow + return ispi / x; + } + + /* 5-term expansion (rely on compiler for CSE), simplified from: + ispi / (x+0.5/(x+1/(x+1.5/(x+2/x)))) */ + return ispi * ((x*x) * (x*x+T{4.5}) + T{2}) / (x * ((x*x) * (x*x+T{5}) + T{3.75})); + } + + // x >= 0 x <= 50 + return erfcx_y100(T{400} / (T{4} + x)); + } + + // x < 0 + if (x < T{-26.7}) { + return POS_INFINITY; + } else if (x < T{-6.1}) { + return T{2} * exp(x * x); + } + + // x < 0 and x >= -6.1 + return T{2} * exp(x * x) - erfcx_y100(T{400} / (T{4} - x)); + } +); // erfcx_string + +const auto airy_ai_string = jiterator_stringify( + template + T airy_ai_forward(T x) { + static const T AN[] = { + +3.46538101525629032477e-01, + +1.20075952739645805542e+01, + +7.62796053615234516538e+01, + +1.68089224934630576269e+02, + +1.59756391350164413639e+02, + +7.05360906840444183113e+01, + +1.40264691163389668864e+01, + +9.99999999999999995305e-01, + }; + + static const T AD[] = { + +5.67594532638770212846e-01, + +1.47562562584847203173e+01, + +8.45138970141474626562e+01, + +1.77318088145400459522e+02, + +1.64234692871529701831e+02, + +7.14778400825575695274e+01, + +1.40959135607834029598e+01, + +1.00000000000000000470e+00, + }; + + static const T AFN[] = { + -1.31696323418331795333e-01, + -6.26456544431912369773e-01, + -6.93158036036933542233e-01, + -2.79779981545119124951e-01, + -4.91900132609500318020e-02, + -4.06265923594885404393e-03, + -1.59276496239262096340e-04, + -2.77649108155232920844e-06, + -1.67787698489114633780e-08, + }; + + static const T AFD[] = { + +1.33560420706553243746e+01, + +3.26825032795224613948e+01, + +2.67367040941499554804e+01, + +9.18707402907259625840e+00, + +1.47529146771666414581e+00, + +1.15687173795188044134e-01, + +4.40291641615211203805e-03, + +7.54720348287414296618e-05, + +4.51850092970580378464e-07, + }; + + static const T AGN[] = { + +1.97339932091685679179e-02, + +3.91103029615688277255e-01, + +1.06579897599595591108e+00, + +9.39169229816650230044e-01, + +3.51465656105547619242e-01, + +6.33888919628925490927e-02, + +5.85804113048388458567e-03, + +2.82851600836737019778e-04, + +6.98793669997260967291e-06, + +8.11789239554389293311e-08, + +3.41551784765923618484e-10, + }; + + static const T AGD[] = { + +9.30892908077441974853e+00, + +1.98352928718312140417e+01, + +1.55646628932864612953e+01, + +5.47686069422975497931e+00, + +9.54293611618961883998e-01, + +8.64580826352392193095e-02, + +4.12656523824222607191e-03, + +1.01259085116509135510e-04, + +1.17166733214413521882e-06, + +4.91834570062930015649e-09, + }; + + int domain_flag = 0; + + T ai; + + if (isinf(x)) { + return NAN; + } + + if (x > T(103.892)) { + return T(0.0); + } + + T f; + T g; + T k; + + if (x < T(-2.09)) { + T z = T(1.0) / (T(-2.0) * x * sqrt(-x) / T(3.0)); + + T afn = 0.0; + + for (uint8_t index = 0; index <= 8; index++) { + afn = afn * (z * z) + AFN[index]; + } + + T afd = 0.0; + + for (uint8_t index = 0; index <= 8; index++) { + afd = afd * (z * z) + AFD[index]; + } + + T agn = 0.0; + + for (uint8_t index = 0; index <= 10 + 0; index++) { + agn = agn * (z * z) + AGN[index]; + } + + T agd = 0.0; + + for (uint8_t index = 0; index <= 10 - 1; index++) { + agd = agd * (z * z) + AGD[index]; + } + + T t = T(-2.0) * x * sqrt(-x) / 
T(3.0) + T(0.25) * T(3.14159265358979323846); + + return T(5.64189583547756286948e-01) / sqrt(sqrt(-x)) * (sin(t) * (T(1.0) + z * z * afn / afd) - cos(t) * (z * agn / agd)); + } + + if (x >= T(2.09)) { + domain_flag = 5; + + T zeta = T(2.0) * x * sqrt(x) / T(3.0); + + T an = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + an = an * (T(1.0) / zeta) + AN[index]; + } + + T ad = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + ad = ad * (T(1.0) / zeta) + AD[index]; + } + + ai = T(5.64189583547756286948e-01) * (an / ad) / (T(2.0) * sqrt(sqrt(x)) * exp(zeta)); + + if (x > T(8.3203353)) { + return ai; + } + } + + f = 1.0; + g = x; + k = 1.0; + + T m = 1.0; + T n = x; + T t = 1.0; + T z = x * x * x; + + while (t > T(1.11022302462515654042e-16)) { + m *= z; + k += T(1.0); + m /= k; + n *= z; + k += T(1.0); + n /= k; + m /= k; + f += m; + k += T(1.0); + n /= k; + g += n; + + t = abs(m / f); + } + + if ((domain_flag & 1) == 0) { + return T(0.355028053887817239260) * f - T(0.258819403792806798405) * g; + } + + return ai; + } // T airy_ai(T x) +); // airy_ai_string + +const auto bessel_j0_string = jiterator_stringify( + template + T bessel_j0_forward(T x) { + static const T PP[] = { + +7.96936729297347051624e-04, + +8.28352392107440799803e-02, + +1.23953371646414299388e+00, + +5.44725003058768775090e+00, + +8.74716500199817011941e+00, + +5.30324038235394892183e+00, + +9.99999999999999997821e-01, + }; + + static const T PQ[] = { + +9.24408810558863637013e-04, + +8.56288474354474431428e-02, + +1.25352743901058953537e+00, + +5.47097740330417105182e+00, + +8.76190883237069594232e+00, + +5.30605288235394617618e+00, + +1.00000000000000000218e+00, + }; + + static const T QP[] = { + -1.13663838898469149931e-02, + -1.28252718670509318512e+00, + -1.95539544257735972385e+01, + -9.32060152123768231369e+01, + -1.77681167980488050595e+02, + -1.47077505154951170175e+02, + -5.14105326766599330220e+01, + -6.05014350600728481186e+00, + }; + + static const T QQ[] = { + +6.43178256118178023184e+01, + +8.56430025976980587198e+02, + +3.88240183605401609683e+03, + +7.24046774195652478189e+03, + +5.93072701187316984827e+03, + +2.06209331660327847417e+03, + +2.42005740240291393179e+02, + }; + + static const T RP[] = { + -4.79443220978201773821e+09, + +1.95617491946556577543e+12, + -2.49248344360967716204e+14, + +9.70862251047306323952e+15, + }; + + static const T RQ[] = { + +4.99563147152651017219e+02, + +1.73785401676374683123e+05, + +4.84409658339962045305e+07, + +1.11855537045356834862e+10, + +2.11277520115489217587e+12, + +3.10518229857422583814e+14, + +3.18121955943204943306e+16, + +1.71086294081043136091e+18, + }; + + if (x < T(0)) { + x = -x; + } + + if (x <= T(5.0)) { + if (x < T(0.00001)) { + return T(1.0) - x * x / T(4.0); + } + + T rp = 0.0; + + for (uint8_t index = 0; index <= 3; index++) { + rp = rp * (x * x) + RP[index]; + } + + T rq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + rq = rq * (x * x) + RQ[index]; + } + + return (x * x - T(5.78318596294678452118e+00)) * (x * x - T(3.04712623436620863991e+01)) * rp / rq; + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(25.0) / (x * x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(25.0) / (x * x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(25.0) / (x * x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(25.0) / (x * x)) + QQ[index]; + } + + 
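+      // Editor's annotation: 0.785398... below is pi/4 and 0.797884... is
+      // sqrt(2/pi); the return is the standard Hankel asymptotic form
+      // J0(x) ~ sqrt(2/(pi x)) * (P(25/x^2) cos(x - pi/4)
+      //                           - (5/x) Q(25/x^2) sin(x - pi/4)).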
return (pp / pq * cos(x - T(0.785398163397448309615660845819875721)) - T(5.0) / x * (qp / qq) * sin(x - T(0.785398163397448309615660845819875721))) * T(0.797884560802865355879892119868763737) / sqrt(x);
+    } // bessel_j0_forward(T x)
+); // bessel_j0_string
+
+const auto bessel_y0_string = bessel_j0_string + jiterator_stringify(
+    template <typename T>
+    T bessel_y0_forward(T x) {
+        static const T PP[] = {
+            +7.96936729297347051624e-04,
+            +8.28352392107440799803e-02,
+            +1.23953371646414299388e+00,
+            +5.44725003058768775090e+00,
+            +8.74716500199817011941e+00,
+            +5.30324038235394892183e+00,
+            +9.99999999999999997821e-01,
+        };
+
+        static const T PQ[] = {
+            +9.24408810558863637013e-04,
+            +8.56288474354474431428e-02,
+            +1.25352743901058953537e+00,
+            +5.47097740330417105182e+00,
+            +8.76190883237069594232e+00,
+            +5.30605288235394617618e+00,
+            +1.00000000000000000218e+00,
+        };
+
+        static const T QP[] = {
+            -1.13663838898469149931e-02,
+            -1.28252718670509318512e+00,
+            -1.95539544257735972385e+01,
+            -9.32060152123768231369e+01,
+            -1.77681167980488050595e+02,
+            -1.47077505154951170175e+02,
+            -5.14105326766599330220e+01,
+            -6.05014350600728481186e+00,
+        };
+
+        static const T QQ[] = {
+            +6.43178256118178023184e+01,
+            +8.56430025976980587198e+02,
+            +3.88240183605401609683e+03,
+            +7.24046774195652478189e+03,
+            +5.93072701187316984827e+03,
+            +2.06209331660327847417e+03,
+            +2.42005740240291393179e+02,
+        };
+
+        static const T YP[] = {
+            +1.55924367855235737965e+04,
+            -1.46639295903971606143e+07,
+            +5.43526477051876500413e+09,
+            -9.82136065717911466409e+11,
+            +8.75906394395366999549e+13,
+            -3.46628303384729719441e+15,
+            +4.42733268572569800351e+16,
+            -1.84950800436986690637e+16,
+        };
+
+        static const T YQ[] = {
+            +1.04128353664259848412e+03,
+            +6.26107330137134956842e+05,
+            +2.68919633393814121987e+08,
+            +8.64002487103935000337e+10,
+            +2.02979612750105546709e+13,
+            +3.17157752842975028269e+15,
+            +2.50596256172653059228e+17,
+        };
+
+        if (x <= T(5.0)) {
+            if (x == T(0.0)) {
+                return NEG_INFINITY;
+            }
+
+            if (x < T(0.0)) {
+                return NAN;
+            }
+
+            T yp = 0.0;
+
+            for (uint8_t index = 0; index <= 7; index++) {
+                yp = yp * (x * x) + YP[index];
+            }
+
+            T yq = 0.0;
+
+            for (uint8_t index = 0; index <= 6; index++) {
+                yq = yq * (x * x) + YQ[index];
+            }
+
+            return yp / yq + (T(0.636619772367581343075535053490057448) * log(x) * bessel_j0_forward(x));
+        }
+
+        T pp = 0.0;
+
+        for (uint8_t index = 0; index <= 6; index++) {
+            pp = pp * (T(25.0) / (x * x)) + PP[index];
+        }
+
+        T pq = 0.0;
+
+        for (uint8_t index = 0; index <= 6; index++) {
+            pq = pq * (T(25.0) / (x * x)) + PQ[index];
+        }
+
+        T qp = 0.0;
+
+        for (uint8_t index = 0; index <= 7; index++) {
+            qp = qp * (T(25.0) / (x * x)) + QP[index];
+        }
+
+        T qq = 0.0;
+
+        for (uint8_t index = 0; index <= 6; index++) {
+            qq = qq * (T(25.0) / (x * x)) + QQ[index];
+        }
+
+        return (pp / pq * sin(x - T(0.785398163397448309615660845819875721)) + T(5.0) / x * (qp / qq) * cos(x - T(0.785398163397448309615660845819875721))) * T(0.797884560802865355879892119868763737) / sqrt(x);
+    } // bessel_y0_forward(T x)
+); // bessel_y0_string
+
+const auto bessel_j1_string = jiterator_stringify(
+    template <typename T>
+    T bessel_j1_forward(T x) {
+        static const T PP[] = {
+            +7.62125616208173112003e-04,
+            +7.31397056940917570436e-02,
+            +1.12719608129684925192e+00,
+            +5.11207951146807644818e+00,
+            +8.42404590141772420927e+00,
+            +5.21451598682361504063e+00,
+            +1.00000000000000000254e+00,
+        };
+
+        static const T PQ[] = {
+            +5.71323128072548699714e-04,
+            +6.88455908754495404082e-02,
+            +1.10514232634061696926e+00,
+5.07386386128601488557e+00, + +8.39985554327604159757e+00, + +5.20982848682361821619e+00, + +9.99999999999999997461e-01, + }; + + static const T QP[] = { + +5.10862594750176621635e-02, + +4.98213872951233449420e+00, + +7.58238284132545283818e+01, + +3.66779609360150777800e+02, + +7.10856304998926107277e+02, + +5.97489612400613639965e+02, + +2.11688757100572135698e+02, + +2.52070205858023719784e+01, + }; + + static const T QQ[] = { + +7.42373277035675149943e+01, + +1.05644886038262816351e+03, + +4.98641058337653607651e+03, + +9.56231892404756170795e+03, + +7.99704160447350683650e+03, + +2.82619278517639096600e+03, + +3.36093607810698293419e+02, + }; + + static const T RP[] = { + -8.99971225705559398224e+08, + +4.52228297998194034323e+11, + -7.27494245221818276015e+13, + +3.68295732863852883286e+15, + }; + + static const T RQ[] = { + +6.20836478118054335476e+02, + +2.56987256757748830383e+05, + +8.35146791431949253037e+07, + +2.21511595479792499675e+10, + +4.74914122079991414898e+12, + +7.84369607876235854894e+14, + +8.95222336184627338078e+16, + +5.32278620332680085395e+18, + }; + + if (x < T(0.0)) { + return -bessel_j1_forward(-x); + } + + if (x <= T(5.0)) { + T rp = 0.0; + + for (uint8_t index = 0; index <= 3; index++) { + rp = rp * (x * x) + RP[index]; + } + + T rq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + rq = rq * (x * x) + RQ[index]; + } + + return rp / rq * x * (x * x - T(1.46819706421238932572e+01)) * (x * x - T(4.92184563216946036703e+01)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(5.0) / x * (T(5.0) / x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(5.0) / x * (T(5.0) / x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(5.0) / x * (T(5.0) / x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(5.0) / x * (T(5.0) / x)) + QQ[index]; + } + + return (pp / pq * cos(x - T(2.356194490192344928846982537459627163)) - T(5.0) / x * (qp / qq) * sin(x - T(2.356194490192344928846982537459627163))) * T(0.797884560802865355879892119868763737) / sqrt(x); + } // bessel_j1_forward(T x) +); // bessel_j1_string + +const auto bessel_y1_string = bessel_j1_string + jiterator_stringify( + template + T bessel_y1_forward(T x) { + static const T PP[] = { + +7.62125616208173112003e-04, + +7.31397056940917570436e-02, + +1.12719608129684925192e+00, + +5.11207951146807644818e+00, + +8.42404590141772420927e+00, + +5.21451598682361504063e+00, + +1.00000000000000000254e+00, + }; + + static const T PQ[] = { + +5.71323128072548699714e-04, + +6.88455908754495404082e-02, + +1.10514232634061696926e+00, + +5.07386386128601488557e+00, + +8.39985554327604159757e+00, + +5.20982848682361821619e+00, + +9.99999999999999997461e-01, + }; + + static const T QP[] = { + +5.10862594750176621635e-02, + +4.98213872951233449420e+00, + +7.58238284132545283818e+01, + +3.66779609360150777800e+02, + +7.10856304998926107277e+02, + +5.97489612400613639965e+02, + +2.11688757100572135698e+02, + +2.52070205858023719784e+01, + }; + + static const T QQ[] = { + +7.42373277035675149943e+01, + +1.05644886038262816351e+03, + +4.98641058337653607651e+03, + +9.56231892404756170795e+03, + +7.99704160447350683650e+03, + +2.82619278517639096600e+03, + +3.36093607810698293419e+02, + }; + + static const T YP[] = { + +1.26320474790178026440e+09, + -6.47355876379160291031e+11, + +1.14509511541823727583e+14, + -8.12770255501325109621e+15, + 
+2.02439475713594898196e+17, + -7.78877196265950026825e+17, + }; + + static const T YQ[] = { + +5.94301592346128195359e+02, + +2.35564092943068577943e+05, + +7.34811944459721705660e+07, + +1.87601316108706159478e+10, + +3.88231277496238566008e+12, + +6.20557727146953693363e+14, + +6.87141087355300489866e+16, + +3.97270608116560655612e+18, + }; + + if (x <= T(5.0)) { + if (x == T(0.0)) { + return NEG_INFINITY; + } + + if (x <= T(0.0)) { + return NAN; + } + + T yp = 0.0; + + for (uint8_t index = 0; index <= 5; index++) { + yp = yp * (x * x) + YP[index]; + } + + T yq = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + yq = yq * (x * x) + YQ[index]; + } + + return x * (yp / yq) + (T(0.636619772367581343075535053490057448) * (bessel_j1_forward(x) * log(x) - T(1.0) / x)); + } + + T pp = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pp = pp * (T(5.0) / x * (T(5.0) / x)) + PP[index]; + } + + T pq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + pq = pq * (T(5.0) / x * (T(5.0) / x)) + PQ[index]; + } + + T qp = 0.0; + + for (uint8_t index = 0; index <= 7; index++) { + qp = qp * (T(5.0) / x * (T(5.0) / x)) + QP[index]; + } + + T qq = 0.0; + + for (uint8_t index = 0; index <= 6; index++) { + qq = qq * (T(5.0) / x * (T(5.0) / x)) + QQ[index]; + } + + return (pp / pq * sin(x - T(2.356194490192344928846982537459627163)) + T(5.0) / x * (qp / qq) * cos(x - T(2.356194490192344928846982537459627163))) * T(0.797884560802865355879892119868763737) / sqrt(x); + } // bessel_y1_forward(T x) +); // bessel_y1_string + +const auto chebyshev_polynomial_t_string = jiterator_stringify( + template + T chebyshev_polynomial_t_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 6) && (abs(x) < T(1.0))) { + return cos(n * acos(x)); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; + } // chebyshev_polynomial_t_forward(T x, int64_t n) + + template + T chebyshev_polynomial_t_forward(T x, T n) { + return chebyshev_polynomial_t_forward(x, static_cast(n)); + } // chebyshev_polynomial_t_forward(T x, T n) +); // chebyshev_polynomial_t_string + +const auto chebyshev_polynomial_u_string = jiterator_stringify( + template + T chebyshev_polynomial_u_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return n + 1; + } + + return -(n + 1); + } + + if ((n > 8) && (abs(x) < T(1.0))) { + if (sin(acos(x)) != T(0.0)) { + return sin((n + 1) * acos(x)) / sin(acos(x)); + } + + return (n + 1) * cos((n + 1) * acos(x)) / x; + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x; + } + + T p = T(1.0); + T q = x + x; + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; + } // chebyshev_polynomial_u_forward(T x, int64_t n) + + template + T chebyshev_polynomial_u_forward(T x, T n) { + return chebyshev_polynomial_u_forward(x, static_cast(n)); + } // chebyshev_polynomial_u_forward(T x, T n) +); // chebyshev_polynomial_u_string + +const auto chebyshev_polynomial_v_string = jiterator_stringify( + template + T chebyshev_polynomial_v_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (abs(x) == T(1.0)) { + if (x > T(0.0)) { + return T(1.0); + } + + if (n % 2 == 0) { + return n + n 
+ 1; + } + + return -(n + n + 1); + } + + if ((n > 8) && (abs(x) < T(1.0))) { + if (sin(acos(x) / T(2.0)) != T(1.0)) { + return cos((n + T(0.5)) * acos(x)) / cos(acos(x) / T(2.0)); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; + } // chebyshev_polynomial_v_forward(T x, int64_t n) + + template + T chebyshev_polynomial_v_forward(T x, T n) { + return chebyshev_polynomial_v_forward(x, static_cast(n)); + } // chebyshev_polynomial_v_forward(T x, T n) +); // chebyshev_polynomial_v_string + +const auto chebyshev_polynomial_w_string = jiterator_stringify( + template + T chebyshev_polynomial_w_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (abs(x) == T(1.0)) { + if (x > T(0.0)) { + return n + n + 1; + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 8) && (abs(x) < T(1.0))) { + if (cos(acos(x) / T(2.0)) != T(1.0)) { + return sin((n + T(0.5)) * acos(x)) / sin(acos(x) / T(2.0)); + } + + if (x > T(0.0)) { + return n + n + 1; + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x + T(1.0); + } + + T p = T(1.0); + T q = x + x + T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x) * q - p; + p = q; + q = r; + } + + return r; + } // chebyshev_polynomial_w_forward(T x, int64_t n) + + template + T chebyshev_polynomial_w_forward(T x, T n) { + return chebyshev_polynomial_w_forward(x, static_cast(n)); + } // chebyshev_polynomial_w_forward(T x, T n) +); // chebyshev_polynomial_w_string + +const auto hermite_polynomial_h_string = jiterator_stringify( + template + T hermite_polynomial_h_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x; + } + + T p = T(1.0); + T q = x + x; + T r; + + for (int64_t k = 2; k < n + n; k += 2) { + r = (x + x) * q - k * p; + p = q; + q = r; + } + + return r; + } // hermite_polynomial_h_forward(T x, int64_t n) + + template + T hermite_polynomial_h_forward(T x, T n) { + return hermite_polynomial_h_forward(x, static_cast(n)); + } // hermite_polynomial_h_forward(T x, T n) +); // hermite_polynomial_h_string + +const auto hermite_polynomial_he_string = jiterator_stringify( + template + T hermite_polynomial_he_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = x * q - k * p; + p = q; + q = r; + } + + return r; + } // hermite_polynomial_he_forward(T x, int64_t n) + + template + T hermite_polynomial_he_forward(T x, T n) { + return hermite_polynomial_he_forward(x, static_cast(n)); + } // hermite_polynomial_he_forward(T x, T n) +); // hermite_polynomial_he_string + +const auto laguerre_polynomial_l_string = jiterator_stringify( + template + T laguerre_polynomial_l_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (abs(x) == T(0.0)) { + return T(1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return T(1.0) - x; + } + + T p = T(1.0); + T q = T(1.0) - x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = (((k + k) + (T(1.0) - x)) * q - k * p) / (k + 1); + p = q; + q = r; + } + + return r; + } // 
laguerre_polynomial_l_forward(T x, int64_t n) + + template + T laguerre_polynomial_l_forward(T x, T n) { + return laguerre_polynomial_l_forward(x, static_cast(n)); + } // laguerre_polynomial_l_forward(T x, T n) +); // laguerre_polynomial_l_string + +const auto legendre_polynomial_p_string = jiterator_stringify( + template + T legendre_polynomial_p_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (abs(x) == T(1.0)) { + if (x > T(0.0) || n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x; + } + + T p = T(1.0); + T q = x; + T r; + + for (int64_t k = 1; k < n; k++) { + r = ((k + k + 1) * x * q - k * p) / (k + 1); + p = q; + q = r; + } + + return r; + } // legendre_polynomial_p_forward(T x, int64_t n) + + template + T legendre_polynomial_p_forward(T x, T n) { + return legendre_polynomial_p_forward(x, static_cast(n)); + } // legendre_polynomial_p_forward(T x, T n) +); // legendre_polynomial_p_string + +const auto modified_bessel_i0_string = jiterator_stringify( + template + T modified_bessel_i0_forward(T x) { + static const T A[] = { + -4.41534164647933937950e-18, + +3.33079451882223809783e-17, + -2.43127984654795469359e-16, + +1.71539128555513303061e-15, + -1.16853328779934516808e-14, + +7.67618549860493561688e-14, + -4.85644678311192946090e-13, + +2.95505266312963983461e-12, + -1.72682629144155570723e-11, + +9.67580903537323691224e-11, + -5.18979560163526290666e-10, + +2.65982372468238665035e-09, + -1.30002500998624804212e-08, + +6.04699502254191894932e-08, + -2.67079385394061173391e-07, + +1.11738753912010371815e-06, + -4.41673835845875056359e-06, + +1.64484480707288970893e-05, + -5.75419501008210370398e-05, + +1.88502885095841655729e-04, + -5.76375574538582365885e-04, + +1.63947561694133579842e-03, + -4.32430999505057594430e-03, + +1.05464603945949983183e-02, + -2.37374148058994688156e-02, + +4.93052842396707084878e-02, + -9.49010970480476444210e-02, + +1.71620901522208775349e-01, + -3.04682672343198398683e-01, + +6.76795274409476084995e-01, + }; + + static const T B[] = { + -7.23318048787475395456e-18, + -4.83050448594418207126e-18, + +4.46562142029675999901e-17, + +3.46122286769746109310e-17, + -2.82762398051658348494e-16, + -3.42548561967721913462e-16, + +1.77256013305652638360e-15, + +3.81168066935262242075e-15, + -9.55484669882830764870e-15, + -4.15056934728722208663e-14, + +1.54008621752140982691e-14, + +3.85277838274214270114e-13, + +7.18012445138366623367e-13, + -1.79417853150680611778e-12, + -1.32158118404477131188e-11, + -3.14991652796324136454e-11, + +1.18891471078464383424e-11, + +4.94060238822496958910e-10, + +3.39623202570838634515e-09, + +2.26666899049817806459e-08, + +2.04891858946906374183e-07, + +2.89137052083475648297e-06, + +6.88975834691682398426e-05, + +3.36911647825569408990e-03, + +8.04490411014108831608e-01, + }; + + T p; + T q = 0.0; + + if (abs(x) <= T(8.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 30; index++) { + p = q; + q = a; + a = ((abs(x) / T(2.0)) - T(2.0)) * q - p + A[index]; + } + + return exp(abs(x)) * (T(0.5) * (a - p)); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(32.0) / abs(x) - T(2.0)) * q - p + B[index]; + } + + return exp(abs(x)) * (T(0.5) * (b - p)) / sqrt(abs(x)); + } // modified_bessel_i0_forward(T x) +); // modified_bessel_i0_string + +const auto modified_bessel_i1_string = jiterator_stringify( + template + T modified_bessel_i1_forward(T x) { + static const T A[] = { + 
+2.77791411276104639959e-18, + -2.11142121435816608115e-17, + +1.55363195773620046921e-16, + -1.10559694773538630805e-15, + +7.60068429473540693410e-15, + -5.04218550472791168711e-14, + +3.22379336594557470981e-13, + -1.98397439776494371520e-12, + +1.17361862988909016308e-11, + -6.66348972350202774223e-11, + +3.62559028155211703701e-10, + -1.88724975172282928790e-09, + +9.38153738649577178388e-09, + -4.44505912879632808065e-08, + +2.00329475355213526229e-07, + -8.56872026469545474066e-07, + +3.47025130813767847674e-06, + -1.32731636560394358279e-05, + +4.78156510755005422638e-05, + -1.61760815825896745588e-04, + +5.12285956168575772895e-04, + -1.51357245063125314899e-03, + +4.15642294431288815669e-03, + -1.05640848946261981558e-02, + +2.47264490306265168283e-02, + -5.29459812080949914269e-02, + +1.02643658689847095384e-01, + -1.76416518357834055153e-01, + +2.52587186443633654823e-01, + }; + + static const T B[] = { + +7.51729631084210481353e-18, + +4.41434832307170791151e-18, + -4.65030536848935832153e-17, + -3.20952592199342395980e-17, + +2.96262899764595013876e-16, + +3.30820231092092828324e-16, + -1.88035477551078244854e-15, + -3.81440307243700780478e-15, + +1.04202769841288027642e-14, + +4.27244001671195135429e-14, + -2.10154184277266431302e-14, + -4.08355111109219731823e-13, + -7.19855177624590851209e-13, + +2.03562854414708950722e-12, + +1.41258074366137813316e-11, + +3.25260358301548823856e-11, + -1.89749581235054123450e-11, + -5.58974346219658380687e-10, + -3.83538038596423702205e-09, + -2.63146884688951950684e-08, + -2.51223623787020892529e-07, + -3.88256480887769039346e-06, + -1.10588938762623716291e-04, + -9.76109749136146840777e-03, + +7.78576235018280120474e-01, + }; + + T p; + T q = 0.0; + + if (abs(x) <= T(8.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 29; index++) { + p = q; + q = a; + a = ((abs(x) / T(2.0)) - T(2.0)) * q - p + A[index]; + } + + if (x < T(0.0)) { + return -(T(0.5) * (a - p) * abs(x) * exp(abs(x))); + } + + return T(0.5) * (a - p) * abs(x) * exp(abs(x)); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(32.0) / abs(x) - T(2.0)) * q - p + B[index]; + } + + if (x < T(0.0)) { + return -(exp(abs(x)) * (T(0.5) * (b - p)) / sqrt(abs(x))); + } + + return exp(abs(x)) * (T(0.5) * (b - p)) / sqrt(abs(x)); + } // modified_bessel_i1_forward(T x) +); // modified_bessel_i1_string + +const auto modified_bessel_k0_string = modified_bessel_i0_string + jiterator_stringify( + template + T modified_bessel_k0_forward(T x) { + static const T A[] = { + +1.37446543561352307156e-16, + +4.25981614279661018399e-14, + +1.03496952576338420167e-11, + +1.90451637722020886025e-09, + +2.53479107902614945675e-07, + +2.28621210311945178607e-05, + +1.26461541144692592338e-03, + +3.59799365153615016266e-02, + +3.44289899924628486886e-01, + -5.35327393233902768720e-01, + }; + + static const T B[] = { + +5.30043377268626276149e-18, + -1.64758043015242134646e-17, + +5.21039150503902756861e-17, + -1.67823109680541210385e-16, + +5.51205597852431940784e-16, + -1.84859337734377901440e-15, + +6.34007647740507060557e-15, + -2.22751332699166985548e-14, + +8.03289077536357521100e-14, + -2.98009692317273043925e-13, + +1.14034058820847496303e-12, + -4.51459788337394416547e-12, + +1.85594911495471785253e-11, + -7.95748924447710747776e-11, + +3.57739728140030116597e-10, + -1.69753450938905987466e-09, + +8.57403401741422608519e-09, + -4.66048989768794782956e-08, + +2.76681363944501510342e-07, + -1.83175552271911948767e-06, + 
+1.39498137188764993662e-05, + -1.28495495816278026384e-04, + +1.56988388573005337491e-03, + -3.14481013119645005427e-02, + +2.44030308206595545468e+00, + }; + + if (x == T(0.0)) { + return INFINITY; + } + + if (x < T(0.0)) { + return NAN; + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 10; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return T(0.5) * (a - p) - log(0.5 * x) * modified_bessel_i0_forward(x); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return exp(-x) * (T(0.5) * (b - p)) / sqrt(x); + } // modified_bessel_k0_forward(T x) +); // modified_bessel_k0_string + +const auto scaled_modified_bessel_k0_string = modified_bessel_i0_string + jiterator_stringify( + template + T scaled_modified_bessel_k0_forward(T x) { + static const T A[] = { + +1.37446543561352307156e-16, + +4.25981614279661018399e-14, + +1.03496952576338420167e-11, + +1.90451637722020886025e-09, + +2.53479107902614945675e-07, + +2.28621210311945178607e-05, + +1.26461541144692592338e-03, + +3.59799365153615016266e-02, + +3.44289899924628486886e-01, + -5.35327393233902768720e-01, + }; + + static const T B[] = { + +5.30043377268626276149e-18, + -1.64758043015242134646e-17, + +5.21039150503902756861e-17, + -1.67823109680541210385e-16, + +5.51205597852431940784e-16, + -1.84859337734377901440e-15, + +6.34007647740507060557e-15, + -2.22751332699166985548e-14, + +8.03289077536357521100e-14, + -2.98009692317273043925e-13, + +1.14034058820847496303e-12, + -4.51459788337394416547e-12, + +1.85594911495471785253e-11, + -7.95748924447710747776e-11, + +3.57739728140030116597e-10, + -1.69753450938905987466e-09, + +8.57403401741422608519e-09, + -4.66048989768794782956e-08, + +2.76681363944501510342e-07, + -1.83175552271911948767e-06, + +1.39498137188764993662e-05, + -1.28495495816278026384e-04, + +1.56988388573005337491e-03, + -3.14481013119645005427e-02, + +2.44030308206595545468e+00, + }; + + if (x == T(0.0)) { + return INFINITY; + } + + if (x < T(0.0)) { + return NAN; + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 10; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return (T(0.5) * (a - p) - log(T(0.5) * x) * modified_bessel_i0_forward(x)) * exp(x); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return T(0.5) * (b - p) / sqrt(x); + } // T scaled_modified_bessel_k0_forward(T x) +); // scaled_modified_bessel_k0_string + +const auto modified_bessel_k1_string = modified_bessel_i1_string + jiterator_stringify( + template + T modified_bessel_k1_forward(T x) { + static const T A[] = { + -7.02386347938628759343e-18, + -2.42744985051936593393e-15, + -6.66690169419932900609e-13, + -1.41148839263352776110e-10, + -2.21338763073472585583e-08, + -2.43340614156596823496e-06, + -1.73028895751305206302e-04, + -6.97572385963986435018e-03, + -1.22611180822657148235e-01, + -3.53155960776544875667e-01, + +1.52530022733894777053e+00, + }; + + static const T B[] = { + -5.75674448366501715755e-18, + +1.79405087314755922667e-17, + -5.68946255844285935196e-17, + +1.83809354436663880070e-16, + -6.05704724837331885336e-16, + +2.03870316562433424052e-15, + -7.01983709041831346144e-15, + +2.47715442448130437068e-14, + -8.97670518232499435011e-14, + +3.34841966607842919884e-13, + 
-1.28917396095102890680e-12, + +5.13963967348173025100e-12, + -2.12996783842756842877e-11, + +9.21831518760500529508e-11, + -4.19035475934189648750e-10, + +2.01504975519703286596e-09, + -1.03457624656780970260e-08, + +5.74108412545004946722e-08, + -3.50196060308781257119e-07, + +2.40648494783721712015e-06, + -1.93619797416608296024e-05, + +1.95215518471351631108e-04, + -2.85781685962277938680e-03, + +1.03923736576817238437e-01, + +2.72062619048444266945e+00, + }; + + if (x == T(0.0)) { + return INFINITY; + } + + if (x < T(0.0)) { + return NAN; + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 11; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return log(T(0.5) * x) * modified_bessel_i1_forward(x) + T(0.5) * (a - p) / x; + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return exp(-x) * (T(0.5) * (b - p)) / sqrt(x); + } // modified_bessel_k1_forward(T x) +); // modified_bessel_k1_string + +const auto scaled_modified_bessel_k1_string = modified_bessel_i1_string + jiterator_stringify( + template + T scaled_modified_bessel_k1_forward(T x) { + static const T A[] = { + -7.02386347938628759343e-18, + -2.42744985051936593393e-15, + -6.66690169419932900609e-13, + -1.41148839263352776110e-10, + -2.21338763073472585583e-08, + -2.43340614156596823496e-06, + -1.73028895751305206302e-04, + -6.97572385963986435018e-03, + -1.22611180822657148235e-01, + -3.53155960776544875667e-01, + +1.52530022733894777053e+00, + }; + + static const T B[] = { + -5.75674448366501715755e-18, + +1.79405087314755922667e-17, + -5.68946255844285935196e-17, + +1.83809354436663880070e-16, + -6.05704724837331885336e-16, + +2.03870316562433424052e-15, + -7.01983709041831346144e-15, + +2.47715442448130437068e-14, + -8.97670518232499435011e-14, + +3.34841966607842919884e-13, + -1.28917396095102890680e-12, + +5.13963967348173025100e-12, + -2.12996783842756842877e-11, + +9.21831518760500529508e-11, + -4.19035475934189648750e-10, + +2.01504975519703286596e-09, + -1.03457624656780970260e-08, + +5.74108412545004946722e-08, + -3.50196060308781257119e-07, + +2.40648494783721712015e-06, + -1.93619797416608296024e-05, + +1.95215518471351631108e-04, + -2.85781685962277938680e-03, + +1.03923736576817238437e-01, + +2.72062619048444266945e+00, + }; + + if (x == T(0.0)) { + return INFINITY; + } + + if (x < T(0.0)) { + return NAN; + } + + T p; + T q = 0.0; + + if (x <= T(2.0)) { + T a = A[0]; + + for (uint8_t index = 1; index < 11; index++) { + p = q; + q = a; + a = (x * x - T(2.0)) * q - p + A[index]; + } + + return (log(T(0.5) * x) * modified_bessel_i1_forward(x) + T(0.5) * (a - p) / x) * exp(x); + } + + T b = B[0]; + + for (uint8_t index = 1; index < 25; index++) { + p = q; + q = b; + b = (T(8.0) / x - T(2.0)) * q - p + B[index]; + } + + return (T(0.5) * (b - p) / sqrt(x)); + } // T scaled_modified_bessel_k1_forward(T x) +); // scaled_modified_bessel_k1_string + +const auto shifted_chebyshev_polynomial_t_string = jiterator_stringify( + template + T shifted_chebyshev_polynomial_t_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return T(1.0); + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 6) && (abs(x + x - T(1.0)) < T(1.0))) { + return cos(n * acos(x + x - T(1.0))); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0); + } + + T p = T(1.0); + T q 
= x + x - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; + } // shifted_chebyshev_polynomial_t_forward(T x, int64_t n) + + template + T shifted_chebyshev_polynomial_t_forward(T x, T n) { + return shifted_chebyshev_polynomial_t_forward(x, static_cast(n)); + } // shifted_chebyshev_polynomial_t_forward(T x, T n) +); // shifted_chebyshev_polynomial_t_string + +const auto shifted_chebyshev_polynomial_u_string = jiterator_stringify( + template + T shifted_chebyshev_polynomial_u_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return n + 1; + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return n + 1; + } + + return -(n + 1); + } + + if ((n > 6) && (abs(x + x - T(1.0)) < T(1.0))) { + if (sin(acos(x + x - T(1.0))) != T(0.0)) { + return sin((n + 1) * acos(x + x - T(1.0))) / sin(acos(x + x - T(1.0))); + } + + return (n + 1) * cos((n + 1) * acos(x + x - T(1.0))) / (x + x - T(1.0)); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; + } // shifted_chebyshev_polynomial_u_forward(T x, int64_t n) + + template + T shifted_chebyshev_polynomial_u_forward(T x, T n) { + return shifted_chebyshev_polynomial_u_forward(x, static_cast(n)); + } // shifted_chebyshev_polynomial_u_forward(T x, T n) +); // shifted_chebyshev_polynomial_u_string + +const auto shifted_chebyshev_polynomial_v_string = jiterator_stringify( + template + T shifted_chebyshev_polynomial_v_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return T(1.0); + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return (n + n + 1); + } + + return -(n + n + 1); + } + + if ((n > 6) && (abs(x + x - T(1.0)) < T(1.0))) { + if (sin(acos(x + x - T(1.0)) / T(2.0)) != T(1.0)) { + return cos(((n) + T(0.5)) * acos(x + x - T(1.0))) / cos(acos(x + x - T(1.0)) / T(2.0)); + } + + if (n % 2 == 0) { + return n + n + 1; + } + + return -(n + n + 1); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)) - T(1.0); + } + + T p = T(1.0); + T q = x + x - T(1.0) + (x + x - T(1.0)) - T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; + } // shifted_chebyshev_polynomial_v_forward(T x, int64_t n) + + template + T shifted_chebyshev_polynomial_v_forward(T x, T n) { + return shifted_chebyshev_polynomial_v_forward(x, static_cast(n)); + } // shifted_chebyshev_polynomial_v_forward(T x, T n) +); // shifted_chebyshev_polynomial_v_string + +const auto shifted_chebyshev_polynomial_w_string = jiterator_stringify( + template + T shifted_chebyshev_polynomial_w_forward(T x, int64_t n) { + if (n < 0) { + return T(0.0); + } + + if (x == T(1.0)) { + return n + n + 1; + } + + if (x == T(0.0)) { + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if ((n > 4) && (abs(x + x - T(1.0)) < T(1.0))) { + if (cos(acos(x + x - T(1.0)) / T(2.0)) != T(1.0)) { + return sin((n + T(0.5)) * acos(x + x - T(1.0))) / sin(acos(x + x - T(1.0)) / T(2.0)); + } + + if (n % 2 == 0) { + return T(1.0); + } + + return T(-1.0); + } + + if (n == 0) { + return T(1.0); + } + + if (n == 1) { + return x + x - T(1.0) + (x + x - T(1.0)) + T(1.0); + } + + T p = T(1.0); + 
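+        // Illustrative note (not from the upstream source): every
+        // "x + x - T(1.0)" in these shifted polynomials is y = 2x - 1, which
+        // maps [0, 1] onto [-1, 1]; the shifted polynomial is W_n(2x - 1), so
+        // the multiplier "(x + x - T(1.0)) + (x + x - T(1.0))" below is just
+        // the usual 2y from the recurrence W_{n+1}(y) = 2y * W_n(y) - W_{n-1}(y).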
T q = x + x - T(1.0) + (x + x - T(1.0)) + T(1.0); + T r; + + for (int64_t k = 2; k <= n; k++) { + r = (x + x - T(1.0) + (x + x - T(1.0))) * q - p; + p = q; + q = r; + } + + return r; + } // shifted_chebyshev_polynomial_w_forward(T x, int64_t n) + + template + T shifted_chebyshev_polynomial_w_forward(T x, T n) { + return shifted_chebyshev_polynomial_w_forward(x, static_cast(n)); + } // shifted_chebyshev_polynomial_w_forward(T x, T n) +); // shifted_chebyshev_polynomial_w_string + +const auto spherical_bessel_j0_string = jiterator_stringify( + template + T spherical_bessel_j0_forward(T x) { + if (isinf(x)) { + return T(0.0); + } + + if (abs(x) < T(0.5)) { + return T(1.0) + x * x * (T(-1.0) / T(6.0) + x * x * (T(1.0) / T(120.0) + x * x * (T(-1.0) / T(5040.0) + x * x * (T(1.0) / T(362880.0) + x * x * (T(-1.0) / T(39916800.0) + x * x * (T(1.0) / T(6227020800.0))))))); + } + + return sin(x) / x; + } // T spherical_bessel_j0_forward(T x) +); // spherical_bessel_j0_string + +#else // !AT_USE_JITERATOR() -- kernels must be precompiled + +template +static inline C10_HOST_DEVICE scalar_t calc_gcd(scalar_t a_in, scalar_t b_in) { + scalar_t a = ::abs(a_in); + scalar_t b = ::abs(b_in); + while (a != 0) { + scalar_t c = a; + a = b % a; + b = c; + } + return b; +} + +/* + * For licensing information, please refer to the the cpu implementation located in "ATen/native/Math.h". + */ +template +static inline C10_HOST_DEVICE scalar_t calc_digamma(scalar_t in) { + // [C++ Standard Reference: Gamma Function] https://en.cppreference.com/w/cpp/numeric/math/tgamma + using accscalar_t = at::acc_type; + static const double PI_f64 = 3.14159265358979323846; + const accscalar_t PSI_10 = 2.25175258906672110764; + const accscalar_t A[] = { + 8.33333333333333333333E-2, + -2.10927960927960927961E-2, + 7.57575757575757575758E-3, + -4.16666666666666666667E-3, + 3.96825396825396825397E-3, + -8.33333333333333333333E-3, + 8.33333333333333333333E-2, + }; + + accscalar_t x = static_cast(in); + if (x == 0) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is ±0, ±∞ is returned + return std::copysign(static_cast(INFINITY), -x); + } + + bool x_is_integer = x == ::trunc(x); + accscalar_t result = 0; + if (x < 0) { + if (x_is_integer) { + // As per C++ standard for gamma related functions and SciPy, + // If the argument is a negative integer, NaN is returned + return static_cast(NAN); + } + // Extracts the fractional part of x as r, since tan(pi * r) is more numerically + // accurate than tan(pi * x). While these operations are mathematically equivalent + // since both x and r are in radians and tan() has a periodicity of pi, in practice + // the computation of pi * x is a source of error (when |x| > 1). 
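+    // Worked form of the step below (an editorial sketch of the standard
+    // reflection identity, not anything specific to this file):
+    //   digamma(1 - x) - digamma(x) = pi / tan(pi * x)
+    // so a negative argument is folded to the positive value 1 - x via
+    //   result = -pi / tan(pi * r), with r = frac(x) from modf, then x <- 1 - x.
+    // Example: x = -0.5 gives r = -0.5, tan(-pi/2) diverges, the reflection
+    // term vanishes, and digamma(-0.5) = digamma(1.5).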
+ double q, r; + r = ::modf(static_cast(x), &q); + result = static_cast(- PI_f64 / ::tan(PI_f64 * r)); + x = 1 - x; + } + + while (x < 10) { + result -= 1 / x; + x += 1; + } + if (x == 10) { + return static_cast(result + PSI_10); + } + + accscalar_t y = 0; + if (x < 1.0e17) { + accscalar_t z = 1 / (x * x); + + accscalar_t polevl_result = 0; + for (int i = 0; i <= 6; i++) { + polevl_result = polevl_result * z + A[i]; + } + y = z * polevl_result; + } + + return static_cast(::log(x) - (static_cast(0.5) / x) - y + result); +} + +template +static inline C10_HOST_DEVICE scalar_t calc_trigamma(scalar_t in) { + using accscalar_t = at::acc_type; + const accscalar_t PI = 3.14159265358979323846; + accscalar_t x = static_cast(in); + accscalar_t sign = +1; + accscalar_t result = 0; + if (x < 0.5f) { + sign = -1; + accscalar_t sin_pi_x = ::sin(PI * x); + result -= (PI * PI) / (sin_pi_x * sin_pi_x); + x = 1 - x; + } + for (int i = 0; i < 6; ++i) { + result += 1 / (x * x); + x += 1; + } + const accscalar_t one = static_cast(1); + const accscalar_t ixx = 1 / (x*x); + result += (1 + 1 / (2*x) + ixx * (one/6 - ixx * (one/30 - ixx * (one/42)))) / x; + return static_cast(sign * result); +} + +/* + * For licensing information and documentation, please refer to the the cpu implementation located in "ATen/native/Math.h". + */ +template +static inline C10_HOST_DEVICE scalar_t +chbevl(scalar_t _x, const scalar_t array[], size_t len) { + static_assert(!std::is_same() && !std::is_same(), "don't instantiate with low precision type"); + + scalar_t b0, b1, b2; + + b0 = array[0]; + b1 = 0; + + for (size_t i = 1; i < len; ++i) { + b2 = b1; + b1 = b0; + b0 = _x * b1 - b2 + array[i]; + } + + return (0.5 * (b0 - b2)); +} + +/* + * For licensing information and documentation, please refer to the the cpu implementation located in "ATen/native/Math.h". + */ +template +C10_HOST_DEVICE inline std::tuple chebyshev_coefficients_i0e_A() { + /* Chebyshev coefficients for exp(-x) I0(x) + * in the interval [0,8]. + * + * lim(x->0){ exp(-x) I0(x) } = 1. + */ + static const T coefficients[] = { + -4.41534164647933937950E-18, 3.33079451882223809783E-17, + -2.43127984654795469359E-16, 1.71539128555513303061E-15, + -1.16853328779934516808E-14, 7.67618549860493561688E-14, + -4.85644678311192946090E-13, 2.95505266312963983461E-12, + -1.72682629144155570723E-11, 9.67580903537323691224E-11, + -5.18979560163526290666E-10, 2.65982372468238665035E-9, + -1.30002500998624804212E-8, 6.04699502254191894932E-8, + -2.67079385394061173391E-7, 1.11738753912010371815E-6, + -4.41673835845875056359E-6, 1.64484480707288970893E-5, + -5.75419501008210370398E-5, 1.88502885095841655729E-4, + -5.76375574538582365885E-4, 1.63947561694133579842E-3, + -4.32430999505057594430E-3, 1.05464603945949983183E-2, + -2.37374148058994688156E-2, 4.93052842396707084878E-2, + -9.49010970480476444210E-2, 1.71620901522208775349E-1, + -3.04682672343198398683E-1, 6.76795274409476084995E-1}; + + return std::make_tuple(coefficients, 30); +} + +template +C10_HOST_DEVICE inline std::tuple chebyshev_coefficients_i0e_B() { + /* Chebyshev coefficients for exp(-x) sqrt(x) I0(x) + * in the inverted interval [8,infinity]. + * + * lim(x->inf){ exp(-x) sqrt(x) I0(x) } = 1/sqrt(2pi). 
+ */ + static const T coefficients[] = { + -7.23318048787475395456E-18, -4.83050448594418207126E-18, + 4.46562142029675999901E-17, 3.46122286769746109310E-17, + -2.82762398051658348494E-16, -3.42548561967721913462E-16, + 1.77256013305652638360E-15, 3.81168066935262242075E-15, + -9.55484669882830764870E-15, -4.15056934728722208663E-14, + 1.54008621752140982691E-14, 3.85277838274214270114E-13, + 7.18012445138366623367E-13, -1.79417853150680611778E-12, + -1.32158118404477131188E-11, -3.14991652796324136454E-11, + 1.18891471078464383424E-11, 4.94060238822496958910E-10, + 3.39623202570838634515E-9, 2.26666899049817806459E-8, + 2.04891858946906374183E-7, 2.89137052083475648297E-6, + 6.88975834691682398426E-5, 3.36911647825569408990E-3, + 8.04490411014108831608E-1}; + + return std::make_tuple(coefficients, 25); +} + +template +static inline C10_HOST_DEVICE scalar_t calc_i0(scalar_t _x) { + static_assert(!std::is_same() && !std::is_same(), "don't instantiate with low precision type"); + // Upcast input for numerical accuracy purposes + // Needed for accurate results if input is bfloat16 or float16 + scalar_t x = ::abs(_x); + + if (x <= scalar_t{8.0}) { + auto coeff_pair = chebyshev_coefficients_i0e_A(); + auto A = std::get<0>(coeff_pair); + auto len = std::get<1>(coeff_pair); + scalar_t y = (x / scalar_t{2.0}) - scalar_t{2.0}; + return (::exp(x) * chbevl(y, A, len)); + } + + auto coeff_pair = chebyshev_coefficients_i0e_B(); + auto B = std::get<0>(coeff_pair); + auto len = std::get<1>(coeff_pair); + return (::exp(x) * chbevl(scalar_t{32.0} / x - scalar_t{2.0}, B, len) / ::sqrt(x)); +} + +template +C10_HOST_DEVICE inline + typename std::enable_if::value, std::tuple>::type + chebyshev_coefficients_i1e_A() { + /* Chebyshev coefficients for exp(-x) I1(x) + * in the interval [0,8]. + * + * lim(x->0){ exp(-x) I1(x) / x } = 1/2. + */ + static const T coefficients[] = { + 2.77791411276104639959E-18, -2.11142121435816608115E-17, + 1.55363195773620046921E-16, -1.10559694773538630805E-15, + 7.60068429473540693410E-15, -5.04218550472791168711E-14, + 3.22379336594557470981E-13, -1.98397439776494371520E-12, + 1.17361862988909016308E-11, -6.66348972350202774223E-11, + 3.62559028155211703701E-10, -1.88724975172282928790E-9, + 9.38153738649577178388E-9, -4.44505912879632808065E-8, + 2.00329475355213526229E-7, -8.56872026469545474066E-7, + 3.47025130813767847674E-6, -1.32731636560394358279E-5, + 4.78156510755005422638E-5, -1.61760815825896745588E-4, + 5.12285956168575772895E-4, -1.51357245063125314899E-3, + 4.15642294431288815669E-3, -1.05640848946261981558E-2, + 2.47264490306265168283E-2, -5.29459812080949914269E-2, + 1.02643658689847095384E-1, -1.76416518357834055153E-1, + 2.52587186443633654823E-1}; + + return std::make_tuple(coefficients, 29); +} + +template +C10_HOST_DEVICE inline + typename std::enable_if::value, std::tuple>::type + chebyshev_coefficients_i1e_A() { + /* Chebyshev coefficients for exp(-x) I1(x) + * in the interval [0,8]. + * + * lim(x->0){ exp(-x) I1(x) / x } = 1/2. 
+ */ + static const T coeff[] = { + 9.38153738649577178388E-9f, + -4.44505912879632808065E-8f, + 2.00329475355213526229E-7f, + -8.56872026469545474066E-7f, + 3.47025130813767847674E-6f, + -1.32731636560394358279E-5f, + 4.78156510755005422638E-5f, + -1.61760815825896745588E-4f, + 5.12285956168575772895E-4f, + -1.51357245063125314899E-3f, + 4.15642294431288815669E-3f, + -1.05640848946261981558E-2f, + 2.47264490306265168283E-2f, + -5.29459812080949914269E-2f, + 1.02643658689847095384E-1f, + -1.76416518357834055153E-1f, + 2.52587186443633654823E-1f}; + return std::make_tuple(coeff, 17); +}; + +template +C10_HOST_DEVICE inline + typename std::enable_if::value, std::tuple>::type + chebyshev_coefficients_i1e_B() { + /* Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + * in the inverted interval [8,infinity]. + * + * lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + */ + static const T coefficients[] = { + 7.51729631084210481353E-18, 4.41434832307170791151E-18, + -4.65030536848935832153E-17, -3.20952592199342395980E-17, + 2.96262899764595013876E-16, 3.30820231092092828324E-16, + -1.88035477551078244854E-15, -3.81440307243700780478E-15, + 1.04202769841288027642E-14, 4.27244001671195135429E-14, + -2.10154184277266431302E-14, -4.08355111109219731823E-13, + -7.19855177624590851209E-13, 2.03562854414708950722E-12, + 1.41258074366137813316E-11, 3.25260358301548823856E-11, + -1.89749581235054123450E-11, -5.58974346219658380687E-10, + -3.83538038596423702205E-9, -2.63146884688951950684E-8, + -2.51223623787020892529E-7, -3.88256480887769039346E-6, + -1.10588938762623716291E-4, -9.76109749136146840777E-3, + 7.78576235018280120474E-1}; + + return std::make_tuple(coefficients, 25); +} + +template +C10_HOST_DEVICE inline + typename std::enable_if::value, std::tuple>::type + chebyshev_coefficients_i1e_B() { + /* Chebyshev coefficients for exp(-x) sqrt(x) I1(x) + * in the inverted interval [8,infinity]. + * + * lim(x->inf){ exp(-x) sqrt(x) I1(x) } = 1/sqrt(2pi). + */ + static const T coeff[] = { + -3.83538038596423702205E-9f, + -2.63146884688951950684E-8f, + -2.51223623787020892529E-7f, + -3.88256480887769039346E-6f, + -1.10588938762623716291E-4f, + -9.76109749136146840777E-3f, + 7.78576235018280120474E-1f}; + + return std::make_tuple(coeff, 7); +}; + +template +static inline C10_HOST_DEVICE scalar_t calc_i1(scalar_t _x) { + const auto x = ::abs(_x); + if (x <= scalar_t{8.0}) { + auto coeff_pair = chebyshev_coefficients_i1e_A(); + auto A = std::get<0>(coeff_pair); + auto len = std::get<1>(coeff_pair); + scalar_t y = x / scalar_t{2.0} - scalar_t{2.0}; + const scalar_t out = ::exp(x) * x * chbevl(y, A, len); + return (_x < scalar_t{0.0}) ? -out : out; + } + + auto coeff_pair = chebyshev_coefficients_i1e_B(); + auto B = std::get<0>(coeff_pair); + auto len = std::get<1>(coeff_pair); + const scalar_t out = (::exp(x) * chbevl(scalar_t{32.0} / x - scalar_t{2.0}, B, len)) / ::sqrt(x); + return (_x < scalar_t{0.0}) ? -out : out; +} + +template +static inline C10_HOST_DEVICE scalar_t calc_i1e(scalar_t _x) { + const auto x = ::abs(_x); + if (x <= scalar_t{8.0}) { + auto coeff_pair = chebyshev_coefficients_i1e_A(); + auto A = std::get<0>(coeff_pair); + auto len = std::get<1>(coeff_pair); + const scalar_t y = x / scalar_t{2.0} - scalar_t{2.0}; + const scalar_t out = chbevl(y, A, len) * x; + return (_x < scalar_t{0.0}) ? 
-out : out; + } + + auto coeff_pair = chebyshev_coefficients_i1e_B(); + auto B = std::get<0>(coeff_pair); + auto len = std::get<1>(coeff_pair); + const scalar_t out = chbevl(scalar_t{32.0} / x - scalar_t{2.0}, B, len) / ::sqrt(x); + return (_x < scalar_t{0.0}) ? -out : out; +} + +#endif // AT_USE_JITERATOR() (this closes the "else" branch of a if/else preprocessor directive) + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/MemoryAccess.cuh b/voice_bridge/torch/include/ATen/native/cuda/MemoryAccess.cuh new file mode 100644 index 0000000000000000000000000000000000000000..355db3439d07b82a420d7a417a131963f473eb90 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/MemoryAccess.cuh @@ -0,0 +1,385 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// References: +// https://devblogs.nvidia.com/cuda-pro-tip-increase-performance-with-vectorized-memory-access/ + +namespace at { namespace native { namespace memory { + +namespace detail { + +// What does the `static_unroll` do? +// +// We want to do something like: +// +// using args_t = typename traits::ArgsTuple; +// args_t args; +// #pragma unroll +// for (int i = 0; i < traits::arity; i++) { +// std::get(args) = .... +// } +// +// but unfortunately the above code does not work because +// the template argument has to be a compile time constant +// so `static_unroll` is created to simulate `#pragma unroll` +// using template metaprogramming. + +template typename func, int end, int current=0> +struct static_unroll { + template + static inline C10_HOST_DEVICE void with_args(Args&&... args) { + func::apply(std::forward(args)...); + static_unroll::with_args(args...); + } +}; + +template typename func, int end> +struct static_unroll { + template + static inline C10_HOST_DEVICE void with_args(Args... 
args) {} +}; + +// helper structs to be used with static_unroll to load arguments +// one by one + +template +struct vectorized_load_helper { + template + static __device__ void apply(policy_t &self, args_t *args, int idx) { + using arg_t = std::tuple_element_t; + // `data` hold the data_ptr for tensors [output, input0, input1, ...], so we + // need a +1 offset to get the input + auto ptr = reinterpret_cast(self.data[arg_index + 1]) + block_work_size() * idx; + auto args_accessor = [&args] __device__ (int thread_unroll_idx) -> arg_t & { return std::get(args[thread_unroll_idx]); }; + self.load_single_arg(args_accessor, ptr); + } +}; + +template +struct unroll_load_helper { + template + static __device__ void apply(policy_t &self, args_t *args, offset_t offset, loader_t loader, int j, int num_outputs) { + using arg_t = std::tuple_element_t; + // `data` hold the data_ptr for tensors [output, input0, input1, ...], so we + // need a +1 offset to get the input + std::get(args[j]) = loader.template load(self.data[arg_index + num_outputs], offset[arg_index], arg_index); + } +}; + +template +struct multi_outputs_store_helper { + template + C10_HOST_DEVICE static void apply( + at::detail::Array data, + at::detail::Array offsets, + thrust::tuple ret) { + using T = typename thrust::tuple_element>::type; + T *to = reinterpret_cast(data[current]) + offsets[current]; + *to = thrust::get(ret); + } +}; + +} // namespace detail + +struct LoadWithoutCast { + template + __device__ scalar_t load(char *base_ptr, uint32_t offset, int arg) { + return c10::load(reinterpret_cast(base_ptr) + offset); + } +}; + +template +struct LoadWithCast { + using array_t = at::detail::Array(N, 1)>; + using size_array_t = at::detail::Array(N, 1)>; + + array_t dtypes; + size_array_t element_sizes; + + LoadWithCast(const TensorIteratorBase& iter) { + assert(iter.ninputs() == N); + #pragma unroll + for (auto i = 0; i < N; ++i) { + this->dtypes[i] = iter.dtype(i + iter.noutputs()); + element_sizes[i] = c10::elementSize(iter.dtype(i + iter.noutputs())); + } + } + + template + __device__ scalar_t load(char *base_ptr, uint32_t offset, int arg) { + void *ptr = base_ptr + element_sizes[arg] * offset; + return c10::fetch_and_cast(dtypes[arg], ptr); + } +}; + +struct StoreWithoutCast { + template + __device__ void store(scalar_t value, char *base_ptr, uint32_t offset, int arg = 0) { + *(reinterpret_cast(base_ptr) + offset) = value; + } +}; + +template +struct StoreWithCast { + using array_t = at::detail::Array(N, 1)>; + using size_array_t = at::detail::Array(N, 1)>; + + array_t dtypes; + size_array_t element_sizes; + + StoreWithCast(const TensorIteratorBase& iter) { + assert(iter.noutputs() == N); + #pragma unroll + for (auto i = 0; i < N; ++i) { + this->dtypes[i] = iter.dtype(i); + element_sizes[i] = c10::elementSize(iter.dtype(i)); + } + } + + template + __device__ void store(scalar_t value, char *base_ptr, uint32_t offset, int arg = 0) { + void *ptr = base_ptr + element_sizes[arg] * offset; + c10::cast_and_store(dtypes[arg], ptr, value); + } +}; + +// aligned vector generates vectorized load/store on CUDA +template +struct alignas(sizeof(scalar_t) * vec_size) aligned_vector { + scalar_t val[vec_size]; +}; + +template +__device__ aligned_vector load_vector(const scalar_t *base_ptr, uint32_t offset) { + using vec_t = aligned_vector; + auto *from = reinterpret_cast(base_ptr); + return from[offset]; +} + +template +__device__ aligned_vector load_vector(const bool *base_ptr, uint32_t offset) { + // See NOTE [Loading boolean values] + auto tmp 
= load_vector<vec_size>(reinterpret_cast<const uint8_t*>(base_ptr), offset);
+  aligned_vector<bool, vec_size> ret;
+  for (int i = 0; i < vec_size; ++i) {
+    ret.val[i] = bool(tmp.val[i]);
+  }
+  return ret;
+}
+
+namespace policies {
+
+// Assumption:
+// all tensors are contiguous, that is: stride == sizeof(type) for all tensors
+template<typename data_t, typename inp_calc_t, typename out_calc_t, typename loader_t, typename storer_t, int num_outputs = 1>
+struct unroll {
+
+  data_t data;
+  int remaining;
+  inp_calc_t input_offset_calculator;
+  out_calc_t output_offset_calculator;
+  loader_t loader;
+  storer_t storer;
+
+  __device__ unroll(data_t data, int remaining, inp_calc_t ic, out_calc_t oc, loader_t l, storer_t s):
+    data(data), remaining(remaining), input_offset_calculator(ic), output_offset_calculator(oc), loader(l), storer(s) {}
+
+  __device__ inline bool check_inbounds(int thread_work_elem) {
+    return ((threadIdx.x + thread_work_elem*num_threads()) < remaining);
+  }
+
+  template<typename args_t>
+  __device__ inline void load(args_t *args, int idx) {
+    constexpr int arity = std::tuple_size<args_t>::value;
+    int thread_idx = threadIdx.x;
+    #pragma unroll
+    for (int i = 0; i < thread_work_size(); i++) {
+      if (thread_idx >= remaining) {
+        return;
+      }
+      int linear_idx = thread_idx + block_work_size() * idx;
+      auto offset = input_offset_calculator.get(linear_idx);
+      detail::static_unroll<detail::unroll_load_helper, arity>::with_args(*this, args, offset, loader, i, num_outputs);
+      thread_idx += num_threads();
+    }
+  }
+
+  template<typename scalar_t>
+  __device__ inline void store(scalar_t *from, int idx) {
+    int thread_idx = threadIdx.x;
+    scalar_t *to = reinterpret_cast<scalar_t *>(data[0]) + block_work_size() * idx;
+    #pragma unroll
+    for (int i = 0; i < thread_work_size(); i++) {
+      if (thread_idx >= remaining) {
+        return;
+      }
+      int linear_idx = thread_idx + block_work_size() * idx;
+      int offset = output_offset_calculator.get(linear_idx)[0];
+      storer.store(from[i], data[0], offset);
+      thread_idx += num_threads();
+    }
+  }
+};
+
+// Assumption:
+// all tensors are contiguous, that is: stride == sizeof(type) for all tensors
+// Note:
+// Functions in the vectorized policy do not do boundary checks. It assumes the whole block
+// has its job to do. So the remainders should be handled by the caller manually.
+template <int vec_size, typename data_t>  // vec_size: number of scalars, can be 1, 2, or 4.
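+// Worked example (editorial note, assuming the usual thread_work_size() of 4):
+// with vec_size == 4, loop_size below is 1, so each thread moves its four
+// elements in a single 16-byte (float4-style) transaction; with vec_size == 2
+// it needs two 8-byte transactions. The alignas on aligned_vector earlier in
+// this header is what lets the compiler emit these vectorized accesses.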
+struct vectorized { + + static_assert(thread_work_size() % vec_size == 0, "The workload per thread must be a multiple of vec_size"); + static constexpr int loop_size = thread_work_size() / vec_size; + + data_t data; + + __device__ vectorized(data_t data) : data(data) {} + + __device__ inline constexpr bool check_inbounds(int thread_work_elem) { + return true; + } + + template + __device__ inline void load_single_arg(accessor_t to, scalar_t *from) { + int thread_idx = threadIdx.x; + #pragma unroll + for (int i = 0; i < loop_size; i++) { + int index = thread_idx + i * num_threads(); + auto v = load_vector(from, index); + #pragma unroll + for (int j = 0; j < vec_size; j++) { + to(vec_size * i + j) = v.val[j]; + } + } + } + + template + __device__ inline void load(args_t *args, int idx) { + constexpr int arity = std::tuple_size::value; + detail::static_unroll::with_args(*this, args, idx); + } + + template + __device__ inline void store(scalar_t *from, int idx) { + using vec_t = aligned_vector; + scalar_t *to = reinterpret_cast(data[0]) + block_work_size() * idx; + vec_t *to_ = reinterpret_cast(to); + int thread_idx = threadIdx.x; + #pragma unroll + for (int i = 0; i < loop_size; i++) { + int index = thread_idx + i * num_threads(); + vec_t v; + for (int j = 0; j < vec_size; j++) { + v.val[j] = from[vec_size * i + j]; + } + to_[index] = v; + } + } +}; + +template +struct multi_outputs_unroll { + //multi_outputs_unroll struct members and check_inbounds and load methods are copypasted from unroll struct + //we don't use inheritance because of compiler bug in cuda 10.2+ + data_t data; + int remaining; + inp_calc_t input_offset_calculator; + out_calc_t output_offset_calculator; + LoadWithoutCast loader; + StoreWithoutCast storer; + + __device__ multi_outputs_unroll(data_t data, int remaining, inp_calc_t ic, out_calc_t oc): + data(data), remaining(remaining), input_offset_calculator(ic), output_offset_calculator(oc) {} + + __device__ inline bool check_inbounds(int thread_work_elem) { + return ((threadIdx.x + thread_work_elem*num_threads()) < remaining); + } + + template + __device__ inline void load(args_t *args, int idx) { + constexpr int arity = std::tuple_size::value; + int thread_idx = threadIdx.x; + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (thread_idx >= remaining) { + return; + } + int linear_idx = thread_idx + block_work_size() * idx; + auto offset = input_offset_calculator.get(linear_idx); + detail::static_unroll::with_args(*this, args, offset, loader, i, num_outputs); + thread_idx += num_threads(); + } + } + + + template + __device__ inline void store(return_t *from, int idx) { + int thread_idx = threadIdx.x; + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (thread_idx >= this->remaining) { + return; + } + int linear_idx = thread_idx + block_work_size() * idx; + auto offsets = this->output_offset_calculator.get(linear_idx); + memory::detail::static_unroll::with_args(this->data, offsets, from[i]); + thread_idx += num_threads(); + } + } +}; + +} // namespace policies + +// This is only used in host, but we will wrap this into some templates +// which is C10_HOST_DEVICE, so we have to make this C10_HOST_DEVICE +// in order to compile +template +inline C10_HOST_DEVICE int can_vectorize_up_to(char *pointer) { + uint64_t address = reinterpret_cast(pointer); + constexpr int vec2_alignment = std::alignment_of>::value; + constexpr int vec4_alignment = std::alignment_of>::value; + if (address % vec4_alignment == 0) { + return 4; + } else if (address % 
vec2_alignment == 0) { + return 2; + } + return 1; +} + +template +struct can_vectorize_up_to_helper { + template + static C10_HOST_DEVICE void apply(int &result, array_t pointers, traits _) { + using arg_t = typename traits::template arg::type; + // `pointers` hold the data_ptr for tensors [output, input0, input1, ...], so we + // need a +1 offset to get the input + result = std::min(result, can_vectorize_up_to(pointers[i + 1])); + } +}; + +template +inline int can_vectorize_up_to(array_t pointers) { + using traits = function_traits; + using return_t = typename traits::result_type; + constexpr int arity = traits::arity; + int result = can_vectorize_up_to(pointers[0]); + // We need to get the type for each argument of `func_t`, this can only + // be done at compile time. + detail::static_unroll::with_args(result, pointers, traits()); + return result; +} + +}}} // namespace at::native::memory diff --git a/voice_bridge/torch/include/ATen/native/cuda/MiscUtils.h b/voice_bridge/torch/include/ATen/native/cuda/MiscUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..e616a7d1fcfb8254528dccc4e6b9d0658ffe1a3c --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/MiscUtils.h @@ -0,0 +1,32 @@ +#pragma once +#include +#include +#include +#include + +namespace at { +namespace native { + +static inline int cuda_int_cast(int64_t value, const char* varname) { + auto result = static_cast(value); + TORCH_CHECK(static_cast(result) == value, + "cuda_int_cast: The value of ", varname, "(", (long long)value, + ") is too large to fit into a int (", sizeof(int), " bytes)"); + return result; +} + +// Creates an array of size elements of type T, backed by pinned memory +// wrapped in a Storage +template +static inline Storage pin_memory(int64_t size) { + auto* allocator = cuda::getPinnedMemoryAllocator(); + int64_t adjusted_size = size * sizeof(T); + return Storage( + Storage::use_byte_size_t(), + adjusted_size, + allocator, + /*resizable=*/false); +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/MultiTensorApply.cuh b/voice_bridge/torch/include/ATen/native/cuda/MultiTensorApply.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a74144974a486a92013fcd58ebdeedcd37342745 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/MultiTensorApply.cuh @@ -0,0 +1,257 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace at { namespace native { + +namespace { + +static constexpr int64_t kILP = 4; +static constexpr int64_t kChunkSize = 65536; +static constexpr int64_t kBlockSize = 512; + +template +__device__ __forceinline__ bool is_aligned(T* p){ + return ((uint64_t)p) % (kILP * sizeof(T)) == 0; +} + +template +__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ + using LT = at::native::memory::aligned_vector; + ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; +} + +// TODO(crcrpar): Add `n>5` for `low prec params & their higher prec copy` +// TensorListMetadata has to be < 4KB - the limit for kernel launch argument +static constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; +static constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; +static constexpr int depth_to_max_tensors_scalarlist[5] = {96, 64, 48, 36, 30}; + +template struct TensorListMetadata +{ + void* addresses[n][depth_to_max_tensors[n-1]]; + int numel_for_tensor[depth_to_max_tensors[n-1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n-1]]; + int 
block_to_chunk[depth_to_max_blocks[n-1]]; + int start_tensor_this_launch; +}; + +// NOTE(crcrpar): This is a conservative resolution to handle `state_steps` +// whose each element is `at::Tensor` of 1 element representing the number of `step`s called so far. +template struct FusedOptimizerTensorListMetadata +{ + void* addresses[n][depth_to_max_tensors[n-1]]; + int numel_for_tensor[depth_to_max_tensors[n-1]]; + void* state_steps_addresses[depth_to_max_tensors_scalarlist[n-1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n-1]]; + int block_to_chunk[depth_to_max_blocks[n-1]]; + int start_tensor_this_launch; +}; + +template struct TensorListScalarListMetadata +{ + void* addresses[n][depth_to_max_tensors_scalarlist[n-1]]; + int numel_for_tensor[depth_to_max_tensors_scalarlist[n-1]]; + scalar_vals_t scalar_vals[depth_to_max_tensors_scalarlist[n-1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n-1]]; + int block_to_chunk[depth_to_max_blocks[n-1]]; +}; + +// note(mkozuki): `n` of 96 and `scalar_vals_t` of `c10::complex` +// violates the cuda kernel argument size limitation of 4kb. +// 80 is a number that does not violate this limitation. +template<> struct TensorListScalarListMetadata, 1> +{ + void* addresses[1][80]; + int numel_for_tensor[80]; + c10::complex scalar_vals[80]; + unsigned char block_to_tensor[depth_to_max_blocks[1-1]]; + int block_to_chunk[depth_to_max_blocks[1-1]]; +}; + +template +C10_LAUNCH_BOUNDS_1(kBlockSize) +__global__ void +multi_tensor_apply_kernel( + T tensorListMeta, + U callable, + ArgTypes... args) { + // Hand the chunk information to the user-supplied functor to process however it likes. + callable(kChunkSize, tensorListMeta, args...); +} + +template +void multi_tensor_apply( + std::vector>& tensor_lists, + at::ArrayRef scalars, + T callable, + ArgTypes... args) { + TORCH_CHECK(tensor_lists.size() == depth, "Number of tensor lists has to match the depth."); + size_t n_tensors = tensor_lists[0].size(); + using scalar_vals_t = typename T::opmath_t; + TensorListScalarListMetadata tensorListMeta; + + int loc_block_info = 0; + int loc_tensor_info = 0; + for(size_t t = 0; t < n_tensors; t++) { + + tensorListMeta.scalar_vals[loc_tensor_info] = scalars[t].to(); + + tensorListMeta.numel_for_tensor[loc_tensor_info] = tensor_lists[0][t].numel(); + for (int d = 0; d < depth; d++) { + tensorListMeta.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); + } + loc_tensor_info++; + + int chunks = (tensor_lists[0][t].numel() + kChunkSize - 1)/kChunkSize; + for (int chunk = 0; chunk < chunks; chunk++) { + tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tensorListMeta.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + bool tensors_full = (loc_tensor_info == depth_to_max_tensors_scalarlist[depth-1] && + chunk == chunks - 1); + bool blocks_full = (loc_block_info == depth_to_max_blocks[depth-1]); + bool last_chunk = (t == n_tensors - 1 && chunk == chunks - 1); + + if (tensors_full || blocks_full || last_chunk) { + multi_tensor_apply_kernel<<>>( + tensorListMeta, + callable, + args...); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + // Reset. 
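+          // Editorial note on the reset below: a kernel launch was just issued
+          // because the metadata struct ran out of tensor slots or block slots,
+          // or this was the final chunk. If the current tensor still has chunks
+          // left (chunk != chunks - 1), its address/numel/scalar entries are
+          // copied into slot 0 so the next launch resumes it; otherwise the
+          // bookkeeping restarts from an empty struct.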
+ loc_block_info = 0; + if(chunk == chunks - 1) { + loc_tensor_info = 0; + } + else { + tensorListMeta.numel_for_tensor[0] = tensorListMeta.numel_for_tensor[loc_tensor_info-1]; + tensorListMeta.scalar_vals[0] = tensorListMeta.scalar_vals[loc_tensor_info-1]; + for(int d = 0; d < depth; d++) { + tensorListMeta.addresses[d][0] = tensorListMeta.addresses[d][loc_tensor_info-1]; + } + loc_tensor_info = 1; + } + } + } + } + } + + +template +void multi_tensor_apply( + std::vector>& tensor_lists, + T callable, + ArgTypes... args) { + TORCH_CHECK(tensor_lists.size() == depth, "Number of tensor lists has to match the depth."); + size_t n_tensors = tensor_lists[0].size(); + TensorListMetadata tensorListMeta; + tensorListMeta.start_tensor_this_launch = 0; + + int loc_block_info = 0; + int loc_tensor_info = 0; + for(size_t t = 0; t < n_tensors; t++) { + tensorListMeta.numel_for_tensor[loc_tensor_info] = tensor_lists[0][t].numel(); + for (int d = 0; d < depth; d++) { + tensorListMeta.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); + } + loc_tensor_info++; + + int chunks = (tensor_lists[0][t].numel() + kChunkSize - 1)/kChunkSize; + for (int chunk = 0; chunk < chunks; chunk++) { + tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tensorListMeta.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth-1] && + chunk == chunks - 1); + bool blocks_full = (loc_block_info == depth_to_max_blocks[depth-1]); + bool last_chunk = (t == n_tensors - 1 && chunk == chunks - 1); + + if (tensors_full || blocks_full || last_chunk) { + multi_tensor_apply_kernel<<>>( + tensorListMeta, + callable, + args...); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + // Reset. + loc_block_info = 0; + if(chunk == chunks - 1) { + loc_tensor_info = 0; + tensorListMeta.start_tensor_this_launch = t + 1; + } + else { + tensorListMeta.numel_for_tensor[0] = tensorListMeta.numel_for_tensor[loc_tensor_info-1]; + for(int d = 0; d < depth; d++) { + tensorListMeta.addresses[d][0] = tensorListMeta.addresses[d][loc_tensor_info-1]; + } + loc_tensor_info = 1; + tensorListMeta.start_tensor_this_launch = t; + } + } + } + } +} + +template +void multi_tensor_apply_for_fused_optimizer( + std::vector>& tensor_lists, + at::TensorList state_steps, + T callable, + ArgTypes... 
args) { + TORCH_CHECK(tensor_lists.size() == depth, "Number of tensor lists has to match the depth"); + const auto num_tensors = tensor_lists[0].size(); + FusedOptimizerTensorListMetadata tensorListMeta; + + int loc_block_info = 0; + int loc_tensor_info = 0; + for (const auto & tensor_index : c10::irange(num_tensors)) { + tensorListMeta.state_steps_addresses[loc_tensor_info] = state_steps[tensor_index].data_ptr(); + tensorListMeta.numel_for_tensor[loc_tensor_info] = tensor_lists[0][tensor_index].numel(); + for (const auto & d : c10::irange(depth)) { + tensorListMeta.addresses[d][loc_tensor_info] = tensor_lists[d][tensor_index].data_ptr(); + } + loc_tensor_info++; + + const auto chunks = (tensor_lists[0][tensor_index].numel() + kChunkSize - 1) / kChunkSize; + for (const auto & chunk : c10::irange(chunks)) { + tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tensorListMeta.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + const auto tensor_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && chunk == chunks - 1); + const auto blocks_full = loc_block_info == depth_to_max_blocks[depth - 1]; + const auto last_chunk = (tensor_index == num_tensors - 1 && chunk == chunks - 1); + + if (tensor_full || blocks_full || last_chunk) { + multi_tensor_apply_kernel<<>>( + tensorListMeta, + callable, + args...); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + // Reset. + loc_block_info = 0; + if (chunk == chunks - 1) { + loc_tensor_info = 0; + } else { + tensorListMeta.numel_for_tensor[0] = tensorListMeta.numel_for_tensor[loc_tensor_info - 1]; + tensorListMeta.state_steps_addresses[0] = tensorListMeta.state_steps_addresses[loc_tensor_info - 1]; + for (const auto & d : c10::irange(depth)) { + tensorListMeta.addresses[d][0] = tensorListMeta.addresses[d][loc_tensor_info - 1]; + } + loc_tensor_info = 1; + } + } + } + } +} + +} // namespace +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Normalization.cuh b/voice_bridge/torch/include/ATen/native/cuda/Normalization.cuh new file mode 100644 index 0000000000000000000000000000000000000000..cc79284fea4db466bfbfa6880dd8793710f68efb --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Normalization.cuh @@ -0,0 +1,1742 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#include +#endif + +namespace at { namespace native { + +// The maximum number of threads in a block +#if defined(USE_ROCM) +constexpr int MAX_BLOCK_SIZE = 256; +#else +constexpr int MAX_BLOCK_SIZE = 512; +#endif + +constexpr unsigned MAX_GRID_SIZE = 65535u; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static int getNumThreads(int nElem) { +#if defined(USE_ROCM) + int threadSizes[5] = { 16, 32, 64, 128, MAX_BLOCK_SIZE }; +#else + int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; +#endif + for (int i = 0; i != 5; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. 
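+// Editorial sketch (msb_sketch is a hypothetical helper, not part of this
+// header): a portable equivalent of the __clz-based getMSB below, handy for
+// sanity-checking it on the host. msb_sketch(6) == 2 since 6 = 0b110, and
+// msb_sketch(32) == 5, which is the number of shuffle rounds the warp-level
+// reductions further down perform for a 32-lane warp.
+constexpr int msb_sketch(unsigned val) {
+  int msb = -1;
+  while (val != 0) {  // shift right until the value is exhausted
+    val >>= 1;
+    ++msb;
+  }
+  return msb;  // index of the highest set bit, -1 for val == 0
+}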
+__device__ __forceinline__ int getMSB(int val) {
+  return 31 - __clz(val);
+}
+
+template <typename scalar_t, typename accscalar_t>
+struct Float2 {
+  accscalar_t v1, v2;
+  __device__ Float2() {}
+  __device__ Float2(scalar_t v1, scalar_t v2) : v1(static_cast<accscalar_t>(v1)), v2(static_cast<accscalar_t>(v2)) {}
+  __device__ Float2(int v) : v1(static_cast<accscalar_t>(v)), v2(static_cast<accscalar_t>(v)) {}
+  __device__ Float2& operator+=(const Float2& a) {
+    v1 += a.v1;
+    v2 += a.v2;
+    return *this;
+  }
+  __device__ friend Float2 operator+(Float2 a, const Float2& b) {
+    a += b;
+    return a;
+  }
+};
+
+template <typename scalar_t, typename accscalar_t, typename PTA>
+struct GradOp {
+  __device__ GradOp(accscalar_t m, const PTA& i, const PTA& g)
+    : mean(m), input(i), grad_output(g) {}
+  __device__ __forceinline__ Float2<scalar_t, accscalar_t> operator()(int batch, int plane, int n) {
+    accscalar_t g = grad_output[batch][plane][n];
+    accscalar_t c = static_cast<accscalar_t>(input[batch][plane][n]) - mean;
+    return Float2<scalar_t, accscalar_t>(g, g * c);
+  }
+  const accscalar_t mean;
+  const PTA& input;
+  const PTA& grad_output;
+};
+
+template <typename acc_t>
+struct SumReduceOp {
+  __device__ __forceinline__ acc_t combine(acc_t a, acc_t b) const { return a + b; }
+
+  __device__ __forceinline__ acc_t warp_shfl_down(acc_t data, int offset) const {
+    return WARP_SHFL_DOWN(data, offset);
+  }
+};
+
+template <typename scalar_t, typename accscalar_t>
+struct SumReduceOp<Float2<scalar_t, accscalar_t>> {
+  using acc_t = Float2<scalar_t, accscalar_t>;
+
+  __device__ __forceinline__ acc_t combine(acc_t a, acc_t b) const { return a + b; }
+
+  __device__ __forceinline__ acc_t warp_shfl_down(acc_t data, int offset) const {
+    return {WARP_SHFL_DOWN(data.v1, offset), WARP_SHFL_DOWN(data.v2, offset)};
+  }
+};
+
+// Sum across (batch, x/y/z) applying Op() pointwise
+// this works by first having each thread sum its part
+// of the data. Then there is a double-shuffling reduction.
+// First each warp (of C10_WARP_SIZE threads) uses warpSum to reduce its
+// data to the "warp leader", who writes its value into shared memory.
+// Then a single warp reads the remaining (at most C10_WARP_SIZE) items
+// and reduces them using another warpSum.
+// The implicit assumption is that there are no more
+// than C10_WARP_SIZE**2 threads.
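+// A minimal sketch of the single-warp building block the comment above calls
+// warpSum (editorial; warp_sum_sketch is a hypothetical name, not part of this
+// header), assuming WARP_SHFL_DOWN wraps __shfl_down_sync over a full
+// C10_WARP_SIZE-lane warp:
+template <typename T>
+__device__ __forceinline__ T warp_sum_sketch(T val) {
+  #pragma unroll
+  for (int offset = C10_WARP_SIZE / 2; offset > 0; offset >>= 1) {
+    val += WARP_SHFL_DOWN(val, offset);  // lane i accumulates lane i + offset
+  }
+  return val;  // after log2(warp size) rounds, lane 0 holds the warp total
+}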
+template +__device__ scalar_t reduce(Op op, PTA tensor, int plane) { + // first the reductions each thread does separately + scalar_t sum = static_cast(0); + for (int batch = threadIdx.y; batch < tensor.size(0); batch += blockDim.y) { + for (int x = threadIdx.x; x < tensor.size(2); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + __shared__ scalar_t shared[C10_WARP_SIZE]; + SumReduceOp reduce_op; + sum = cuda_utils::BlockReduce, cuda_utils::Block2D>(sum, reduce_op, 0, shared); + if (threadIdx.x == 0 && threadIdx.y == 0) { + shared[0] = sum; + } + __syncthreads(); + // Everyone picks it up, should be broadcast into the whole grad_input + return shared[0]; +} + +constexpr int ELEMENTS_PER_ITER = 4; // enables concurrency within each thread to hide latency +constexpr int ELEMENTS_PER_THREAD = 16; +constexpr int OPTIMAL_TILE_W = 32; +constexpr int MAX_H_BLOCK = 128; + +__host__ void flexible_launch_configs( + const int reduction, + const int stride, + dim3 &block, + dim3 &grid, + const bool coop_flag = false) { + int block_x = std::min(lastPow2(stride), OPTIMAL_TILE_W); + int block_y = std::min(lastPow2(at::ceil_div(reduction , ELEMENTS_PER_THREAD)), + MAX_BLOCK_SIZE / block_x); + if (block_x * block_y != MAX_BLOCK_SIZE) { + block_x = std::min(lastPow2(stride), MAX_BLOCK_SIZE / block_y); + } + + int grid_x = at::ceil_div(stride, block_x); + int grid_y = std::min(at::ceil_div(reduction, block_y * ELEMENTS_PER_THREAD), MAX_H_BLOCK); + if (coop_flag) { + // it's not worth having a grid reduction if the reduction dimension is not big enough + grid_y = grid_y < 8 ? 1 : grid_y; + } + + block.x = block_x; + block.y = block_y; + block.z = 1; + grid.x = grid_x; + grid.y = grid_y; + grid.z = 1; +} + +template +__device__ __forceinline__ void welford_merge_element(C& count, + T& mean, + T& m2n, + const C& count_new, + const T& mean_new, + const T& m2n_new) { + T factor = T(1.0) / ::max(1, (count + count_new)); + T delta0 = mean - mean_new; + mean = (mean_new * count_new + mean * count) * factor; + m2n += m2n_new + delta0 * delta0 * count_new * count * factor; + count += count_new; +} + +// merge mean/m2n among threadIdx.y within block +template +__device__ __forceinline__ void welford_merge_block_vertical(C& count, + T& mean, + T& m2n, + C* shmem_count, + T* shmem_mean, + T* shmem_m2n) { + // write to shared memory + auto address_base = threadIdx.x + threadIdx.y * blockDim.x; + +#pragma unroll + for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { + if (threadIdx.y < offset*2) { + shmem_mean[address_base] = mean; + shmem_m2n[address_base] = m2n; + shmem_count[address_base] = count; + } + __syncthreads(); + if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { + auto address = address_base + offset * blockDim.x; + // read shared memory back to register for reduction + auto count_new = shmem_count[address]; + auto mean_new = shmem_mean[address]; + auto m2n_new = shmem_m2n[address]; + + welford_merge_element(count, mean, m2n, count_new, mean_new, m2n_new); + } + } +} + +template +__global__ void batch_norm_transform_input_kernel( + const GenericPackedTensorAccessor input, + GenericPackedTensorAccessor output, + const GenericPackedTensorAccessor::type, 1, RestrictPtrTraits, index_t> mean_, + const GenericPackedTensorAccessor::type, 1, RestrictPtrTraits, index_t> var_or_invstd, + const GenericPackedTensorAccessor weight, + const GenericPackedTensorAccessor bias, + stat_accscalar_t epsilon) { + + index_t plane = blockIdx.x; + + if (plane >= input.size(1)) { + return; + } + + 
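+  // Editorial note: the loop below applies the usual batch-norm affine map
+  //   y = gamma * (x - mean) * invstd + beta
+  // where invstd is the saved inverse standard deviation when train is true,
+  // and 1 / sqrt(running_var + epsilon) otherwise.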
stat_accscalar_t gamma = weight.size(0) > 0 ? static_cast(weight[plane]) : static_cast(1); + stat_accscalar_t beta = bias.size(0) > 0 ? static_cast(bias[plane]) : static_cast(0); + stat_accscalar_t mean = static_cast(mean_[plane]); + stat_accscalar_t invstd; + if (train) { + invstd = var_or_invstd[plane]; + } else { + invstd = static_cast(1) / device_sqrt(static_cast(var_or_invstd[plane]) + epsilon); + } + + index_t bs = input.size(0); + index_t fs = input.size(2); + + index_t bstep = blockDim.y * gridDim.y; + for (index_t batch = threadIdx.y + blockIdx.y * blockDim.y; batch < bs; batch += bstep) { + auto o = output[batch][plane]; + auto i = input[batch][plane]; + for (index_t feature = threadIdx.x; feature < fs; feature += blockDim.x) { + o[feature] = static_cast(gamma * (i[feature] - mean) * invstd + beta); + } + } +} + +struct InvStd { + template + __device__ __forceinline__ T operator()(T var, double epsilon) const { + T invstd = 0; + if (var != static_cast(0) || epsilon != static_cast(0)) { + invstd = static_cast(1) / device_sqrt(var + epsilon); + } + return invstd; + } +}; + +struct Var { + template + __device__ __forceinline__ T operator()(T var, double epsilon) const { + return var; + } +}; + +template +__global__ void batch_norm_collect_statistics_kernel( + const GenericPackedTensorAccessor input, + const stat_accscalar_t epsilon, + const stat_accscalar_t momentum, + GenericPackedTensorAccessor save_mean, + GenericPackedTensorAccessor save_transformed_var) { + + __shared__ int shared_n[2 * 2 * C10_WARP_SIZE + C10_WARP_SIZE]; + + int plane = blockIdx.x; + int N = input.size(0) * input.size(2); + int tid = threadIdx.x + threadIdx.y * blockDim.x; + + // Compute the mean and variance across (batch, x/y/z) + // this uses the Welford (in the for loop)/parallel algorithm (to sum across the block) + // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_Online_algorithm + // and the parallel algorithm on the same page. + // We use two shuffles to reduce across the entire block. + // https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ has a description. 
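+  // Concretely, each thread keeps a running (n, avg, var_n) triple and folds in
+  // one value v at a time:
+  //   n     <- n + 1
+  //   avg   <- avg + (v - avg) / n
+  //   var_n <- var_n + (v - avg_old) * (v - avg_new)
+  // so var_n is always n times the biased variance of the values seen so far;
+  // the warp reductions below then merge these partial (n, avg, var_n) triples.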
+  stat_accscalar_t* shared_avg_var = (stat_accscalar_t*) &shared_n[C10_WARP_SIZE];
+
+  // first the reductions each thread does separately
+  stat_accscalar_t avg = 0;
+  stat_accscalar_t var_n = 0;
+  int n = 0;
+  for (int batch = threadIdx.y; batch < input.size(0); batch += blockDim.y) {
+    for (int x = threadIdx.x; x < input.size(2); x += blockDim.x) {
+      stat_accscalar_t v = input[batch][plane][x];
+      stat_accscalar_t d1 = v - avg;
+      n++;
+      avg += d1 / n;
+      var_n += d1 * (v - avg);
+    }
+  }
+
+  // first warpSum to get one value per thread to
+  // one value per warp
+  for (int i = 0; i < getMSB(C10_WARP_SIZE); ++i) {
+    stat_accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, C10_WARP_SIZE);
+    int o_n = WARP_SHFL_XOR(n, 1 << i, C10_WARP_SIZE);
+    stat_accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n);
+    var_n += WARP_SHFL_XOR(var_n, 1 << i, C10_WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
+    avg = (n * avg + o_n * o_avg) * factor;
+    n += o_n;
+  }
+
+  // this writes each warp's item into shared memory
+  // there are at most C10_WARP_SIZE items left because
+  // there are at most C10_WARP_SIZE**2 threads at the beginning
+  __syncthreads();
+  if (tid % C10_WARP_SIZE == 0) {
+    shared_n[tid / C10_WARP_SIZE] = n;
+    shared_avg_var[tid / C10_WARP_SIZE * 2] = avg;
+    shared_avg_var[tid / C10_WARP_SIZE * 2 + 1] = var_n;
+  }
+  __syncthreads();
+  // now have a second warpSum to reduce the intermediate values
+  // from shared memory to a single number. The very first
+  // thread writes it to shared memory.
+
+  if (tid < C10_WARP_SIZE) {
+    n = (tid < blockDim.x * blockDim.y / C10_WARP_SIZE ? shared_n[tid] : 0);
+    avg = (tid < blockDim.x * blockDim.y / C10_WARP_SIZE ? shared_avg_var[2 * tid] : stat_accscalar_t(0));
+    var_n = (tid < blockDim.x * blockDim.y / C10_WARP_SIZE ? shared_avg_var[2 * tid + 1] : stat_accscalar_t(0));
+  }
+  for (int i = 0; i < getMSB(C10_WARP_SIZE); ++i) {
+    stat_accscalar_t o_avg = WARP_SHFL_XOR(avg, 1 << i, C10_WARP_SIZE);
+    int o_n = WARP_SHFL_XOR(n, 1 << i, C10_WARP_SIZE);
+    stat_accscalar_t factor = 1.0 / fmaxf(1.0, n+o_n);
+    var_n += WARP_SHFL_XOR(var_n, 1 << i, C10_WARP_SIZE) + (avg - o_avg) * (avg - o_avg) * n * o_n * factor;
+    avg = (n * avg + o_n * o_avg) * factor;
+    n += o_n;
+  }
+
+  // Save the mean, variance, and moving averages
+  if (tid == 0) {
+    if (save_mean.data() != NULL) {
+      save_mean[plane] = avg;
+    }
+    if (save_transformed_var.data() != NULL) {
+      save_transformed_var[plane] = VarTransform{}(var_n / N, epsilon);
+    }
+  }
+
+}
+
+template <typename input_scalar_t, typename stat_scalar_t, typename stat_accscalar_t, typename index_t>
+__global__ void batch_norm_backward_kernel(
+    const GenericPackedTensorAccessor<input_scalar_t, 3, DefaultPtrTraits, index_t> input,
+    const GenericPackedTensorAccessor<input_scalar_t, 3, DefaultPtrTraits, index_t> grad_output,
+    GenericPackedTensorAccessor<input_scalar_t, 3, DefaultPtrTraits, index_t> grad_input,
+    GenericPackedTensorAccessor<stat_scalar_t, 1, DefaultPtrTraits, index_t> grad_weight,
+    GenericPackedTensorAccessor<stat_scalar_t, 1, DefaultPtrTraits, index_t> grad_bias,
+    const GenericPackedTensorAccessor<stat_scalar_t, 1, DefaultPtrTraits, index_t> weight,
+    const GenericPackedTensorAccessor<stat_scalar_t, 1, DefaultPtrTraits, index_t> running_mean,
+    const GenericPackedTensorAccessor<stat_scalar_t, 1, DefaultPtrTraits, index_t> running_var,
+    const GenericPackedTensorAccessor<stat_accscalar_t, 1, DefaultPtrTraits, index_t> save_mean,
+    const GenericPackedTensorAccessor<stat_accscalar_t, 1, DefaultPtrTraits, index_t> save_invstd,
+    bool train,
+    stat_accscalar_t epsilon) {
+
+  index_t plane = blockIdx.x;
+  index_t N = grad_output.size(0) * grad_output.size(2);
+
+  stat_accscalar_t mean, invstd;
+  if (train) {
+    mean = save_mean[plane];
+    invstd = save_invstd[plane];
+  } else {
+    mean = static_cast<stat_accscalar_t>(running_mean[plane]);
+    invstd = static_cast<stat_accscalar_t>(1) / device_sqrt(static_cast<stat_accscalar_t>(running_var[plane]) + epsilon);
+  }
+
+  stat_accscalar_t weight_val = weight.size(0) > 0 ?
static_cast(weight[plane]) : stat_accscalar_t(1); + stat_accscalar_t norm = stat_accscalar_t(1) / N; + + // Compute two values across (batch, x/y/z) in one pass: + // 1. Sum(grad_output) + // 2. DotProduct(input - mean, grad_output) + GradOp> g(mean, input, grad_output); + auto res = reduce>(g, grad_output, plane); + + stat_accscalar_t grad_output_sum = res.v1; + stat_accscalar_t dot_p = res.v2; + + stat_accscalar_t grad_mean = grad_output_sum * norm; + stat_accscalar_t proj_scale = dot_p * norm * invstd * invstd; + stat_accscalar_t grad_scale = invstd * weight_val; + + if (grad_input.data() != NULL) { + for (int batch = threadIdx.y; batch < grad_output.size(0); batch += blockDim.y) { + for (int x = threadIdx.x; x < grad_output.size(2); x += blockDim.x) { + input_scalar_t go = grad_output[batch][plane][x]; + if (train) { + stat_accscalar_t inp = input[batch][plane][x]; + stat_accscalar_t proj = (inp - mean) * proj_scale; + grad_input[batch][plane][x] = static_cast((go - proj - grad_mean) * grad_scale); + } else { + grad_input[batch][plane][x] = static_cast(go * grad_scale); + } + } + } + } + + if (grad_weight.size(0) > 0) { + if (threadIdx.x == 0) { + grad_weight[plane] = static_cast(dot_p * invstd); + } + } + + if (grad_bias.size(0) > 0) { + if (threadIdx.x == 0) { + grad_bias[plane] = static_cast(grad_output_sum); + } + } +} + +template +__global__ void batch_norm_reduce_statistics_kernel( + const GenericPackedTensorAccessor vec_mean, + const GenericPackedTensorAccessor vec_invstd, + GenericPackedTensorAccessor mean, + GenericPackedTensorAccessor invstd, + GenericPackedTensorAccessor running_mean, + GenericPackedTensorAccessor running_var, + const accscalar_t epsilon, + const accscalar_t momentum, + const GenericPackedTensorAccessor counts) { + + int feature_size = vec_mean.size(1); + int world_size = vec_mean.size(0); + + int bid = blockIdx.x; + int tid = threadIdx.x; + + // first the reductions each thread does separately + for (int i = bid*blockDim.x+tid; i < feature_size; i += gridDim.x*blockDim.x) { + accscalar_t avg = 0; + accscalar_t var_n = 0; + index_t n = 0; + for (int j = 0; j < world_size; j++) { + scalar_t count = counts[j]; + accscalar_t m = vec_mean[j][i]; + accscalar_t v = accscalar_t(1.0) / (vec_invstd[j][i]); + v = (v * v - epsilon) * count; + accscalar_t factor = 1.0 / (n + count); + var_n += v + (avg - m) * (avg - m) * n * count * factor; + avg = n * factor * avg + count * factor * m; + n += count; + } + mean[i] = avg; + invstd[i] = static_cast(1) / device_sqrt(var_n / n + epsilon); + if (running_mean.data() != NULL) { + running_mean[i] = static_cast((1 - momentum) * running_mean[i] + momentum * avg); + } + accscalar_t unbiasedVar = var_n / (n - 1); + if (running_var.data() != NULL) { + running_var[i] = static_cast((1 - momentum) * running_var[i] + momentum * unbiasedVar); + } + } + +} + +template +__global__ void batch_norm_backward_reduce_kernel( + const GenericPackedTensorAccessor input, + const GenericPackedTensorAccessor grad_output, + GenericPackedTensorAccessor mean, + GenericPackedTensorAccessor invstd, + GenericPackedTensorAccessor sum_dy, + GenericPackedTensorAccessor sum_dy_xmu, + GenericPackedTensorAccessor grad_weight, + GenericPackedTensorAccessor grad_bias) { + + index_t plane = blockIdx.x; + + stat_accscalar_t r_mean = mean[plane]; + stat_accscalar_t factor = invstd[plane]; + + GradOp> g(r_mean, input, grad_output); + auto res = reduce>(g, grad_output, plane); + + if (threadIdx.x == 0) { + if (grad_weight.size(0) > 0) { + grad_weight[plane] = 
static_cast(res.v2 * factor); + } + if (grad_bias.size(0) > 0) { + grad_bias[plane] = static_cast(res.v1); + } + if (sum_dy.size(0) > 0) { + sum_dy[plane] = static_cast(res.v1); + } + if (sum_dy_xmu.size(0) > 0) { + sum_dy_xmu[plane] = static_cast(res.v2); + } + } +} + +template +__device__ __forceinline__ void batch_norm_backward_elemt_kernel_impl( + const GenericPackedTensorAccessor input, + const GenericPackedTensorAccessor grad_output, + const GenericPackedTensorAccessor mean, + const GenericPackedTensorAccessor invstd, + const GenericPackedTensorAccessor weight, + const GenericPackedTensorAccessor sum_dy, + const GenericPackedTensorAccessor sum_dy_xmu, + GenericPackedTensorAccessor grad_input, + const stat_accscalar_t norm_fct) { + index_t plane = blockIdx.x; + + if (plane >= input.size(1)) { + return; + } + + stat_accscalar_t m_c = mean[plane]; + stat_accscalar_t m_dy_c = sum_dy[plane] * norm_fct; + stat_accscalar_t factor_1_c = invstd[plane]; + stat_accscalar_t factor_2_c = weight.size(0) > 0 ? static_cast(weight[plane]) : stat_accscalar_t(1); + factor_2_c *= factor_1_c; + factor_1_c = factor_1_c * factor_1_c * sum_dy_xmu[plane] * norm_fct; + + index_t bs = input.size(0); + index_t fs = input.size(2); + + index_t bstep = blockDim.y * gridDim.y; + for (index_t batch = threadIdx.y + blockIdx.y * blockDim.y; batch < bs; batch += bstep) { + auto g_i = grad_input[batch][plane]; + auto g_o = grad_output[batch][plane]; + auto i = input[batch][plane]; + for (index_t feature = threadIdx.x; feature < fs; feature += blockDim.x) { + g_i[feature] = static_cast((g_o[feature] - m_dy_c - (i[feature] - m_c) * factor_1_c) * factor_2_c); + } + } +} + +template +__global__ void batch_norm_backward_elemt_kernel( + const GenericPackedTensorAccessor input, + const GenericPackedTensorAccessor grad_output, + const GenericPackedTensorAccessor mean, + const GenericPackedTensorAccessor invstd, + const GenericPackedTensorAccessor weight, + const GenericPackedTensorAccessor sum_dy, + const GenericPackedTensorAccessor sum_dy_xmu, + GenericPackedTensorAccessor grad_input, + const int* __restrict__ numel, const int world_size) { + int64_t total_numel = 0; + for (int i = 0; i < world_size; i ++) { + total_numel += numel[i]; + } + + const stat_accscalar_t norm_fct = + static_cast(1) / static_cast(total_numel); + batch_norm_backward_elemt_kernel_impl( + input, grad_output, mean, invstd, weight, sum_dy, sum_dy_xmu, grad_input, norm_fct); +} + +template +__global__ void batch_norm_backward_elemt_kernel( + const GenericPackedTensorAccessor input, + const GenericPackedTensorAccessor grad_output, + const GenericPackedTensorAccessor mean, + const GenericPackedTensorAccessor invstd, + const GenericPackedTensorAccessor weight, + const GenericPackedTensorAccessor sum_dy, + const GenericPackedTensorAccessor sum_dy_xmu, + GenericPackedTensorAccessor grad_input, + const stat_accscalar_t norm_fct) { + batch_norm_backward_elemt_kernel_impl( + input, grad_output, mean, invstd, weight, sum_dy, sum_dy_xmu, grad_input, norm_fct); +} + +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +static GenericPackedTensorAccessor get_packed_accessor( + const Tensor& t, c10::string_view var_name) { + constexpr auto expect_type = c10::CppTypeToScalarType::value; + const auto actual_type = t.scalar_type(); + TORCH_CHECK(actual_type == expect_type, "Expected ", var_name, + " to have type ", expect_type, " but got ", actual_type); + return t.generic_packed_accessor(); +} + +template class PtrTraits = DefaultPtrTraits, typename 
index_t = int64_t> +static GenericPackedTensorAccessor packed_accessor_or_dummy( + const Tensor& t, c10::string_view var_name) { + if (!t.defined()) { + const std::array zeros{{0}}; + return GenericPackedTensorAccessor(nullptr, zeros.data(), zeros.data()); + } + return get_packed_accessor(t, var_name); +} + +template +std::tuple batch_norm_backward_cuda_template(const Tensor& grad_out_, const Tensor& input_, const Tensor& weight_, + const Tensor& running_mean_, const Tensor& running_var_, const Tensor& save_mean_, const Tensor& save_invstd_, + bool train, double epsilon, std::array grad_input_mask) { + + using accscalar_t = at::acc_type; + Tensor grad_input_; + Tensor grad_input_reshaped; + Tensor grad_weight_; + Tensor grad_bias_; + auto input_reshaped = input_.reshape({input_.size(0), input_.size(1), -1}); + auto grad_output_reshaped = grad_out_.reshape(input_reshaped.sizes()); + + if (grad_input_mask[0]) { + grad_input_ = at::empty_like(input_, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + grad_input_reshaped = grad_input_.view(input_reshaped.sizes()); + } + if (grad_input_mask[1]) { + grad_weight_ = at::empty_like(weight_, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + } + if (grad_input_mask[2]) { + grad_bias_ = at::empty_like(weight_, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + } + + auto input = get_packed_accessor< + input_scalar_t, 3, DefaultPtrTraits, index_t>(input_reshaped, "input"); + auto grad_output = get_packed_accessor< + input_scalar_t, 3, DefaultPtrTraits, index_t>(grad_output_reshaped, "grad_output"); + auto grad_input = packed_accessor_or_dummy< + input_scalar_t, 3, DefaultPtrTraits, index_t>(grad_input_reshaped, "grad_input"); + auto weight = packed_accessor_or_dummy< + stat_scalar_t, 1, DefaultPtrTraits, index_t>(weight_, "weight"); + auto grad_weight = packed_accessor_or_dummy< + stat_scalar_t, 1, DefaultPtrTraits, index_t>(grad_weight_, "grad_weight"); + auto grad_bias = packed_accessor_or_dummy< + stat_scalar_t, 1, DefaultPtrTraits, index_t>(grad_bias_, "grad_bias"); + auto running_mean = packed_accessor_or_dummy< + stat_scalar_t, 1, DefaultPtrTraits, index_t>(running_mean_, "running_mean"); + auto running_var = packed_accessor_or_dummy< + stat_scalar_t, 1, DefaultPtrTraits, index_t>(running_var_, "running_var"); + auto save_mean = packed_accessor_or_dummy< + accscalar_t, 1, DefaultPtrTraits, index_t>(save_mean_, "save_mean"); + auto save_invstd = packed_accessor_or_dummy< + accscalar_t, 1, DefaultPtrTraits, index_t>(save_invstd_, "save_invstd"); + + auto stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input.size(1)); + int tf = getNumThreads(input.size(2)); + dim3 threads(tf, std::max(1, MAX_BLOCK_SIZE/tf)); + + batch_norm_backward_kernel <<>> + (input, grad_output, grad_input, grad_weight, grad_bias, weight, running_mean, running_var, + save_mean, save_invstd, train, epsilon); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + return std::make_tuple(grad_input_, grad_weight_, grad_bias_); +} + +template +void batch_norm_stats_cuda_template( + const Tensor& out_mean, const Tensor& out_invstd, const Tensor& input_, double epsilon) { + + using accscalar_t = at::acc_type; + int64_t n_input = input_.size(1); + Tensor dummy_mean_; + Tensor dummy_var_; + auto input_reshaped = input_.reshape({input_.size(0), input_.size(1), -1}); // internally we merge the feature dimensions + + resize_output(out_mean, {n_input}); + resize_output(out_invstd, {n_input}); + auto input = get_packed_accessor< + scalar_t, 3, RestrictPtrTraits, index_t>(input_reshaped, "input"); + TORCH_INTERNAL_ASSERT(out_invstd.dim() == 1 
&& out_invstd.is_contiguous() &&
+      out_invstd.sizes()[0]);
+  TORCH_INTERNAL_ASSERT(out_mean.dim() == 1 && out_mean.is_contiguous() &&
+      out_mean.sizes()[0]);
+
+  auto mean = packed_accessor_or_dummy<
+      accscalar_t, 1, RestrictPtrTraits, index_t>(out_mean, "out_mean");
+  auto invstd = packed_accessor_or_dummy<
+      accscalar_t, 1, RestrictPtrTraits, index_t>(out_invstd, "out_invstd");
+  auto stream = at::cuda::getCurrentCUDAStream();
+
+  dim3 blocks(input.size(1));
+  int tf = getNumThreads(input.size(2));
+  dim3 threads(tf, std::max(1, MAX_BLOCK_SIZE/tf));
+  batch_norm_collect_statistics_kernel<InvStd, scalar_t, accscalar_t, index_t> <<<blocks, threads, 0, stream>>>
+    (input, epsilon, 0.0, mean, invstd);
+  C10_CUDA_KERNEL_LAUNCH_CHECK();
+}
+
+template<typename input_scalar_t, typename stat_scalar_t, typename index_t>
+void batch_norm_elemt_cuda_template(const Tensor& output_, const Tensor& input_, const Tensor& weight_,
+    const Tensor& bias_, const Tensor& mean_, const Tensor& invstd_) {
+
+  using stat_accscalar_t = at::acc_type<stat_scalar_t, true>;
+  int64_t n_input = input_.size(1);
+  auto input_reshaped = input_.reshape({input_.size(0), input_.size(1), -1}); // internally we merge the feature dimensions
+  auto output_reshaped = output_.view({input_.size(0), input_.size(1), -1});
+
+  auto input = get_packed_accessor<
+      input_scalar_t, 3, RestrictPtrTraits, index_t>(input_reshaped, "input");
+  auto output = get_packed_accessor<
+      input_scalar_t, 3, RestrictPtrTraits, index_t>(output_reshaped, "output");
+  auto weight = packed_accessor_or_dummy<
+      stat_scalar_t, 1, RestrictPtrTraits, index_t>(weight_, "weight");
+  auto bias = packed_accessor_or_dummy<
+      stat_scalar_t, 1, RestrictPtrTraits, index_t>(bias_, "bias");
+  auto mean = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, RestrictPtrTraits, index_t>(mean_, "mean");
+  auto invstd = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, RestrictPtrTraits, index_t>(invstd_, "invstd");
+  auto stream = at::cuda::getCurrentCUDAStream();
+
+  // NOTE: We use transform_input_kernel in training mode, which ignores epsilon
+  const double dummy_epsilon = 1e-5;
+
+  // The input_transform kernel is pointwise, but we need to balance reading parameters (save_var/mean,
+  // weight/bias) - which we only do once and have a for loop afterwards - with having many threads and blocks
+  // and good occupancy. Quite likely, we could go with even more blocks than 1024.
+  // The various planes are independent, so we use blocks for them.
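+  // As a rough worked example of the heuristic below (assuming getNumThreads
+  // rounds its request up to a power-of-two block width capped at
+  // MAX_BLOCK_SIZE): for a plane of 1024 features, tf = max(getNumThreads(256),
+  // min(getNumThreads(1024), 64)) = max(256, 64) = 256 threads along the
+  // feature axis and tb = max(64/256, 1) = 1 batch row per block, with the
+  // remaining batch rows spread across blocks_trans.y.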
+ int tf = std::max(getNumThreads(input.size(2)/4), + std::min(getNumThreads(input.size(2)), 64)); + int tb = std::max(64/tf, 1); + dim3 blocks_trans(input.size(1), std::max(1, std::min((256*1024)/input.size(1), + (input.size(0)+tb-1)/tb))); + blocks_trans.y = std::min(blocks_trans.y, MAX_GRID_SIZE); + dim3 threads_trans(tf, tb); + batch_norm_transform_input_kernel <<>> + (input, output, mean, invstd, weight, bias, dummy_epsilon); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +std::tuple batch_norm_gather_stats_cuda_template(const Tensor& mean_, const Tensor& invstd_, + const Tensor& running_mean_, const Tensor& running_var_, + double momentum, double epsilon, const Tensor& counts_) { + + Tensor save_mean_; + Tensor save_invstd_; + + auto features = mean_.size(1); + auto input_options = mean_.options(); + if (mean_.scalar_type() == at::ScalarType::Half || mean_.scalar_type() == at::ScalarType::BFloat16) { + input_options = input_options.dtype(ScalarType::Float); + } + save_mean_ = at::empty({features}, input_options); + save_invstd_ = at::empty({features}, input_options); + + auto mean = packed_accessor_or_dummy< + accscalar_t, 2, RestrictPtrTraits, index_t>(mean_, "mean"); + auto invstd = packed_accessor_or_dummy< + accscalar_t, 2, RestrictPtrTraits, index_t>(invstd_, "invstd"); + auto running_mean = packed_accessor_or_dummy< + scalar_t, 1, RestrictPtrTraits, index_t>(running_mean_, "running_mean"); + auto running_var = packed_accessor_or_dummy< + scalar_t, 1, RestrictPtrTraits, index_t>(running_var_, "running_mean"); + auto counts = packed_accessor_or_dummy< + scalar_t, 1, RestrictPtrTraits, index_t>(counts_, "counts"); + + auto save_mean = get_packed_accessor< + accscalar_t, 1, RestrictPtrTraits, index_t>(save_mean_, "save_mean"); + auto save_invstd = get_packed_accessor< + accscalar_t, 1, RestrictPtrTraits, index_t>(save_invstd_, "save_invstd"); + auto stream = at::cuda::getCurrentCUDAStream(); + + int block = getNumThreads(features); + int grid = std::max(1, features/block); + batch_norm_reduce_statistics_kernel <<>> + (mean, invstd, save_mean, save_invstd, running_mean, running_var, epsilon, momentum, counts); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + return std::make_tuple(save_mean_, save_invstd_); +} + +template +std::tuple batch_norm_backward_reduce_cuda_template(const Tensor& grad_out_, const Tensor& input_, + const Tensor& mean_, const Tensor& invstd_, const Tensor& weight_, + const bool input_g, const bool weight_g, const bool bias_g) { + + using stat_accscalar_t = at::acc_type; + int64_t n_input = input_.size(1); + Tensor sum_dy_; + Tensor sum_dy_xmu_; + Tensor grad_weight_; + Tensor grad_bias_; + auto input_reshaped = input_.reshape({input_.size(0), input_.size(1), -1}); // internally we merge the feature dimensions + auto grad_output_reshaped = grad_out_.reshape(input_reshaped.sizes()); + + if (input_g) { + sum_dy_ = at::empty_like(mean_, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + sum_dy_xmu_ = at::empty_like(mean_, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + } + if (weight_g) { + grad_weight_ = at::empty({n_input}, weight_.options()); + } + if (bias_g) { + grad_bias_ = at::empty({n_input}, weight_.options()); + } + + auto input = get_packed_accessor< + input_scalar_t, 3, DefaultPtrTraits, index_t>(input_reshaped, "input"); + auto grad_output = get_packed_accessor< + input_scalar_t, 3, DefaultPtrTraits, index_t>(grad_output_reshaped, "grad_output"); + auto grad_weight = packed_accessor_or_dummy< + stat_scalar_t, 1, DefaultPtrTraits, index_t>(grad_weight_, "grad_weight"); + auto grad_bias = 
packed_accessor_or_dummy<
+      stat_scalar_t, 1, DefaultPtrTraits, index_t>(grad_bias_, "grad_bias");
+  auto mean = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(mean_, "mean");
+  auto invstd = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(invstd_, "invstd");
+  auto sum_dy = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(sum_dy_, "sum_dy");
+  auto sum_dy_xmu = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(sum_dy_xmu_, "sum_dy_xmu");
+
+  auto batch_size = input_reshaped.size(0);
+  auto feature_size = input_reshaped.size(2);
+  auto stream = at::cuda::getCurrentCUDAStream();
+
+  int warp_size = at::cuda::warp_size();
+  int block_y = std::min(lastPow2(batch_size), MAX_BLOCK_SIZE/warp_size);
+  // We want block_x to be at least a warp width
+  int block_x = std::min(std::max(getNumThreads(feature_size), warp_size), MAX_BLOCK_SIZE/block_y);
+  const dim3 block(block_x, block_y);
+  const dim3 grid(n_input);
+
+  batch_norm_backward_reduce_kernel<input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t> <<<grid, block, 0, stream>>>
+    (input, grad_output, mean, invstd, sum_dy, sum_dy_xmu, grad_weight, grad_bias);
+  C10_CUDA_KERNEL_LAUNCH_CHECK();
+
+  return std::make_tuple(sum_dy_, sum_dy_xmu_, grad_weight_, grad_bias_);
+}
+
+template<typename input_scalar_t, typename stat_scalar_t, typename index_t>
+Tensor batch_norm_backward_elemt_cuda_template(const Tensor& grad_out_, const Tensor& input_,
+    const Tensor& mean_, const Tensor& invstd_,
+    const Tensor& weight_, const Tensor& sum_dy_, const Tensor& sum_dy_xmu_) {
+
+  using stat_accscalar_t = at::acc_type<stat_scalar_t, true>;
+  int64_t n_input = input_.size(1);
+  auto input_reshaped = input_.reshape({input_.size(0), input_.size(1), -1}); // internally we merge the feature dimensions
+  auto grad_output_reshaped = grad_out_.reshape(input_reshaped.sizes());
+  auto grad_input_reshaped = at::empty_like(input_reshaped, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
+
+  auto input = get_packed_accessor<
+      input_scalar_t, 3, DefaultPtrTraits, index_t>(input_reshaped, "input");
+  auto grad_input = get_packed_accessor<
+      input_scalar_t, 3, DefaultPtrTraits, index_t>(grad_input_reshaped, "grad_input");
+  auto grad_output = get_packed_accessor<
+      input_scalar_t, 3, DefaultPtrTraits, index_t>(grad_output_reshaped, "grad_output");
+  auto mean = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(mean_, "mean");
+  auto invstd = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(invstd_, "invstd");
+  auto weight = packed_accessor_or_dummy<
+      stat_scalar_t, 1, DefaultPtrTraits, index_t>(weight_, "weight");
+  auto sum_dy = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(sum_dy_, "sum_dy");
+  auto sum_dy_xmu = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(sum_dy_xmu_, "sum_dy_xmu");
+
+  auto stream = at::cuda::getCurrentCUDAStream();
+
+  // The kernel is pointwise, but we need to balance reading parameters (save_var/mean,
+  // weight/bias) - which we only do once and have a for loop afterwards - with having many threads and blocks
+  // and good occupancy. Quite likely, we could go with even more blocks than 1024.
+  // The various planes are independent, so we use blocks for them.
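+  // For reference, the element-wise backward that the kernel below applies is,
+  // per plane with N = reduction size:
+  //   grad_input = (grad_output - sum_dy / N
+  //                 - (input - mean) * invstd^2 * sum_dy_xmu / N) * invstd * weight
+  // batch_norm_backward_elemt_kernel_impl folds this into factor_1_c and
+  // factor_2_c, with norm_fct = 1 / N.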
+  int tf = std::max(getNumThreads(input.size(2)/4),
+      std::min(getNumThreads(input.size(2)), 64));
+  int tb = std::max(64/tf, 1);
+  dim3 blocks_trans(input.size(1), std::max(1, std::min((256*1024)/input.size(1),
+      (input.size(0)+tb-1)/tb)));
+  blocks_trans.y = std::min(blocks_trans.y, MAX_GRID_SIZE);
+  dim3 threads_trans(tf, tb);
+  auto reduction_size = input_.numel() / n_input;
+  auto norm_fct = static_cast<stat_accscalar_t>(1.0 / reduction_size);
+  batch_norm_backward_elemt_kernel<input_scalar_t, stat_scalar_t, stat_accscalar_t, index_t>
+      <<<blocks_trans, threads_trans, 0, stream>>>
+      (input, grad_output, mean, invstd, weight, sum_dy, sum_dy_xmu, grad_input, norm_fct);
+  C10_CUDA_KERNEL_LAUNCH_CHECK();
+
+  return grad_input_reshaped.view(input_.sizes());
+}
+
+template<typename input_scalar_t, typename stat_scalar_t, typename index_t>
+Tensor batch_norm_backward_elemt_cuda_template(const Tensor& grad_out_, const Tensor& input_,
+    const Tensor& mean_, const Tensor& invstd_,
+    const Tensor& weight_, const Tensor& sum_dy_, const Tensor& sum_dy_xmu_, const Tensor& count) {
+
+  using stat_accscalar_t = at::acc_type<stat_scalar_t, true>;
+  int64_t n_input = input_.size(1);
+  auto input_reshaped = input_.reshape({input_.size(0), input_.size(1), -1}); // internally we merge the feature dimensions
+  auto grad_output_reshaped = grad_out_.reshape(input_reshaped.sizes());
+  auto grad_input_reshaped = at::empty_like(input_reshaped, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
+
+  auto input = get_packed_accessor<
+      input_scalar_t, 3, DefaultPtrTraits, index_t>(input_reshaped, "input");
+  auto grad_input = get_packed_accessor<
+      input_scalar_t, 3, DefaultPtrTraits, index_t>(grad_input_reshaped, "grad_input");
+  auto grad_output = get_packed_accessor<
+      input_scalar_t, 3, DefaultPtrTraits, index_t>(grad_output_reshaped, "grad_output");
+  auto mean = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(mean_, "mean");
+  auto invstd = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(invstd_, "invstd");
+  auto weight = packed_accessor_or_dummy<
+      stat_scalar_t, 1, DefaultPtrTraits, index_t>(weight_, "weight");
+  auto sum_dy = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(sum_dy_, "sum_dy");
+  auto sum_dy_xmu = packed_accessor_or_dummy<
+      stat_accscalar_t, 1, DefaultPtrTraits, index_t>(sum_dy_xmu_, "sum_dy_xmu");
+
+  auto stream = at::cuda::getCurrentCUDAStream();
+
+  // The kernel is pointwise, but we need to balance reading parameters (save_var/mean,
+  // weight/bias) - which we only do once and have a for loop afterwards - with having many threads and blocks
+  // and good occupancy. Quite likely, we could go with even more blocks than 1024.
+  // The various planes are independent, so we use blocks for them.
+ int tf = std::max(getNumThreads(input.size(2)/4), + std::min(getNumThreads(input.size(2)), 64)); + int tb = std::max(64/tf, 1); + dim3 blocks_trans(input.size(1), std::max(1, std::min((256*1024)/input.size(1), + (input.size(0)+tb-1)/tb))); + blocks_trans.y = std::min(blocks_trans.y, MAX_GRID_SIZE); + dim3 threads_trans(tf, tb); + batch_norm_backward_elemt_kernel <<>> + (input, grad_output, mean, invstd, weight, sum_dy, sum_dy_xmu, grad_input, count.data_ptr(), count.numel()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + return grad_input_reshaped.view(input_.sizes()); +} + +// welford kernel for c last tensor calculating mean/biased_variance/unbiased_variance +// original apex name: welford_kernel_c_last +template + +__global__ void +batch_norm_collect_statistics_channels_last_kernel( + const scalar_t* __restrict__ input, + accscalar_t* __restrict__ out_mean, + accscalar_t* __restrict__ out_invstd, + volatile accscalar_t* staging_data, + int* semaphores, + const int reduction_size, + const int stride, + accscalar_t epsilon) { + // hide latency with concurrency + accscalar_t x_mean[PARALLEL_LOADS]; + accscalar_t m_2_n[PARALLEL_LOADS]; + int count[PARALLEL_LOADS]; + +#pragma unroll + for (int i = 0; i < PARALLEL_LOADS; i++) { + x_mean[i] = accscalar_t(0); + m_2_n[i] = accscalar_t(0); + count[i] = accscalar_t(0); + } + // tensor dimension (m,c) + + // loop along m dimension + int inner_loop_stride = blockDim.y * gridDim.y; + + // offset along m dimension + int m_offset = blockIdx.y * blockDim.y + threadIdx.y; + int c_offset = blockIdx.x * blockDim.x + threadIdx.x; + + int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); + int address_base = m_offset * stride + c_offset; + int address_increment = inner_loop_stride * stride; + + for (int i = 0; i < loop_count; i++) { + accscalar_t x_math[PARALLEL_LOADS]; + accscalar_t x_count_inv[PARALLEL_LOADS]; + accscalar_t is_valid[PARALLEL_LOADS]; + + // load multiple data in +#pragma unroll + for (int j = 0; j < PARALLEL_LOADS; j++) { + if (c_offset < stride && m_offset < reduction_size) { + x_math[j] = input[address_base]; + count[j]++; + x_count_inv[j] = accscalar_t(1) / count[j]; + is_valid[j] = accscalar_t(1); + } else { + x_math[j] = accscalar_t(0); + x_count_inv[j] = accscalar_t(0); + is_valid[j] = accscalar_t(0); + } + m_offset += inner_loop_stride; + address_base += address_increment; + } + + // calculate mean/m2n with welford +#pragma unroll + for (int j = 0; j < PARALLEL_LOADS; j++) { + accscalar_t delta0 = x_math[j] - x_mean[j]; + x_mean[j] += delta0 * x_count_inv[j]; + accscalar_t delta1 = x_math[j] - x_mean[j]; + m_2_n[j] += delta0 * delta1 * is_valid[j]; + } + } + + // thread reduction to accumulate mean/m_2_n/count between PARALLEL_LOADS +#pragma unroll + for (int j = 1; j < PARALLEL_LOADS; j++) { + welford_merge_element(count[0], x_mean[0], m_2_n[0], count[j], x_mean[j], m_2_n[j]); + } + + // release x_mean / m_2_n + auto mean_th = x_mean[0]; + auto m2_th = m_2_n[0]; + auto count_th = count[0]; + + // block-wise reduction with shared memory (since reduction cannot be done within a warp) + static __shared__ accscalar_t shmem_mean[MAX_BLOCK_SIZE]; + static __shared__ accscalar_t shmem_m2n[MAX_BLOCK_SIZE]; + static __shared__ int shmem_count[MAX_BLOCK_SIZE]; + + welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n); + + if (gridDim.y > 1) { + volatile accscalar_t* staging_mean = staging_data; + volatile accscalar_t* staging_m2n = &staging_data[stride*gridDim.y]; + volatile int* 
staging_count = reinterpret_cast(&staging_m2n[stride*gridDim.y]); + + address_base = c_offset + blockIdx.y * stride; + // write data to staging_data; + if (threadIdx.y == 0 && c_offset < stride) { + staging_mean[address_base] = mean_th; + staging_m2n[address_base] = m2_th; + staging_count[address_base] = count_th; + } + + __threadfence(); + __syncthreads(); // ensuring writes to staging_ is visible to all blocks + + __shared__ bool is_last_block_done; + // mark block done + if (threadIdx.x == 0 && threadIdx.y == 0) { + int old = atomicAdd(&semaphores[blockIdx.x], 1); + is_last_block_done = (old == (gridDim.y-1)); + } + + __syncthreads(); + + // check that all data is now available in global memory + if (is_last_block_done) { + count_th = 0; + mean_th = accscalar_t(0.0); + m2_th = accscalar_t(0.0); + + for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { + address_base = c_offset + y * stride; + int count_new = c_offset < stride ? staging_count[address_base] : 0; + accscalar_t mean_new = c_offset < stride ? staging_mean[address_base] : accscalar_t(0.0); + accscalar_t m2n_new = c_offset < stride ? staging_m2n[address_base] : accscalar_t(0.0); + + welford_merge_element(count_th, mean_th, m2_th, count_new, mean_new, m2n_new); + } + + welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n); + if (threadIdx.y == 0 && c_offset < stride) { + out_mean[c_offset] = static_cast(mean_th); + out_invstd[c_offset] = VarTransform{}(m2_th/count_th, epsilon); + } + } + } else { + if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) { + out_mean[c_offset] = static_cast(mean_th); + out_invstd[c_offset] = VarTransform{}(m2_th/count_th, epsilon); + } + } +} + +// elementwise BN kernel +// original apex name: batchnorm_forward_c_last_kernel +template < + typename scalar_t, + typename accscalar_t, + typename layerscalar_t, + int PARALLEL_LOADS> +__global__ void batch_norm_transform_input_channels_last_kernel( + const scalar_t* __restrict__ input, + const scalar_t* __restrict__ z, + const accscalar_t* __restrict__ mean, + const accscalar_t* __restrict__ inv_std, + const layerscalar_t* __restrict__ weight, + const layerscalar_t* __restrict__ shift, + scalar_t* __restrict__ out, + const int reduction_size, + const int stride, + const bool fuse_relu) { + // tensor dimension (m,c) + // loop along m dimension + int inner_loop_stride = blockDim.y * gridDim.y; + + // offset along m dimension + int m_offset = blockIdx.y * blockDim.y + threadIdx.y; + int c_offset = blockIdx.x * blockDim.x + threadIdx.x; + + if (c_offset >= stride || m_offset >= reduction_size) { + return; + } + + auto m_c = mean[c_offset]; + auto inv_std_c = static_cast(inv_std[c_offset]); + auto w_c = weight == nullptr ? accscalar_t(1.0) : static_cast(weight[c_offset]); + auto s_c = shift == nullptr ? accscalar_t(0.0) : static_cast(shift[c_offset]); + + int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); + int address_base = m_offset * stride + c_offset; + int address_increment = inner_loop_stride * stride; + + for (int i = 0; i < loop_count; i++) { +#pragma unroll + for (int j = 0; j < PARALLEL_LOADS; j++) { + if (c_offset < stride && m_offset < reduction_size) { + auto tmp = w_c * (static_cast(input[address_base]) - m_c ) * inv_std_c + s_c; + if (z != nullptr) { + tmp += z[address_base]; + } + out[address_base] = (fuse_relu && tmp <= accscalar_t(0.0) ? 
scalar_t(0.0) : static_cast(tmp)); + } + m_offset += inner_loop_stride; + address_base += address_increment; + } + } +} + +template +__device__ __forceinline__ void merge_block_vertical_backward(T& sum_dy, + T& sum_dy_xmu, + T* shmem_sum_dy, + T* shmem_sum_dy_xmu) { + // write to shared memory + auto address_base = threadIdx.x + threadIdx.y * blockDim.x; + +#pragma unroll + for (int offset = blockDim.y/2; offset > 0; offset >>= 1) { + if (threadIdx.y < offset*2) { + shmem_sum_dy[address_base] = sum_dy; + shmem_sum_dy_xmu[address_base] = sum_dy_xmu; + } + __syncthreads(); + if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { + auto address = address_base + offset * blockDim.x; + + sum_dy += shmem_sum_dy[address]; + sum_dy_xmu += shmem_sum_dy_xmu[address]; + } + } +} + +// batchnorm backward kernel for c last tensor +// original apex name: reduce_bn_c_last_kernel +template < + int PARALLEL_LOADS, + typename scalar_t, + typename accscalar_t, + typename layerscalar_t> +__global__ void batch_norm_backward_reduce_channels_last_kernel( + const scalar_t* __restrict__ input, + const scalar_t* __restrict__ grad_output, + const accscalar_t* __restrict__ mean, + const accscalar_t* __restrict__ inv_std, + accscalar_t* __restrict__ sum_dy_o, + accscalar_t* __restrict__ sum_dy_xmu_o, + layerscalar_t* __restrict__ grad_weight, + layerscalar_t* __restrict__ grad_bias, + volatile accscalar_t* staging_data, + int* semaphores, + const int reduction_size, + const int stride) { + + // hide latency with concurrency + accscalar_t sum_dy[PARALLEL_LOADS]; + accscalar_t sum_dy_xmu[PARALLEL_LOADS]; + +#pragma unroll + for (int i = 0; i < PARALLEL_LOADS; i++) { + sum_dy[i] = accscalar_t(0); + sum_dy_xmu[i] = accscalar_t(0); + } + // tensor dimension (m,c) + + // loop along m dimension + int inner_loop_stride = blockDim.y * gridDim.y; + + // offset along m dimension + int m_offset = blockIdx.y * blockDim.y + threadIdx.y; + int c_offset = blockIdx.x * blockDim.x + threadIdx.x; + + if (c_offset >= stride || m_offset >= reduction_size) { + return; + } + + int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); + int address_base = m_offset * stride + c_offset; + int address_increment = inner_loop_stride * stride; + + auto r_mean = mean[c_offset]; + auto factor = inv_std[c_offset]; + + for (int i = 0; i < loop_count; i++) { + accscalar_t x_input[PARALLEL_LOADS]; + accscalar_t x_grad_output[PARALLEL_LOADS]; + + // load multiple data in +#pragma unroll + for (int j = 0; j < PARALLEL_LOADS; j++) { + if (c_offset < stride && m_offset < reduction_size) { + x_input[j] = input[address_base]; + x_grad_output[j] = grad_output[address_base]; + } else { + x_input[j] = accscalar_t(0); + x_grad_output[j] = accscalar_t(0); + } + m_offset += inner_loop_stride; + address_base += address_increment; + } + + // calculate sum_dy / sum_dy_xmu +#pragma unroll + for (int j = 0; j < PARALLEL_LOADS; j++) { + sum_dy[j] += x_grad_output[j]; + sum_dy_xmu[j] += x_grad_output[j] * (x_input[j] - r_mean); + } + } + + // thread reduction to accumulate sum_dy / sum_dy_xmu between PARALLEL_LOADS +#pragma unroll + for (int j = 1; j < PARALLEL_LOADS; j++) { + sum_dy[0] += sum_dy[j]; + sum_dy_xmu[0] += sum_dy_xmu[j]; + } + + // release array of registers + auto sum_dy_th = sum_dy[0]; + auto sum_dy_xmu_th = sum_dy_xmu[0]; + + // block-wise reduction with shared memory (since reduction cannot be done within a warp) + static __shared__ accscalar_t shmem_sum_dy[MAX_BLOCK_SIZE]; + static __shared__ accscalar_t 
shmem_sum_dy_xmu[MAX_BLOCK_SIZE]; + + merge_block_vertical_backward(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu); + + if (gridDim.y > 1) { + volatile accscalar_t* staging_sum_dy = staging_data; + volatile accscalar_t* staging_sum_dy_xmu = &staging_data[stride*gridDim.y]; + + address_base = c_offset + blockIdx.y * stride; + // write data to staging_data; + if (threadIdx.y == 0 && c_offset < stride) { + staging_sum_dy[address_base] = sum_dy_th; + staging_sum_dy_xmu[address_base] = sum_dy_xmu_th; + } + + __threadfence(); + __syncthreads(); // ensuring writes to staging_ is visible to all blocks + + __shared__ bool is_last_block_done; + // mark block done + if (threadIdx.x == 0 && threadIdx.y == 0) { + int old = atomicAdd(&semaphores[blockIdx.x], 1); + is_last_block_done = (old == (gridDim.y-1)); + } + + __syncthreads(); + + // check that all data is now available in global memory + if (is_last_block_done) { + sum_dy_th = accscalar_t(0.0); + sum_dy_xmu_th = accscalar_t(0.0); + + for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { + address_base = c_offset + y * stride; + sum_dy_th += (c_offset < stride ? staging_sum_dy[address_base] : accscalar_t(0.0)); + sum_dy_xmu_th += (c_offset < stride ? staging_sum_dy_xmu[address_base] : accscalar_t(0.0)); + } + + merge_block_vertical_backward(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu); + if (threadIdx.y == 0 && c_offset < stride) { + if (grad_bias != nullptr) { + grad_bias[c_offset] = static_cast(sum_dy_th); + } + if (grad_weight != nullptr) { + grad_weight[c_offset] = static_cast(sum_dy_xmu_th * factor); + } + //mean_dy[c_offset] = sum_dy_th / reduction_size; + //mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size; + sum_dy_o[c_offset] = sum_dy_th; + sum_dy_xmu_o[c_offset] = sum_dy_xmu_th; + } + } + } else { + if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) { + if (grad_bias != nullptr) { + grad_bias[c_offset] = static_cast(sum_dy_th); + } + if (grad_weight != nullptr) { + grad_weight[c_offset] = static_cast(sum_dy_xmu_th * factor); + } + //mean_dy[c_offset] = sum_dy_th / reduction_size; + //mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size; + sum_dy_o[c_offset] = sum_dy_th; + sum_dy_xmu_o[c_offset] = sum_dy_xmu_th; + } + } +} + +// elementwise BN kernel +// original apex name: batchnorm_backward_c_last_kernel +template < + int PARALLEL_LOADS, + typename scalar_t, + typename accscalar_t, + typename layerscalar_t> +__device__ __forceinline__ void batch_norm_backward_elemt_channels_last_kernel_impl( + const scalar_t* __restrict__ grad_output, + const scalar_t* __restrict__ input, + const accscalar_t* __restrict__ mean, + const accscalar_t* __restrict__ inv_std, + const layerscalar_t* __restrict__ weight, + const accscalar_t* __restrict__ sum_dy, + const accscalar_t* __restrict__ sum_dy_xmu, + scalar_t* __restrict__ grad_input, + const accscalar_t norm_fct, + const int reduction_size, + const int stride) { + // tensor dimension (m,c) + // loop along m dimension + int inner_loop_stride = blockDim.y * gridDim.y; + + // offset along m dimension + int m_offset = blockIdx.y * blockDim.y + threadIdx.y; + int c_offset = blockIdx.x * blockDim.x + threadIdx.x; + + if (c_offset >= stride || m_offset >= reduction_size) { + return; + } + + auto m_c = mean[c_offset]; + auto m_dy_c = sum_dy[c_offset] * norm_fct; + auto factor_1_c = inv_std[c_offset]; + auto factor_2_c = (weight == nullptr? 
accscalar_t(1.0) : static_cast(weight[c_offset])) * factor_1_c; + factor_1_c = factor_1_c * factor_1_c * sum_dy_xmu[c_offset] * norm_fct; + + int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS); + int address_base = m_offset * stride + c_offset; + int address_increment = inner_loop_stride * stride; + + for (int i = 0; i < loop_count; i++) { +#pragma unroll + for (int j = 0; j < PARALLEL_LOADS; j++) { + if (c_offset < stride && m_offset < reduction_size) { + grad_input[address_base] = static_cast( + (static_cast(grad_output[address_base]) - m_dy_c - + (static_cast(input[address_base]) - m_c) * factor_1_c) + * factor_2_c); + } + m_offset += inner_loop_stride; + address_base += address_increment; + } + } +} + +template < + int PARALLEL_LOADS, + typename scalar_t, + typename accscalar_t, + typename layerscalar_t> +__global__ void batch_norm_backward_elemt_channels_last_kernel( + const scalar_t* __restrict__ grad_output, + const scalar_t* __restrict__ input, + const accscalar_t* __restrict__ mean, + const accscalar_t* __restrict__ inv_std, + const layerscalar_t* __restrict__ weight, + const accscalar_t* __restrict__ sum_dy, + const accscalar_t* __restrict__ sum_dy_xmu, + const int* __restrict__ numel, + scalar_t* __restrict__ grad_input, + const int64_t world_size, + const int reduction_size, + const int stride) { + + int64_t total_numel = 0; + for (int i = 0; i < world_size; i++) { + total_numel += numel[i]; + } + + auto norm_fct = static_cast(1) / static_cast(total_numel); + batch_norm_backward_elemt_channels_last_kernel_impl( + grad_output, input, mean, inv_std, weight, sum_dy, sum_dy_xmu, + grad_input, norm_fct, reduction_size, stride); +} + +template < + int PARALLEL_LOADS, + typename scalar_t, + typename accscalar_t, + typename layerscalar_t> +__global__ void batch_norm_backward_elemt_channels_last_kernel( + const scalar_t* __restrict__ grad_output, + const scalar_t* __restrict__ input, + const accscalar_t* __restrict__ mean, + const accscalar_t* __restrict__ inv_std, + const layerscalar_t* __restrict__ weight, + const accscalar_t* __restrict__ sum_dy, + const accscalar_t* __restrict__ sum_dy_xmu, + scalar_t* __restrict__ grad_input, + const accscalar_t norm_fct, + const int reduction_size, + const int stride) { + batch_norm_backward_elemt_channels_last_kernel_impl( + grad_output, input, mean, inv_std, weight, sum_dy, sum_dy_xmu, + grad_input, norm_fct, reduction_size, stride); +} + +template +void batch_norm_stats_channels_last_cuda_template( + const Tensor& out_mean, const Tensor& out_invstd, const Tensor& input, double epsilon) { + using accscalar_t = at::acc_type; + + const auto stride = input.sizes()[1]; + const auto reduction_size = input.numel() / stride; + + resize_output(out_mean, {stride}); + resize_output(out_invstd, {stride}); + TORCH_INTERNAL_ASSERT(out_invstd.dim() == 1 && out_invstd.is_contiguous() && + out_invstd.sizes()[0]); + TORCH_INTERNAL_ASSERT(out_mean.dim() == 1 && out_mean.is_contiguous() && + out_mean.sizes()[0]); + + dim3 block; + dim3 grid; + flexible_launch_configs(reduction_size, stride, block, grid, true); + + at::Tensor staging_data; + at::Tensor semaphores; + if (grid.y > 1) { + staging_data = at::empty({4*stride*grid.y}, out_mean.options()); + semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt)); + } + + accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data_ptr() : nullptr; + int* semaphores_ptr = grid.y > 1 ? 
semaphores.data_ptr() : nullptr; + batch_norm_collect_statistics_channels_last_kernel + <<>>( + input.data_ptr(), + out_mean.data_ptr(), + out_invstd.data_ptr(), + staging_data_ptr, + semaphores_ptr, + reduction_size, + stride, + epsilon); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +void batch_norm_elemt_channels_last_cuda_template( + const at::Tensor& output, + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& shift, // bias of BN + const at::Tensor& mean, + const at::Tensor& inv_std, + const at::optional& z = c10::nullopt, // bias after BN + const bool fuse_relu = false) { + const auto stride = input.sizes()[1]; + const auto reduction_size = input.numel() / stride; + + dim3 block; + dim3 grid; + flexible_launch_configs(reduction_size, stride, block, grid); + + auto stream = at::cuda::getCurrentCUDAStream(); + const auto second_dtype = weight.defined() ? weight.scalar_type() : + (shift.defined() ? shift.scalar_type() : input.scalar_type()); + + if (input.scalar_type() != second_dtype) { + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batchnorm_forward", [&] { + using accscalar_t = at::acc_type; + batch_norm_transform_input_channels_last_kernel + <<>>( + input.data_ptr(), + z.has_value() ? z.value().data_ptr() : nullptr, + mean.data_ptr(), + inv_std.data_ptr(), + weight.defined() ? weight.data_ptr() : nullptr, + shift.defined() ? shift.data_ptr() : nullptr, + output.data_ptr(), + reduction_size, + stride, + fuse_relu); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + } else { + if (weight.defined()){ + TORCH_CHECK(input.scalar_type() == weight.scalar_type(), "batchnorm_forward: input.scalar_type() ", input.scalar_type(), + " is not supported with weight.scalar_type() ", weight.scalar_type()); + } + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batchnorm_forward", [&] { + using accscalar_t = at::acc_type; + batch_norm_transform_input_channels_last_kernel + <<>>( + input.data_ptr(), + z.has_value() ? z.value().data_ptr() : nullptr, + mean.data_ptr(), + inv_std.data_ptr(), + weight.defined() ? weight.data_ptr() : nullptr, + shift.defined() ? 
shift.data_ptr(): nullptr, + output.data_ptr(), + reduction_size, + stride, + fuse_relu); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + } +} + +std::tuple +batch_norm_backward_reduce_cuda_channels_last_template(const at::Tensor& grad_output, + const at::Tensor& input, + const at::Tensor& mean, + const at::Tensor& inv_std, + const at::Tensor& weight, + const bool input_g, const bool weight_g, const bool bias_g) { + const auto stride = input.sizes()[1]; + const auto reduction_size = input.numel() / stride; + + at::Tensor sumn_dy = at::empty({stride}, mean.options()); + at::Tensor sum_dy_xmu = at::empty({stride}, mean.options()); + + at::Tensor grad_weight; + at::Tensor grad_bias; + if (weight.defined()) { + grad_weight = at::empty({stride}, weight.options()); + grad_bias = at::empty({stride}, weight.options()); + } else { + // because I cannot return an uninitialized at::Tensor + grad_weight = at::empty({0}, mean.options()); + grad_bias = at::empty({0}, mean.options()); + } + + dim3 block; + dim3 grid; + flexible_launch_configs(reduction_size, stride, block, grid, true); + + at::Tensor staging_data; + at::Tensor semaphores; + if (grid.y > 1) { + staging_data = at::empty({2*stride*grid.y}, mean.options()); + semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt)); + } + auto stream = at::cuda::getCurrentCUDAStream(); + + if (weight.defined() && input.scalar_type() != weight.scalar_type()) { + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batchnorm_backward_reduce", [&] { + using accscalar_t = at::acc_type; + accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data_ptr() : nullptr; + int* semaphores_ptr = grid.y > 1 ? semaphores.data_ptr() : nullptr; + batch_norm_backward_reduce_channels_last_kernel + <<>>( + input.data_ptr(), + grad_output.data_ptr(), + mean.data_ptr(), + inv_std.data_ptr(), + sumn_dy.data_ptr(), + sum_dy_xmu.data_ptr(), + grad_weight.data_ptr(), + grad_bias.data_ptr(), + staging_data_ptr, + semaphores_ptr, + reduction_size, + stride); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + } else { + if (weight.defined()) { + TORCH_CHECK(input.scalar_type() == weight.scalar_type(), "batchnorm_backward_reduce: input.scalar_type() ", input.scalar_type(), + " is not supported with weight.scalar_type() ", weight.scalar_type()); + } + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batchnorm_backward_reduce", [&] { + using accscalar_t = at::acc_type; + accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data_ptr() : nullptr; + int* semaphores_ptr = grid.y > 1 ? semaphores.data_ptr() : nullptr; + batch_norm_backward_reduce_channels_last_kernel + <<>>( + input.data_ptr(), + grad_output.data_ptr(), + mean.data_ptr(), + inv_std.data_ptr(), + sumn_dy.data_ptr(), + sum_dy_xmu.data_ptr(), + weight.defined() ? grad_weight.data_ptr() : nullptr, + weight.defined() ? 
grad_bias.data_ptr<scalar_t>() : nullptr,
+              staging_data_ptr,
+              semaphores_ptr,
+              reduction_size,
+              stride);
+      C10_CUDA_KERNEL_LAUNCH_CHECK();
+    });
+  }
+
+  return std::make_tuple(sumn_dy, sum_dy_xmu, grad_weight, grad_bias);
+}
+
+at::Tensor batch_norm_backward_elemt_channels_last_cuda_template(
+    const at::Tensor& grad_output,
+    const at::Tensor& input,
+    const at::Tensor& mean,
+    const at::Tensor& inv_std,
+    const at::Tensor& weight,
+    const at::Tensor& sum_dy,
+    const at::Tensor& sum_dy_xmu,
+    const at::Tensor& count) {
+  const auto stride = input.sizes()[1];
+  const auto reduction_size = input.numel() / stride;
+
+  // Input is guaranteed to be channels-last compatible
+  at::Tensor grad_input = at::empty_like(input);
+
+  dim3 block;
+  dim3 grid;
+  flexible_launch_configs(reduction_size, stride, block, grid);
+
+  auto stream = at::cuda::getCurrentCUDAStream();
+
+  if (weight.defined() && weight.scalar_type() != input.scalar_type()) {
+    AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batchnorm_backward_element", [&] {
+      using accscalar_t = at::acc_type<scalar_t, true>;
+      batch_norm_backward_elemt_channels_last_kernel<ELEMENTS_PER_ITER>
+          <<<grid, block, 0, stream>>>(
+              grad_output.data_ptr<scalar_t>(),
+              input.data_ptr<scalar_t>(),
+              mean.data_ptr<accscalar_t>(),
+              inv_std.data_ptr<accscalar_t>(),
+              weight.data_ptr<accscalar_t>(),
+              sum_dy.data_ptr<accscalar_t>(),
+              sum_dy_xmu.data_ptr<accscalar_t>(),
+              count.data_ptr<int>(),
+              grad_input.data_ptr<scalar_t>(),
+              count.numel(),
+              reduction_size,
+              stride);
+      C10_CUDA_KERNEL_LAUNCH_CHECK();
+    });
+  } else {
+    if (weight.defined()) {
+      TORCH_CHECK(input.scalar_type() == weight.scalar_type(), "batchnorm_backward_element: input.scalar_type() ", input.scalar_type(),
+          " is not supported with weight.scalar_type() ", weight.scalar_type());
+    }
+    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "batchnorm_backward_element", [&] {
+      using accscalar_t = at::acc_type<scalar_t, true>;
+      batch_norm_backward_elemt_channels_last_kernel<ELEMENTS_PER_ITER>
+          <<<grid, block, 0, stream>>>(
+              grad_output.data_ptr<scalar_t>(),
+              input.data_ptr<scalar_t>(),
+              mean.data_ptr<accscalar_t>(),
+              inv_std.data_ptr<accscalar_t>(),
+              weight.defined() ?
weight.data_ptr<scalar_t>() : nullptr,
+              sum_dy.data_ptr<accscalar_t>(),
+              sum_dy_xmu.data_ptr<accscalar_t>(),
+              count.data_ptr<int>(),
+              grad_input.data_ptr<scalar_t>(),
+              count.numel(),
+              reduction_size,
+              stride);
+      C10_CUDA_KERNEL_LAUNCH_CHECK();
+    });
+  }
+
+  return grad_input;
+}
+
+at::Tensor batch_norm_backward_elemt_channels_last_cuda_template(
+    const at::Tensor& grad_output,
+    const at::Tensor& input,
+    const at::Tensor& mean,
+    const at::Tensor& inv_std,
+    const at::Tensor& weight,
+    const at::Tensor& sum_dy,
+    const at::Tensor& sum_dy_xmu) {
+  const auto stride = input.sizes()[1];
+  const auto reduction_size = input.numel() / stride;
+  auto norm_fct = 1.0 / reduction_size;
+
+  // Input is guaranteed to be channels-last compatible
+  at::Tensor grad_input = at::empty_like(input);
+
+  dim3 block;
+  dim3 grid;
+  flexible_launch_configs(reduction_size, stride, block, grid);
+
+  auto stream = at::cuda::getCurrentCUDAStream();
+
+  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "batchnorm_backward_element", [&] {
+    using accscalar_t = at::acc_type<scalar_t, true>;
+
+    if (weight.defined() && weight.scalar_type() != input.scalar_type()) {
+      batch_norm_backward_elemt_channels_last_kernel<ELEMENTS_PER_ITER>
+          <<<grid, block, 0, stream>>>(
+              grad_output.data_ptr<scalar_t>(),
+              input.data_ptr<scalar_t>(),
+              mean.data_ptr<accscalar_t>(),
+              inv_std.data_ptr<accscalar_t>(),
+              weight.data_ptr<accscalar_t>(),
+              sum_dy.data_ptr<accscalar_t>(),
+              sum_dy_xmu.data_ptr<accscalar_t>(),
+              grad_input.data_ptr<scalar_t>(),
+              static_cast<accscalar_t>(norm_fct),
+              reduction_size,
+              stride);
+      C10_CUDA_KERNEL_LAUNCH_CHECK();
+    } else {
+      batch_norm_backward_elemt_channels_last_kernel<ELEMENTS_PER_ITER>
+          <<<grid, block, 0, stream>>>(
+              grad_output.data_ptr<scalar_t>(),
+              input.data_ptr<scalar_t>(),
+              mean.data_ptr<accscalar_t>(),
+              inv_std.data_ptr<accscalar_t>(),
+              weight.defined() ? weight.data_ptr<scalar_t>() : nullptr,
+              sum_dy.data_ptr<accscalar_t>(),
+              sum_dy_xmu.data_ptr<accscalar_t>(),
+              grad_input.data_ptr<scalar_t>(),
+              static_cast<accscalar_t>(norm_fct),
+              reduction_size,
+              stride);
+      C10_CUDA_KERNEL_LAUNCH_CHECK();
+    }
+  });
+
+  return grad_input;
+}
+
+} } // namespace at::native
diff --git a/voice_bridge/torch/include/ATen/native/cuda/PersistentSoftmax.cuh b/voice_bridge/torch/include/ATen/native/cuda/PersistentSoftmax.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..5d3bea36e37a300630453a223085568de8eafb4e
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/cuda/PersistentSoftmax.cuh
@@ -0,0 +1,402 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace {
+
+int log2_ceil(int value) {
+  int log2_value = 0;
+  while ((1 << log2_value) < value) ++log2_value;
+  return log2_value;
+}
+
+template <typename T>
+struct Add {
+  __device__ __forceinline__ T operator()(T a, T b) const {
+    return a + b;
+  }
+};
+
+template <typename T>
+struct Max {
+  __device__ __forceinline__ T operator()(T a, T b) const {
+    return a < b ? b : a;
+  }
+};
+
+template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
+__device__ __forceinline__ void warp_reduce(acc_t* sum) {
+  ReduceOp<acc_t> r;
+  #pragma unroll
+  for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
+    #pragma unroll
+    for (int i = 0; i < WARP_BATCH; ++i) {
+      acc_t b = WARP_SHFL_XOR(sum[i], offset, WARP_SIZE);
+      sum[i] = r(sum[i], b);
+    }
+  }
+}
+
+// The softmax_warp_* methods perform softmax forward and backward propagation on samples spanning the fast dimension.
+// Each sample contains element_count scalar elements. element_count can be any integer value <= 1024.
+// The template arguments have the following meaning:
+// One "WARP" works on one "BATCH". One "BATCH" contains "WARP_BATCH" samples.
+// WARP_BATCH is equal to 1 when element_count is large, and > 1 when element_count is small.
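+// For instance, element_count = 64 gives next_power_of_two = 64, hence (on CUDA,
+// where C10_WARP_SIZE is 32) WARP_SIZE = 32, WARP_ITERATIONS = 64 / 32 = 2 and
+// WARP_BATCH = 2, so each warp handles two samples at once.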
+// A "WARP" contains "C10_WARP_SIZE" threads, and these threads are guaranteed to belong to the same warp.
+// This is important because it means only __shfl_ instructions are required for reductions.
+// Note that this means WARP_SIZE must be a power of two and <= architecture warp size.
+// CUDA warp size is 32 for all existing GPU architectures, but there is no guarantee this will not change for future arch.
+// ROCm warp size is 64 for all currently ROCm-supported GPU architectures, but this may change for future archs.
+// is_log_softmax is a flag indicating whether SoftMax or LogSoftMax should be computed.
+// is_masked is a flag indicating whether SoftMax or MaskedSoftMax should be computed.
+// The template can be instantiated with any floating point type for the type arguments input_t, output_t and acc_t.
+// This allows SoftMax to be fused with a cast immediately following the SoftMax.
+// The mask should have the same shape as input, with a boolean indicating whether each value is masked.
+// The head_chunk_size is only used for the transformer mask softmax and is equal to H * D * D.
+// For instance:
+// input_t=half, acc_t=float, output_t=half => read half tensor, float accumulators, write half tensor.
+// input_t=half, acc_t=float, output_t=float => read half tensor, float accumulators, write float tensor.
+// input_t=float, acc_t=float, output_t=half => read float tensor, float accumulators, write half tensor.
+
+template <typename input_t, typename output_t, typename acc_t, int log2_elements, bool is_log_softmax, bool is_masked>
+__global__ void softmax_warp_forward(output_t *dst, const input_t *src, int batch_size, int stride, int element_count, const bool *mask = nullptr, const int head_chunk_size = -1, bool is_transformer_mask = false)
+{
+  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method warp_softmax_forward_kernel.
+  constexpr int next_power_of_two = 1 << log2_elements;
+  constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
+  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
+  constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
+
+  int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
+
+  // batch_size might not be a multiple of WARP_BATCH. Check how
+  // many batches have to be computed within this WARP.
+  int local_batches = batch_size - first_batch;
+  if (local_batches > WARP_BATCH)
+    local_batches = WARP_BATCH;
+
+  // there might be multiple batches per warp. compute the index within the batch
+  int local_idx = threadIdx.x;
+  int idx_offset = first_batch * stride + local_idx;
+
+  src += idx_offset;
+  dst += idx_offset;
+
+  if (is_transformer_mask) {
+    mask += ((first_batch * stride) / head_chunk_size) * stride + local_idx;
+  } else {
+    mask += idx_offset;
+  }
+  // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop,
+  // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep
+  // the nested loops.
+  // This should have no impact on performance because the loops are unrolled anyway.
+
+  // load data from global memory
+  acc_t elements[WARP_BATCH][WARP_ITERATIONS];
+  for (int i = 0; i < WARP_BATCH; ++i) {
+    int batch_element_count = (i >= local_batches) ?
0 : element_count; + for (int it = 0; it < WARP_ITERATIONS; ++it) { + int element_index = local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + elements[i][it] = src[i*element_count+it*WARP_SIZE]; + } else { + elements[i][it] = -std::numeric_limits::infinity(); + } + } + } + + // compute max_value + acc_t max_value[WARP_BATCH]; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + bool is_meaningful_max = false; + max_value[i] = elements[i][0]; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + if (is_masked) { + int idx = it*WARP_SIZE; + if ((idx + local_idx) < batch_element_count) { + if (!is_transformer_mask) { + idx += i*element_count; + } + if (!mask[idx]) { + max_value[i] = (is_meaningful_max && max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + is_meaningful_max = true; + } + } + } else { + max_value[i] = max_value[i] > elements[i][it] ? max_value[i] : elements[i][it]; + } + } + if (is_masked) { + if (!is_meaningful_max) { + max_value[i] = -std::numeric_limits::infinity(); + } + } + } + warp_reduce(max_value); + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + if (!is_masked) { + if (is_log_softmax) { + sum[i] += std::exp(elements[i][it] - max_value[i]); + } else { + elements[i][it] = std::exp(elements[i][it] - max_value[i]); + sum[i] += elements[i][it]; + } + } else { + int idx = it*WARP_SIZE; + bool valid = (idx + local_idx) < batch_element_count; + if (!is_transformer_mask) { + idx += i*element_count; + } + if (valid) { + if (!mask[idx]) { + if (is_log_softmax) { + sum[i] += std::exp(elements[i][it] - max_value[i]); + } else { + elements[i][it] = std::exp(elements[i][it] - max_value[i]); + sum[i] += elements[i][it]; + } + } else { + if (!is_log_softmax) { + // Masked values are treated as -infinity, and std::exp(-infinity) is 0. + elements[i][it] = 0; + } + } + } else { + if (!is_log_softmax) { + elements[i][it] = 0.; + } + } + } + } + } + warp_reduce(sum); + + // store result + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + if (is_log_softmax) sum[i] = std::log(sum[i]); + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + int element_index = local_idx + it * WARP_SIZE; + if (element_index < element_count) { + if (is_log_softmax) { + dst[i*element_count+it*WARP_SIZE] = elements[i][it] - max_value[i] - sum[i]; + } else if (sum[i] == 0) { + dst[i*element_count+it*WARP_SIZE] = std::numeric_limits::quiet_NaN(); + } else { + dst[i*element_count+it*WARP_SIZE] = elements[i][it] / sum[i]; + } + } else { + break; + } + } + } +} + +template +__global__ void softmax_warp_backward(output_t *gradInput, const input_t *grad, const input_t *output, int batch_size, int stride, int element_count, const bool *mask = nullptr) +{ + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and warp_size of method warp_softmax_backward_kernel. + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 
2 : 1; + + int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + + // batch_size might not be a multiple of WARP_BATCH. Check how + // many batches have to computed within this WARP. + int local_batches = batch_size - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // there might be multiple batches per warp. compute the index within the batch + int local_idx = threadIdx.x % WARP_SIZE; + + // the first element to process by the current thread + int thread_offset = first_batch * stride + local_idx; + grad += thread_offset; + output += thread_offset; + gradInput += thread_offset; + if (is_masked) { + mask += thread_offset; + } + + // The nested loops over WARP_BATCH and then WARP_ITERATIONS can be simplified to one loop, + // but I think doing so would obfuscate the logic of the algorithm, thus I chose to keep + // the nested loops. + // This should have no impact on performance because the loops are unrolled anyway. + + // load data from global memory + acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS]; + acc_t output_reg[WARP_BATCH][WARP_ITERATIONS]; + for (int i = 0; i < WARP_BATCH; ++i) { + int batch_element_count = (i >= local_batches) ? 0 : element_count; + for (int it = 0; it < WARP_ITERATIONS; ++it) { + int element_index = local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + grad_reg[i][it] = grad[i*element_count+it*WARP_SIZE]; + output_reg[i][it] = output[i*element_count+it*WARP_SIZE]; + } else { + grad_reg[i][it] = acc_t(0); + output_reg[i][it] = acc_t(0); + } + } + } + + acc_t sum[WARP_BATCH] { 0.0f }; + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + if (!is_masked || !mask[i*element_count+it*WARP_SIZE]) { + sum[i] += grad_reg[i][it]; + } + } + } + warp_reduce(sum); + + // store result + #pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; + #pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + int element_index = local_idx + it * WARP_SIZE; + if (element_index < element_count) { + if (is_masked && mask[i*element_count+it*WARP_SIZE]) { + gradInput[i*element_count+it*WARP_SIZE] = 0; + } + // compute gradients + else if (is_log_softmax) { + gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - std::exp(output_reg[i][it]) * sum[i]); + } else { + gradInput[i*element_count+it*WARP_SIZE] = (grad_reg[i][it] - output_reg[i][it] * sum[i]); + } + } + } + } +} + +} // end of anonymous namespace + +template +void dispatch_softmax_forward(output_t *dst, const input_t *src, int softmax_elements, int softmax_elements_stride, int batch_count, const bool *mask = nullptr, int chunk_size = -1, bool is_transformer_mask = false) +{ + TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 ); + if (softmax_elements == 0) { + return; + } else { + int log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = at::cuda::warp_size(); + warp_size = (next_power_of_two < warp_size) ? next_power_of_two : warp_size; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks = (batch_count + batches_per_block - 1) / batches_per_block; + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + #define LAUNCH_SOFTMAX_WARP_FORWARD(L2E) case L2E: \ + softmax_warp_forward \ + <<>>(dst, \ + src, batch_count, softmax_elements_stride, softmax_elements, mask, chunk_size, is_transformer_mask); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); \ + break; + + LAUNCH_SOFTMAX_WARP_FORWARD(0); // 1 + LAUNCH_SOFTMAX_WARP_FORWARD(1); // 2 + LAUNCH_SOFTMAX_WARP_FORWARD(2); // 4 + LAUNCH_SOFTMAX_WARP_FORWARD(3); // 8 + LAUNCH_SOFTMAX_WARP_FORWARD(4); // 16 + LAUNCH_SOFTMAX_WARP_FORWARD(5); // 32 + LAUNCH_SOFTMAX_WARP_FORWARD(6); // 64 + LAUNCH_SOFTMAX_WARP_FORWARD(7); // 128 + LAUNCH_SOFTMAX_WARP_FORWARD(8); // 256 + LAUNCH_SOFTMAX_WARP_FORWARD(9); // 512 + LAUNCH_SOFTMAX_WARP_FORWARD(10); ; // 1024 + default: + break; + } + } +} + +template +void dispatch_softmax_backward(output_t *grad_input, const input_t *grad, const input_t *output, int softmax_elements, int softmax_elements_stride, int batch_count, const bool *mask = nullptr) +{ + TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 1024 ); + if (softmax_elements == 0) { + return; + } else { + int log2_elements = log2_ceil(softmax_elements); + const int next_power_of_two = 1 << log2_elements; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward. + int warp_size = at::cuda::warp_size(); + warp_size = (next_power_of_two < warp_size) ? next_power_of_two : warp_size; + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks = (batch_count + batches_per_block - 1) / batches_per_block; + dim3 threads(warp_size, warps_per_block, 1); + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + #define LAUNCH_SOFTMAX_WARP_BACKWARD(L2E) case L2E: \ + softmax_warp_backward \ + <<>> \ + (grad_input, grad, output, batch_count, softmax_elements_stride, \ + softmax_elements, mask); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); \ + break; + + LAUNCH_SOFTMAX_WARP_BACKWARD(0); // 1 + LAUNCH_SOFTMAX_WARP_BACKWARD(1); // 2 + LAUNCH_SOFTMAX_WARP_BACKWARD(2); // 4 + LAUNCH_SOFTMAX_WARP_BACKWARD(3); // 8 + LAUNCH_SOFTMAX_WARP_BACKWARD(4); // 16 + LAUNCH_SOFTMAX_WARP_BACKWARD(5); // 32 + LAUNCH_SOFTMAX_WARP_BACKWARD(6); // 64 + LAUNCH_SOFTMAX_WARP_BACKWARD(7); // 128 + LAUNCH_SOFTMAX_WARP_BACKWARD(8); // 256 + LAUNCH_SOFTMAX_WARP_BACKWARD(9); // 512 + LAUNCH_SOFTMAX_WARP_BACKWARD(10); // 1024 + default: + break; + } + } +} diff --git a/voice_bridge/torch/include/ATen/native/cuda/Pow.cuh b/voice_bridge/torch/include/ATen/native/cuda/Pow.cuh new file mode 100644 index 0000000000000000000000000000000000000000..9530b0ede27459d33fe9c8a01b71129621da499c --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Pow.cuh @@ -0,0 +1,58 @@ +#pragma once +#include +#include + +namespace at { namespace native { + +namespace { + + +// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt. +// So we need to define the functions with the explicit function signatures. +// As for pow, the following signatures are defined as the device function: +// pow(float, int) +// pow(double, int) +// pow(float, float) +// pow(double, double) +#ifdef _MSC_VER +// Functions for pow +// pow for at::Half +static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) { + return static_cast(std::pow(static_cast(base), static_cast(exp))); +} +// pow for at::BFloat16 +static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) { + return static_cast(std::pow(static_cast(base), static_cast(exp))); +} +// pow (floating, floating/int) +template +static inline __host__ __device__ typename std::enable_if::value && (std::is_same::value || std::is_same::value), Base_type>::type + pow_(Base_type base, Exp_type exp) { + return std::pow(base, exp); +} +// pow (Otherwise) +template +static inline __host__ __device__ typename std::enable_if::value && !std::is_same::value, Base_type>::type + pow_(Base_type base, Exp_type exp) { + return static_cast(std::pow(static_cast(base), static_cast(exp))); +} +#else +template +static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) { + return ::pow(base, exp); +} +#endif + +template +static inline __host__ __device__ std::enable_if_t::value, T> pow_( + T base, T exp) { + return at::native::powi(base, exp); +} + +template +static inline __host__ __device__ c10::complex pow_(c10::complex base, c10::complex exp) { + return c10_complex_math::pow(base, exp); +} + +} // namespace +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/ROCmLoops.cuh b/voice_bridge/torch/include/ATen/native/cuda/ROCmLoops.cuh new file mode 100644 index 0000000000000000000000000000000000000000..586c47abe001862e0a033b5d65c1839415e10283 --- /dev/null +++ 
b/voice_bridge/torch/include/ATen/native/cuda/ROCmLoops.cuh @@ -0,0 +1,364 @@ +#pragma once + +// This file provides two functions to help write GPU elementwise kernels: +// +// gpu_kernel(TensorIterator iter, ) +// gpu_kernel_with_scalars(TensorIterator iter, ) +// +// The gpu_kernel_with_scalars generates specializations that support a +// single scalar CPU argument, such as from `cuda_tensor + 5`. The CPU scalar +// is lifted to a kernel parameter instead of copying to device memory. +// This should be used in conjunction with TensorIterator::allow_cpu_scalars_, +// which is the default for TensorIterator::binary_op. Otherwise, all inputs +// and the output must be on the GPU. +// +// For example, to write a reciprocal kernel for GPU float Tensors: +// +// gpu_kernel(iter, []GPU_LAMBDA(float a) { +// return 1.0f / a; +// }); +// +// To write a multiplication kernel for GPU float Tensors where one argument +// may be a CPU scalar: +// +// gpu_kernel_with_scalars(iter, []GPU_LAMBDA(float a, float b) { +// return a * b; +// }); +// +// See BinaryOpsKernel.cu for the complete implementation +// + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef __NVCC__ +#define ASSERT_HOST_DEVICE_LAMBDA(type) \ + static_assert(__nv_is_extended_host_device_lambda_closure_type(type), \ + #type " must be a __host__ __device__ lambda") +#else +#define ASSERT_HOST_DEVICE_LAMBDA(type) +#endif + +static constexpr int launch_size_1d = 512; +static constexpr int launch_size_nd = 128; +static constexpr int launch_bound2 = 4; + + +namespace at { namespace native { + +// See [NOTE: Complex Operator Unification] +// std::complex and thrust::complex don't work with some !needs_dynamic_casting optimizations. +// They always currently map to !needs_dynamic_casting even though we sometimes rely on the ability +// to reinterpret_cast between these representations. +// In order to separate these concerns, we have a check for non-c10 complex separately. +template::arity> +struct uses_non_c10_complex { + constexpr static bool check() { + using traits = function_traits; + using type = typename traits::template arg::type; + constexpr bool non_c10_complex = + std::is_same, type>::value + || std::is_same, type>::value + || std::is_same, type>::value + || std::is_same, type>::value; + + return c10::guts::if_constexpr([]() { + return true; + }, /* else */ []() { + return uses_non_c10_complex::check(); + }); + } +}; + +template +struct uses_non_c10_complex { + constexpr static bool check() { + using traits = function_traits; + using type = typename traits::result_type; + constexpr bool non_c10_complex = + std::is_same, type>::value + || std::is_same, type>::value + || std::is_same, type>::value + || std::is_same, type>::value; + + return non_c10_complex; + } +}; + +// NOTE: @zasdfgbnm is currently working on rewriting the gpu loops. +// Some of the old codes has been moved to namespace legacy, and +// new codes will be put into namespace modern. These two namespaces +// will coexists for a while until the rewrite is done. Once the rewrite +// is done, we will remove the legacy and modern namespace and everything +// will be in at::native directly. 
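// A minimal standalone sketch, not from the upstream header, of the
// compile-time argument scan that uses_non_c10_complex performs above: walk a
// functor's result and argument types and OR together a "non-c10 complex?"
// predicate. The header uses a recursive struct because it targets C++14;
// assuming C++17, a fold expression expresses the same scan directly.
// is_std_complex_v and any_non_c10_complex are illustrative names, not part of
// the real API, and the sketch only checks std::complex (the real trait also
// checks thrust::complex).
#include <complex>
#include <type_traits>

template <typename T>
constexpr bool is_std_complex_v =
    std::is_same_v<std::decay_t<T>, std::complex<float>> ||
    std::is_same_v<std::decay_t<T>, std::complex<double>>;

template <typename R, typename... Args>
constexpr bool any_non_c10_complex(R (*)(Args...)) {
  // OR the predicate over the return type and every argument type.
  return is_std_complex_v<R> || (is_std_complex_v<Args> || ...);
}

// Compile-time checks: a plain float binary op involves no std::complex;
// an op taking std::complex<float> does.
static_assert(!any_non_c10_complex(static_cast<float (*)(float, float)>(nullptr)), "");
static_assert(any_non_c10_complex(static_cast<std::complex<float> (*)(std::complex<float>)>(nullptr)), "");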
+namespace legacy { + +template +C10_LAUNCH_BOUNDS_2(nt, launch_bound2) +__global__ void elementwise_kernel(int N, func_t f) { + int tid = threadIdx.x; + int nv = nt * vt; + int idx = nv * blockIdx.x + tid; + #pragma unroll + for (int i = 0; i < vt; i++) { + if (idx < N) { + f(idx); + idx += nt; + } + } +} + +template +static void launch_kernel(int64_t N, const func_t& f) { + TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits::max()); + if (N == 0) { + return; + } + dim3 block(nt); + dim3 grid((N + block.x * vt - 1) / (block.x * vt)); + auto stream = at::cuda::getCurrentCUDAStream(); + elementwise_kernel<<>>(N, f); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +C10_HOST_DEVICE typename traits::result_type +invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i, + std::index_sequence) { + return f(c10::load::type>(data[INDEX] + i * strides[INDEX])...); +} + +template > +C10_HOST_DEVICE typename traits::result_type +invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i) { + using Indices = std::make_index_sequence; + return invoke_impl(f, data, strides, i, Indices{}); +} + +template +C10_HOST_DEVICE typename traits::result_type +invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i, + std::index_sequence) { + return f(c10::fetch_and_cast::type>(dtypes[I], data[I] + i * strides[I])...); +} + +template > +C10_HOST_DEVICE typename traits::result_type +invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i) { + using Indices = std::make_index_sequence; + return invoke_impl(f, data, strides, dtypes, i, Indices{}); +} + +} // namespace legacy + +// See the note for namespace legacy above. +namespace modern { + +namespace detail { + +template +__device__ inline constexpr decltype(auto) invoke_with_array_impl(func_t f, array_t t, std::index_sequence) +{ + return f(t[I]...); +} +template +__device__ inline constexpr decltype(auto) invoke_with_array(func_t f, array_t a) { + constexpr auto arity = function_traits::arity; + return invoke_with_array_impl(f, a, std::make_index_sequence{}); +} + +namespace arg_type { + +// We need a way to compute the argument type of a function. But +// for nullary function, it does not really have an argument type +// in this case, we still need to return a valid type, but we don't +// really care what type this is. + +struct dont_care {}; + +template +struct arg_type_helper { + using type = typename function_traits::template arg<0>::type; +}; + +template +struct arg_type_helper { + using type = dont_care; +}; + +template +using type = typename arg_type_helper::arity>::type; + +} // namespace arg_type + +template::arity-1> +struct has_same_arg_types { + using traits = function_traits; + static constexpr bool value = std::is_same< + typename traits::template arg::type, + typename traits::template arg::type + >::value && has_same_arg_types::value; +}; + +template +struct has_same_arg_types { + static constexpr bool value = true; +}; + +template +struct has_same_arg_types { + static constexpr bool value = true; +}; + +} // namespace detail + +template +C10_LAUNCH_BOUNDS_1(num_threads()) +__global__ void elementwise_kernel(int N, func_t f, array_t data) { + // Assumption: + // 1. all arguments of `f` have the same type, which could be different from the return type of `f` + // 2. 
all tensors are contiguous, that is: stride == sizeof(type) for all tensors + + using traits = function_traits; + using return_t = typename traits::result_type; + using arg_t = detail::arg_type::type; + constexpr int arity = traits::arity; + + // We need to create array to hold all the arguments, for nullary `f`, this means array of size 0. + // Unfortunately the compiler don't allow us to create array of 0 size, so for this case, we create + // an array of size 1 and just don't use it. + constexpr int nargs = traits::arity == 0 ? 1 : traits::arity; + + int tid = threadIdx.x; + int idx = block_work_size() * blockIdx.x + tid; + + // compute base pointers + return_t *result_base = reinterpret_cast(data[0]) + idx; + arg_t *args_base[nargs]; + #pragma unroll + for (int i = 0; i < arity; i++) { + args_base[i] = reinterpret_cast(data[i + 1]) + idx; + } + + // fetch data + return_t results[thread_work_size()]; + arg_t args[thread_work_size()][nargs]; + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (idx + num_threads() * i < N) { + #pragma unroll + for (int j = 0; j < arity; j++) { + args[i][j] = c10::load(args_base[j] + i * num_threads()); + } + } + } + + // compute + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (idx + num_threads() * i < N) { + results[i] = detail::invoke_with_array(f, args[i]); + } + } + + // store data + #pragma unroll + for (int i = 0; i < thread_work_size(); i++) { + if (idx + num_threads() * i < N) { + *(result_base + i * num_threads()) = results[i]; + } + } +} + +// TODO (@zasdfgbnm): this function assume trivial 1d and no dynamic casting +template::value, int> = 0> +static void launch_kernel(int64_t N, const func_t& f, array_t data) { + TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits::max()); + if (N == 0) { + return; + } + int64_t grid = (N + block_work_size() - 1) / block_work_size(); + auto stream = at::cuda::getCurrentCUDAStream(); + elementwise_kernel<<>>(N, f, data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template::value, int> = 0> +static void launch_kernel(int64_t N, const func_t& f, array_t data) {} + +} // namespace modern + + +template +void gpu_kernel_impl(TensorIteratorBase& iter, const func_t& f) { + using traits = function_traits; + using arg0_t = typename traits::result_type; + constexpr int ntensors = traits::arity + 1; + + TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing()); + TORCH_INTERNAL_ASSERT(iter.ntensors() == traits::arity + 1); + bool non_c10_complex = uses_non_c10_complex::check(); + + at::detail::Array data; + for (int i = 0; i < ntensors; i++) { + data[i] = (char*)iter.data_ptr(i); + } + + at::detail::Array dtypes; + for (int i = 0; i < ntensors; i++) { + dtypes[i] = iter.dtype(i); + } + + int64_t numel = iter.numel(); + if (iter.is_trivial_1d()) { + auto inner_strides = iter.get_inner_strides(); + at::detail::Array strides; + for (int i = 0; i < ntensors; i++) { + strides[i] = inner_strides[i]; + } + + // TODO: can non_c10_complex go through the other path? Need to verify. 
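// The trivial-1d block below dispatches between three code paths: (1) the
// legacy kernel with fetch_and_cast/cast_and_store when dynamic casting is
// needed (per-tensor dtypes differ from the functor's types, or a non-c10
// complex type is involved); (2) the modern vectorized kernel when the first
// dimension is contiguous and all functor arguments share one type; and (3)
// the legacy kernel without casting, doing strided loads through
// reinterpret_cast'ed pointers.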
+ if (needs_dynamic_casting::check(iter) || non_c10_complex) { + legacy::launch_kernel(numel, [=]GPU_LAMBDA(int idx) { + void* out = data[0] + strides[0] * idx; + arg0_t result = legacy::invoke(f, &data.data[1], &strides.data[1], &dtypes.data[1], idx); + c10::cast_and_store(dtypes[0], out, result); + }); + } else if (iter.has_contiguous_first_dim() && modern::detail::has_same_arg_types::value) { + modern::launch_kernel(numel, f, data); + } else { + legacy::launch_kernel(numel, [=]GPU_LAMBDA(int idx) { + arg0_t* out = (arg0_t*)(data[0] + strides[0] * idx); + *out = legacy::invoke(f, &data.data[1], &strides.data[1], idx); + }); + } + } else { + auto offset_calc = ::make_offset_calculator(iter); + // TODO: can non_c10_complex go through the other path? Need to verify. + if (needs_dynamic_casting::check(iter) || non_c10_complex) { + legacy::launch_kernel(numel, [=]GPU_LAMBDA(int idx) { + auto offsets = offset_calc.get(idx); + void* out = data[0] + offsets[0]; + arg0_t result = legacy::invoke(f, &data.data[1], &offsets.data[1], &dtypes.data[1], 1); + c10::cast_and_store(dtypes[0], out, result); + }); + } else { + legacy::launch_kernel(numel, [=]GPU_LAMBDA(int idx) { + auto offsets = offset_calc.get(idx); + arg0_t* out = (arg0_t*)(data[0] + offsets[0]); + *out = legacy::invoke(f, &data.data[1], &offsets.data[1], 1); + }); + } + } +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Randperm.cuh b/voice_bridge/torch/include/ATen/native/cuda/Randperm.cuh new file mode 100644 index 0000000000000000000000000000000000000000..de5affebb8bd5bf065502b3af31a80dc9c562991 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Randperm.cuh @@ -0,0 +1,58 @@ +#include +#include +#include + +#include +#include +#include + +namespace { + +// See note [Algorithm of randperm] +template +__global__ void randperm_handle_duplicate_keys_kernel(T *keys, scalar_t *data, T mask, int n, at::PhiloxCudaState philox_args) { + int tid = threadIdx.x + blockDim.x * blockIdx.x; + + // find the beginning of islands + if (tid >= n - 1) return; // out of range + if ((keys[tid] & mask) != (keys[tid + 1] & mask)) return; // not in an island + if (tid != 0 && (keys[tid] & mask) == (keys[tid - 1] & mask)) return; // not the beginning of an island + + // find the size of islands + int island_size = 0; + do { island_size++; } + while ((tid + island_size < n) && (keys[tid + island_size] & mask) == (keys[tid] & mask)); + + // do random permutation inside each island. 
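// The loop below is an in-place Fisher-Yates shuffle over the island, seeded
// from the launch-wide Philox state with tid as the subsequence, so each
// island is permuted independently and reproducibly for a fixed generator
// state. The draw `curand(&state) % (i + 1)` carries the usual modulo bias
// whenever (i + 1) does not divide 2^32, which is presumably tolerable here
// because islands are short runs of duplicate keys.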
+ data += tid; + auto seeds = at::cuda::philox::unpack(philox_args); + curandStatePhilox4_32_10_t state; + curand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state); + for (int i = island_size - 1; i > 0; i--) { + unsigned int r = curand(&state) % (i + 1); + if (i != r) { + scalar_t tmp = data[i]; + data[i] = data[r]; + data[r] = tmp; + } + } +} + +// See note [Algorithm of randperm] +template +void randperm_handle_duplicate_keys(T *keys, scalar_t *data, int bits, int64_t n, c10::optional &gen_) { + auto gen = at::get_generator_or_default(gen_, at::cuda::detail::getDefaultCUDAGenerator()); + int64_t counter_offset = n; + at::PhiloxCudaState rng_engine_inputs; + { + // See Note [Acquire lock when using random generators] + std::lock_guard lock(gen->mutex_); + rng_engine_inputs = gen->philox_cuda_state(counter_offset); + } + T mask = static_cast((1UL << bits) - 1); + randperm_handle_duplicate_keys_kernel<<<(n + 511) / 512, 512, 0, at::cuda::getCurrentCUDAStream()>>>( + keys, data, mask, n, rng_engine_inputs); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +} diff --git a/voice_bridge/torch/include/ATen/native/cuda/Reduce.cuh b/voice_bridge/torch/include/ATen/native/cuda/Reduce.cuh new file mode 100644 index 0000000000000000000000000000000000000000..34e99ae57a59d1a49bee741ab7fe5c8daf912631 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Reduce.cuh @@ -0,0 +1,1325 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace at { namespace native { + +using at::detail::Array; + +static inline int64_t div_up(int64_t a, int64_t b) { + return (a + b - 1) / b; +} + +// returns floor(log2(n)) +static inline int last_pow2(int n) { + n |= (n >> 1); + n |= (n >> 2); + n |= (n >> 4); + n |= (n >> 8); + n |= (n >> 16); + return std::max(1, n - (n >> 1)); +} + +// returns reduced fraction numerator & denominator +C10_HOST_DEVICE static void reduce_fraction(size_t &numerator, size_t &denominator) { + // get GCD of num and denom using Euclid's algorithm. + // Can replace this with std::gcd if we ever support c++17. + size_t a = denominator; + size_t b = numerator; + while (b != 0) { + a %= b; + // swap(a,b) + size_t tmp = a; + a = b; + b = tmp; + } + + // a is now the GCD + numerator /= a; + denominator /= a; +} + +//template for changing MAX_NUM_THREADS based on op dtype +template +struct mnt_wrapper { + static constexpr int MAX_NUM_THREADS = 512; +}; + +template <> +struct mnt_wrapper >{ + static constexpr int MAX_NUM_THREADS = 256; +}; + +constexpr int max_reduce_threads(c10::ScalarType type) { + return type == kComplexDouble ? 
256 : 512; +} + +struct ReduceConfig { + static constexpr int BLOCK_X = 0; + static constexpr int BLOCK_Y = 1; + static constexpr int CTA = 2; + + static constexpr int input_vec_size = 4; + + ReduceConfig(int element_size_bytes, int num_outputs, int num_inputs) + : element_size_bytes(element_size_bytes) + , num_inputs(num_inputs) + , num_outputs(num_outputs) {} + int element_size_bytes; + int num_inputs; + int num_outputs; + int step_input = 1; + int step_output = 1; + int ctas_per_output = 1; + int input_mult[3] = {0, 0, 0}; + int output_mult[2] = {0, 0}; + + int block_width; + int block_height; + int num_threads; + + bool vectorize_input = false; + int output_vec_size = 1; + + template + void set_block_dimension(int64_t dim0, int64_t dim1) { + const int max_num_threads = mnt_wrapper::MAX_NUM_THREADS / output_vec_size; + int dim0_pow2 = dim0 < max_num_threads ? static_cast(last_pow2(dim0)) : max_num_threads; + int dim1_pow2 = dim1 < max_num_threads ? static_cast(last_pow2(dim1)) : max_num_threads; + block_width = std::min(dim0_pow2, int(at::cuda::warp_size())); + block_height = std::min(dim1_pow2, int(max_num_threads / block_width)); + block_width = std::min(dim0_pow2, int(max_num_threads / block_height)); + num_threads = block_width * block_height; + } + + int split_input(int parallelism) { + int step = step_input; + step_input *= parallelism; + return step; + } + + int split_output(int parallelism) { + int step = step_output; + step_output *= parallelism; + return step; + } + + dim3 block() const { + return dim3(block_width, block_height); + } + + dim3 grid() const { + return dim3(div_up(num_outputs / output_vec_size, step_output), ctas_per_output); + } + + C10_HOST_DEVICE bool should_block_x_reduce() const { + return input_mult[BLOCK_X] != 0; + } + + C10_HOST_DEVICE bool should_block_y_reduce() const { + return input_mult[BLOCK_Y] != 0; + } + + C10_HOST_DEVICE bool should_global_reduce() const { + return input_mult[CTA] != 0; + } + + C10_DEVICE bool should_store(int output_idx) const { + return output_idx < num_outputs && + (!should_block_x_reduce() || threadIdx.x == 0) && + (!should_block_y_reduce() || threadIdx.y == 0); + } + + C10_DEVICE bool should_reduce_tail() const { + return (!should_block_y_reduce() || threadIdx.y == 0) && + (!should_global_reduce() || blockIdx.y == 0); + } + + C10_HOST_DEVICE int input_idx() const { + int lane = threadIdx.x; + int warp = threadIdx.y; + int cta2 = blockIdx.y; + return (lane * input_mult[BLOCK_X] + + warp * input_mult[BLOCK_Y] + + cta2 * input_mult[CTA]); + } + + template + C10_HOST_DEVICE int output_idx() const { + int lane = threadIdx.x; + int warp = threadIdx.y; + int cta1 = blockIdx.x; + return (lane * output_mult[BLOCK_X] + + warp * output_mult[BLOCK_Y] + + cta1 * step_output) * output_vec_size; + } + + C10_DEVICE int shared_memory_offset(int offset) const { + return threadIdx.x + (threadIdx.y + offset) * blockDim.x; + } + + C10_DEVICE int staging_memory_offset(int cta2) const { + int offset = cta2 + blockIdx.x * gridDim.y; + if (!should_block_x_reduce()) { + offset = threadIdx.x + offset * blockDim.x; + } + return offset; + } + + int shared_memory_size() const { + if (!should_block_y_reduce() && + (!should_block_x_reduce() || + block_width <= at::cuda::warp_size())) { + return 0; + } + return element_size_bytes * num_threads * output_vec_size; + } + + int64_t global_memory_size() const { + if (!should_global_reduce()) { + return 0; + } + auto size = (int64_t)element_size_bytes * num_outputs * ctas_per_output; + if 
(!should_block_x_reduce()) { + size *= block().x * output_vec_size; + } + return size; + } + + int semaphore_size() const { + if (!should_global_reduce()) { + return 0; + } + return sizeof(int) * grid().x; + } + + int values_per_thread() const { + return div_up(num_inputs, step_input); + } +}; + +std::ostream& operator<<(std::ostream& out, const ReduceConfig& config); + +template +C10_LAUNCH_BOUNDS_2(nt, 4) +__global__ void reduce_kernel(R reduction) { + reduction.template run(); +} + +template +static OffsetCalculator<2, index_t> make_output_calculator(const TensorIterator& iter) { + int num_reduce_dims = iter.num_reduce_dims(); + int num_output_dims = iter.ndim() - num_reduce_dims; + int input_index = iter.ntensors() - 1; + int output_index = 0; + std::array strides = { + iter.strides(output_index).data() + num_reduce_dims, + iter.strides(input_index).data() + num_reduce_dims, + }; + auto shape = iter.shape().data() + num_reduce_dims; + return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data()); +} + +template +static OffsetCalculator<1, index_t> make_input_calculator(const TensorIterator& iter) { + int num_reduce_dims = iter.num_reduce_dims(); + int input_index = iter.ntensors() - 1; + std::array strides = { + iter.strides(input_index).data(), + }; + return OffsetCalculator<1, index_t>(num_reduce_dims, iter.shape().data(), strides.data()); +} + +template +struct func_wrapper_t { + using arg_t = typename binary_function_traits::arg1_t; + using scalar_t = typename binary_function_traits::arg2_t; + + func_t combine; + static inline __device__ out_scalar_t project(arg_t arg) { + return (out_scalar_t) arg; + } + static inline __device__ arg_t warp_shfl_down(arg_t arg, int offset) { + return WARP_SHFL_DOWN(arg, offset); + } + + static __device__ arg_t translate_idx(arg_t acc, int64_t /*idx*/) { + return acc; + } + + func_wrapper_t(const func_t& op) : combine(op) { + } + + // wrap a normal reduction that ignores the index + __device__ arg_t reduce(arg_t acc, scalar_t val, int64_t idx) const { + return combine(acc, val); + } +}; + +template +func_wrapper_t func_wrapper(const func_t& op) { + return func_wrapper_t { op }; +} + +template +struct ReduceJitOp { +//ReduceJitOp is almost like ReduceOp, but it doesn't have ops functor that specifies reduction operations +//Maybe we can find a way to unify ReduceOp and ReduceJitOp + using InputCalculator = OffsetCalculator<1, uint32_t>; + using OutputCalculator = OffsetCalculator<2, uint32_t>; + //TODO for now arg_t is always opmath_t of the input, later we'll need to change it + using arg_t = at::opmath_type; + + static constexpr int input_vec_size = ReduceConfig::input_vec_size; + //TODO - ReduceJitOp will probably need to be changed for reductions that need full functor, + //not just wrapper + arg_t ident; + ReduceConfig config; + InputCalculator input_calc; + OutputCalculator output_calc; + const void* src; + const char* dst[2]; //it accepts at most two destinations + // acc_buf used for accumulation among sub Tensor Iterator when accumulation on + // output is not permissible + void* acc_buf; + // cta_buf used for accumulation between blocks during global reduction + void* cta_buf; + int* semaphores; + int64_t base_idx; + bool accumulate; + bool final_output; + int noutputs; + + ReduceJitOp( + ReduceConfig config, + InputCalculator input_calc, + OutputCalculator output_calc, + const void* src, + char* dst0, + optional dst1, + void* acc_buf, + void* cta_buf, + int* semaphores, + arg_t ident, + int noutputs, + int64_t base_idx) + : 
ident(ident), + config(config), + input_calc(input_calc), + output_calc(output_calc), + src(src), + acc_buf(acc_buf), + cta_buf(cta_buf), + semaphores(semaphores), + base_idx(base_idx), + noutputs(noutputs) { + dst[0] = dst0; + if (dst1.has_value()) { + dst[1] = dst1.value(); + } + } +}; + +template +struct ReduceOp { + using traits = function_traits; + using arg_t = typename std::decay::type>::type; + + using InputCalculator = OffsetCalculator<1, index_t>; + using OutputCalculator = OffsetCalculator<2, index_t>; + + static constexpr bool can_accumulate_in_output = + std::is_convertible::value + && std::is_convertible::value; + + static constexpr int input_vec_size = ReduceConfig::input_vec_size; + + ops_t ops; + arg_t ident; + ReduceConfig config; + InputCalculator input_calc; + OutputCalculator output_calc; + const void* src; + const char* dst[2]; //it accepts at most two destinations + // acc_buf used for accumulation among sub Tensor Iterator when accumulation on + // output is not permissible + void* acc_buf; + // cta_buf used for accumulation between blocks during global reduction + void* cta_buf; + int* semaphores; + int64_t base_idx; + bool accumulate; + bool final_output; + int noutputs; + + ReduceOp( + ops_t ops, + ReduceConfig config, + InputCalculator input_calc, + OutputCalculator output_calc, + const void* src, + char* dst0, + optional dst1, + void* acc_buf, + void* cta_buf, + int* semaphores, + arg_t ident, + int noutputs, + int64_t base_idx) + : ops(ops), + ident(ident), + config(config), + input_calc(input_calc), + output_calc(output_calc), + src(src), + acc_buf(acc_buf), + cta_buf(cta_buf), + semaphores(semaphores), + base_idx(base_idx), + noutputs(noutputs) { + dst[0] = dst0; + if (dst1.has_value()) { + dst[1] = dst1.value(); + } + } + + template + C10_DEVICE void run() const { + extern __shared__ char shared_memory[]; + index_t output_idx = config.output_idx(); + index_t input_idx = config.input_idx(); + auto base_offsets1 = output_calc.get(output_idx)[1]; + + using arg_vec_t = at::detail::Array; + arg_vec_t value; + + if (output_idx < config.num_outputs && input_idx < config.num_inputs) { + const scalar_t* input_slice = (const scalar_t*)((const char*)src + base_offsets1); + value = thread_reduce(input_slice); + } + + if (config.should_block_y_reduce()) { + value = block_y_reduce(value, shared_memory); + } + if (config.should_block_x_reduce()) { + value = block_x_reduce(value, shared_memory); + } + + using out_ptr_vec_t = at::detail::Array; + using offset_vec_t = at::detail::Array; + offset_vec_t base_offsets; + out_ptr_vec_t out; + + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + base_offsets[i] = output_calc.get(output_idx + i)[0]; + out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]); + } + + arg_vec_t* acc = nullptr; + if (acc_buf != nullptr) { + size_t numerator = sizeof(arg_t); + size_t denominator = sizeof(out_scalar_t); + reduce_fraction(numerator, denominator); + acc = (arg_vec_t*)((char*)acc_buf + (base_offsets[0] * numerator / denominator)); + } + + if (config.should_global_reduce()) { + value = global_reduce(value, acc, shared_memory); + } else if (config.should_store(output_idx)) { + if (accumulate) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = ops.translate_idx(value[i], base_idx); + } + } + + if (acc == nullptr) { + if (accumulate) { + value = accumulate_in_output(out, value); + } + if (final_output) { + set_results_to_output(value, base_offsets); + } else { + #pragma unroll + for (int i = 0; i < 
output_vec_size; i++) { + *(out[i]) = get_accumulated_output(out[i], value[i]); + } + } + } else { + if (accumulate) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = ops.combine((*acc)[i], value[i]); + } + } + if (final_output) { + set_results_to_output(value, base_offsets); + } else { + *acc = value; + } + } + } + } + + template + C10_DEVICE at::detail::Array thread_reduce(const scalar_t* data) const { + if (config.vectorize_input) { + assert(output_vec_size == 1); + // reduce at the header of input_slice where memory is not aligned, + // so that thread_reduce will have an aligned memory to work on. + return {input_vectorized_thread_reduce_impl(data)}; + } else { + index_t element_stride = input_calc.strides_[0][0] / sizeof(scalar_t); + bool is_contiguous = (input_calc.dims == 1 && element_stride == 1); + if (is_contiguous) { + return thread_reduce_impl(data, [](index_t idx) { return idx; }); + } else if (input_calc.dims == 1) { + return thread_reduce_impl(data, [&](index_t idx) { return idx * element_stride; }); + } else { + return thread_reduce_impl(data, [&](index_t idx) { return input_calc.get(idx)[0] / sizeof(scalar_t); }); + } + } + } + + C10_DEVICE arg_t input_vectorized_thread_reduce_impl(const scalar_t* data) const { + index_t end = config.num_inputs; + + // Handle the head of input slice where data is not aligned + arg_t value = ident; + constexpr int align_bytes = alignof(at::native::memory::aligned_vector); + constexpr int align_elements = align_bytes / sizeof(scalar_t); + int shift = ((uint64_t)data) % align_bytes / sizeof(scalar_t); + if (shift > 0) { + data -= shift; + end += shift; + if(threadIdx.x >= shift && threadIdx.x < align_elements && config.should_reduce_tail()){ + value = ops.reduce(value, c10::load(data + threadIdx.x), threadIdx.x - shift); + } + end -= align_elements; + data += align_elements; + shift = align_elements - shift; + } + + // Do the vectorized reduction + using load_t = at::native::memory::aligned_vector; + + index_t idx = config.input_idx(); + const index_t stride = config.step_input; + + // Multiple accumulators to remove dependency between unrolled loops. + arg_t value_list[input_vec_size]; + value_list[0] = value; + + #pragma unroll + for (int i = 1; i < input_vec_size; i++) { + value_list[i] = ident; + } + + while (idx * input_vec_size + input_vec_size - 1 < end) { + const auto values_vec = memory::load_vector(data, idx); + #pragma unroll + for (index_t i = 0; i < input_vec_size; i++) { + value_list[i] = ops.reduce(value_list[i], values_vec.val[i], shift + idx * input_vec_size + i); + } + idx += stride; + } + + // tail + index_t tail_start = end - end % input_vec_size; + if (config.should_reduce_tail()) { + int idx = tail_start + threadIdx.x; + if (idx < end) { + const auto value = c10::load(data + idx); + value_list[0] = ops.reduce(value_list[0], value, idx + shift); + } + } + + // combine accumulators + #pragma unroll + for (int i = 1; i < input_vec_size; i++) { + value_list[0] = ops.combine(value_list[0], value_list[i]); + } + return value_list[0]; + } + + template + C10_DEVICE at::detail::Array thread_reduce_impl(const scalar_t* data_, offset_calc_t calc) const { + index_t idx = config.input_idx(); + const index_t end = config.num_inputs; + const index_t stride = config.step_input; + + using arg_vec_t = at::detail::Array; + using load_t = at::native::memory::aligned_vector; + + // Multiple accumulators to remove dependency between unrolled loops. 
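// Keeping vt0 independent partial accumulators, rather than one running
// value, breaks the loop-carried dependency: each unrolled iteration can
// issue its reduce without waiting on the previous one, and the partials are
// only folded together once after the tail handling below.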
+ arg_vec_t value_list[vt0]; + + #pragma unroll + for (int i = 0; i < vt0; i++) { + #pragma unroll + for (int j = 0; j < output_vec_size; j++) { + value_list[i][j] = ident; + } + } + + load_t values[vt0]; + + while (idx + (vt0 - 1) * stride < end) { + #pragma unroll + for (index_t i = 0; i < vt0; i++) { + const auto offset = calc(idx + i * stride) / output_vec_size; + values[i] = memory::load_vector(data_, offset); + } + #pragma unroll + for (index_t i = 0; i < vt0; i++) { + #pragma unroll + for (index_t j = 0; j < output_vec_size; j++) { + value_list[i][j] = ops.reduce(value_list[i][j], values[i].val[j], idx + i * stride); + } + } + idx += stride * vt0; + } + + // tail + int idx_ = idx; + #pragma unroll + for (index_t i = 0; i < vt0; i++) { + if (idx >= end) { + break; + } + const auto offset = calc(idx) / output_vec_size; + values[i] = memory::load_vector(data_, offset); + idx += stride; + } + idx = idx_; + #pragma unroll + for (index_t i = 0; i < vt0; i++) { + if (idx >= end) { + break; + } + #pragma unroll + for (index_t j = 0; j < output_vec_size; j++) { + value_list[i][j] = ops.reduce(value_list[i][j], values[i].val[j], idx); + } + idx += stride; + } + + // combine accumulators + #pragma unroll + for (int i = 1; i < vt0; i++) { + #pragma unroll + for (index_t j = 0; j < output_vec_size; j++) { + value_list[0][j] = ops.combine(value_list[0][j], value_list[i][j]); + } + } + return value_list[0]; + } + + template + C10_DEVICE at::detail::Array block_x_reduce(at::detail::Array value, char* shared_memory) const { + using args_vec_t = at::detail::Array; + int dim_x = blockDim.x; + args_vec_t* shared = (args_vec_t*)shared_memory; + if (dim_x > warpSize) { + int address_base = threadIdx.x + threadIdx.y*blockDim.x; + shared[address_base] = value; + for (int offset = dim_x/2; offset >= warpSize; offset >>= 1) { + __syncthreads(); + if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) { + args_vec_t other = shared[address_base + offset]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = ops.combine(value[i], other[i]); + } + shared[address_base] = value; + } + } + dim_x = warpSize; + } + + __syncthreads(); + + for (int offset = 1; offset < dim_x; offset <<= 1) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + arg_t other = ops.warp_shfl_down(value[i], offset); + value[i] = ops.combine(value[i], other); + } + } + return value; + } + + template + C10_DEVICE at::detail::Array block_y_reduce(at::detail::Array value, char* shared_memory) const { + using args_vec_t = at::detail::Array; + args_vec_t* shared = (args_vec_t*)shared_memory; + shared[config.shared_memory_offset(0)] = value; + for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { + __syncthreads(); + if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { + args_vec_t other = shared[config.shared_memory_offset(offset)]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = ops.combine(value[i], other[i]); + } + shared[config.shared_memory_offset(0)] = value; + } + } + return value; + } + + C10_DEVICE bool mark_block_finished() const { + __shared__ bool is_last_block_done_shared; + + __syncthreads(); + if (threadIdx.x == 0 && threadIdx.y == 0) { + int prev_blocks_finished = atomicAdd(&semaphores[blockIdx.x], 1); + is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1); + } + + __syncthreads(); + + return is_last_block_done_shared; + } + + template + C10_DEVICE at::detail::Array accumulate_in_output( + at::detail::Array out, + 
at::detail::Array value, + typename std::enable_if::type* = nullptr + ) const { + at::detail::Array ret; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + ret[i] = ops.combine(*(out[i]), value[i]); + } + return ret; + } + + template + C10_DEVICE out_scalar_t get_accumulated_output( + out_scalar_t* out, arg_t value, + typename std::enable_if::type* = nullptr + ) const { + assert(!final_output); + return (out_scalar_t)value; + } + + // This function should never be called -- + // it's the version of `accumulate_in_output` + // when accumulation in the output is not possible. + template + C10_DEVICE at::detail::Array accumulate_in_output( + at::detail::Array, + at::detail::Array, + typename std::enable_if::type* = nullptr + ) const { + assert(false); // can't use AT_ASSERT in Cuda. + return arg_t {}; + } + + // This function should never be called -- + // it's the version of `get_accumulated_output` + // when accumulation in the output is not possible. + template + C10_DEVICE out_scalar_t get_accumulated_output( + out_scalar_t* out, arg_t value, + typename std::enable_if::type* = nullptr + ) const { + assert(false); + return *out; + } + + template + C10_DEVICE void set_results(const T x, const index_t base_offset) const { + assert(noutputs == 1); + auto res = (out_scalar_t*)((char*)dst[0] + base_offset); + *res = x; + } + + //Currently implemented for max of two outputs + template + C10_DEVICE void set_results(const thrust::pair x, const index_t base_offset) const { + if (noutputs >= 1) { + auto res0 = (T1*)((char*)dst[0] + base_offset); + *res0 = x.first; + } + if (noutputs >= 2) { + // base offset is computed assuming element size being sizeof(T1), so we need to make a + // correction to obtain the correct base offset + auto res1 = (T2*) ((char *) dst[1] + base_offset / sizeof(T1) * sizeof(T2)); + *res1 = x.second; + } + } + + template + C10_DEVICE void set_results_to_output(at::detail::Array value, at::detail::Array base_offset) const { + assert(final_output); + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + set_results(ops.project(value[i]), base_offset[i]); + } + } + + template + C10_DEVICE at::detail::Array global_reduce(at::detail::Array value, at::detail::Array *acc, char* shared_memory) const { + using arg_vec_t = at::detail::Array; + using out_ptr_vec_t = at::detail::Array; + using offset_vec_t = at::detail::Array; + + arg_vec_t* reduce_buffer = (arg_vec_t*)cta_buf; + index_t output_idx = config.output_idx(); + offset_vec_t base_offsets; + out_ptr_vec_t out; + + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + base_offsets[i] = output_calc.get(output_idx + i)[0]; + out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]); + } + + bool should_store = config.should_store(output_idx); + if (should_store) { + index_t offset = config.staging_memory_offset(blockIdx.y); + reduce_buffer[offset] = value; + } + + __threadfence(); // make sure writes are globally visible + __syncthreads(); // if multiple warps in this block wrote to staging, make sure they're all done + bool is_last_block_done = mark_block_finished(); + + if (is_last_block_done) { + value = ident; + if (config.should_block_x_reduce()) { + index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x; + index_t step = blockDim.x * blockDim.y; + for (; input_offset < config.ctas_per_output; input_offset += step) { + index_t idx = config.staging_memory_offset(input_offset); + arg_vec_t next = reduce_buffer[idx]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] 
= ops.combine(value[i], next[i]); + } + } + } else { + index_t input_offset = threadIdx.y; + index_t step = blockDim.y; + for (; input_offset < config.ctas_per_output; input_offset += step) { + index_t idx = config.staging_memory_offset(input_offset); + arg_vec_t next = reduce_buffer[idx]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = ops.combine(value[i], next[i]); + } + } + } + value = block_y_reduce(value, shared_memory); + if (config.should_block_x_reduce()) { + value = block_x_reduce(value, shared_memory); + } + if (should_store) { + if (accumulate) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = ops.translate_idx(value[i], base_idx); + } + } + + if (acc == nullptr) { + if (accumulate) { + value = accumulate_in_output(out, value); + } + if (final_output) { + set_results_to_output(value, base_offsets); + } else { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + *(out[i]) = get_accumulated_output(out[i], value[i]); + } + } + } else { + if (accumulate) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = ops.combine((*acc)[i], value[i]); + } + } + if (final_output) { + set_results_to_output(value, base_offsets); + } else { + *acc = value; + } + } + } + } + + return value; + } +}; + +template +static void launch_reduce_kernel(const ReduceConfig& config, const R& reduction) { + dim3 block = config.block(); + dim3 grid = config.grid(); + + auto stream = at::cuda::getCurrentCUDAStream(); + int shared_memory = config.shared_memory_size(); + + switch(config.output_vec_size) { + case 4: + reduce_kernel<<>>(reduction); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + break; + case 2: + reduce_kernel<<>>(reduction); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + break; + default: + reduce_kernel<<>>(reduction); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } +} + +inline void launch_jitted_reduce_kernel( + std::mutex &jiterator_mutex, + std::array &fn_cache, + const at::cuda::jit::KernelDescriptor &desc, + int vt0, const ReduceConfig& config, void *reduction) { + dim3 block = config.block(); + dim3 grid = config.grid(); + + int shared_memory = config.shared_memory_size(); + at::cuda::jit::NvrtcFunction* fn_ptr; + switch(config.output_vec_size) { + case 4: + fn_ptr = &fn_cache[0]; + break; + case 2: + fn_ptr = &fn_cache[1]; + break; + default: + fn_ptr = &fn_cache[2]; + } + if (!fn_ptr->function) { + int max_threads_codegen = + max_reduce_threads(desc.f_inputs_type) / config.output_vec_size; + auto code = at::cuda::jit::generate_reduction_code( + desc, vt0, true, false, config.output_vec_size, max_threads_codegen); + + *fn_ptr = at::cuda::jit::jit_pwise_function(code, "reduction_" + desc.name); + } + constexpr int kernel_args = 1; + void* args[kernel_args]; + args[0] = reduction; + at::cuda::jit::launch_jitted_pwise_function(*fn_ptr, args, grid, block, shared_memory); +} + + +class AccumulationBuffer { + public: + AccumulationBuffer() {} + + AccumulationBuffer(size_t acc_t_size, size_t out_t_size, char* out_ptr, int64_t size) { + out_ptr_ = (char*)out_ptr; + if (out_t_size >= acc_t_size) { + // reusing output buffer for accumulation. 
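// When the accumulation type fits in the output type (acc_t_size <=
// out_t_size), partial results are written straight into the output tensor,
// so no extra allocation is needed and the offset ratio degenerates to 1/1.
// Otherwise the else-branch allocates a dedicated buffer and stores the
// reduced numerator/denominator pair so get_acc_slice can translate an
// output-buffer offset into the matching accumulator offset.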
+ acc_ptr_ = (char*)out_ptr; + numerator_ = 1; + denominator_ = 1; + } else { + auto& allocator = *c10::cuda::CUDACachingAllocator::get(); + buffer_ = allocator.allocate(size); + acc_ptr_ = (char*)buffer_.get(); + numerator_ = acc_t_size; + denominator_ = out_t_size; + reduce_fraction(numerator_, denominator_); + } + } + + char* get_acc_slice(char* out_ptr) { + if (acc_ptr_ == nullptr) { + return nullptr; + } + return acc_ptr_ + ((out_ptr - out_ptr_) * numerator_ / denominator_); + } + + private: + char* acc_ptr_ = nullptr; + char* out_ptr_ = nullptr; + size_t numerator_; + size_t denominator_; + at::DataPtr buffer_; +}; + +template +int get_output_vec_size(const TensorIterator &iter) { + int vec_size = 4; + auto update_vec_size = [&vec_size](uint64_t n) { + while(n % vec_size != 0) { + vec_size /= 2; + } + }; + + uint64_t base_address = reinterpret_cast(iter.data_ptr(iter.noutputs())) / sizeof(scalar_t); + update_vec_size(base_address); + + const int output_index = iter.num_reduce_dims(); + update_vec_size(iter.shape()[output_index]); + + int j = 0; + for(auto i : iter.strides(iter.noutputs())) { + if (j != output_index) { + update_vec_size(i / sizeof(scalar_t)); + } + j++; + } + return vec_size; +} + +template +ReduceConfig setReduceConfig(const TensorIterator& iter){ + // Start by assuming that each thread handles a single output and all + // the inputs for that output. + int64_t num_outputs = iter.num_output_elements(); + int64_t inputs_per_output = iter.numel() / num_outputs; + int input_index = iter.ntensors() - 1; + + auto config = ReduceConfig(sizeof(arg_t), num_outputs, inputs_per_output); + + int64_t dim0; + int64_t dim1; + int64_t fastest_moving_stride; + bool reduction_on_fastest_striding_dimension; + + if (iter.ndim() > 0) { + // Adjust block size to map block width to fastest changing dimension of input + // tensor. This grants the best possible memory accessing pattern, given that + // for non-contiguous tensor with space in between, we cannot have perfect + // memory coalescing. + reduction_on_fastest_striding_dimension = + (iter.num_reduce_dims() == iter.ndim()) || + (iter.strides(/*arg=*/input_index)[0] < + iter.strides(/*arg=*/input_index)[iter.num_reduce_dims()]); + // Notice that dim0 & dim1 does NOT guarantee any launch configuration here! + // dim0 & dim1 are more like the upper bound of the block dimension. The + // actual launch config and reduction scheme is determined by setting values + // to `config.input_mult` and `config.output_mult`. + // We try to max out dim1 so that we have enough threads per CTA to deliver + // performance for larger problem size. + if (reduction_on_fastest_striding_dimension) { + // Map block.x to the fastest reducing dimension. It implies: + // 1. block_x_reduce is required. + // 2. block.y now max out to num_outputs. + dim0 = inputs_per_output; + dim1 = num_outputs; + fastest_moving_stride = iter.strides(/*arg=*/input_index)[0]; + } else { + // Map block.x to the fastest non reducing dimension. It implies: + // 1. block_x_reduce is turned off. + // 2. block.y now max out to inputs_per_output. + dim0 = num_outputs; + dim1 = inputs_per_output; + fastest_moving_stride = iter.strides(/*arg=*/input_index)[iter.num_reduce_dims()]; + } + } else { + reduction_on_fastest_striding_dimension = true; + fastest_moving_stride = sizeof(scalar_t); + dim0 = 1; + dim1 = 1; + } + + // We do vectorization to gain better memory access, there are two cases which we call + // "vectorize along input" and "vectorize along output". 
Note that the "input/output" + // here does not mean we are vectorizing load/store instructions. We always only vectorize + // load instructions. + // + // Case 1: "vectorize along input" + // This case happens when we are reducing along fastest moving dimesion. In such case, threads + // with the same threadIdx.y works on the same reduction cooperatively and will produce results + // for the same ouput. In such case, values in each loaded vector always correspond to the same ouput. + // + // Case 2: "vectorize along output" + // This case happens when the fastest moving dimesion is not the dimension of reduction. In such case, + // threads with different threadIdx.x are independent and will produce results for different outputs. + // In such case, values in each loaded vector always correspond to different outputs. + if (fastest_moving_stride == sizeof(scalar_t)) { + if (reduction_on_fastest_striding_dimension && dim0 > 128 && iter.num_reduce_dims() == 1 && vt0 >= ReduceConfig::input_vec_size) { + // Case 1: "vectorize along input" + // Note that if vt0 < ReduceConfig::vec_size, then this means the register pressure could be high, in such case, + // we should avoid vectorization. + config.vectorize_input = true; + } else if (!reduction_on_fastest_striding_dimension) { + // Case 2: "vectorize along output" + config.output_vec_size = get_output_vec_size(iter); + dim0 /= config.output_vec_size; + } + } + + // Adjust block_width and block_height + config.set_block_dimension(dim0, dim1); + + int block_width = config.block_width; + int block_height = config.block_height; + + if (iter.ndim() == 0 || reduction_on_fastest_striding_dimension) { + // Split the input across lanes if the input is contiguous in the reduced + // dimension. This will require reduction between threads using warp + // shuffle instructions and shared memory (if block_width > warpSize). + config.input_mult[0] = config.split_input(block_width); + } else { + // Otherwise split the output across lanes in a warp. + config.output_mult[0] = config.split_output(block_width); + } + + if (config.values_per_thread() >= block_height * 16 || config.values_per_thread() >= 256) { + // Divide the input across warps in a thread-block, if that leaves at least + // 16 elements to be summed by each thread. This will require inter-warp + // reduction using shared memory. + config.input_mult[1] = config.split_input(block_height); + } else { + // Otherwise, each warp handles a separate output. + config.output_mult[1] = config.split_output(block_height); + } + + constexpr int min_values_per_thread = 16; + constexpr int max_values_per_thread = 256; + const int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / (block_width * block_height); + const int num_mp = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; + const int target_grid_size = num_mp * blocks_per_sm; + int grid = config.grid().x; + if (config.input_mult[1] != 0 && config.values_per_thread() >= max_values_per_thread && grid <= target_grid_size) { + // Divide the input across thread-blocks if the amount of work per-thread + // is large enough and the size of the output is small enough. This will + // require a reduction using global memory. + // If we decide to split input across blocks, as long as we can get enough + // number of blocks (`target_grid_size`) to balance SM, we should still + // make the number of values per thread large for best performance. 
+ int ctas_per_output1 = div_up(target_grid_size, grid); + int ctas_per_output2 = div_up(config.values_per_thread(), min_values_per_thread); + int ctas_per_output3 = div_up(config.values_per_thread(), max_values_per_thread); + // We want the minimum of ctas_per_output1 and ctas_per_output2, so that each thread can have + // a large number of values to deal with. But we don't want values_per_thread to be larger than + // max_values_per_thread. + config.ctas_per_output = std::max(std::min(ctas_per_output1, ctas_per_output2), ctas_per_output3); + if (config.ctas_per_output > 1) { + config.input_mult[2] = config.split_input(config.ctas_per_output); + } + } + return config; +} + +template <typename scalar_t, typename out_scalar_t, int vt0=4, typename ops_t, typename ident_t=double> +inline void gpu_reduce_kernel(TensorIterator& iter, const ops_t& ops, ident_t ident=0, + AccumulationBuffer* acc_buf_ptr=nullptr, int64_t base_idx=0) { + AT_ASSERT(iter.numel() > 0 && iter.ntensors() - iter.noutputs() == 1 && iter.noutputs() >= 1); + + using traits = function_traits<decltype(&ops_t::reduce)>; + using arg_t = typename traits::template arg<0>::type; + static constexpr bool can_accumulate_in_output = + std::is_convertible<arg_t, out_scalar_t>::value; + + bool can_use_32bit_indexing = iter.can_use_32bit_indexing(); + std::unique_ptr<AccumulationBuffer> owned_buf_ptr; + // The acc_buf_ptr is a shared pointer. It is created on the first entry and + // reused by all recursive function calls. + if (acc_buf_ptr == NULL) { + // acc_buf_ptr holds the buffer used for accumulation among multiple sub_iters + // when accumulation in the output is not possible. + if (!can_accumulate_in_output && !can_use_32bit_indexing) { + int64_t output_memory_size = iter.element_size(0); + for (int dim = 0; dim < iter.ndim(); dim++) { + output_memory_size = std::max(output_memory_size, iter.shape()[dim] * iter.strides(0)[dim]); + } + output_memory_size /= iter.element_size(0); // iter.strides is in bytes + owned_buf_ptr.reset(new AccumulationBuffer(sizeof(arg_t), + sizeof(out_scalar_t), + (char*) iter.data_ptr(0), + output_memory_size * sizeof(arg_t))); + } else { + owned_buf_ptr.reset(new AccumulationBuffer()); + } + acc_buf_ptr = owned_buf_ptr.get(); + } + + if (!can_use_32bit_indexing) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + int64_t sub_iter_base_idx = sub_iter.view_offsets()[0]; + + gpu_reduce_kernel<scalar_t, out_scalar_t, vt0>(sub_iter, ops, ident, + acc_buf_ptr, sub_iter_base_idx); + } + return; + } + + const char* in_data = (char*)iter.data_ptr(iter.ntensors() - 1); + char* out_data = (char*)iter.data_ptr(0); + const auto noutputs = iter.noutputs(); + optional<char*> out_data_extra; + if (noutputs > 1) { + out_data_extra = (char*)iter.data_ptr(1); + } else { + out_data_extra = nullopt; + } + char* acc_data = acc_buf_ptr->get_acc_slice(out_data); + + ReduceConfig config = setReduceConfig<arg_t, scalar_t, vt0>(iter); + at::DataPtr buffer; + at::DataPtr semaphores; + if (config.should_global_reduce()) { + auto& allocator = *c10::cuda::CUDACachingAllocator::get(); + buffer = allocator.allocate(config.global_memory_size()); + semaphores = allocator.allocate(config.semaphore_size()); + + auto stream = at::cuda::getCurrentCUDAStream(); + AT_CUDA_CHECK(cudaMemsetAsync(semaphores.get(), 0, config.semaphore_size(), stream)); + } + + AT_ASSERT(can_use_32bit_indexing); + auto output_calc = make_output_calculator<uint32_t>(iter); + auto input_calc = make_input_calculator<uint32_t>(iter); + auto reduce = ReduceOp<scalar_t, ops_t, uint32_t, out_scalar_t, vt0>( + ops, + config, + input_calc, + output_calc, + in_data, + out_data, + out_data_extra, + acc_data, + buffer.get(), + (int*)semaphores.get(), + ident, + noutputs, + base_idx); + reduce.accumulate = iter.should_accumulate(); + reduce.final_output =
iter.is_final_output(); + + launch_reduce_kernel<mnt_wrapper<scalar_t>::MAX_NUM_THREADS>(config, reduce); +} + +// TODO: this is 100 lines of almost-copy-paste, because we have to have different template args for this function; +// try unifying with gpu_reduce_kernel +template <char const* name, typename scalar_t, typename out_scalar_t, int vt0=4, typename ident_t=double> +inline void jitted_gpu_reduce_kernel(TensorIterator& iter, const std::string& func, ident_t ident=0, + AccumulationBuffer* acc_buf_ptr=nullptr, int64_t base_idx=0) { + AT_ASSERT(iter.numel() > 0 && iter.ntensors() - iter.noutputs() == 1 && iter.noutputs() >= 1); + + // TODO - this will be different for more complicated reductions, but for now reductions using + // func_wrapper all have arg_t = opmath + using arg_t = at::opmath_type<scalar_t>; + static constexpr bool can_accumulate_in_output = + std::is_convertible<arg_t, out_scalar_t>::value; + static_assert(can_accumulate_in_output == true, "unsupported arg_t for jitted reduction"); + + bool can_use_32bit_indexing = iter.can_use_32bit_indexing(); + std::unique_ptr<AccumulationBuffer> owned_buf_ptr; + + // The acc_buf_ptr is a shared pointer. It is created on the first entry and + // reused by all recursive function calls. + if (acc_buf_ptr == NULL) { + // acc_buf_ptr holds the buffer used for accumulation among multiple sub_iters + // when accumulation in the output is not possible. + if (!can_accumulate_in_output && !can_use_32bit_indexing) { + int64_t output_memory_size = iter.element_size(0); + for (int dim = 0; dim < iter.ndim(); dim++) { + output_memory_size = std::max(output_memory_size, iter.shape()[dim] * iter.strides(0)[dim]); + } + output_memory_size /= iter.element_size(0); // iter.strides is in bytes + owned_buf_ptr.reset(new AccumulationBuffer(sizeof(out_scalar_t), //TODO + sizeof(out_scalar_t), + (char*) iter.data_ptr(0), + output_memory_size * sizeof(out_scalar_t))); //TODO + } else { + owned_buf_ptr.reset(new AccumulationBuffer()); + } + acc_buf_ptr = owned_buf_ptr.get(); + } + + if (!can_use_32bit_indexing) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + int64_t sub_iter_base_idx = sub_iter.view_offsets()[0]; + + jitted_gpu_reduce_kernel<name, scalar_t, out_scalar_t, vt0>(sub_iter, func, ident, + acc_buf_ptr, sub_iter_base_idx); + } + return; + } + + // TODO - for now we support a single input; we may be able to relax this constraint + const char* in_data = (char*)iter.data_ptr(iter.ntensors() - 1); + char* out_data = (char*)iter.data_ptr(0); + const auto noutputs = iter.noutputs(); + optional<char*> out_data_extra; + if (noutputs > 1) { + out_data_extra = (char*)iter.data_ptr(1); + } else { + out_data_extra = nullopt; + } + char* acc_data = acc_buf_ptr->get_acc_slice(out_data); + + ReduceConfig config = setReduceConfig<arg_t, scalar_t, vt0>(iter); + + at::DataPtr buffer; + at::DataPtr semaphores; + if (config.should_global_reduce()) { + auto& allocator = *c10::cuda::CUDACachingAllocator::get(); + buffer = allocator.allocate(config.global_memory_size()); + semaphores = allocator.allocate(config.semaphore_size()); + + auto stream = at::cuda::getCurrentCUDAStream(); + AT_CUDA_CHECK(cudaMemsetAsync(semaphores.get(), 0, config.semaphore_size(), stream)); + } + + AT_ASSERT(can_use_32bit_indexing); + auto output_calc = make_output_calculator<uint32_t>(iter); + auto input_calc = make_input_calculator<uint32_t>(iter); + auto reduce = ReduceJitOp( + config, + input_calc, + output_calc, + in_data, + out_data, + out_data_extra, + acc_data, + buffer.get(), + (int*)semaphores.get(), + ident, + noutputs, + base_idx); + reduce.accumulate = iter.should_accumulate(); + reduce.final_output = iter.is_final_output(); + + constexpr int nInputs = 1; + constexpr int nOutputs = 1; + static auto desc =
at::cuda::jit::make_kernel_descriptor< + out_scalar_t, scalar_t>(name, func, nInputs, nOutputs); + + static std::mutex jiterator_mutex; + static std::vector> fn_cache(c10::cuda::device_count()); + auto &cache = fn_cache[iter.device().index()]; + + launch_jitted_reduce_kernel( + jiterator_mutex, cache, desc, vt0, config, &reduce); +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/ReduceOps.h b/voice_bridge/torch/include/ATen/native/cuda/ReduceOps.h new file mode 100644 index 0000000000000000000000000000000000000000..a67a019ae49e2ec73be935aa4cae703417e9bb19 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/ReduceOps.h @@ -0,0 +1,20 @@ + +namespace at { +struct TensorIterator; +} + +namespace c10 { +class Scalar; +} + +namespace at { namespace native { + +void norm_launch_kernel(TensorIterator &iter, double val); +void min_launch_kernel(TensorIterator &iter); +void max_launch_kernel(TensorIterator &iter); +void aminmax_launch_kernel(TensorIterator &iter); +void min_all_launch_kernel(TensorIterator &iter); +void max_all_launch_kernel(TensorIterator &iter); +void aminmax_allreduce_launch_kernel(TensorIterator &iter); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Resize.h b/voice_bridge/torch/include/ATen/native/cuda/Resize.h new file mode 100644 index 0000000000000000000000000000000000000000..569b145fa61d991472f589a777b2f74b4a277857 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Resize.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include + +#include + +namespace at { namespace native { + +TORCH_CUDA_CPP_API void resize_bytes_cuda(StorageImpl* storage, size_t size_bytes); + +static inline void maybe_resize_storage_cuda(TensorImpl* self, size_t new_size_bytes) { + // It does not make sense to try to resize a storage + // to hold 0 elements, and this can break + // if storage_offset is positive but + // new_size is 0, so just bail in that case + // (same comment is in Resize.h) + if (self->numel() == 0) { + return; + } + + const Storage &storage = self->unsafe_storage(); + TORCH_CHECK(storage, "Tensor: invalid null storage"); + if (new_size_bytes > storage.nbytes()) { + resize_bytes_cuda(storage.unsafeGetStorageImpl(), new_size_bytes); + } +} + +inline TensorImpl* resize_impl_cuda_( + TensorImpl* self, + IntArrayRef size, + at::OptionalIntArrayRef stride, + bool device_guard = true) { + if (self->sizes() == size && (!stride || self->strides() == stride)) { + return self; + } + + // NB: We don't need to hold the device guard when calling from TH + cuda::OptionalCUDAGuard guard; + if (device_guard) { + guard.set_index(self->storage().device().index()); + } + + const auto itemsize = self->dtype().itemsize(); + const auto storage_offset = self->storage_offset(); + size_t storage_size = 1; + if (stride) { + self->set_sizes_and_strides(size, *stride); + storage_size = at::detail::computeStorageNbytes( + size, *stride, itemsize, storage_offset); + } else { + self->set_sizes_contiguous(size); + storage_size = at::detail::computeStorageNbytesContiguous( + size, itemsize, storage_offset); + } + maybe_resize_storage_cuda(self, storage_size); + + return self; +} + +}} diff --git a/voice_bridge/torch/include/ATen/native/cuda/ScanKernels.h b/voice_bridge/torch/include/ATen/native/cuda/ScanKernels.h new file mode 100644 index 0000000000000000000000000000000000000000..28e65372511bc7b50390a134e01554b6fa9ee171 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/ScanKernels.h @@ -0,0 +1,18 @@ 
+#pragma once +#include + +namespace at { +class TensorBase; + +namespace native { + +// NOTE: these functions require output tensors to be contiguous +void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values, + const TensorBase& indices, int64_t dim); +void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values, + const TensorBase& indices, int64_t dim); +void launch_logcumsumexp_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim); +void launch_cumsum_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim); +void launch_cumprod_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/ScanUtils.cuh b/voice_bridge/torch/include/ATen/native/cuda/ScanUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..ba27a245172b5bdb75d6dcae3ada10dc8a918c8a --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/ScanUtils.cuh @@ -0,0 +1,452 @@ +#pragma once +#include +#include +#include +#include + +#include +#include +#include + +namespace at { +namespace native { + +template +constexpr inline integer ceil_div(integer n, integer m) { + return (n + m - 1) / m; +} + +template +__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) { + if(!at::_isnan(rhs) && (at::_isnan(lhs) || !binary_op(rhs, lhs))) { + rhs = lhs; + rhs_idx = lhs_idx; + } +} +/* Perform an inclusive scan along the innermost dimension of a tensor. + * + * - num_rows is the size of the flattened outer dimensions; + * - row_size is the size of the innermost dimension; + * + * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is + * considered as having 'num_rows' rows of size 'row_size'. + * Each thread block processes one or more sets of contiguous rows (processing multiple rows + * per thread block is quicker than processing a single row, especially for short rows). + */ +template +__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_, + int num_rows, int row_size, + scalar_t init, BinaryFunction binary_op) { + __shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x]; + __shared__ int64_t ibuf[num_threads_y][2 * num_threads_x]; + scalar_t* row_buf = vbuf[threadIdx.y]; + int64_t* row_idx_buf = ibuf[threadIdx.y]; + + for (int block_row = blockIdx.x * blockDim.y; + block_row < num_rows; + block_row += blockDim.y * gridDim.x) { + int row = block_row + threadIdx.y; + const scalar_t *row_self = self_ + row * row_size; + scalar_t *row_values = values_ + row * row_size; + int64_t *row_indices = indices_ + row * row_size; + scalar_t block_total = init; + int64_t block_idx_final = 0; + // Perform scan on one block at a time, keeping track of the total value of + // all blocks processed so far. + for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) { + // Load data into shared memory (two values per thread). 
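+ // Each pass stages a chunk of 2 * num_threads_x columns in shared memory: lane threadIdx.x + // loads columns block_col + threadIdx.x and block_col + num_threads_x + threadIdx.x of its row.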
+ int col1 = block_col + threadIdx.x; + int col2 = block_col + num_threads_x + threadIdx.x; + if (row < num_rows) { + if (col1 < row_size) { + row_buf[threadIdx.x] = c10::load(&row_self[col1]); + row_idx_buf[threadIdx.x] = col1; + } else { + row_buf[threadIdx.x] = init; + // No need to set the index here as the value in init will never be selected + } + + if (col2 < row_size) { + row_buf[num_threads_x + threadIdx.x] = c10::load(&row_self[col2]); + row_idx_buf[num_threads_x + threadIdx.x] = col2; + } else { + row_buf[num_threads_x + threadIdx.x] = init; + // No need to set the index here as the value in init will never be selected + } + + // Add the total value of all previous blocks to the first value of this block. + if (threadIdx.x == 0) { + binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op); + } + } + __syncthreads(); + + // Parallel reduction (up-sweep). + for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) { + if (row < num_rows && threadIdx.x < s) { + int offset = (2 * threadIdx.x + 1) * d - 1; + binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op); + } + __syncthreads(); + } + + // Down-sweep. + for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) { + if (row < num_rows && threadIdx.x < s - 1) { + int offset = 2 * (threadIdx.x + 1) * d - 1; + binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op); + } + __syncthreads(); + } + + // Write back to output. + if (row < num_rows) { + if (col1 < row_size) { + row_values[col1] = row_buf[threadIdx.x]; + row_indices[col1] = row_idx_buf[threadIdx.x]; + } + if (col2 < row_size) { + row_values[col2] = row_buf[num_threads_x + threadIdx.x]; + row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x]; + } + } + block_total = row_buf[2 * num_threads_x - 1]; + block_idx_final = row_idx_buf[2 * num_threads_x - 1]; + __syncthreads(); + } + } +} + +/* Perform an inclusive scan along an outer dimension of a tensor. + * + * - num_orows is the size of the flattened outer dimensions; + * - num_irows is the size of the flattened inner dimensions; + * - row_size is the size of the dimension along which to scan; + * + * The dimensions to the outside and inside of the specified dimension are considered as flattened. + * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened + * outer dimensions, which contains several "inner rows"). + * Each thread processes a single inner row at a time.
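+ * + * For example, scanning dim = 1 of a tensor of shape (2, 3, 4) gives num_orows = 2, + * num_irows = 4 and row_size = 3.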
+ */ +template <typename scalar_t, class BinaryFunction> +__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_, + const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) { + for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { + for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { + scalar_t *self = self_ + orow * row_size * num_irows + irow; + scalar_t *values = values_ + orow * row_size * num_irows + irow; + int64_t *indices = indices_ + orow * row_size * num_irows + irow; + scalar_t out = init; + int64_t out_idx = 0; + + for (auto col = decltype(row_size){0}; col < row_size; ++col) { + const auto val = c10::load(self); + if(at::_isnan(val) || (!at::_isnan(out) && binary_op(val, out))) { + out = val; + out_idx = col; + } + *values = out; + *indices = out_idx; + self += num_irows; + values += num_irows; + indices += num_irows; + } + } + } +} + +inline void check_fits_in_unsigned(int64_t val, const char* name) { + constexpr auto umax = std::numeric_limits<uint32_t>::max(); + TORCH_CHECK( + val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value"); +} + + +template <typename scalar_t, class BinaryFunction> +__host__ void scan_outer_dim_with_indices( + const TensorBase& self, const TensorBase& values, const TensorBase& indices, + int dim, scalar_t init, BinaryFunction binary_op) { + int64_t row_size = self.size(dim); + auto sizes = self.sizes(); + + // Treat all outer dimensions (i.e. dimensions < dim) as one. + const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim); + + // Treat all inner dimensions (i.e. dimensions > dim) as one. + const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end()); + // For performance reasons, the CUDA kernels use uint32_t for loops over irows, orows and row; + // make sure the input is not bigger than what uint32_t can index. + check_fits_in_unsigned(num_irows, "num_irows"); + check_fits_in_unsigned(num_orows, "num_orows"); + check_fits_in_unsigned(row_size, "row_size"); + + + dim3 threads(std::min(512, int(num_irows))); + int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; + dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x}))); + tensor_kernel_scan_outer_dim_with_indices<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>( + self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(), + num_orows, num_irows, row_size, init, binary_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template <typename scalar_t, class BinaryFunction> +__host__ void scan_innermost_dim_with_indices( + const TensorBase& self, const TensorBase& values, const TensorBase& indices, + scalar_t init, BinaryFunction binary_op) { + int ndim = self.dim(); + // Treat all outer dimensions as a single dimension.
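+ // e.g. a contiguous tensor of shape (4, 5, 6) scanned over its last dimension is + // processed as num_rows = 20 rows of row_size = 6.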
+ int row_size = self.size(ndim - 1); + int num_rows = self.numel() / row_size; + + dim3 threads(16, 32); + dim3 grid(std::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y)))); + + tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>( + self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(), + num_rows, row_size, init, binary_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template <typename scalar_t, typename BinaryFunction> +void scan_dim_with_indices(const TensorBase& self, const TensorBase& values, const TensorBase& indices, + int64_t dim, scalar_t init, BinaryFunction binary_op) { + int ndim = self.dim(); + auto self_ = self.expect_contiguous(); + TORCH_INTERNAL_ASSERT(values.is_contiguous() && indices.is_contiguous()); + if (dim == ndim - 1) { + scan_innermost_dim_with_indices<scalar_t>(*self_, values, indices, init, binary_op); + } else { + scan_outer_dim_with_indices<scalar_t>(*self_, values, indices, dim, init, binary_op); + } +} + +// TODO: The implementation of `tensor_kernel_scan_outer_dim` and +// `tensor_kernel_scan_innermost_dim` is similar to +// `tensor_kernel_scan_outer_dim_with_indices` and +// `tensor_kernel_scan_innermost_dim_with_indices` and should be refactored to +// remove the duplication. + +/* Perform an inclusive scan along an outer dimension of a tensor. + * + * - num_orows is the size of the flattened outer dimensions; + * - num_irows is the size of the flattened inner dimensions; + * - row_size is the size of the dimension along which to scan; + * + * The dimensions to the outside and inside of the specified dimension are considered as flattened. + * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened + * outer dimensions, which contains several "inner rows"). + * Each thread processes a single inner row at a time. + */ +template <typename scalar_t, class BinaryOp> +__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_, + const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, + const scalar_t init, BinaryOp binary_op) +{ + for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { + for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { + scalar_t *src = src_ + orow * row_size * num_irows + irow; + scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow; + scalar_t acc = init; + + for (uint32_t col = 0; col < row_size; ++col) { + acc = binary_op(acc, c10::load(src)); + *tgt = acc; + + src += num_irows; + tgt += num_irows; + } + } + } +} + +/* Perform an inclusive scan along the innermost dimension of a tensor. + * + * - num_rows is the size of the flattened outer dimensions; + * - row_size is the size of the innermost dimension; + * + * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is + * considered as having 'num_rows' rows of size 'row_size'. + * Each thread block processes one or more sets of contiguous rows (processing multiple rows + * per thread block is quicker than processing a single row, especially for short rows).
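+ * + * For example, with num_threads_x = 16 each block walks a row in chunks of + * 2 * num_threads_x = 32 columns, carrying block_total across chunks.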
+ */ +template +__device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_, + const uint32_t num_rows, const uint32_t row_size, + T init, BinaryFunction binary_op){ + for (uint32_t block_row = blockIdx.x * blockDim.y; + block_row < num_rows; + block_row += blockDim.y * gridDim.x) { + uint32_t row = block_row + threadIdx.y; + T block_total = init; + + T *row_src = src_ + row * row_size; + T *row_tgt = tgt_ + row * row_size; + + // Perform scan on one block at a time, keeping track of the total value of + // all blocks processed so far. + for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) { + // Load data into shared memory (two values per thread). + uint32_t col1 = block_col + threadIdx.x; + uint32_t col2 = block_col + num_threads_x + threadIdx.x; + if (row < num_rows) { + if (col1 < row_size) { + row_buf[threadIdx.x] = row_src[col1]; + } else { + row_buf[threadIdx.x] = init; + } + + if (col2 < row_size) { + row_buf[num_threads_x + threadIdx.x] = row_src[col2]; + } else { + row_buf[num_threads_x + threadIdx.x] = init; + } + + // Add the total value of all previous blocks to the first value of this block. + if (threadIdx.x == 0) { + row_buf[0] = binary_op(row_buf[0], block_total); + } + } + __syncthreads(); + + // Parallel reduction (up-sweep). + for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) { + if (row < num_rows && threadIdx.x < s) { + uint32_t offset = (2 * threadIdx.x + 1) * d - 1; + row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]); + } + __syncthreads(); + } + + // Down-sweep. + for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) { + if (row < num_rows && threadIdx.x < s - 1) { + uint32_t offset = 2 * (threadIdx.x + 1) * d - 1; + row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]); + } + __syncthreads(); + } + + // Write back to output. + if (row < num_rows) { + if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x]; + if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x]; + } + block_total = row_buf[2 * num_threads_x - 1]; + __syncthreads(); + } + } +} + +template < + typename T, + int num_threads_x, + int num_threads_y, + class BinaryFunction> +__global__ typename std::enable_if::value, void>::type +tensor_kernel_scan_innermost_dim( + T* tgt_, + T* src_, + const uint32_t num_rows, + const uint32_t row_size, + T init, + BinaryFunction binary_op) { + __shared__ T sbuf[num_threads_y][2 * num_threads_x]; + T* row_buf = sbuf[threadIdx.y]; + + tensor_kernel_scan_innermost_dim_impl( + row_buf, tgt_, src_, num_rows, row_size, init, binary_op); +} + +template < + typename T, + int num_threads_x, + int num_threads_y, + class BinaryFunction> +__global__ typename std::enable_if::value, void>::type +tensor_kernel_scan_innermost_dim( + T* tgt_, + T* src_, + const uint32_t num_rows, + const uint32_t row_size, + T init, + BinaryFunction binary_op) { + // As we cannot directly initialize shared array for complex types + // Reference: + // `error: initializer not allowed for __shared__ variable` + // We instead get the base scalar type and allocate twice number of + // elements required of base type and reinterpret them as complex. 
+ using base_t = typename scalar_value_type::type; + __shared__ base_t sbuf[num_threads_y][4 * num_threads_x]; + + T* row_buf = reinterpret_cast(sbuf[threadIdx.y]); + + tensor_kernel_scan_innermost_dim_impl( + row_buf, tgt_, src_, num_rows, row_size, init, binary_op); +} + + +template +__host__ void scan_outer_dim(const TensorBase& self, const TensorBase& result, + int dim, scalar_t init, BinaryFunction binary_op) { + const int64_t row_size = self.size(dim); + auto sizes = self.sizes(); + + // Treat all outer dimensions (i.e. dim_ < dim) as one. + const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim); + + // Treat all inner dimensions (i.e. dim > dimension) as one. + const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end()); + + dim3 threads(std::min(512, int(num_irows))); + int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; + dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x}))); + + check_fits_in_unsigned(num_irows, "num_irows"); + check_fits_in_unsigned(num_orows, "num_orows"); + check_fits_in_unsigned(row_size, "row_size"); + + tensor_kernel_scan_outer_dim<<>>( + result.data_ptr(), self.data_ptr(), + num_orows, num_irows, row_size, init, binary_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +void scan_innermost_dim(const TensorBase& self, const TensorBase& result, + scalar_t init, BinaryFunction binary_op) { + int64_t ndim = self.dim(); + // Treat all outer dimensions as a single dimension. + int64_t row_size = self.size(ndim - 1); + int64_t num_rows = self.numel() / row_size; + + dim3 threads(16, 32); + int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0]; + dim3 grid(std::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y}))); + + check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))"); + check_fits_in_unsigned(row_size, "row_size"); + + tensor_kernel_scan_innermost_dim<<>>( + result.data_ptr(), self.data_ptr(), + num_rows, row_size, init, binary_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +void scan_dim(const TensorBase& self, const TensorBase& result, + int64_t dim, scalar_t init, BinaryFunction binary_op) { + int ndim = self.dim(); + auto self_ = self.expect_contiguous(); + TORCH_INTERNAL_ASSERT(result.is_contiguous()); + + if (self.numel() == self.size(dim)) { + cuda::cub::inclusive_scan(self_->data_ptr(), result.data_ptr(), binary_op, self.numel()); + } else if (dim == ndim - 1) { + scan_innermost_dim(*self_, result, init, binary_op); + } else { + scan_outer_dim(*self_, result, dim, init, binary_op); + } +} + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Sort.h b/voice_bridge/torch/include/ATen/native/cuda/Sort.h new file mode 100644 index 0000000000000000000000000000000000000000..656b4ce2c2bbac167457f31e8f554a5e409a2940 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Sort.h @@ -0,0 +1,17 @@ +#pragma once +#include +#include +#include + +namespace at { +namespace native { + +inline bool should_use_small_sort(const TensorBase &self, int64_t dim) { + return self.size(dim) <= 4096; +} + +void sortKeyValueInplace( + const TensorBase &key, const TensorBase &value, int dim, + bool descending, bool stable=false); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/SortStable.h b/voice_bridge/torch/include/ATen/native/cuda/SortStable.h new file mode 100644 index 
0000000000000000000000000000000000000000..039c4307c522c9f81bf88554483f67a26127561a --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/SortStable.h @@ -0,0 +1,19 @@ +#pragma once +#include +#include + +namespace at { +namespace native { + +// Stable-sort self into values, and set indices to the +// inverse-permutation from values back to self. +// Output tensors must be pre-allocated and contiguous. +void launch_stable_sort_kernel( + const TensorBase& self, + int64_t dim, + bool descending, + const TensorBase& values, + const TensorBase& indices); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/SortUtils.cuh b/voice_bridge/torch/include/ATen/native/cuda/SortUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a1d309ce709e220b69cadbf45fa95a3b968fa6f8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/SortUtils.cuh @@ -0,0 +1,253 @@ +#pragma once +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +template <typename T> +__device__ inline void swapVars(T& t1, T& t2) { + T tmp = t1; + t1 = t2; + t2 = tmp; +} + +template <typename K, typename V, typename Comparator> +__device__ inline void bitonicSwap(K& kA, V& vA, bool& validA, + K& kB, V& vB, bool& validB, + bool dir, + const Comparator& comp) { + // Invalid entries always sort to the end + bool swap = (comp(kA, kB) && validA) || !validB; + if (swap == dir) { + swapVars(kA, kB); + swapVars(vA, vB); + swapVars(validA, validB); + } +} + +template <typename K, typename V, int Power2SortSize, typename Comparator> +__device__ inline void bitonicSort(K *keys, + V *values, + bool *valid, + const Comparator& comp) { +#if !defined(USE_ROCM) +#pragma unroll +#endif + for (unsigned int size = 2; size < Power2SortSize; size *= 2) { + bool flag = ((threadIdx.x & (size / 2)) != 0); + +#if !defined(USE_ROCM) +#pragma unroll +#endif + for (unsigned int stride = size / 2; stride > 0; stride /= 2) { + + __syncthreads(); + + unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); + bitonicSwap( + keys[pos], values[pos], valid[pos], + keys[pos + stride], values[pos + stride], valid[pos + stride], + flag, comp); + } + } + +#if !defined(USE_ROCM) +#pragma unroll +#endif + for (unsigned int stride = Power2SortSize / 2; stride > 0; stride /= 2) { + + __syncthreads(); + + unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); + bitonicSwap( + keys[pos], values[pos], valid[pos], + keys[pos + stride], values[pos + stride], valid[pos + stride], + false, comp); + } + + __syncthreads(); + +} + +// at::cuda::detail::TensorInfo version +// Sorts (key, value) pairs (in different tensors) in-place; i.e., +// modifies the input `keys` and `values` +template <int KeyDims, int ValueDims, int block_dim_x, int max_block_dim_y, typename K, typename V, typename Comparator, typename IndexType> +C10_LAUNCH_BOUNDS_1(block_dim_x * max_block_dim_y) +__global__ void +bitonicSortKVInPlace(at::cuda::detail::TensorInfo<K, IndexType> keys, + IndexType keySlices, + IndexType keySliceSize, + IndexType keySliceStride, + at::cuda::detail::TensorInfo<V, IndexType> values, + IndexType valueSliceStride, + Comparator comp) { + // Find the slice of the tensor that we are sorting + // NOTE: blockDim.y may be less than max_block_dim_y + const IndexType blockIndex = getLinearBlockId<IndexType>(); + const IndexType linearIndex = blockIndex * blockDim.y + threadIdx.y; + + // If the entire block is out of bounds, exit early + if (blockIndex * blockDim.y >= keySlices) { + return; + } + // It's also possible for some rows of a block to be out of bounds, + // but all threads need to run for __syncthreads to work.
+ const bool row_valid = linearIndex < keySlices; + + constexpr int items_per_thread = 2; + constexpr int Power2SortSize = block_dim_x * items_per_thread; + + // Storage for max_block_dim_y sorts performed in parallel + __shared__ K blockSharedKeys[max_block_dim_y][Power2SortSize]; + __shared__ V blockSharedValues[max_block_dim_y][Power2SortSize]; + __shared__ bool blockSharedValid[max_block_dim_y][Power2SortSize]; + + auto sharedKeys = blockSharedKeys[threadIdx.y]; + auto sharedValues = blockSharedValues[threadIdx.y]; + auto sharedValid = blockSharedValid[threadIdx.y]; + + const IndexType keyStartOffset = + at::cuda::detail::IndexToOffset::get(linearIndex, keys); + const IndexType valueStartOffset = + at::cuda::detail::IndexToOffset::get(linearIndex, values); + + // Load 2 values per thread into the shared workspace + #pragma unroll + for (int k = 0; k < items_per_thread; ++k) { + auto idx = threadIdx.x + k * blockDim.x; + bool valid = row_valid && idx < keySliceSize; + + sharedKeys[idx] = valid ? + keys.data[idx * keySliceStride + keyStartOffset] : K{}; + sharedValues[idx] = valid ? + values.data[idx * valueSliceStride + valueStartOffset] : V{}; + sharedValid[idx] = valid; + } + + // Sort! + bitonicSort( + sharedKeys, sharedValues, sharedValid, comp); + + if (!row_valid) { + return; + } + + // Store outputs + #pragma unroll + for (int k = 0; k < items_per_thread; ++k) { + auto idx = threadIdx.x + k * blockDim.x; + if (idx < keySliceSize) { + keys.data[idx * keySliceStride + keyStartOffset] = sharedKeys[idx]; + values.data[idx * valueSliceStride + valueStartOffset] = sharedValues[idx]; + } + } +} + +template +C10_LAUNCH_BOUNDS_1(block_size) +__global__ void +radixSortKVInPlace(at::cuda::detail::TensorInfo keys, + IndexType keySlices, + IndexType keySliceSize, + IndexType keySliceStride, + at::cuda::detail::TensorInfo values, + IndexType valueSliceStride, + bool descending) { + static_assert(block_size > 0, ""); + + // Find the slice of the tensor that we are sorting + const IndexType linearIndex = getLinearBlockId(); + // Tiling the slices could have us be out of bounds, if there are a + // lot of slices to sort + if (linearIndex >= keySlices) { + return; + } + + const IndexType keyStartOffset = + at::cuda::detail::IndexToOffset::get(linearIndex, keys); + const IndexType valueStartOffset = + at::cuda::detail::IndexToOffset::get(linearIndex, values); + + K *keys_slice = &keys.data[keyStartOffset]; + V *values_slice = &values.data[valueStartOffset]; + + StridedRandomAccessor keys_iter(keys_slice, keySliceStride); + StridedRandomAccessor values_iter(values_slice, valueSliceStride); + + namespace cub = ROCM_HIPCUB(at_cuda_detail::cub); + + using key_t = typename at::cuda::cub::detail::cuda_type::type; + using LoadKeys = cub::BlockLoad; + using LoadValues = cub::BlockLoad; + using Sort = cub::BlockRadixSort; + using StoreKeys = cub::BlockStore; + using StoreValues = cub::BlockStore; + + __shared__ union { + typename LoadKeys::TempStorage load_keys; + typename LoadValues::TempStorage load_values; + typename Sort::TempStorage sort; + typename StoreKeys::TempStorage store_keys; + typename StoreValues::TempStorage store_values; + } tmp_storage; + + // cub's Block operations operate on a fixed number of items, but the + // actual slice we are sorting might be smaller. So, we need to make + // up the difference with keys that will always sort higher. 
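+ // Concretely: the pad is the maximum key for an ascending sort and the minimum key for a + // descending one, so padded entries always sort behind every real element and are then dropped + // by the bounded Store at the end of the kernel.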
+ const K invalid_key = [descending] { + using radix_t = typename cub::Traits::UnsignedBits; + union { + K key; + radix_t radix; + } tmp; + tmp.radix = descending ? + cub::Traits::LOWEST_KEY : + cub::Traits::MAX_KEY; + return tmp.key; + }(); + const V invalid_value = static_cast(0); + + // Load inputs + K local_keys[items_per_thread]; + V local_values[items_per_thread]; + + LoadKeys(tmp_storage.load_keys).Load(keys_iter, local_keys, keySliceSize, invalid_key); + __syncthreads(); + LoadValues(tmp_storage.load_values).Load(values_iter, local_values, keySliceSize, invalid_value); + __syncthreads(); + + // Sort! + if (descending) { + Sort(tmp_storage.sort).SortDescending( + reinterpret_cast(local_keys), + local_values); + } else { + Sort(tmp_storage.sort).Sort( + reinterpret_cast(local_keys), + local_values); + } + __syncthreads(); + + // Store outputs + StoreKeys(tmp_storage.store_keys).Store(keys_iter, local_keys, keySliceSize); + __syncthreads(); + StoreValues(tmp_storage.store_values).Store(values_iter, local_values, keySliceSize); +} + +}} // at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/Sorting.h b/voice_bridge/torch/include/ATen/native/cuda/Sorting.h new file mode 100644 index 0000000000000000000000000000000000000000..bd10ffb1a0274182c77bebe1097169f891dad3d3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/Sorting.h @@ -0,0 +1,18 @@ +#pragma once +#include + +namespace at { +class TensorBase; +} + +namespace at { +namespace native { + +void launch_kthvalue_kernel( + const TensorBase &values, const TensorBase &indices, + const TensorBase &self, int64_t dim, int64_t k); +void launch_median_kernel( + const TensorBase &vals, const TensorBase &inds, + const TensorBase &in, int64_t dim, bool ignore_nan); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/SortingCommon.cuh b/voice_bridge/torch/include/ATen/native/cuda/SortingCommon.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2e032677bb50b0a3b250ad9851f0e06c65dc04ab --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/SortingCommon.cuh @@ -0,0 +1,194 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +// Is this questionable namespace pollution? +#if defined(USE_ROCM) +constexpr int MAX_BLOCK_SIZE = 256; + +#else +constexpr int MAX_BLOCK_SIZE = 1024; +#endif + +// Maximum size per grid dimension that we assume (compute capability >= 2.0) +constexpr int64_t MAX_GRID_SIZE = 65535LL; + +static bool getGridFromTiles(int64_t gridTiles, dim3& grid) { + if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) { + return false; + } + + int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; + int64_t gridY = 1; + int64_t gridZ = 1; + + if (gridTiles > MAX_GRID_SIZE) { + gridTiles = ceil_div(gridTiles, MAX_GRID_SIZE); + gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles; + + if (gridTiles > MAX_GRID_SIZE) { + gridTiles = ceil_div(gridTiles, MAX_GRID_SIZE); + gridZ = gridTiles > MAX_GRID_SIZE ? 
MAX_GRID_SIZE : gridTiles; + } + } + + grid = dim3(gridX, gridY, gridZ); + return true; +} + +template <typename scalar_t, bool handleNaN = false> +struct GTOp { + __device__ bool operator()(const scalar_t& lhs, const scalar_t& rhs) const { + return (handleNaN && at::_isnan(lhs) && !at::_isnan(rhs)) || (lhs > rhs); + } +}; + +template <typename scalar_t, bool handleNaN = false> +struct LTOp { + __device__ bool operator()(const scalar_t& lhs, const scalar_t& rhs) const { + return (handleNaN && at::_isnan(rhs) && !at::_isnan(lhs)) || (lhs < rhs); + } +}; + +template <typename index_t> +__device__ __forceinline__ index_t getLinearBlockId() { + return blockIdx.z * gridDim.y * gridDim.x + blockIdx.y * gridDim.x + + blockIdx.x; +} + +// For slice sorting in Thrust; extracts a slice index from a linear +// index and uses that for comparison +struct SliceComp { + SliceComp(int64_t size) : sliceSize(size) {} + + __device__ bool operator()(const int64_t& a, const int64_t& b) const { + // Since the slices are guaranteed to be innermost, + // the segment is just the linear index divided by the slice size + int64_t segA = a / sliceSize; + int64_t segB = b / sliceSize; + return segA < segB; + } + + const int64_t sliceSize; +}; + +// For sorting in Thrust; extracts a within-slice index from a linear index +struct GlobalIndexToPerSliceIndex { + GlobalIndexToPerSliceIndex(int64_t size) : sliceSize(size) {} + + __device__ inline void operator()(int64_t& v) const { + v = v % sliceSize; + } + + const int64_t sliceSize; +}; + +// Returns 2^(ceil(lg(n))), from the Stanford bit twiddling hacks +static uint64_t nextHighestPowerOf2(uint64_t n) { + n--; + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; +#ifndef _MSC_VER + n |= n >> 32; +#endif + n++; + + return n; +} + + +// WARNING: This function assumes input tensors are contiguous +template <typename scalar_t, typename index_t, typename Launcher> +void run_launcher( + const TensorBase &values, + const TensorBase &indices, + const TensorBase &self, + int64_t dim, + Launcher l) { + auto self_info = cuda::detail::getTensorInfo<scalar_t, index_t>(self); + auto values_info = cuda::detail::getTensorInfo<scalar_t, index_t>(values); + auto indices_info = cuda::detail::getTensorInfo<int64_t, index_t>(indices); + + int64_t slice_size = self.size(dim); + /* We use these structures solely to find the offset to */ + /* each slice we are operating on */ + self_info.reduceDim(dim); + values_info.reduceDim(dim); + indices_info.reduceDim(dim); + + /* Collapse all other dims */ + int collapse_self_dim = self_info.collapseDims(dim); + int collapse_values_dim = values_info.collapseDims(dim); + int collapse_indices_dim = indices_info.collapseDims(dim); + + int64_t num_slices = 1; + for (int i = 0; i < self_info.dims; ++i) { + num_slices *= self_info.sizes[i]; + } + + /* This is used as a template parameter to calculate indices.
*/ + /* We only specialize it if all collapsed dim sizes are the */ + /* same; otherwise, we use -1 which is the specialization */ + /* parameter for arbitrary dimensions */ + int all_dims = self_info.dims; + if (values_info.dims != all_dims || indices_info.dims != all_dims) { + all_dims = -1; + } + + if (all_dims == 1) { + l.template launch( + values_info, + collapse_values_dim, + indices_info, + collapse_indices_dim, + self_info, + collapse_self_dim, + num_slices, + slice_size); + } else if (all_dims == 2) { + l.template launch( + values_info, + collapse_values_dim, + indices_info, + collapse_indices_dim, + self_info, + collapse_self_dim, + num_slices, + slice_size); + } else if (all_dims == 3) { + l.template launch( + values_info, + collapse_values_dim, + indices_info, + collapse_indices_dim, + self_info, + collapse_self_dim, + num_slices, + slice_size); + } else { + l.template launch( + values_info, + collapse_values_dim, + indices_info, + collapse_indices_dim, + self_info, + collapse_self_dim, + num_slices, + slice_size); + } +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/SortingRadixSelect.cuh b/voice_bridge/torch/include/ATen/native/cuda/SortingRadixSelect.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e10f9d6831f5b2cd165de013cf4221faf62595eb --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/SortingRadixSelect.cuh @@ -0,0 +1,420 @@ +#include +#include +#include +#include + +namespace at { +namespace native { + +template +struct TopKTypeConfig {}; + +template <> +struct TopKTypeConfig { + typedef uint32_t RadixType; + + // Converts a float to an integer representation with the same + // sorting; i.e., for floats f1, f2: + // if f1 < f2 then convert(f1) < convert(f2) + // We use this to enable radix selection of floating-point values. + // This also gives a relative order for NaNs, but that's ok, as they + // will all be adjacent + // neg inf: signbit=1 exp=ff fraction=0 --> radix = 0 00 ff.. + // pos inf: signbit=0 exp=ff fraction=0 --> radix = 1 ff 00.. 
+ // pos nan: signbit=0 exp=ff fraction>0 --> radix = 1 ff x>0 + // neg nan: signbit=1 exp=ff fraction>0 --> radix = 0 00 x +struct TopKTypeConfig { + typedef uint32_t RadixType; + + static inline __device__ RadixType convert(uint8_t v) { + return v; + } + + static inline __device__ uint8_t deconvert(RadixType v) { + return v; + } +}; + +template <> +struct TopKTypeConfig { + typedef uint32_t RadixType; + + static inline __device__ RadixType convert(int8_t v) { + return 128u + v; + } + + static inline __device__ int8_t deconvert(RadixType v) { + return v - 128; + } +}; + +template <> +struct TopKTypeConfig { + typedef uint32_t RadixType; + + static inline __device__ RadixType convert(int16_t v) { + static_assert(sizeof(short) == 2, ""); + return 32768u + v; + } + + static inline __device__ int16_t deconvert(RadixType v) { + return v - 32768; + } +}; + +template <> +struct TopKTypeConfig { + typedef uint32_t RadixType; + + static inline __device__ RadixType convert(int32_t v) { + static_assert(sizeof(int) == 4, ""); + return 2147483648u + v; + } + + static inline __device__ int32_t deconvert(RadixType v) { + return v - 2147483648u; + } +}; + +template <> +struct TopKTypeConfig { + typedef uint64_t RadixType; + + static inline __device__ RadixType convert(int64_t v) { + static_assert(sizeof(int64_t) == 8, ""); + return 9223372036854775808ull + v; + } + + static inline __device__ int64_t deconvert(RadixType v) { + return v - 9223372036854775808ull; + } +}; + +template <> +struct TopKTypeConfig { + typedef uint64_t RadixType; + + static inline __device__ RadixType convert(double v) { + RadixType x = __double_as_longlong(v); + RadixType mask = -((x >> 63)) | 0x8000000000000000; + return (v == v) ? (x ^ mask) : 0xffffffffffffffff; + } + + static inline __device__ double deconvert(RadixType v) { + RadixType mask = ((v >> 63) - 1) | 0x8000000000000000; + return __longlong_as_double(v ^ mask); + } +}; + +template <> +struct TopKTypeConfig { + typedef uint32_t RadixType; + + static inline __device__ RadixType convert(at::Half v) { +#if defined(__CUDA_ARCH__) || defined(USE_ROCM) + RadixType x = __half_as_ushort(v); + RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000; + return (v == v) ? (x ^ mask) : 0xffff; +#else + assert(false); + return 0u; +#endif + } + + static inline __device__ at::Half deconvert(RadixType v) { +#if defined(__CUDA_ARCH__) || defined(USE_ROCM) + RadixType mask = (v & 0x00008000) ? 0x00008000 : 0x0000ffff; + return __ushort_as_half(v ^ mask); +#else + assert(false); + return static_cast(0); +#endif + } +}; + +template <> +struct TopKTypeConfig { + typedef uint32_t RadixType; + + static inline __device__ RadixType convert(at::BFloat16 v) { + RadixType x = v.x; + RadixType mask = (x & 0x00008000) ? 0x0000ffff : 0x00008000; + return (v == v) ? (x ^ mask) : 0xffff; + } + + static inline __device__ at::BFloat16 deconvert(RadixType v) { + RadixType mask = (v & 0x00008000) ? 0x00008000 : 0x0000ffff; + at::BFloat16 r; + r.x = (v ^ mask); + return r; + } +}; + +// This function counts the distribution of all input values in a +// slice we are selecting by radix digit at `radixDigitPos`, but only +// those that pass the filter `((v & desiredMask) == desired)`. +// This produces and broadcasts the seen counts for a single block only. +// `smem` must have at least `RadixSize` elements. 
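+// With the RADIX_BITS = 2 / RADIX_SIZE = 4 configuration below, radixSelect calls this once per +// 2-bit digit position, i.e. at most sizeof(scalar_t) * 4 times per selection.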
+template < + typename scalar_t, + typename bitwise_t, + typename index_t, + typename CountType, + int RadixSize, + int RadixBits> +__device__ void countRadixUsingMask( + CountType counts[RadixSize], + CountType* smem, + bitwise_t desired, + bitwise_t desiredMask, + int radixDigitPos, + index_t sliceSize, + index_t withinSliceStride, + scalar_t* data) { + // Clear out per-thread counts from a previous round +#pragma unroll + for (int i = 0; i < RadixSize; ++i) { + counts[i] = 0; + } + + if (threadIdx.x < RadixSize) { + smem[threadIdx.x] = 0; + } + __syncthreads(); + + // Scan over all the data. Upon a read, the warp will accumulate + // counts for each digit in the radix using warp voting. + for (index_t i = threadIdx.x; i < sliceSize; i += blockDim.x) { + bitwise_t val = + TopKTypeConfig<scalar_t>::convert(doLdg(&data[i * withinSliceStride])); + + bool hasVal = ((val & desiredMask) == desired); + bitwise_t digitInRadix = at::cuda::Bitfield<bitwise_t>::getBitfield( + val, radixDigitPos, RadixBits); + +#pragma unroll + for (uint32_t j = 0; j < RadixSize; ++j) { + bool vote = hasVal && (digitInRadix == j); +#if defined(USE_ROCM) + counts[j] += __popcll(WARP_BALLOT(vote)); +#else + counts[j] += __popc(WARP_BALLOT(vote, ACTIVE_MASK())); +#endif + } + } + + // Now, for each warp, sum values + if (at::cuda::getLaneId() == 0) { +#pragma unroll + for (uint32_t i = 0; i < RadixSize; ++i) { + gpuAtomicAddNoReturn(&smem[i], counts[i]); + } + } + + __syncthreads(); + + // For each thread, read in the total counts +#pragma unroll + for (uint32_t i = 0; i < RadixSize; ++i) { + counts[i] = smem[i]; + } + + __syncthreads(); +} + +// Over what radix we are selecting values +constexpr int RADIX_BITS = 2; // digits are base-(2 ^ RADIX_BITS) +constexpr int RADIX_SIZE = 4; // 2 ^ RADIX_BITS +constexpr int RADIX_MASK = (RADIX_SIZE - 1); + +// This finds the unique value `v` that matches the pattern +// ((v & desiredMask) == desired) in our sorted int format +template <typename scalar_t, typename bitwise_t, typename index_t> +__device__ scalar_t findPattern( + scalar_t* smem, + scalar_t* data, + index_t sliceSize, + index_t withinSliceStride, + bitwise_t desired, + bitwise_t desiredMask) { + if (threadIdx.x < 2) { + smem[threadIdx.x] = static_cast<scalar_t>(0); + } + __syncthreads(); + + // All threads participate in the loop, in order to sync on the flag + index_t numIterations = + round_up(sliceSize, static_cast<index_t>(blockDim.x)); + for (index_t i = threadIdx.x; i < numIterations; i += blockDim.x) { + bool inRange = (i < sliceSize); + scalar_t v = inRange ?
doLdg(&data[i * withinSliceStride]) + : static_cast(0); + + if (inRange && + ((TopKTypeConfig::convert(v) & desiredMask) == desired)) { + // There should not be conflicts if we are using findPattern, + // since the result is unique + smem[0] = static_cast(1); + smem[1] = v; // can't use val as the flag, since it could be 0 + } + + __syncthreads(); + + scalar_t found = smem[0]; + scalar_t val = smem[1]; + + __syncthreads(); + + // Check to see if a thread found the value + if (found != static_cast(0)) { + // all threads return this value + return val; + } + } + + // should not get here + assert(false); + return static_cast(0); +} + +// Returns the top-Kth element found in the data using radix selection +template +__device__ void radixSelect( + scalar_t* data, + index_t k, + bool largest, + index_t sliceSize, + index_t withinSliceStride, + int* smem, + scalar_t* topK) { + // Per-thread buckets into which we accumulate digit counts in our + // radix + int counts[RADIX_SIZE]; + + // We only consider elements x such that (x & desiredMask) == desired + // Initially, we consider all elements of the array, so the above + // statement is true regardless of input. + bitwise_t desired = 0; + bitwise_t desiredMask = 0; + + // We are looking for the top kToFind-th element when iterating over + // digits; this count gets reduced by elimination when counting + // successive digits + int kToFind = k; + + // We start at the most significant digit in our radix, scanning + // through to the least significant digit + for (int digitPos = sizeof(scalar_t) * 8 - RADIX_BITS; digitPos >= 0; + digitPos -= RADIX_BITS) { + // Count radix distribution for the current position and reduce + // across all threads + countRadixUsingMask< + scalar_t, + bitwise_t, + index_t, + int, + RADIX_SIZE, + RADIX_BITS>( + counts, + smem, + desired, + desiredMask, + digitPos, + sliceSize, + withinSliceStride, + data); + + auto found_unique = [&](int i, int count) -> bool { + /* All threads have the same value in counts here, so all */ + /* threads will return from the function. */ + if (count == 1 && kToFind == 1) { + /* There is a unique answer. */ + desired = at::cuda::Bitfield::setBitfield( + desired, i, digitPos, RADIX_BITS); + desiredMask = at::cuda::Bitfield::setBitfield( + desiredMask, RADIX_MASK, digitPos, RADIX_BITS); + + /* The answer is now the unique element v such that: */ + /* (v & desiredMask) == desired */ + /* However, we do not yet know what the actual element is. We */ + /* need to perform a search through the data to find the */ + /* element that matches this pattern. 
*/ + *topK = findPattern( + (scalar_t*)smem, + data, + sliceSize, + withinSliceStride, + desired, + desiredMask); + return true; + } + return false; + }; + auto found_non_unique = [&](int i, int count) -> bool { + if (count >= kToFind) { + desired = + at::cuda::Bitfield::setBitfield( + desired, i, digitPos, RADIX_BITS); + desiredMask = at::cuda::Bitfield::setBitfield( + desiredMask, RADIX_MASK, digitPos, RADIX_BITS); + + /* The top-Kth element v must now be one such that: */ + /* (v & desiredMask == desired) */ + /* but we haven't narrowed it down; we must check the next */ + /* least-significant digit */ + return true; + } + kToFind -= count; + return false; // continue the loop + }; + + // All threads participate in the comparisons below to know the + // final result + if (largest) { + // Process in descending order +#pragma unroll + for (int i = RADIX_SIZE - 1; i >= 0; --i) { + int count = counts[i]; + if (found_unique(i, count)) { + return; + } + if (found_non_unique(i, count)) { + break; + } + } + } else { + // Process in ascending order +#pragma unroll + for (int i = 0; i < RADIX_SIZE; ++i) { + int count = counts[i]; + if (found_unique(i, count)) { + return; + } + if (found_non_unique(i, count)) { + break; + } + } + } + } // end digitPos for + + // There is no unique result, but there is a non-unique result + // matching `desired` exactly + *topK = TopKTypeConfig::deconvert(desired); +} +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/TensorModeKernel.cuh b/voice_bridge/torch/include/ATen/native/cuda/TensorModeKernel.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c3220774ee202180b895648db409691b16180126 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/TensorModeKernel.cuh @@ -0,0 +1,435 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { +namespace native { + +// Used for a segmented reduction +struct ModeUnsignedBoolPair { + unsigned int val; + bool flag; +}; + +// In the kernel below, we have a common pattern of reducing (unsigned int, +// unsigned int) pairs of data +struct ModeUnsignedPair { + unsigned int val; + unsigned int index; +}; + +// Inclusive Scan via an upsweep/downsweep mechanism. Assumes: +// +// 1. Power2ScanSize is a power of 2. This code still works for collections that +// do not exactly contain a power of 2 number of elements, simply round up to +// the nearest power of 2 and then call. +// +// 2. That there are two-elements per thread, i.e. the size of the smem storage +// is 2 * blockDim.x * sizeof(T). 
+// +// Consider a (+)-Scan on the following elements: +// +// Upsweep: +// +// 0 1 2 3 4 5 6 7 +// 1 5 9 13 +// 6 22 +// 28 +// +// Downsweep: +// 15 +// 3 10 21 +template +__device__ void inclusivePrefixScan(T* smem, BinaryOp binop) { + // Reduce step ("upsweep") +#pragma unroll + for (int stride = 1; stride < Power2ScanSize; stride <<= 1) { + int index = (threadIdx.x + 1) * stride * 2 - 1; + if (index < Power2ScanSize) { + smem[index] = binop(smem[index], smem[index - stride]); + } + __syncthreads(); + } + + // Post-reduce step ("downsweep") +#pragma unroll + for (int stride = Power2ScanSize / 4; stride > 0; stride >>= 1) { + int index = (threadIdx.x + 1) * stride * 2 - 1; + if ((index + stride) < Power2ScanSize) { + smem[index + stride] = binop(smem[index + stride], smem[index]); + } + __syncthreads(); + } +} + +// Block-wide reduction where each thread locally reduces N +// values before letting a single warp take over - assumes +// threadVals is in registers, not shared memory +// +// If smem is not used again, there is no need to __syncthreads before this +// call. However, if smem will be used, e.g., this function is called in a loop, +// then __syncthreads is needed either before or afterwards to prevent non-0 +// threads overriding smem in the next loop before num-0 thread reads from it. +template +__device__ T reduceBlockWithNThreadLocalReductions( + T* smem, + T threadVals[N], + const unsigned int numVals, + ReduceOp reduceOp, + T init) { + int offset = threadIdx.x * N; + T local = offset < numVals ? threadVals[0] : init; + +#pragma unroll + for (int i = 1; i < N; ++i) { + ++offset; + T next = offset < numVals ? threadVals[i] : init; + local = reduceOp.combine(local, next); + } + + return cuda_utils::BlockReduce(local, reduceOp, init, smem); +} + +template +__device__ inline void swapVars(T& t1, T& t2) { + T tmp = t1; + t1 = t2; + t2 = tmp; +} + +template +__device__ inline void bitonicSwap( + K& kA, + V& vA, + bool& validA, + K& kB, + V& vB, + bool& validB, + bool dir, + const Comparator& comp) { + // Invalid entries always sort to the end + bool swap = (comp(kA, kB) && validA) || !validB; + if (swap == dir) { + swapVars(kA, kB); + swapVars(vA, vB); + swapVars(validA, validB); + } +}; + +template +__device__ inline void bitonicSwapKeys( + K& kA, + bool& validA, + K& kB, + bool& validB, + bool dir, + const Comparator& comp) { + bool swap = (comp(kA, kB) && validA) || !validB; + if (swap == dir) { + swapVars(kA, kB); + swapVars(validA, validB); + } +} + +template < + typename K, + typename IndexType, + int Power2SortSize, + typename Comparator> +__device__ inline void bitonicSortKeys( + K keys[Power2SortSize], + bool valid[Power2SortSize], + const Comparator& comp) { +#if !defined(USE_ROCM) +#pragma unroll +#endif + for (unsigned int size = 2; size < Power2SortSize; size *= 2) { + bool flag = ((threadIdx.x & (size / 2)) != 0); + +#if !defined(USE_ROCM) +#pragma unroll +#endif + for (unsigned int stride = size / 2; stride > 0; stride /= 2) { + __syncthreads(); + + unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); + bitonicSwapKeys( + keys[pos], + valid[pos], + keys[pos + stride], + valid[pos + stride], + flag, + comp); + } + } + +#if !defined(USE_ROCM) +#pragma unroll +#endif + for (unsigned int stride = Power2SortSize / 2; stride > 0; stride /= 2) { + __syncthreads(); + + unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); + bitonicSwapKeys( + keys[pos], + valid[pos], + keys[pos + stride], + valid[pos + stride], + false, + comp); + } + + 
__syncthreads(); +} + +// The mode kernel has the following characteristics: It uses internal shared +// memory buffers of Power2Size, which must be greater than the number of +// elements. Additionally, there is one block for every slice to calculate the +// mode for, and in each block there is one thread for every two elements. +// +// Both sorted and positions are assumed to be contiguous Tensors with the mode +// dimension as the innermost dim, such that we can get the particular slice for +// a Tensor via its linear block dimension * the slice size. +template +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11070 +__launch_bounds__(1024, 1) +#endif +__global__ void compute_mode( + T* input, + at::cuda::detail::TensorInfo values, + at::cuda::detail::TensorInfo indices, + int64_t sliceSize, + int64_t slices) { + int tidx = threadIdx.x; + int stidx = blockDim.x + threadIdx.x; // Second index this thread responsible for + + // First, we need to calculate the offset into the sorted Tensor that + // represents the start of the slice for this block to calculate the mode for. + // This offset is a combination of the gridIndices, and the number of elements + // in the slice. + unsigned int blockId = getLinearBlockId(); + unsigned int linearOffset = blockId * sliceSize; + + if (blockId >= slices) { + return; + } + + // shmem is a dynamically sized buffer we will use throughout the kernel to + // handle computation efficiently. The size of this shmem must be + // sizeof(T) * Power2Size + (2 * sizeof(unsigned int) * Power2Size) + // + // Initially, the buffer will be organized as follows: + // + // [smem (slice elements) | bmem (valid indices) | ] + extern __shared__ char shmem[]; + + // smem represents a proportion of the shared memory buffer that is used to + // store the elements from the slice: + T* smem = reinterpret_cast(shmem); + + // Each thread loads up to two elements from the Tensor into shared memory + if (tidx < sliceSize) { + smem[tidx] = c10::load(&input[linearOffset + tidx]); + } + if (stidx < sliceSize) { + smem[stidx] = c10::load(&input[linearOffset + stidx]); + } + + // Next, we initialize a boolean region of the buffer, offset by the loaded + // element smem region + bool* bmem = reinterpret_cast(&smem[Power2Size]); + + // The first use of this region stores bmem[i] = i < sliceSize to mark the + // valid components in the smem buffer + bmem[tidx] = tidx < sliceSize; + bmem[stidx] = stidx < sliceSize; + __syncthreads(); // barrier for smem, bmem initialization + + // First, sort the input slice in ascending order. smem contains the input + // elements, and bmem marks the valid indices + bitonicSortKeys( + smem, bmem, [&] GPU_LAMBDA(const auto& a, const auto& b) { + return a < b; + }); + __syncthreads(); // make no assumptions that the sort syncs at end + + // The next step of our algorithm is performing a block-wide comparison of + // neighboring elements. In particular, given an sorted input slice A, we + // produce an output slice B, such that B[i] = 1 if A[i-i] != A[i], otherwise + // 0. + // + // Given the input A = [0, 0, 1, 1, 2, 2, 2, 4, 5, 6, 6, 7, 8] + // B = [1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1] + // + // In particular, we can think of B[i] true indicating the start of a sequence + // of equal values in the sorted list. Similarly, we will also store the + // negation of B, which we'll call C. In particular, we can think of C[i] = + // true iff A[i-1] == A[i] in our original sorted slice. 
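+ // (Restating the definitions precisely, since the indexing above is easy to
+ // misread: B[0] = true and B[i] = (A[i-1] != A[i]) for i > 0, so B marks the
+ // start of each run of equal values, while C[i] = !B[i] marks positions that
+ // continue a run.)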
+ // + // C = [0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0] + + // We overwrite bmem, and treat the rest of shared memory as a buffer of + // (index, flag) pairs where the index represents values from C, and the flag + // represents values from B. + // + // [smem (sorted slice) | ubpmem (index, flag pairs)] + + struct ModeUnsignedBoolPair* ubpmem = + reinterpret_cast(&smem[Power2Size]); + + if (tidx == 0) { + ubpmem[0].flag = true; + ubpmem[0].val = 0; + } + + // Compares elements (0, 1), (2, 3), ... and sets 1, 3, ... + ubpmem[tidx * 2 + 1].flag = + smem[tidx * 2] != smem[tidx * 2 + 1]; // (0, 1), (1, 2), etc. + ubpmem[tidx * 2 + 1].val = !ubpmem[tidx * 2 + 1].flag; + + // Compares elements (1, 2), (3, 4), ... and sets 2, 4, ... + if (((tidx + 1) * 2) < Power2Size) { + ubpmem[(tidx + 1) * 2].flag = + smem[((tidx + 1) * 2) - 1] != smem[(tidx + 1) * 2]; + ubpmem[(tidx + 1) * 2].val = !ubpmem[(tidx + 1) * 2].flag; + } + __syncthreads(); // barrier for ubpmem initialization + + // Next, we perform a segmented prefix sum on the neighboring elements, where + // the presence of a one indicates the start of a segment. In this case B acts + // as the segment start flags, and C is the buffer to be summed: + // + // Input (C) = [0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0] + // Flag (B) = [1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1] + // Output (C) = [0, 1, 0, 1, 0, 1, 2, 0, 0, 0, 1, 0, 0] + // + // Afterwards, the (index) components of the ubpmem buffer contain the lengths + // of the segments (minus 1), i.e. the counts of each element in the original + // input. + inclusivePrefixScan( + ubpmem, [=] GPU_LAMBDA(const auto& a, const auto& b) { + ModeUnsignedBoolPair c; + c.val = a.flag ? a.val : a.val + b.val; + c.flag = a.flag | b.flag; + return c; + }); + // assumes scan syncs at the end + + // Next, we reinterpret the ubpmem buffer as pairs of unsigned integers (i.e. + // we treat the boolean flag regions as integers). We initialize these to + // represent indices, and we'll call this buffer I + struct ModeUnsignedPair* uupmem = + reinterpret_cast(ubpmem); + + // At this point, we need to find the maximum element in lengths buffer C. + // This element will represent the count (-1) of the mode. Because of the + // way we have set up the problem, the index where this mode occurs will + // also be the location of the mode value in the sorted array, e.g. + // + // smem = [0, 0, 1, 1, 1, 2] + // C = [0, 1, 0, 1, 2, 0] + // I = [0, 1, 2, 3, 4, 5] + // ^ + // maximum value, also aligned with mode = 1 + // + // We perform a block wide max-reduction of the C buffer, but we also need the + // indices to come along with it, so we utilize the uupmem construction. + // + // At the end we need to return the ModeUnsignedPair containing index = 4, val + // = 2, which represents the max + + // In practice, we will make each thread locally reduce 2 values in its + // registers prior to the global block-wide reduction. Note that instead of + // tidx/stidx, we utilize tidx * 2, tidx * 2 + 1, so each thread deals with + // adjacent elements. This is because the reduce code below relies on thread + // elements to be adjacent. + struct ModeUnsignedPair uup[2]; + uup[0].index = tidx * 2; + uup[0].val = ubpmem[tidx * 2].val; + uup[1].index = tidx * 2 + 1; + uup[1].val = ubpmem[tidx * 2 + 1].val; + __syncthreads(); + + struct ModeUnsignedPair max = {0, 0}; + + struct MaxOp { + inline __device__ ModeUnsignedPair combine(ModeUnsignedPair a, ModeUnsignedPair b) const { + return b.val > a.val ? 
b : a; + } + + inline __device__ ModeUnsignedPair warp_shfl_down(ModeUnsignedPair acc, int offset) const { + ModeUnsignedPair ret; + ret.index = WARP_SHFL_DOWN(acc.index, offset); + ret.val = WARP_SHFL_DOWN(acc.val, offset); + return ret; + } + } max_op; + + max = reduceBlockWithNThreadLocalReductions<2>( + uupmem, + uup, + sliceSize, + max_op, + max); + + // Store the mode in shared memory for use in finding the mode in the input + // slice + __shared__ T mode; + + // Given the above constraints, the mode is the value at the reduced index in + // the original sorted element buffer + if (tidx == 0) { + mode = smem[max.index]; + } + __syncthreads(); // broadcast mode + + // Finally, we need to find "an" index of the mode in the input + // Tensor. The API does not constrain which index we pick, but here + // we always pick the largest index. We store the index if the value + // is the mode, or 0 otherwise. Then find the maximum value. + // + // Again we reduce 2 elements in the thread's registers prior to the + // block-wide reduction + unsigned mode_index[2] = {0u, 0u}; + if (tidx * 2 < sliceSize) { + const unsigned idx = tidx * 2; + mode_index[0] = c10::load(&input[linearOffset + idx]) == mode ? idx : 0u; + } + if (tidx * 2 + 1 < sliceSize) { + const unsigned idx = tidx * 2 + 1; + mode_index[1] = c10::load(&input[linearOffset + idx]) == mode ? idx : 0u; + } + + struct MaxIndexOp { + inline __device__ unsigned combine(unsigned a, unsigned b) const { + return b > a ? b : a; + } + + inline __device__ unsigned warp_shfl_down(unsigned acc, int offset) const { + return WARP_SHFL_DOWN(acc, offset); + } + } max_index_op; + + int64_t index = reduceBlockWithNThreadLocalReductions<2>( + reinterpret_cast(&shmem[0]), + mode_index, + sliceSize, + max_index_op, + 0u); + + // Finally, we have the mode, and an index where it occurs. 
We use a single + // thread to place this in the appropriate output position + if (tidx == 0) { + unsigned int outputOffset = + at::cuda::detail::IndexToOffset::get( + blockId, values); + values.data[outputOffset] = mode; + indices.data[outputOffset] = index; + } +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/TensorModeKernel.h b/voice_bridge/torch/include/ATen/native/cuda/TensorModeKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..b5660747997d4eb1ad56f79ec2d1f519921c05c2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/TensorModeKernel.h @@ -0,0 +1,19 @@ +#pragma once +#include + +namespace at { +class TensorBase; +} + +namespace at { +namespace native { + +void launch_fused_mode_kernel( + const TensorBase &values, const TensorBase &indices, + const TensorBase &self, int64_t slice_size, int64_t slices); + +void launch_apply_mode_kernel( + const TensorBase &values, const TensorBase &indices, + const TensorBase &self, int64_t dim, int64_t ndim); + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/TensorTopK.h b/voice_bridge/torch/include/ATen/native/cuda/TensorTopK.h new file mode 100644 index 0000000000000000000000000000000000000000..9eebf2cd6040c4f2df9ad64599910ba0e0cee58f --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/TensorTopK.h @@ -0,0 +1,14 @@ +#pragma once +#include + +namespace at { +class TensorBase; +} + +namespace at { +namespace native { +void launch_gather_topk_kernel( + const TensorBase& self, + int64_t k, int64_t dim, bool largest, + const TensorBase& values, const TensorBase& indices); +}} diff --git a/voice_bridge/torch/include/ATen/native/cuda/UniqueCub.cuh b/voice_bridge/torch/include/ATen/native/cuda/UniqueCub.cuh new file mode 100644 index 0000000000000000000000000000000000000000..6e1cccc2e175cb26b1ee12690d67d1514e95f246 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/UniqueCub.cuh @@ -0,0 +1,16 @@ +#include + +namespace at { +namespace native { +namespace internal { + +template +std::tuple unique_cuda_template( + const Tensor& self, + const bool consecutive, + const bool return_inverse, + const bool return_counts); + +} // namespace internal +} // namespace at +} // namespace native diff --git a/voice_bridge/torch/include/ATen/native/cuda/UpSample.cuh b/voice_bridge/torch/include/ATen/native/cuda/UpSample.cuh new file mode 100644 index 0000000000000000000000000000000000000000..09e460640df8deeaddebcf230962a7a221a98786 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/UpSample.cuh @@ -0,0 +1,370 @@ +#pragma once +#include +#include + +#include +#include +#include +#include + +#include + +namespace at { +namespace native { + +namespace upsample { +// TODO: Remove duplicate declaration. +TORCH_API c10::SmallVector compute_output_size( + c10::IntArrayRef input_size, // Full input tensor size. + at::OptionalIntArrayRef output_size, + c10::optional> scale_factors); +} // namespace upsample + +namespace upsample_cuda { + +// TODO: Remove duplication with Upsample.h (CPU). +inline c10::optional get_scale_value(c10::optional> scales, int idx) { + if (!scales) { + return nullopt; + } + return scales->at(idx); +} + +} // namespace upsample_cuda + + +/* TODO: move this to a common place */ +template +__device__ inline scalar_t min(scalar_t a, scalar_t b) { + return a < b ? a : b; +} + +template +__device__ inline scalar_t max(scalar_t a, scalar_t b) { + return a > b ? 
a : b; +} + +// NOTE [ Nearest neighbor upsampling kernel implementation ] +// +// The nearest neighbor upsampling kernel implementation is symmetrical as +// expected. We launch kernels with threads mapping to destination tensors where +// kernels write data to, each thread reads data from the source tensor, this +// means: +// 1. In the forward kernel, +// src_xxx refers to properties of input tensors; +// dst_xxx refers to properties of output tensors; +// scale_factor is the ratio of src_size to dst_size; +// 2. In the backward kernel, +// src_xxx refers to properties of grad_output tensors; +// dst_xxx refers to properties of grad_input tensors; +// scale_factor is the ratio of src_size to dst_size; +// +// Because of this, we need to take the reciprocal of the scale defined by +// upsample layer during forward path. The motivation is to avoid slow +// division in the kernel code, so we can use faster multiplication instead. +// This is not necessary during backward path, since the scale_factor is already +// the reciprocal of corresponding scale_factor used in the forward path due to +// the swap of source and destination tensor. +// +// Similarly, since the mapping from grad_input to grad_output during backward +// is the reverse of the mapping of output to input, we need to have opposite +// mapping functions to compute the source index. + +// see NOTE [ Nearest neighbor upsampling kernel implementation ] +template +__host__ __forceinline__ static accscalar_t compute_scales_value( + const c10::optional scale, + int64_t src_size, + int64_t dst_size) { + // FIXME: remove magic > 0 after we ensure no models were serialized with -1 defaults. + return (scale.has_value() && scale.value() > 0.) ? (accscalar_t)(1.0 / scale.value()) + : (accscalar_t)src_size / dst_size; +} + +// see NOTE [ Nearest neighbor upsampling kernel implementation ] +template +__host__ __forceinline__ static accscalar_t compute_scales_value_backwards( + const c10::optional scale, + int64_t src_size, + int64_t dst_size) { + // FIXME: remove magic > 0 after we ensure no models were serialized with -1 defaults. + return (scale.has_value() && scale.value() > 0.) ? (accscalar_t)scale.value() + : (accscalar_t)src_size / dst_size; +} + +template +__host__ __forceinline__ static accscalar_t area_pixel_compute_scale( + int input_size, + int output_size, + bool align_corners, + const c10::optional scale) { + if(align_corners) { + if(output_size > 1) { + return (accscalar_t)(input_size - 1) / (output_size - 1); + } + else { + return static_cast(0); + } + } + else{ + return compute_scales_value(scale, input_size, output_size); + } +} + +template +__device__ __forceinline__ static accscalar_t area_pixel_compute_source_index( + accscalar_t scale, + int dst_index, + bool align_corners, + bool cubic) { + if (align_corners) { + return scale * dst_index; + } else { + accscalar_t src_idx = scale * (dst_index + static_cast(0.5)) - + static_cast(0.5); + // See Note[Follow Opencv resize logic] + return (!cubic && src_idx < static_cast(0)) + ? static_cast(0) + : src_idx; + } +} + +// see NOTE [ Nearest neighbor upsampling kernel implementation ] +__device__ __forceinline__ static int nearest_neighbor_compute_source_index( + const float scale, + int dst_index, + int input_size) { + // index_f32 = (output_index) * scale + // input_index = round(index_f32) + // Same as a buggy OpenCV INTER_NEAREST + // We keep this method for BC and consider as deprecated. 
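+ // For example, with input_size = 3 upsampled to 4 outputs (scale = 0.75),
+ // this formula maps dst_index 0,1,2,3 -> floor(0), floor(0.75), floor(1.5),
+ // floor(2.25) = 0,0,1,2, whereas the exact variant referenced below maps
+ // them -> floor(0.375), floor(1.125), floor(1.875), floor(2.625) = 0,1,1,2;
+ // the two disagree at dst_index 1.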
+ // See nearest_neighbor_exact_compute_source_index as replacement + const int src_index = + min(static_cast(floorf((dst_index) * scale)), input_size - 1); + return src_index; +} + +__device__ __forceinline__ static int nearest_neighbor_exact_compute_source_index( + const float scale, + int dst_index, + int input_size) { + // index_f32 = (output_index + 0.5) * scale - 0.5 + // input_index = round(index_f32) + // Same as Pillow and Scikit-Image/Scipy ndi.zoom + const int src_index = + min(static_cast(floorf((dst_index + static_cast(0.5)) * scale)), input_size - 1); + return src_index; +} + +// see NOTE [ Nearest neighbor upsampling kernel implementation ] +__device__ __forceinline__ static int nearest_neighbor_bw_compute_source_index( + const float scale, + int dst_index, + int output_size) { + // Equivalent to buggy OpenCV INTER_NEAREST + // We keep this method for BC and consider as deprecated. + // See nearest_neighbor_exact_bw_compute_source_index as replacement + const int src_index = + min(static_cast(ceilf(dst_index * scale)), output_size); + return src_index; +} + +// see NOTE [ Nearest neighbor upsampling kernel implementation ] +__device__ __forceinline__ static int nearest_neighbor_exact_bw_compute_source_index( + const float scale, + int dst_index, + int output_size) { + // Equivalent to Pillow and Scikit-Image/Scipy ndi.zoom + const int src_index = + min(static_cast(ceilf(dst_index * scale - static_cast(0.5))), output_size); + return src_index; +} + +/* Used by UpSampleBicubic2d.cu */ +template +__device__ __forceinline__ static scalar_t upsample_get_value_bounded( + const PackedTensorAccessor64& data, + int batch, + int channel, + int height, + int width, + int y, + int x) { + int access_y = max(min(y, height - 1), 0); + int access_x = max(min(x, width - 1), 0); + return data[batch][channel][access_y][access_x]; +} + +/* Used by UpSampleBicubic2d.cu */ +template +__device__ __forceinline__ static void upsample_increment_value_bounded( + PackedTensorAccessor64& data, + int batch, + int channel, + int height, + int width, + int y, + int x, + accscalar_t value) { + int access_y = max(min(y, height - 1), 0); + int access_x = max(min(x, width - 1), 0); + /* TODO: result here is truncated to scalar_t, + check: https://github.com/pytorch/pytorch/pull/19630#discussion_r281426912 + */ + gpuAtomicAddNoReturn( + &data[batch][channel][access_y][access_x], static_cast(value)); +} + +// Based on +// https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm +template +__device__ __forceinline__ static accscalar_t cubic_convolution1( + accscalar_t x, + accscalar_t A) { + return ((A + 2) * x - (A + 3)) * x * x + 1; +} + +template +__device__ __forceinline__ static accscalar_t cubic_convolution2( + accscalar_t x, + accscalar_t A) { + return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; +} + +template +__device__ __forceinline__ static void get_cubic_upsampling_coefficients( + accscalar_t coeffs[4], + accscalar_t t) { + accscalar_t A = -0.75; + + accscalar_t x1 = t; + coeffs[0] = cubic_convolution2(x1 + 1.0, A); + coeffs[1] = cubic_convolution1(x1, A); + + // opposite coefficients + accscalar_t x2 = 1.0 - t; + coeffs[2] = cubic_convolution1(x2, A); + coeffs[3] = cubic_convolution2(x2 + 1.0, A); +} + +template +__device__ __forceinline__ static accscalar_t cubic_interp1d( + scalar_t x0, + scalar_t x1, + scalar_t x2, + scalar_t x3, + accscalar_t t) { + accscalar_t coeffs[4]; + get_cubic_upsampling_coefficients(coeffs, t); + + return x0 * coeffs[0] + x1 * coeffs[1] + x2 * 
coeffs[2] + x3 * coeffs[3]; +} + +namespace upsample_antialias { + +// taken from +// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/ +// src/libImaging/Resample.c#L20-L29 +struct BilinearFilterFunctor { + + template + __device__ accscalar_t operator()(accscalar_t x) const { + if (x < 0) { + x = -x; + } + if (x < 1) { + return 1 - x; + } + return 0; + } + + static const int size = 2; +}; + +// taken from +// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/ +// src/libImaging/Resample.c#L46-L62 +struct BicubicFilterFunctor { + + template + __device__ accscalar_t operator()(accscalar_t x) const { + // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm + const accscalar_t a = -0.5; + if (x < 0) { + x = -x; + } + if (x < 1) { + return ((a + 2) * x - (a + 3)) * x * x + 1; + } + if (x < 2) { + return (((x - 5) * x + 8) * x - 4) * a; + } + return 0; + } + + static const int size = 4; +}; + +template +__device__ __forceinline__ static void _compute_weights_span( + const int i, + const int input_size, + const accscalar_t scale, + const accscalar_t support, + int& xmin, + int& xsize, + accscalar_t& center) { + center = scale * (i + static_cast(0.5)); + xmin = max(static_cast(center - support + static_cast(0.5)), static_cast(0)); + xsize = min(static_cast(center + support + static_cast(0.5)), input_size) - xmin; +} + +template +__device__ __forceinline__ static void _compute_weights( + scalar_t* wt_ptr, + const accscalar_t scale, + int interp_size, + const interp_filter_t& interp_filter, + accscalar_t xmin_m_center, + int xsize) { + + accscalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0; + accscalar_t total_w = 0.0; + int j = 0; + for (j = 0; j < xsize; j++) { + accscalar_t w = interp_filter((j + xmin_m_center + static_cast(0.5)) * invscale); + wt_ptr[j] = static_cast(w); + total_w += w; + } + for (j = 0; j < xsize; j++) { + if (total_w != 0.0) { + wt_ptr[j] /= total_w; + } + } + for (; j < interp_size; j++) { + wt_ptr[j] = static_cast(0.0); + } +} + +template +__device__ __forceinline__ static accscalar_t interpolate_aa_single_dim( + const scalar_t* src, + const scalar_t* weights, + int size) { + scalar_t t = static_cast(*src); + scalar_t wts = static_cast(weights[0]); + accscalar_t output = t * wts; + + int j = 1; + for (; j < size; j++) { + wts = static_cast(weights[j]); + t = static_cast(*(src + j)); + output += t * wts; + } + return output; +} + +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/block_reduce.cuh b/voice_bridge/torch/include/ATen/native/cuda/block_reduce.cuh new file mode 100644 index 0000000000000000000000000000000000000000..fa75c71f8acafdfa1afcb717bedd16c2befbf3ed --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/block_reduce.cuh @@ -0,0 +1,105 @@ +#pragma once + +#include + +#include +#include + +namespace at { +namespace native { +namespace cuda_utils { + +constexpr int kCUDABlockReduceNumThreads = 512; +// Algorithmic limitation: BlockReduce does two WarpReduce calls, each +// of which reduces C10_WARP_SIZE elements. So, at most +// C10_WARP_SIZE**2 elements can be reduced at a time. +// NOTE: This is >= the max block size on current hardware anyway (1024). +constexpr int kCUDABlockReduceMaxThreads = C10_WARP_SIZE * C10_WARP_SIZE; + +// Sums `val` accross all threads in a warp. 
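+// (Note that after the shuffle loop, only lane 0 of each warp holds the
+// complete warp-wide sum; the remaining lanes hold partial sums.)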
+// +// Assumptions: +// - The size of each block should be a multiple of `C10_WARP_SIZE` +template +__inline__ __device__ T WarpReduceSum(T val) { +#pragma unroll + for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { + val += WARP_SHFL_DOWN(val, offset); + } + return val; +} + +struct Block1D { + static __forceinline__ __device__ int Tid() { return threadIdx.x; } + + static __forceinline__ __device__ int Warps() { + return blockDim.x / C10_WARP_SIZE; + } +}; + +struct Block2D { + static __forceinline__ __device__ int Tid() { + return threadIdx.x + threadIdx.y * blockDim.x; + } + + static __forceinline__ __device__ int Warps() { + return blockDim.x * blockDim.y / C10_WARP_SIZE; + } +}; + +// Sums `val` across all threads in a block. +// +// Warning: the return value is only valid for thread 0. +// Assumptions: +// - The size of each block should be a multiple of `C10_WARP_SIZE` +// - `shared` should be a pointer to shared memory with size of, at least, +// `sizeof(T) * number_of_warps` +template +__inline__ __device__ T BlockReduceSum(T val, T* shared) { + const int tid = B::Tid(); + const int lid = tid % C10_WARP_SIZE; + const int wid = tid / C10_WARP_SIZE; + val = WarpReduceSum(val); + __syncthreads(); // prevent races when BlockReduces are called in a row. + if (lid == 0) { + shared[wid] = val; + } + __syncthreads(); + val = (tid < B::Warps()) ? shared[lid] : T(0); + if (wid == 0) { + val = WarpReduceSum(val); + } + return val; +} + +template +__inline__ __device__ T WarpReduce(T val, const ReduceOp& op) { +#pragma unroll + for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { + val = op.combine(val, op.warp_shfl_down(val, offset)); + } + return val; +} + +template +__inline__ __device__ T +BlockReduce(T val, const ReduceOp& op, const T& identity_element, T* shared) { + const int tid = B::Tid(); + const int lid = tid % C10_WARP_SIZE; + const int wid = tid / C10_WARP_SIZE; + val = WarpReduce(val, op); + __syncthreads(); // prevent races when BlockReduces are called in a row. + if (lid == 0) { + shared[wid] = val; + } + __syncthreads(); + val = (tid < B::Warps()) ? 
shared[lid] : identity_element; + if (wid == 0) { + val = WarpReduce(val, op); + } + return val; +} + +} // namespace cuda_utils +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/fused_adam_amsgrad_impl.cuh b/voice_bridge/torch/include/ATen/native/cuda/fused_adam_amsgrad_impl.cuh new file mode 100644 index 0000000000000000000000000000000000000000..46e893e564d906d83f5d9dd9db7945e85797b2b0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/fused_adam_amsgrad_impl.cuh @@ -0,0 +1,24 @@ +#pragma once +#include + +namespace at { namespace native { + +void _fused_adam_cuda_impl_( + at::TensorList params, + at::TensorList grads, + at::TensorList exp_avgs, + at::TensorList exp_avg_sqs, + at::TensorList max_exp_avg_sqs, + at::TensorList state_steps, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const c10::optional& grad_scale, + const c10::optional& found_inf +); + +} } // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/fused_adam_impl.cuh b/voice_bridge/torch/include/ATen/native/cuda/fused_adam_impl.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a76ba566970ff938f0a0bf690b26f55c46bbbdd0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/fused_adam_impl.cuh @@ -0,0 +1,23 @@ +#pragma once +#include + +namespace at { namespace native { + +void _fused_adam_cuda_impl_( + at::TensorList params, + at::TensorList grads, + at::TensorList exp_avgs, + at::TensorList exp_avg_sqs, + at::TensorList state_steps, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const c10::optional& grad_scale, + const c10::optional& found_inf +); + +} } // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/fused_adam_utils.cuh b/voice_bridge/torch/include/ATen/native/cuda/fused_adam_utils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..8d7c410915c119d143cdf288e82671a708d9acb6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/fused_adam_utils.cuh @@ -0,0 +1,166 @@ +#pragma once +#include +#include +#include +#include + + +namespace at { namespace native { + +namespace { + +constexpr uint8_t kParamIdx = 0; +constexpr uint8_t kGradIdx = 1; +constexpr uint8_t kExpAvgIdx = 2; +constexpr uint8_t kExpAvgSqIdx = 3; +constexpr uint8_t kMaxExpAvgSqIdx = 4; + +template +C10_DEVICE __forceinline__ void adam_math( + scalar_type r_args[depth][kILP], + const float* step_count, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool maximize, + const bool amsgrad, + const float* grad_scale_ptr, + const float* found_inf_ptr +) { +#pragma unroll + for (int ii = 0; ii < kILP; ii++) { + // Load values. + opmath_t param = static_cast(r_args[kParamIdx][ii]); + opmath_t grad = static_cast(r_args[kGradIdx][ii]); + if (grad_scale_ptr) { + grad /= (static_cast(*grad_scale_ptr)); + } + const opmath_t grad_to_store = grad; + if (maximize) { + grad = -grad; + } + opmath_t exp_avg = static_cast(r_args[kExpAvgIdx][ii]); + opmath_t exp_avg_sq = static_cast(r_args[kExpAvgSqIdx][ii]); + opmath_t max_exp_avg_sq; + if (amsgrad) { + max_exp_avg_sq = static_cast(r_args[kMaxExpAvgSqIdx][ii]); + } + + // Update param, grad, 1st and 2nd order momentum. 
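+ // In equations, the code below computes, per element (with t = *step_count,
+ // and with grad already unscaled and negated for maximize above):
+ //   grad       <- grad + weight_decay * param            (if weight_decay != 0)
+ //   exp_avg    <- beta1 * exp_avg + (1 - beta1) * grad
+ //   exp_avg_sq <- beta2 * exp_avg_sq + (1 - beta2) * grad^2
+ //   param      <- param - (lr / (1 - beta1^t)) * exp_avg /
+ //                 (sqrt(exp_avg_sq) / sqrt(1 - beta2^t) + eps)
+ // where exp_avg_sq is replaced by the running max_exp_avg_sq when amsgrad.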
+ if (weight_decay != 0) { + grad += param * weight_decay; + } + // todo(crcrpar): use lerp + // ref: https://developer.nvidia.com/blog/lerp-faster-cuda/ + exp_avg = beta1 * exp_avg + (1 - beta1) * grad; + exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * grad * grad; + + if (amsgrad) { + max_exp_avg_sq = std::max(max_exp_avg_sq, exp_avg_sq); + } + + const opmath_t bias_correction1 = 1 - at::native::pow_(beta1, *step_count); + const opmath_t bias_correction2 = 1 - at::native::pow_(beta2, *step_count); + + const opmath_t step_size = lr / bias_correction1; + + const opmath_t bias_correction2_sqrt = std::sqrt(bias_correction2); + + opmath_t denom; + if (amsgrad) { + denom = (std::sqrt(max_exp_avg_sq) / bias_correction2_sqrt) + eps; + } else { + denom = (std::sqrt(exp_avg_sq) / bias_correction2_sqrt) + eps; + } + + param -= step_size * exp_avg / denom; + + // Store results. + r_args[kParamIdx][ii] = param; + if (grad_scale_ptr) { + r_args[kGradIdx][ii] = grad_to_store; + } + r_args[kExpAvgIdx][ii] = exp_avg; + r_args[kExpAvgSqIdx][ii] = exp_avg_sq; + if (amsgrad) { + r_args[kMaxExpAvgSqIdx][ii] = max_exp_avg_sq; + } + } +} + +// [note: Conditional Gradient Store when `optimizer.step` is called by GradScaler] +// When a user is training their model(s) with an FP16 AMP recipe, +// parameter updates are done via `grad_scaler.step(optimizer)` instead of `optimizer.step()`. +// For most optimizers, GradScaler unscales gradients on behalf of those optimizers. +// Also, before `.step`, it makes sure that all the gradients involved are finite, which incurs a device sync. +// On the other hand, fused optimizers set their member variable of `_step_supports_amp_scaling` to `True` +// in order to remove the device sync above. This means that fused optimizers have to have +// their CUDA kernels (a) unscale gradients and (b) skip parameter updates accordingly. +// To be functionally on par with `torch.optim` optimizers and `_multi_tensor` ones, +// the kernel below writes out gradients only when `grad_scale_ptr != nullptr. 
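To make the per-element arithmetic of `adam_math` above concrete, here is a plain host-side sketch of the same update for a single parameter (hypothetical helper `adam_step_host`, illustrative only; it mirrors the order of operations in the kernel, not the vectorized loads/stores):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// Host-side model of one adam_math element update (illustration only).
void adam_step_host(double& param, double grad, double& exp_avg,
                    double& exp_avg_sq, double& max_exp_avg_sq, double step,
                    double lr, double beta1, double beta2,
                    double weight_decay, double eps,
                    bool maximize, bool amsgrad) {
  if (maximize) grad = -grad;                      // same order as adam_math
  if (weight_decay != 0) grad += param * weight_decay;
  exp_avg = beta1 * exp_avg + (1 - beta1) * grad;
  exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * grad * grad;
  if (amsgrad) max_exp_avg_sq = std::max(max_exp_avg_sq, exp_avg_sq);

  const double bias_correction1 = 1 - std::pow(beta1, step);
  const double bias_correction2 = 1 - std::pow(beta2, step);
  const double step_size = lr / bias_correction1;
  const double second_moment = amsgrad ? max_exp_avg_sq : exp_avg_sq;
  const double denom = std::sqrt(second_moment) / std::sqrt(bias_correction2) + eps;
  param -= step_size * exp_avg / denom;
}

int main() {
  double p = 1.0, m = 0.0, v = 0.0, vmax = 0.0;
  adam_step_host(p, /*grad=*/0.1, m, v, vmax, /*step=*/1,
                 /*lr=*/1e-3, 0.9, 0.999, /*weight_decay=*/0.0, 1e-8,
                 /*maximize=*/false, /*amsgrad=*/false);
  std::printf("param after one step: %f\n", p);  // ~0.999: moves against grad
}
```

On the first step the bias corrections dominate (1 - 0.9 = 0.1 and 1 - 0.999 = 0.001), so the update reduces to roughly lr * sign-like(grad), which is the usual warm-start behavior of Adam.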
+template +struct FusedAdamMathFunctor { + static_assert(depth == 4 || depth == 5, "depth of 4 for Adam, depth of 5 for Adam with AMSGrad."); + using opmath_t = at::opmath_type; + C10_DEVICE __forceinline__ void operator()( + int chunk_size, + FusedOptimizerTensorListMetadata& tl, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool maximize, + const bool amsgrad, + const float* grad_scale_ptr, + const float* found_inf_ptr + ) { + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.numel_for_tensor[tensor_loc]; + + if (found_inf_ptr && *found_inf_ptr == 1) { + return; + } + float *step_count = reinterpret_cast(tl.state_steps_addresses[tensor_loc]); + + scalar_type* args[depth]; + const bool all_aligned{init_args(args, tl, chunk_idx, chunk_size, tensor_loc)}; + n -= chunk_idx * chunk_size; + scalar_type r_args[depth][kILP]; + + if ((n % kILP == 0) && (chunk_size % kILP == 0) && all_aligned) { + for (int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) { +#pragma unroll + for (int i = 0; i < depth; i++) { + load_store(r_args[i], args[i], 0, i_start); + } + adam_math( + r_args, step_count, lr, beta1, beta2, weight_decay, eps, maximize, amsgrad, grad_scale_ptr, found_inf_ptr); +#pragma unroll + for (int i = 0; i < depth; i++) { + if (i != kGradIdx || grad_scale_ptr) { + load_store(args[i], r_args[i], i_start, 0); + } + } + } + } else { + for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) { + load_args(r_args, args, i_start, chunk_size, n); + adam_math( + r_args, step_count, lr, beta1, beta2, weight_decay, eps, maximize, amsgrad, grad_scale_ptr, found_inf_ptr); +#pragma unroll + for (int i = 0; i < depth; i++) { + if (i != kGradIdx || grad_scale_ptr) { + store_args(args[i], r_args[i], i_start, chunk_size, n); + } + } + } + } + } +}; +} // namespace + +}} // namespace at::native diff --git a/voice_bridge/torch/include/ATen/native/cuda/im2col.cuh b/voice_bridge/torch/include/ATen/native/cuda/im2col.cuh new file mode 100644 index 0000000000000000000000000000000000000000..06eef13208c67e88924dea3030ba732aa0671da0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/im2col.cuh @@ -0,0 +1,345 @@ +#pragma once + +#include +#include +#include + +#include + +namespace at { +namespace native { + +using namespace at::cuda::detail; + +// Kernel for fast unfold+copy +// (borrowed from Caffe: +// https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu) +// CUDA_NUM_THREADS = 1024 + +template +C10_LAUNCH_BOUNDS_1(1024) +__global__ void im2col_kernel( + const int64_t n, + const dt* data_im, + const int64_t height, + const int64_t width, + const int64_t kernel_height, + const int64_t kernel_width, + const int64_t pad_height, + const int64_t pad_width, + const int64_t stride_height, + const int64_t stride_width, + const int64_t dilation_height, + const int64_t dilation_width, + const int64_t height_col, + const int64_t width_col, + dt* data_col) { + CUDA_KERNEL_LOOP(index, n) { + int64_t w_out = index % width_col; + + int64_t idx = index / width_col; + + int64_t h_out = idx % height_col; + int64_t channel_in = idx / height_col; + int64_t channel_out = channel_in * kernel_height * kernel_width; + int64_t h_in = h_out * stride_height - pad_height; + int64_t w_in = w_out * stride_width - pad_width; + + dt* col = data_col + (channel_out * height_col + h_out) * width_col + 
w_out; + const dt* im = data_im + (channel_in * height + h_in) * width + w_in; + + for (int64_t i = 0; i < kernel_height; ++i) { + for (int64_t j = 0; j < kernel_width; ++j) { + int64_t h = h_in + i * dilation_height; + int64_t w = w_in + j * dilation_width; + *col = (h >= 0 && w >= 0 && h < height && w < width) + ? im[i * dilation_height * width + j * dilation_width] + : static_cast<dt>
(0); + col += height_col * width_col; + } + } + } +} + +template +void im2col( + cudaStream_t stream, + const dt* data_im, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t height_col, + const int64_t width_col, + const int64_t kernel_height, + const int64_t kernel_width, + const int64_t pad_height, + const int64_t pad_width, + const int64_t stride_height, + const int64_t stride_width, + const int64_t dilation_height, + const int64_t dilation_width, + dt* data_col) { + // We are going to launch channels * height_col * width_col kernels, each + // kernel responsible for copying a single-channel grid. + int64_t num_kernels = channels * height_col * width_col; + // Launch CUDA_NUM_THREADS = 1024 + im2col_kernel<<>>( + num_kernels, + data_im, + height, + width, + kernel_height, + kernel_width, + pad_height, + pad_width, + stride_height, + stride_width, + dilation_height, + dilation_width, + height_col, + width_col, + data_col); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +__forceinline__ __device__ void col2im_device( + const int64_t index, + const dt* data_col, + const int64_t height, + const int64_t width, + const int64_t channels, + const int64_t kernel_h, + const int64_t kernel_w, + const int64_t pad_height, + const int64_t pad_width, + const int64_t stride_height, + const int64_t stride_width, + const int64_t dilation_height, + const int64_t dilation_width, + const int64_t height_col, + const int64_t width_col, + dt* data_im) { + accT val = static_cast(0); + const int64_t w_im = index % width + pad_width; + const int64_t h_im = (index / width) % height + pad_height; + const int64_t c_im = index / (width * height); + int64_t kernel_extent_w = (kernel_w - 1) * dilation_width + 1; + int64_t kernel_extent_h = (kernel_h - 1) * dilation_height + 1; + // compute the start and end of the output + const int64_t w_col_start = (w_im < kernel_extent_w) + ? 0 + : (w_im - kernel_extent_w) / stride_width + 1; + const int64_t w_col_end = ::min(w_im / stride_width + 1, width_col); + const int64_t h_col_start = (h_im < kernel_extent_h) + ? 0 + : (h_im - kernel_extent_h) / stride_height + 1; + const int64_t h_col_end = ::min(h_im / stride_height + 1, height_col); + + // TODO: use LCM of stride and dilation to avoid unnecessary loops + for (int64_t h_col = h_col_start; h_col < h_col_end; h_col += 1) { + for (int64_t w_col = w_col_start; w_col < w_col_end; w_col += 1) { + int64_t h_k = (h_im - h_col * stride_height); + int64_t w_k = (w_im - w_col * stride_width); + if (h_k % dilation_height == 0 && w_k % dilation_width == 0) { + h_k /= dilation_height; + w_k /= dilation_width; + int64_t data_col_index = + (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col + + h_col) * + width_col + + w_col; + val += data_col[data_col_index]; + } + } + } + data_im[index] = static_cast
(val); +} + +template +C10_LAUNCH_BOUNDS_1(512) +__global__ void col2im_kernel( + const int64_t n, + const dt* data_col, + const int64_t height, + const int64_t width, + const int64_t channels, + const int64_t kernel_h, + const int64_t kernel_w, + const int64_t pad_height, + const int64_t pad_width, + const int64_t stride_height, + const int64_t stride_width, + const int64_t dilation_height, + const int64_t dilation_width, + const int64_t height_col, + const int64_t width_col, + dt* data_im) { + CUDA_KERNEL_LOOP(index, n) { + col2im_device( + index, + data_col, + height, + width, + channels, + kernel_h, + kernel_w, + pad_height, + pad_width, + stride_height, + stride_width, + dilation_height, + dilation_width, + height_col, + width_col, + data_im); + } +} + +template +void col2im( + cudaStream_t stream, + const dt* data_col, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t height_col, + const int64_t width_col, + const int64_t patch_height, + const int64_t patch_width, + const int64_t pad_height, + const int64_t pad_width, + const int64_t stride_height, + const int64_t stride_width, + const int64_t dilation_height, + const int64_t dilation_width, + dt* data_im) { + int64_t num_kernels = channels * height * width; + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. + // CUDA_NUM_THREADS = 1024 + col2im_kernel + <<>>( + num_kernels, + data_col, + height, + width, + channels, + patch_height, + patch_width, + pad_height, + pad_width, + stride_height, + stride_width, + dilation_height, + dilation_width, + height_col, + width_col, + data_im); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +C10_LAUNCH_BOUNDS_1(512) +__global__ void col2im_batched_kernel( + const int64_t n, + const dt* data_col, + const int64_t col_batch_stride, + const int64_t nbatch, + const int64_t height, + const int64_t width, + const int64_t channels, + const int64_t kernel_h, + const int64_t kernel_w, + const int64_t pad_height, + const int64_t pad_width, + const int64_t stride_height, + const int64_t stride_width, + const int64_t dilation_height, + const int64_t dilation_width, + const int64_t height_col, + const int64_t width_col, + dt* data_im, + const int64_t im_batch_stride) { + using accT = at::acc_type; + const auto im_numel = n * nbatch; + + CUDA_KERNEL_LOOP_TYPE(index, im_numel, int64_t) { + const auto ibatch = index / n; + const auto slice_index = index % n; + + col2im_device( + slice_index, + data_col + ibatch * col_batch_stride, + height, + width, + channels, + kernel_h, + kernel_w, + pad_height, + pad_width, + stride_height, + stride_width, + dilation_height, + dilation_width, + height_col, + width_col, + data_im + ibatch * im_batch_stride); + } +} + +template +void col2im_batched( + cudaStream_t stream, + const dt* data_col, + const int64_t col_batch_stride, + const int64_t nbatch, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t height_col, + const int64_t width_col, + const int64_t patch_height, + const int64_t patch_width, + const int64_t pad_height, + const int64_t pad_width, + const int64_t stride_height, + const int64_t stride_width, + const int64_t dilation_height, + const int64_t dilation_width, + dt* data_im, + const int64_t im_batch_stride) { + const int64_t num_kernels = channels * height * width; + const int64_t output_numel = nbatch * num_kernels; + if (output_numel == 0) { + return; // No work to do + } + + // To avoid 
involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. + // CUDA_NUM_THREADS = 1024 + col2im_batched_kernel<<>>( + num_kernels, + data_col, + col_batch_stride, + nbatch, + height, + width, + channels, + patch_height, + patch_width, + pad_height, + pad_width, + stride_height, + stride_width, + dilation_height, + dilation_width, + height_col, + width_col, + data_im, + im_batch_stride); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/cuda/jit_utils.h b/voice_bridge/torch/include/ATen/native/cuda/jit_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..8206f67316e113cc2b8ba568a300ea4599379cc8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/jit_utils.h @@ -0,0 +1,201 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace at { namespace cuda { namespace jit { + +enum class BinaryFuncVariant {NoScalar, RhsScalar, LhsScalar}; + +struct NvrtcFunction { + CUmodule module = CUmodule(); + CUfunction function = nullptr; +}; + +struct KernelDescriptor { + std::string name; + std::string f; + c10::ScalarType f_inputs_type; + c10::ScalarType result_type; + c10::SmallVector extra_args_types; + int nInputs, nOutputs; +}; + +// Helper function to return a vector +// corresponding to the type of the arguments in parameter pack. +template +c10::SmallVector get_extra_args_types() { + return {c10::CppTypeToScalarType::value ...}; +} + +template < + typename result_type, + typename f_inputs_type, + typename... ExtraArgs> +KernelDescriptor make_kernel_descriptor( + std::string name, + std::string f, + int nInputs, + int nOutputs) { + KernelDescriptor ret; + ret.name = std::move(name); + ret.f = std::move(f); + ret.f_inputs_type = c10::CppTypeToScalarType::value; + ret.result_type = c10::CppTypeToScalarType::value; + ret.extra_args_types = get_extra_args_types(); + ret.nInputs = nInputs; + ret.nOutputs = nOutputs; + return ret; +} + +inline int can_vectorize_up_to(size_t default_alignment, void *pointer) { + auto ip = reinterpret_cast(pointer); + if (ip % (4 * default_alignment) == 0) { + return 4; + } + if (ip % (2 * default_alignment) == 0) { + return 2; + } + return 1; +} + +inline int can_vectorize_up_to(const KernelDescriptor &desc, c10::ArrayRef pointers) { + TORCH_INTERNAL_ASSERT(desc.nOutputs == 1); + TORCH_INTERNAL_ASSERT(static_cast(pointers.size()) == 1 + desc.nInputs); + + // Deals with output + auto result_size = c10::scalarTypeToTypeMeta(desc.result_type).itemsize(); + int result = can_vectorize_up_to(result_size, pointers[0]); + + // Incorporates input(s) + auto input_size = c10::scalarTypeToTypeMeta(desc.f_inputs_type).itemsize(); + for (auto i : c10::irange(1, pointers.size())) { + result = std::min(result, can_vectorize_up_to(input_size, pointers[i])); + } + + return result; +} + +std::string generate_code( + int nInputs, + int nOutputs, + const std::string& func, + const std::string& name, + const std::string& f_input_type, + const std::string& compute_type, + const std::string& result_type, + bool contiguous, + bool dynamic_casting, + BinaryFuncVariant scalar_pos, + c10::SmallVector& extra_args_typenames, + bool vectorized=false, + int vec_size=0, + bool return_by_ref=false); + +std::string generate_code( + const KernelDescriptor &desc, + bool contiguous, + bool dynamic_casting, + BinaryFuncVariant scalar_pos, + bool vectorized=false, + int 
vec_size=0, + bool return_by_ref=false); + +std::string generate_reduction_code( + int nOutputs, + const std::string& func, + const std::string& name, + const int vt0, + const std::string& f_inputs_type, + const std::string& reduction_accum_type, + const std::string& result_type, + bool contiguous, + bool vectorized, + int vec_size, + int max_threads_codegen); + +std::string generate_reduction_code( + const KernelDescriptor &desc, + const int vt0, + bool contiguous, + bool vectorized, + int vec_size, + int max_threads_codegen); + +NvrtcFunction jit_pwise_function( + const std::string& code, + const std::string& kernel_name); + +void launch_jitted_pwise_function( + NvrtcFunction function, + void* args[], + const dim3 nBlocks, + const dim3 kBlockSize, + const int smem=0); + +template +struct delayed_false : std::false_type { +}; + +// Defines type names +// NOTE: General case is instantiated only for invalid types. +// All the valid types have specialization using the TYPE_NAME_FN +// macro below. +template +inline std::string typeName() { + // we can't use static_assert(false) directly as the + // program will be not compiled even if the template is not + // instantiated, so we use `delayed_false` + // to make sure compiler doesn't eagerly raise + // fail this assertion. + static_assert(delayed_false::value, "invalid type for jiterator"); + return "void"; +} + +#define TYPE_NAME_FN(ctype, name) \ +template <> inline std::string typeName(){ \ + return std::string(#ctype); \ +} + +AT_FORALL_SCALAR_TYPES(TYPE_NAME_FN) +#undef TYPE_NAME_FN +// JIT uses std::complex directly, because nvRTC compile programs +// with -default-device, so there is no such issue like: +// "std::sin(complex) is __host__ only" +template <> inline std::string typeName(){ + return "bool"; +} +template <> inline std::string typeName>(){ + return "std::complex"; +} +template <> inline std::string typeName>(){ + return "std::complex"; +} +template <> inline std::string typeName>(){ + return "std::complex"; +} +template <> inline std::string typeName(){ + return "at::Half"; +} +template <> inline std::string typeName(){ + return "at::BFloat16"; +} + +#define TYPE_NAME_CASE(ctype, scalartype) \ + case ScalarType::scalartype: return typeName(); +inline std::string typeName(ScalarType t) { + switch (t) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(TYPE_NAME_CASE) + default: + TORCH_CHECK(false, "invalid type for jiterator"); + } +} +#undef TYPE_NAME_CASE + +}}} // namespace at::cuda::jit diff --git a/voice_bridge/torch/include/ATen/native/cuda/reduction_template.cuh b/voice_bridge/torch/include/ATen/native/cuda/reduction_template.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a38edb538256d6102a4a611e6c9582ddae1901d2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/reduction_template.cuh @@ -0,0 +1,680 @@ +namespace at { +namespace cuda { +//windows doesn't like large string literals, so split in two +const std::string reduction_template_0 = R"ESCAPE( + #define C10_HOST_DEVICE __host__ __device__ + #define C10_DEVICE __device__ + #if defined(__clang__) && defined(__HIP__) + #ifndef __forceinline__ + #define __forceinline__ inline __attribute__((always_inline)) + #endif + // until ROCm support for kernel asserts is restored + #define assert(expr) (static_cast(0)) + #endif + + template + __device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) + { + #if defined(__clang__) && defined(__HIP__) + return __shfl_down(value, delta, 
width); + #else + return __shfl_down_sync(mask, value, delta, width); + #endif + } + + + #if ${complex} + template + __device__ __forceinline__ std::complex WARP_SHFL_DOWN(std::complex value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) + { + return std::complex( + #if defined(__clang__) && defined(__HIP__) + __shfl_down(value.real(), delta, width), + __shfl_down(value.imag(), delta, width)); + #else + __shfl_down_sync(mask, value.real(), delta, width), + __shfl_down_sync(mask, value.imag(), delta, width)); + #endif + } + #endif + + // aligned vector generates vectorized load/store on CUDA + template + struct alignas(sizeof(scalar_t) * vec_size) aligned_vector { + scalar_t val[vec_size]; + }; + + + C10_HOST_DEVICE static void reduce_fraction(size_t &numerator, size_t &denominator) { + // get GCD of num and denom using Euclid's algorithm. + // Can replace this with std::gcd if we ever support c++17. + size_t a = denominator; + size_t b = numerator; + while (b != 0) { + a %= b; + // swap(a,b) + size_t tmp = a; + a = b; + b = tmp; + } + + // a is now the GCD + numerator /= a; + denominator /= a; + } + + + + + struct ReduceConfig { + //has to match host-side ReduceConfig in the eager code + static constexpr int BLOCK_X = 0; + static constexpr int BLOCK_Y = 1; + static constexpr int CTA = 2; + + static constexpr int input_vec_size = 4; + int element_size_bytes; + int num_inputs; + int num_outputs; + int step_input = 1; + int step_output = 1; + int ctas_per_output = 1; + int input_mult[3] = {0, 0, 0}; + int output_mult[2] = {0, 0}; + + int block_width; + int block_height; + int num_threads; + + bool vectorize_input = false; + int output_vec_size = 1; + + C10_HOST_DEVICE bool should_block_x_reduce() const { + return input_mult[BLOCK_X] != 0; + } + + C10_HOST_DEVICE bool should_block_y_reduce() const { + return input_mult[BLOCK_Y] != 0; + } + + C10_HOST_DEVICE bool should_global_reduce() const { + return input_mult[CTA] != 0; + } + + C10_DEVICE bool should_store(int output_idx) const { + return output_idx < num_outputs && + (!should_block_x_reduce() || threadIdx.x == 0) && + (!should_block_y_reduce() || threadIdx.y == 0); + } + + C10_DEVICE bool should_reduce_tail() const { + return (!should_block_y_reduce() || threadIdx.y == 0) && + (!should_global_reduce() || blockIdx.y == 0); + } + + C10_HOST_DEVICE int input_idx() const { + int lane = threadIdx.x; + int warp = threadIdx.y; + int cta2 = blockIdx.y; + return (lane * input_mult[BLOCK_X] + + warp * input_mult[BLOCK_Y] + + cta2 * input_mult[CTA]); + } + + template + C10_HOST_DEVICE int output_idx() const { + int lane = threadIdx.x; + int warp = threadIdx.y; + int cta1 = blockIdx.x; + return (lane * output_mult[BLOCK_X] + + warp * output_mult[BLOCK_Y] + + cta1 * step_output) * output_vec_size; + } + + C10_DEVICE int shared_memory_offset(int offset) const { + return threadIdx.x + (threadIdx.y + offset) * blockDim.x; + } + + C10_DEVICE int staging_memory_offset(int cta2) const { + int offset = cta2 + blockIdx.x * gridDim.y; + if (!should_block_x_reduce()) { + offset = threadIdx.x + offset * blockDim.x; + } + return offset; + } + + + }; + + +//TODO this will need to be different for more generic reduction functions +namespace reducer { + + using scalar_t = ${scalar_type}; + using arg_t = ${reduction_accum_type}; + using out_scalar_t = ${result_type}; + + + inline __device__ ${functor} + + inline __device__ out_scalar_t project(arg_t arg) { + return (out_scalar_t) arg; + } + + inline __device__ arg_t warp_shfl_down(arg_t 
arg, int offset) { + return WARP_SHFL_DOWN(arg, offset); + } + + inline __device__ arg_t translate_idx(arg_t acc, int64_t /*idx*/) { + return acc; + } + + // wrap a normal reduction that ignores the index + inline __device__ arg_t reduce(arg_t acc, arg_t val, int64_t idx) { + return combine(acc, val); + } +} + + +struct ReduceJitOp { + using scalar_t = ${scalar_type}; + using arg_t = ${reduction_accum_type}; + using out_scalar_t = ${result_type}; + + using InputCalculator = OffsetCalculator<1>; + using OutputCalculator = OffsetCalculator<2>; + +// static constexpr bool can_accumulate_in_output = +// std::is_convertible::value +// && std::is_convertible::value; + + static constexpr int input_vec_size = ReduceConfig::input_vec_size; + + arg_t ident; + ReduceConfig config; + InputCalculator input_calc; + OutputCalculator output_calc; + const void* src; + const char* dst[2]; //it accepts at most two destinations + // acc_buf used for accumulation among sub Tensor Iterator when accumulation on + // output is not permissible + void* acc_buf; + // cta_buf used for accumulation between blocks during global reduction + void* cta_buf; + int* semaphores; + int64_t base_idx; + bool accumulate; + bool final_output; + int noutputs; + + + C10_DEVICE void run() const { + extern __shared__ char shared_memory[]; + uint32_t output_idx = config.output_idx<${output_vec_size}>(); + uint32_t input_idx = config.input_idx(); + auto base_offsets1 = output_calc.get(output_idx)[1]; + + using arg_vec_t = Array; + arg_vec_t value; + + if (output_idx < config.num_outputs && input_idx < config.num_inputs) { + const scalar_t* input_slice = (const scalar_t*)((const char*)src + base_offsets1); + + value = thread_reduce<${output_vec_size}>(input_slice); + } + + if (config.should_block_y_reduce()) { + value = block_y_reduce<${output_vec_size}>(value, shared_memory); + } + if (config.should_block_x_reduce()) { + value = block_x_reduce<${output_vec_size}>(value, shared_memory); + } + + using out_ptr_vec_t = Array; + using offset_vec_t = Array; + offset_vec_t base_offsets; + out_ptr_vec_t out; + + #pragma unroll + for (int i = 0; i < ${output_vec_size}; i++) { + base_offsets[i] = output_calc.get(output_idx + i)[0]; + out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]); + } + + arg_vec_t* acc = nullptr; + if (acc_buf != nullptr) { + size_t numerator = sizeof(arg_t); + size_t denominator = sizeof(out_scalar_t); + reduce_fraction(numerator, denominator); + acc = (arg_vec_t*)((char*)acc_buf + (base_offsets[0] * numerator / denominator)); + } + + if (config.should_global_reduce()) { + value = global_reduce<${output_vec_size}>(value, acc, shared_memory); + } else if (config.should_store(output_idx)) { + if (accumulate) { + #pragma unroll + for (int i = 0; i < ${output_vec_size}; i++) { + value[i] = reducer::translate_idx(value[i], base_idx); + } + } + + if (acc == nullptr) { + if (accumulate) { + value = accumulate_in_output<${output_vec_size}>(out, value); + } + if (final_output) { + set_results_to_output<${output_vec_size}>(value, base_offsets); + } else { + #pragma unroll + for (int i = 0; i < ${output_vec_size}; i++) { + *(out[i]) = get_accumulated_output(out[i], value[i]); + } + } + } else { + if (accumulate) { + #pragma unroll + for (int i = 0; i < ${output_vec_size}; i++) { + value[i] = reducer::combine((*acc)[i], value[i]); + } + } + if (final_output) { + set_results_to_output<${output_vec_size}>(value, base_offsets); + } else { + *acc = value; + } + } + } + } + + template + C10_DEVICE Array thread_reduce(const scalar_t* 
data) const { + if (config.vectorize_input) { + assert(output_vec_size == 1); + // reduce at the header of input_slice where memory is not aligned, + // so that thread_reduce will have an aligned memory to work on. + return {input_vectorized_thread_reduce_impl(data)}; + } else { + uint32_t element_stride = input_calc.strides_[0][0] / sizeof(scalar_t); + bool is_contiguous = (input_calc.dims == 1 && element_stride == 1); + if (is_contiguous) { + return thread_reduce_impl(data, [](uint32_t idx) { return idx; }); + } else if (input_calc.dims == 1) { + return thread_reduce_impl(data, [&](uint32_t idx) { return idx * element_stride; }); + } else { + return thread_reduce_impl(data, [&](uint32_t idx) { return input_calc.get(idx)[0] / sizeof(scalar_t); }); + } + } + } + + C10_DEVICE arg_t input_vectorized_thread_reduce_impl(const scalar_t* data) const { + uint32_t end = config.num_inputs; + + // Handle the head of input slice where data is not aligned + arg_t value = ident; + constexpr int align_bytes = alignof(aligned_vector); + constexpr int align_elements = align_bytes / sizeof(scalar_t); + int shift = ((int64_t)data) % align_bytes / sizeof(scalar_t); + if (shift > 0) { + data -= shift; + end += shift; + if(threadIdx.x >= shift && threadIdx.x < align_elements && config.should_reduce_tail()){ + value = reducer::reduce(value, data[threadIdx.x], threadIdx.x - shift); + } + end -= align_elements; + data += align_elements; + shift = align_elements - shift; + } + + // Do the vectorized reduction + using load_t = aligned_vector; + + uint32_t idx = config.input_idx(); + const uint32_t stride = config.step_input; + + // Multiple accumulators to remove dependency between unrolled loops. + arg_t value_list[input_vec_size]; + value_list[0] = value; + + #pragma unroll + for (int i = 1; i < input_vec_size; i++) { + value_list[i] = ident; + } + + scalar_t values[input_vec_size]; + + load_t *values_vector = reinterpret_cast(&values[0]); + + while (idx * input_vec_size + input_vec_size - 1 < end) { + *values_vector = reinterpret_cast(data)[idx]; + #pragma unroll + for (uint32_t i = 0; i < input_vec_size; i++) { + value_list[i] = reducer::reduce(value_list[i], values[i], shift + idx * input_vec_size + i); + } + idx += stride; + } + + // tail + uint32_t tail_start = end - end % input_vec_size; + if (config.should_reduce_tail()) { + int idx = tail_start + threadIdx.x; + if (idx < end) { + value_list[0] = reducer::reduce(value_list[0], data[idx], idx + shift); + } + } + + // combine accumulators + #pragma unroll + for (int i = 1; i < input_vec_size; i++) { + value_list[0] = reducer::combine(value_list[0], value_list[i]); + } + return value_list[0]; + } + + template + C10_DEVICE Array thread_reduce_impl(const scalar_t* data_, offset_calc_t calc) const { + uint32_t idx = config.input_idx(); + const uint32_t end = config.num_inputs; + const uint32_t stride = config.step_input; + const int vt0=${vt0}; + + using arg_vec_t = Array; + using load_t = aligned_vector; + const load_t* data = reinterpret_cast(data_); + + // Multiple accumulators to remove dependency between unrolled loops. 
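+ // (The vt0 accumulators are combined only once, at the very end, so the
+ // unrolled iterations can issue independently instead of forming one
+ // serial dependency chain through a single register.)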
+ arg_vec_t value_list[vt0]; + + #pragma unroll + for (int i = 0; i < vt0; i++) { + #pragma unroll + for (int j = 0; j < output_vec_size; j++) { + value_list[i][j] = ident; + } + } + + load_t values[vt0]; + + while (idx + (vt0 - 1) * stride < end) { + #pragma unroll + for (uint32_t i = 0; i < vt0; i++) { + values[i] = data[calc(idx + i * stride) / output_vec_size]; + } + #pragma unroll + for (uint32_t i = 0; i < vt0; i++) { + #pragma unroll + for (uint32_t j = 0; j < output_vec_size; j++) { + value_list[i][j] = reducer::reduce(value_list[i][j], values[i].val[j], idx + i * stride); + } + } + idx += stride * vt0; + } + + // tail + int idx_ = idx; + #pragma unroll + for (uint32_t i = 0; i < vt0; i++) { + if (idx >= end) { + break; + } + values[i] = data[calc(idx) / output_vec_size]; + idx += stride; + } + idx = idx_; + #pragma unroll + for (uint32_t i = 0; i < vt0; i++) { + if (idx >= end) { + break; + } + #pragma unroll + for (uint32_t j = 0; j < output_vec_size; j++) { + value_list[i][j] = reducer::reduce(value_list[i][j], values[i].val[j], idx); + } + idx += stride; + } + + // combine accumulators + #pragma unroll + for (int i = 1; i < vt0; i++) { + #pragma unroll + for (uint32_t j = 0; j < output_vec_size; j++) { + value_list[0][j] = reducer::combine(value_list[0][j], value_list[i][j]); + } + } + return value_list[0]; + } + template + C10_DEVICE Array block_x_reduce(Array value, char* shared_memory) const { + using args_vec_t = Array; + int dim_x = blockDim.x; + args_vec_t* shared = (args_vec_t*)shared_memory; + if (dim_x > warpSize) { + int address_base = threadIdx.x + threadIdx.y*blockDim.x; + shared[address_base] = value; + for (int offset = dim_x/2; offset >= warpSize; offset >>= 1) { + __syncthreads(); + if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) { + args_vec_t other = shared[address_base + offset]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = reducer::combine(value[i], other[i]); + } + shared[address_base] = value; + } + } + dim_x = warpSize; + } + + __syncthreads(); + + for (int offset = 1; offset < dim_x; offset <<= 1) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + arg_t other = reducer::warp_shfl_down(value[i], offset); + value[i] = reducer::combine(value[i], other); + } + } + return value; + } + + template + C10_DEVICE Array block_y_reduce(Array value, char* shared_memory) const { + using args_vec_t = Array; + args_vec_t* shared = (args_vec_t*)shared_memory; + shared[config.shared_memory_offset(0)] = value; + for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { + __syncthreads(); + if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { + args_vec_t other = shared[config.shared_memory_offset(offset)]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = reducer::combine(value[i], other[i]); + } + shared[config.shared_memory_offset(0)] = value; + } + } + return value; + } + )ESCAPE"; + + const std::string reduction_template_1 = R"ESCAPE( + + C10_DEVICE bool mark_block_finished() const { + __shared__ bool is_last_block_done_shared; + + __syncthreads(); + if (threadIdx.x == 0 && threadIdx.y == 0) { + int prev_blocks_finished = atomicAdd(&semaphores[blockIdx.x], 1); + is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1); + } + + __syncthreads(); + + return is_last_block_done_shared; + } + + template + C10_DEVICE Array accumulate_in_output( + Array out, + Array value + ) const { + Array ret; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + 
ret[i] = reducer::combine(*(out[i]), value[i]); + } + return ret; + } + + + C10_DEVICE out_scalar_t get_accumulated_output( + out_scalar_t* out, arg_t value + ) const { + assert(!final_output); + return (out_scalar_t)value; + } + + template + C10_DEVICE void set_results(const T x, const uint32_t base_offset) const { + assert(noutputs == 1); + auto res = (out_scalar_t*)((char*)dst[0] + base_offset); + *res = x; + } + +//TODO - multi-output reduction - we won't be able to use thrust::pair +//just explicitly specify typed output reads/writes +//Currently implemented for max of two outputs +// template +// C10_DEVICE void set_results(const thrust::pair x, const index_t base_offset) const { +// if (noutputs >= 1) { +// auto res0 = (T1*)((char*)dst[0] + base_offset); +// *res0 = x.first; +// } +// if (noutputs >= 2) { +// // base offset is computed assuming element size being sizeof(T1), so we need to make a +// // correction to obtain the correct base offset +// auto res1 = (T2*) ((char *) dst[1] + base_offset / sizeof(T1) * sizeof(T2)); +// *res1 = x.second; +// } +// } + + template + C10_DEVICE void set_results_to_output(Array value, Array base_offset) const { + assert(final_output); + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + set_results(reducer::project(value[i]), base_offset[i]); + } + } + + template + C10_DEVICE Array global_reduce(Array value, Array *acc, char* shared_memory) const { + using arg_vec_t = Array; + using out_ptr_vec_t = Array; + using offset_vec_t = Array; + + arg_vec_t* reduce_buffer = (arg_vec_t*)cta_buf; + uint32_t output_idx = config.output_idx(); + offset_vec_t base_offsets; + out_ptr_vec_t out; + + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + base_offsets[i] = output_calc.get(output_idx + i)[0]; + out[i] = (out_scalar_t*)((char*)dst[0] + base_offsets[i]); + } + + bool should_store = config.should_store(output_idx); + if (should_store) { + uint32_t offset = config.staging_memory_offset(blockIdx.y); + reduce_buffer[offset] = value; + } + + __threadfence(); // make sure writes are globally visible + __syncthreads(); // if multiple warps in this block wrote to staging, make sure they're all done + bool is_last_block_done = mark_block_finished(); + + if (is_last_block_done) { + value = ident; + if (config.should_block_x_reduce()) { + uint32_t input_offset = threadIdx.x + threadIdx.y * blockDim.x; + uint32_t step = blockDim.x * blockDim.y; + for (; input_offset < config.ctas_per_output; input_offset += step) { + uint32_t idx = config.staging_memory_offset(input_offset); + arg_vec_t next = reduce_buffer[idx]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = reducer::combine(value[i], next[i]); + } + } + } else { + uint32_t input_offset = threadIdx.y; + uint32_t step = blockDim.y; + for (; input_offset < config.ctas_per_output; input_offset += step) { + uint32_t idx = config.staging_memory_offset(input_offset); + arg_vec_t next = reduce_buffer[idx]; + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = reducer::combine(value[i], next[i]); + } + } + } + value = block_y_reduce(value, shared_memory); + if (config.should_block_x_reduce()) { + value = block_x_reduce(value, shared_memory); + } + if (should_store) { + if (accumulate) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = reducer::translate_idx(value[i], base_idx); + } + } + + if (acc == nullptr) { + if (accumulate) { + value = accumulate_in_output(out, value); + } + if (final_output) { + 
set_results_to_output(value, base_offsets); + } else { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + *(out[i]) = get_accumulated_output(out[i], value[i]); + } + } + } else { + if (accumulate) { + #pragma unroll + for (int i = 0; i < output_vec_size; i++) { + value[i] = reducer::combine((*acc)[i], value[i]); + } + } + if (final_output) { + set_results_to_output(value, base_offsets); + } else { + *acc = value; + } + } + } + } + + return value; + } +}; + +extern "C" +__launch_bounds__(${max_threads_lb}, 4) +__global__ void reduction_${name}_kernel(ReduceJitOp r){ + r.run(); +} +)ESCAPE"; + +const std::string reduction_template = reduction_template_0 + reduction_template_1; + + +const std::string &get_reduction_template() { + return reduction_template; +} + +}} diff --git a/voice_bridge/torch/include/ATen/native/cuda/thread_constants.h b/voice_bridge/torch/include/ATen/native/cuda/thread_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..651053d663e4c204753cdfa4ae31ed60fed34152 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/thread_constants.h @@ -0,0 +1,22 @@ +#pragma once +#include + +// Marks a lambda as executable on both the host and device. The __host__ +// attribute is important so that we can access static type information from +// the host, even if the function is typically only executed on the device. +#ifndef GPU_LAMBDA +#define GPU_LAMBDA __host__ __device__ +#endif + +#if defined(USE_ROCM) +constexpr int num_threads() { + return 256; +} +#else +constexpr uint32_t num_threads() { + return C10_WARP_SIZE * 4; +} +#endif + +constexpr int thread_work_size() { return 4; } +constexpr int block_work_size() { return thread_work_size() * num_threads(); } diff --git a/voice_bridge/torch/include/ATen/native/cuda/vol2col.cuh b/voice_bridge/torch/include/ATen/native/cuda/vol2col.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7ab719bc819ebfbc741fd0f29020a85cdf45f001 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/cuda/vol2col.cuh @@ -0,0 +1,263 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace at { +namespace native { + +using namespace at::cuda::detail; + +// Kernel for fast unfold+copy on volumes +template +__global__ void vol2col_kernel( + const int n, + const T* data_vol, + const int depth, + const int height, + const int width, + const int ksize_t, + const int ksize_h, + const int ksize_w, + const int pad_t, + const int pad_h, + const int pad_w, + const int stride_t, + const int stride_h, + const int stride_w, + const int dilation_t, + const int dilation_h, + const int dilation_w, + const int depth_col, + const int height_col, + const int width_col, + T* data_col) { + CUDA_KERNEL_LOOP(index, n) { + int w_out = index % width_col; + index /= width_col; + int h_out = index % height_col; + index /= height_col; + int t_out = index % depth_col; + int channel_in = index / depth_col; + int channel_out = channel_in * ksize_t * ksize_h * ksize_w; + int t_in = t_out * stride_t - pad_t; + int h_in = h_out * stride_h - pad_h; + int w_in = w_out * stride_w - pad_w; + data_col += + ((channel_out * depth_col + t_out) * height_col + h_out) * width_col + + w_out; + data_vol += ((channel_in * depth + t_in) * height + h_in) * width + w_in; + for (int i = 0; i < ksize_t; ++i) { + for (int j = 0; j < ksize_h; ++j) { + for (int k = 0; k < ksize_w; ++k) { + int t = t_in + i * dilation_t; + int h = h_in + j * dilation_h; + int w = w_in + k * dilation_w; + *data_col = (t >= 0 && h 
>= 0 && w >= 0 && t < depth && h < height && + w < width) + ? data_vol + [i * dilation_t * height * width + j * dilation_h * width + + k * dilation_w] + : static_cast(0); + data_col += depth_col * height_col * width_col; + } + } + } + } +} + +template +void vol2col( + cudaStream_t stream, + const T* data_vol, + const int channels, + const int depth, + const int height, + const int width, + const int depth_col, + const int height_col, + const int width_col, + const int ksize_t, + const int ksize_h, + const int ksize_w, + const int pad_t, + const int pad_h, + const int pad_w, + const int stride_t, + const int stride_h, + const int stride_w, + const int dilation_t, + const int dilation_h, + const int dilation_w, + T* data_col) { + // We are going to launch channels * depth_col * height_col * width_col + // kernels, each kernel responsible for copying a single-channel grid. + // We cast an operand to int64 so that the product will not overflow + const auto num_kernels = static_cast(channels) * depth_col * height_col * width_col; + // Launch + vol2col_kernel<<>>( + num_kernels, + data_vol, + depth, + height, + width, + ksize_t, + ksize_h, + ksize_w, + pad_t, + pad_h, + pad_w, + stride_t, + stride_h, + stride_w, + dilation_t, + dilation_h, + dilation_w, + depth_col, + height_col, + width_col, + data_col); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +__global__ void vol2im_kernel( + const unsigned n, + const T* data_col, + const unsigned depth, + const unsigned height, + const unsigned width, + const unsigned channels, + const unsigned kernel_t, + const unsigned kernel_h, + const unsigned kernel_w, + const unsigned pad_t, + const unsigned pad_h, + const unsigned pad_w, + const unsigned stride_t, + const unsigned stride_h, + const unsigned stride_w, + const unsigned dilation_t, + const unsigned dilation_h, + const unsigned dilation_w, + const unsigned depth_col, + const unsigned height_col, + const unsigned width_col, + T* data_vol) { + CUDA_KERNEL_LOOP(index, n) { + accT val = static_cast(0); + const unsigned w_im = index % width + pad_w; + const unsigned h_im = (index / width) % height + pad_h; + const unsigned t_im = (index / width / height) % depth + pad_t; + const unsigned c_im = index / (width * height * depth); + unsigned kernel_extent_w = (kernel_w - 1) * dilation_w + 1; + unsigned kernel_extent_h = (kernel_h - 1) * dilation_h + 1; + unsigned kernel_extent_t = (kernel_t - 1) * dilation_t + 1; + // compute the start and end of the output + const unsigned w_col_start = + (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1; + const unsigned w_col_end = std::min(w_im / stride_w + 1, width_col); + const unsigned h_col_start = + (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1; + const unsigned h_col_end = std::min(h_im / stride_h + 1, height_col); + const unsigned t_col_start = + (t_im < kernel_extent_t) ? 
0 : (t_im - kernel_extent_t) / stride_t + 1; + const unsigned t_col_end = std::min(t_im / stride_t + 1, depth_col); + // TODO: use LCM of stride and dilation to avoid unnecessary loops + for (unsigned t_col = t_col_start; t_col < t_col_end; t_col += 1) { + for (unsigned h_col = h_col_start; h_col < h_col_end; h_col += 1) { + for (unsigned w_col = w_col_start; w_col < w_col_end; w_col += 1) { + unsigned t_k = (t_im - t_col * stride_t); + unsigned h_k = (h_im - h_col * stride_h); + unsigned w_k = (w_im - w_col * stride_w); + if (t_k % dilation_t == 0 && h_k % dilation_h == 0 && + w_k % dilation_w == 0) { + t_k /= dilation_t; + h_k /= dilation_h; + w_k /= dilation_w; + const int64_t idx_k = + ((c_im * kernel_t + t_k) * kernel_h + h_k) * kernel_w + w_k; + const int64_t data_col_index = + ((idx_k * depth_col + t_col) * + height_col + h_col) * + width_col + w_col; + val += data_col[data_col_index]; + } + } + } + } + data_vol[index] = static_cast(val); + } +} + +template +void col2vol( + cudaStream_t stream, + const T* data_col, + const int64_t channels, + const int64_t depth, + const int64_t height, + const int64_t width, + const int64_t output_depth, + const int64_t output_height, + const int64_t output_width, + const int64_t patch_t, + const int64_t patch_h, + const int64_t patch_w, + const int64_t pad_t, + const int64_t pad_h, + const int64_t pad_w, + const int64_t stride_t, + const int64_t stride_h, + const int64_t stride_w, + const int64_t dilation_t, + const int64_t dilation_h, + const int64_t dilation_w, + T* data_vol) { + const auto num_kernels = channels * depth * height * width; + + auto check_fits_in_unsigned = + [](int64_t val, const char * name) { + constexpr auto umax = std::numeric_limits::max(); + TORCH_CHECK(val >= 0 && val <= umax, + name, " must fit in a 32-bit unsigned value"); + }; + check_fits_in_unsigned(num_kernels, "input size"); + check_fits_in_unsigned( + channels * patch_t * patch_h * patch_w, "channels x kernel size"); + + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. 
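+  // vol2im_kernel is a gather: each thread owns exactly one element of
+  // data_vol and walks the (t_col, h_col, w_col) ranges computed above,
+  // summing every data_col entry that unfolds onto that element. No two
+  // threads write the same output location, which is what keeps the kernel
+  // atomic-free; a scatter formulation would instead need an atomicAdd per
+  // column entry.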
+ vol2im_kernel + <<>>( + num_kernels, + data_col, + depth, + height, + width, + channels, + patch_t, + patch_h, + patch_w, + pad_t, + pad_h, + pad_w, + stride_t, + stride_h, + stride_w, + dilation_t, + dilation_h, + dilation_w, + output_depth, + output_height, + output_width, + data_vol); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/group_norm.h b/voice_bridge/torch/include/ATen/native/group_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..1673df9253eec782328db0f673e6526b698641d0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/group_norm.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include + +namespace at { +class Tensor; + +namespace native { + +using forward_fn = void (*)( + const Tensor& /* X */, + const Tensor& /* gamma */, + const Tensor& /* beta */, + int64_t /* N */, + int64_t /* C */, + int64_t /* HxW */, + int64_t /* group */, + double /* eps */, + Tensor& /* Y */, + Tensor& /* mean */, + Tensor& /* rstd */); + +using backward_fn = void (*)( + const Tensor& /* dY */, + const Tensor& /* X */, + const Tensor& /* mean */, + const Tensor& /* rstd */, + const Tensor& /* gamma */, + int64_t /* N */, + int64_t /* C */, + int64_t /* HxW */, + int64_t /* group */, + Tensor& /* dX */, + Tensor& /* dgamma */, + Tensor& /* dbeta */); + +DECLARE_DISPATCH(forward_fn, GroupNormKernel); +DECLARE_DISPATCH(backward_fn, GroupNormBackwardKernel); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/im2col.h b/voice_bridge/torch/include/ATen/native/im2col.h new file mode 100644 index 0000000000000000000000000000000000000000..c3daed3d4ffc620ea92f66e86d63af152bd7acb7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/im2col.h @@ -0,0 +1,151 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace at { +namespace native { + +template +static void im2col( + const T* data_im, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t output_height, + const int64_t output_width, + const int64_t kernel_h, + const int64_t kernel_w, + const int64_t pad_h, + const int64_t pad_w, + const int64_t stride_h, + const int64_t stride_w, + const int64_t dilation_h, + const int64_t dilation_w, + T* data_col, + bool is_channels_last = false) { + const int64_t height_col = output_height; + const int64_t width_col = output_width; + const int64_t channels_col = channels * kernel_h * kernel_w; + + if (is_channels_last) { + at::parallel_for(0, height_col * width_col, 0, [&](int64_t begin, int64_t end) { + int64_t h_col{0}, w_col{0}; + data_index_init(begin, h_col, height_col, w_col, width_col); + + for (const auto i_col : c10::irange(begin, end)) { + for (const auto h_offset : c10::irange(kernel_h)) { + int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h; + for (const auto w_offset : c10::irange(kernel_w)) { + int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w; + + const T* slice_im = data_im + (h_im * width + w_im) * channels; + T* slice_col = data_col + (i_col * kernel_h * kernel_w + h_offset * kernel_w + w_offset) * channels; + + if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + std::copy_n(slice_im, channels, slice_col); + } else { + std::fill_n(slice_col, channels, T(0)); + } + } + } + + // move the the next index + data_index_step(h_col, height_col, w_col, width_col); + } + }); + } else { + at::parallel_for(0, channels_col, 0, 
[&](int64_t begin, int64_t end) { + int64_t c_im{0}, h_offset{0}, w_offset{0}; + data_index_init(begin, c_im, channels, h_offset, kernel_h, w_offset, kernel_w); + + for (const auto c_col : c10::irange(begin, end)) { + for (const auto h_col : c10::irange(height_col)) { + int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h; + for (const auto w_col : c10::irange(width_col)) { + int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w; + data_col[(c_col * height_col + h_col) * width_col + w_col] = + (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) + ? data_im[(c_im * height + h_im) * width + w_im] + : static_cast(0); + } + } + + // move to the next index + data_index_step(c_im, channels, h_offset, kernel_h, w_offset, kernel_w); + } + }); + } +} + +template +static void col2im( + const T* data_col, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t output_height, + const int64_t output_width, + const int64_t kernel_h, + const int64_t kernel_w, + const int64_t pad_h, + const int64_t pad_w, + const int64_t stride_h, + const int64_t stride_w, + const int64_t dilation_h, + const int64_t dilation_w, + T* data_im, + bool is_channels_last = false) { + std::fill_n(data_im, height * width * channels, T(0)); + + const int64_t height_col = output_height; + const int64_t width_col = output_width; + const int64_t channels_col = channels * kernel_h * kernel_w; + + if (is_channels_last) { + for (const auto h_col : c10::irange(height_col)) { + for (const auto w_col : c10::irange(width_col)) { + for (const auto h_offset : c10::irange(kernel_h)) { + int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h; + for (const auto w_offset : c10::irange(kernel_w)) { + int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w; + + T* slice_im = data_im + (h_im * width + w_im) * channels; + const T* slice_col = data_col + ((h_col * width_col + w_col) * kernel_h * kernel_w + + h_offset * kernel_w + w_offset) * channels; + + if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width) { + std::transform(slice_col, slice_col + channels, slice_im, slice_im, std::plus()); + } + } + } + } + } + } else { + for (const auto c_col : c10::irange(channels_col)) { + int64_t w_offset = c_col % kernel_w; + int64_t h_offset = (c_col / kernel_w) % kernel_h; + int64_t c_im = c_col / kernel_h / kernel_w; + + for (const auto h_col : c10::irange(height_col)) { + int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h; + for (const auto w_col : c10::irange(width_col)) { + int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w; + + if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width) + data_im[(c_im * height + h_im) * width + w_im] += + data_col[(c_col * height_col + h_col) * width_col + w_col]; + } + } + } + } +} + +} // native +} // at diff --git a/voice_bridge/torch/include/ATen/native/im2col_shape_check.h b/voice_bridge/torch/include/ATen/native/im2col_shape_check.h new file mode 100644 index 0000000000000000000000000000000000000000..62a0883c8b30f534ceb5493a6037343aada8516d --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/im2col_shape_check.h @@ -0,0 +1,233 @@ +#pragma once +#include +#include + +namespace at { +namespace native { + +static inline void col2im_shape_check( + const Tensor& input, + const Tensor& grad_output, + int64_t output_height, + int64_t output_width, + int64_t kernel_height, + int64_t kernel_width, + int64_t dilation_height, + int64_t dilation_width, + int64_t pad_height, + int64_t pad_width, + int64_t 
stride_height, + int64_t stride_width) { + TORCH_CHECK( + kernel_width > 0 && kernel_height > 0, + "kernel size should be greater than zero, but got kernel_height: ", + kernel_height, + " kernel_width: ", + kernel_width); + TORCH_CHECK( + stride_width > 0 && stride_height > 0, + "stride should be greater than zero, but got stride_height: ", + stride_height, + " stride_width: ", + stride_width); + TORCH_CHECK( + dilation_width > 0 && dilation_height > 0, + "dilation should be greater than zero, but got dilation_height: ", + dilation_height, + " dilation_width: ", + dilation_width); + TORCH_CHECK( + pad_width >= 0 && pad_height >= 0, + "padding should be non-negative, but got pad_height: ", + pad_height, + " pad_width: ", + pad_width); + + + int64_t ndim = input.ndimension(); + // allow dim=0 only the batch dimension. + TORCH_CHECK( + (ndim == 2 && input.size(0) != 0 && input.size(1) != 0) || + (ndim == 3 && input.size(1) != 0 && input.size(2) != 0), + "Expected 2D or 3D (batch mode) tensor for input with possibly 0 batch size and non-zero dimensions for input, but got: ", + input.sizes()); + + int64_t batch_dim = (ndim == 3) ? 0 : -1; + int64_t n_input_plane = input.size(batch_dim + 1); + + if (n_input_plane % (kernel_width * kernel_height) != 0) { + AT_ERROR( + "Expected size of input's dimension 1 to be divisible by the " + "product of kernel_size, but got input.size(1)=", + n_input_plane, + " and kernel_size=(", + kernel_height, + ", ", + kernel_width, + ")."); + } + + int64_t input_length = input.size(batch_dim + 2); + int64_t n_blocks_height = + div_rtn( + output_height + 2 * pad_height - + dilation_height * (kernel_height - 1) - 1, + stride_height) + + 1; + int64_t n_blocks_width = div_rtn( + output_width + 2 * pad_width - + dilation_width * (kernel_width - 1) - 1, + stride_width) + + 1; + + if (input_length != (n_blocks_height * n_blocks_width)) { + AT_ERROR( + "Given output_size=(", + output_height, + ", ", + output_width, + "), kernel_size=(", + kernel_height, + ", ", + kernel_width, + "), dilation=(", + dilation_height, + ", ", + dilation_width, + "), padding=(", + pad_height, + ", ", + pad_width, + "), stride=(", + stride_height, + ", ", + stride_width, + "), expected size of input's dimension 2 to match the calculated number of ", + "sliding blocks ", + n_blocks_height, + " * ", + n_blocks_width, + " = ", + (n_blocks_height * n_blocks_width), + ", but got input.size(2)=", + input_length, + "."); + } + + TORCH_CHECK( + n_blocks_height >= 1 && n_blocks_width >= 1, + "Given output_size=(", output_height, ", ", output_width, "), ", + "kernel_size=(", kernel_height, ", ", kernel_width, "), ", + "dilation=(", dilation_height, ", ", dilation_width, "), ", + "padding=(", pad_height, ", ", pad_width, "), ", + "stride=(", stride_height, ", ", stride_width, "), ", + "calculated shape of the array of sliding blocks as ", + "(", n_blocks_height, ", ", n_blocks_width, "), ", + "which is too small (non-positive)"); + + if (output_width < 1 || output_height < 1) { + AT_ERROR( + "Expected output spatial size to be positive, but got: output_size=(", + output_height, + ", ", + output_width, + ")."); + } +} + +static inline void im2col_shape_check( + const Tensor& input, + const Tensor& grad_output, + int64_t kernel_height, + int64_t kernel_width, + int64_t dilation_height, + int64_t dilation_width, + int64_t pad_height, + int64_t pad_width, + int64_t stride_height, + int64_t stride_width) { + TORCH_CHECK( + kernel_width > 0 && kernel_height > 0, + "kernel size should be greater than zero, but got 
kernel_height: ", + kernel_height, + " kernel_width: ", + kernel_width); + + TORCH_CHECK( + dilation_width > 0 && dilation_height > 0, + "dilation should be greater than zero, but got dilation_height: ", + dilation_height, + " dilation_width: ", + dilation_width); + + TORCH_CHECK( + pad_width >= 0 && pad_height >= 0, + "padding should be non-negative, but got pad_height: ", + pad_height, + " pad_width: ", + pad_width); + + TORCH_CHECK( + stride_width > 0 && stride_height > 0, + "stride should be greater than zero, but got stride_height: ", + stride_height, + " stride_width: ", + stride_width); + + int64_t ndim = input.ndimension(); + + // allow dim=0 only the batch dimension. + bool valid_dims = input.size(1) != 0 && input.size(2) != 0; + TORCH_CHECK( + (ndim == 3 && input.size(0) && valid_dims) || + (ndim == 4 && valid_dims && input.size(3) != 0), + "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", + input.sizes()); + + int64_t dim_batch = 0; + + if (ndim == 3) { + dim_batch = -1; + } + + int64_t input_height = input.size(dim_batch + 2); + int64_t input_width = input.size(dim_batch + 3); + int64_t output_height = div_rtn( + input_height + 2 * pad_height - + (dilation_height * (kernel_height - 1) + 1), + stride_height) + + 1; + int64_t output_width = div_rtn( + input_width + 2 * pad_width - + (dilation_width * (kernel_width - 1) + 1), + stride_width) + + 1; + + if (output_height < 1 || output_width < 1) { + AT_ERROR( + "Given input with spatial size (", + input_height, + ", ", + input_height, + "), kernel_size=(", + kernel_height, + ", ", + kernel_width, + "), dilation=(", + dilation_height, + ", ", + dilation_width, + "), padding=(", + pad_height, + ", ", + pad_width, + "), calculated shape of the array of sliding blocks as (", + output_height, + ", ", + output_width, + "), but its components must be at least one."); + } +} + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/layer_norm.h b/voice_bridge/torch/include/ATen/native/layer_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..629bc9ab3906b9cebd08211ffd89030246675b81 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/layer_norm.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include +#include + +namespace at { +namespace native { + +namespace { + +C10_ALWAYS_INLINE std::pair _check_layer_norm_inputs( + const Tensor& input, + IntArrayRef normalized_shape, + const Tensor& weight /* optional */, + const Tensor& bias /* optional */) { + + const int normalized_ndim = normalized_shape.size(); + TORCH_CHECK( + normalized_ndim >= 1, + "Expected normalized_shape to be at least 1-dimensional, i.e., ", + "containing at least one element, but got normalized_shape = ", + normalized_shape); + TORCH_CHECK( + !weight.defined() || weight.sizes().equals(normalized_shape), + "Expected weight to be of same shape as normalized_shape, but got ", + "weight of shape ", + weight.sizes(), + " and normalized_shape = ", + normalized_shape); + TORCH_CHECK( + !bias.defined() || bias.sizes().equals(normalized_shape), + "Expected bias to be of same shape as normalized_shape, but got ", + "bias of shape ", + bias.sizes(), + " and normalized_shape = ", + normalized_shape); + + const auto input_shape = input.sizes(); + const auto input_ndim = input.dim(); + + if (input_ndim < normalized_ndim || + !input_shape.slice(input_ndim - normalized_ndim) + .equals(normalized_shape)) { + std::stringstream ss; + ss << "Given 
normalized_shape=" << normalized_shape + << ", expected input with shape [*"; + for (auto size : normalized_shape) { + ss << ", " << size; + } + ss << "], but got input of size" << input_shape; + AT_ERROR(ss.str()); + } + + const int axis = input_ndim - normalized_ndim; + const int64_t M = + c10::multiply_integers(input_shape.cbegin(), input_shape.cbegin() + axis); + const int64_t N = + c10::multiply_integers(input_shape.cbegin() + axis, input_shape.cend()); + + return std::make_pair(M, N); +} + +} // namespace + +void layer_norm_cpu_out( + at::Tensor& out, + const at::Tensor& input, + const Tensor& gamma, + const Tensor& beta, + double eps, + int64_t M, + int64_t N); + +using forward_fn = void (*)( + const Tensor& /* X */, + const Tensor& /* gamma */, + const Tensor& /* beta */, + int64_t /* M */, + int64_t /* N */, + double /* eps */, + Tensor* /* Y */, + Tensor* /* mean */, + Tensor* /* rstd */); + +using backward_fn = void (*)( + const Tensor& /* dY */, + const Tensor& /* X */, + const Tensor& /* mean */, + const Tensor& /* rstd */, + const Tensor& /* gamma */, + int64_t /* M */, + int64_t /* N */, + Tensor* /* dX */, + Tensor* /* dgamma */, + Tensor* /* dbeta */); + +DECLARE_DISPATCH(forward_fn, LayerNormKernel); +DECLARE_DISPATCH(backward_fn, LayerNormBackwardKernel); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/quantized/AffineQuantizer.h b/voice_bridge/torch/include/ATen/native/quantized/AffineQuantizer.h new file mode 100644 index 0000000000000000000000000000000000000000..cd39e34240663bddc2a41b5ee30538cf31449666 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/AffineQuantizer.h @@ -0,0 +1,129 @@ +#pragma once + +#include +#include +#include + +namespace at { +namespace native { + +Tensor& quantize_tensor_per_tensor_affine( + const Tensor& rtensor, + Tensor& qtensor, + double scale, + int64_t zero_point); +Tensor& quantize_tensor_per_channel_affine( + const Tensor& rtensor, + Tensor& qtensor, + Tensor scales, + Tensor zero_points, + int64_t axis); + +Tensor& quantize_tensor_per_channel_float_qparams( + const Tensor& rtensor, + Tensor& qtensor, + Tensor scales, + Tensor zero_points, + int64_t axis); + +Tensor& dequantize_tensor_per_tensor_affine( + const Tensor& qtensor, + Tensor& rtensor, + double scale, + int64_t zero_point); +Tensor& dequantize_tensor_per_channel_affine( + const Tensor& qtensor, + Tensor& rtensor, + Tensor scales, + Tensor zero_points, + int64_t axis); +Tensor& dequantize_tensor_per_channel_float_qparams( + const Tensor& qtensor, + Tensor& rtensor, + Tensor scales, + Tensor zero_points, + int64_t axis); + +using quantize_tensor_per_tensor_affine_fn = + void (*)(const Tensor& rtensor, Tensor& qtensor, double scale, int64_t zero_point); + +using quantize_tensor_per_channel_affine_fn = void (*)( + const Tensor& rtensor, + Tensor& qtensor, + const Tensor& scales, + const Tensor& zero_points, + int64_t axis); + +using quantize_tensor_per_channel_float_qparams_fn = void (*)( + const Tensor& rtensor, + Tensor& qtensor, + const Tensor& scales, + const Tensor& zero_points, + int64_t axis); + +using dequantize_tensor_per_tensor_affine_fn = + void (*)(const Tensor& qtensor, Tensor& rtensor, double scale, int64_t zero_point); + +using dequantize_tensor_per_channel_affine_fn = void (*)( + const Tensor& qtensor, + Tensor& rtensor, + const Tensor& scales, + const Tensor& zero_points, + int64_t axis); + +using dequantize_tensor_per_channel_float_qparams_fn = void (*)( + const Tensor& qtensor, + Tensor& 
rtensor, + const Tensor& scales, + const Tensor& zero_points, + int64_t axis); + +using quantize_tensor_per_tensor_affine_sub_byte_fn = + void (*)(const Tensor& rtensor, Tensor& qtensor, float scale, float zero_point); + +using dequantize_tensor_per_tensor_affine_sub_byte_fn = + void (*)(const Tensor& qtensor, Tensor& rtensor, float scale, float zero_point); + +DECLARE_DISPATCH( + quantize_tensor_per_tensor_affine_fn, + quantize_tensor_per_tensor_affine_stub); +DECLARE_DISPATCH( + quantize_tensor_per_channel_affine_fn, + quantize_tensor_per_channel_affine_stub); +DECLARE_DISPATCH( + quantize_tensor_per_channel_float_qparams_fn, + quantize_tensor_per_channel_float_qparams_stub); + +DECLARE_DISPATCH( + dequantize_tensor_per_tensor_affine_fn, + dequantize_tensor_per_tensor_affine_stub); +DECLARE_DISPATCH( + dequantize_tensor_per_channel_affine_fn, + dequantize_tensor_per_channel_affine_stub); +DECLARE_DISPATCH( + dequantize_tensor_per_channel_float_qparams_fn, + dequantize_tensor_per_channel_float_qparams_stub); + +DECLARE_DISPATCH( + quantize_tensor_per_tensor_affine_sub_byte_fn, + quantize_tensor_per_tensor_affine_sub_byte_stub); + +DECLARE_DISPATCH( + dequantize_tensor_per_tensor_affine_sub_byte_fn, + dequantize_tensor_per_tensor_affine_sub_byte_stub); + +template +TORCH_API Tensor quantize_tensor( + Tensor rtensor, + Tensor qtensor, + double scale, + int64_t zero_point); +template +TORCH_API Tensor dequantize_tensor( + Tensor qtensor, + Tensor rtensor, + double scale, + int64_t zero_point); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/quantized/AffineQuantizerBase.h b/voice_bridge/torch/include/ATen/native/quantized/AffineQuantizerBase.h new file mode 100644 index 0000000000000000000000000000000000000000..31526c3ec3c52057463cd00f0dd8556160d4d2df --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/AffineQuantizerBase.h @@ -0,0 +1,47 @@ +#pragma once +#include +#include + +namespace at { +namespace native { + +// Quantize a float value into a uint value given scale and zero_point +template +TORCH_API T quantize_val(double scale, int64_t zero_point, float value); +// TODO combine this with quantize_val once the numerics for ARM are aligned +// with it +template +T quantize_val_arm( + const float scale, + const int32_t zero_point, + const float value); +template +void quantize_vec( + double scale, + int64_t zero_point, + const float* src, + T* dst, + size_t count = 8); +template +TORCH_API float dequantize_val(double scale, int64_t zero_point, T value); +template +TORCH_API float dequantize_vec( + double scale, + int64_t zero_point, + const T* src, + float* dst, + size_t count = 8); +template +TORCH_API DST_T requantize_val(double, int64_t, double, int64_t, SRC_T src); + +// Given a multiplier and a zero_point, requantize int32_t computed values back +// to quantized values. 
See comment above +// make_per_tensor_affine_quantizer function for the usage of int64_t +template +TORCH_API DST_T +requantize_from_int(double multiplier, int64_t zero_point, int64_t src); + +int quantize_val_float_qparams(float scale, float zero_point, float value, int qmin, int qmax); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/quantized/Copy.h b/voice_bridge/torch/include/ATen/native/quantized/Copy.h new file mode 100644 index 0000000000000000000000000000000000000000..d52c8ff0fb2c7f7f6eed17acceb660482144eef9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/Copy.h @@ -0,0 +1,10 @@ +#pragma once + +#include + +namespace at { +namespace native { + +Tensor& quantized_copy_from_float_(Tensor& self, const Tensor& src); +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/quantized/FakeQuantAffine.h b/voice_bridge/torch/include/ATen/native/quantized/FakeQuantAffine.h new file mode 100644 index 0000000000000000000000000000000000000000..3b1dbf608c1394d68e4551321b329b23c04c8172 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/FakeQuantAffine.h @@ -0,0 +1,66 @@ +#pragma once + +#include +#include + +namespace at { + +struct TensorIterator; + +namespace native { + +using fake_quant_tensor_cachemask_fn = void (*)( + Tensor& output, + Tensor& mask, + const Tensor& input, + float sc, + int64_t z_point, + int64_t quant_min, + int64_t quant_max); + +using fake_quant_tensor_cachemask_tensor_qparams_fn = void (*)( + Tensor& output, + Tensor& mask, + const Tensor& input, + const Tensor& sc, + const Tensor& z_point, + const Tensor& fake_quant_enabled, + int64_t quant_min, + int64_t quant_max); + +using fake_quant_learnable_grad_tensor_fn = void (*)( + TensorIterator& iter, + float scale, + float inv_scale, + int64_t zero_point, + int64_t quant_min, + int64_t quant_max, + float grad_factor); + +DECLARE_DISPATCH(fake_quant_tensor_cachemask_fn, fake_quant_tensor_cachemask_stub); +DECLARE_DISPATCH(fake_quant_tensor_cachemask_tensor_qparams_fn, fake_quant_tensor_cachemask_tensor_qparams_stub); +DECLARE_DISPATCH(fake_quant_learnable_grad_tensor_fn, fake_quant_grad_learnable_tensor_stub); + +using fake_quant_per_channel_fn = void (*)( + TensorIterator &iter, + int64_t quant_min, + int64_t quant_max); + +using fake_quant_per_channel_cachemask_fn = void (*)( + TensorIterator &iter, + TensorIterator &iter_mask, + int64_t quant_min, + int64_t quant_max); + +DECLARE_DISPATCH(fake_quant_per_channel_cachemask_fn, fake_quant_per_channel_cachemask_stub); + +using fake_quant_learnable_per_channel_fn = void (*)( + TensorIterator &iter, + int64_t quant_min, + int64_t quant_max, + float grad_factor); + +DECLARE_DISPATCH(fake_quant_learnable_per_channel_fn, fake_quant_grad_learnable_channel_stub); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/quantized/IndexKernel.h b/voice_bridge/torch/include/ATen/native/quantized/IndexKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..0e240b5a8e9afc61f8828f4162f1b89c7ec06bb7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/IndexKernel.h @@ -0,0 +1,14 @@ +#pragma once +#include + +namespace at { +namespace native { +using masked_fill_kernel_quantized_fn = void(*)(TensorIterator& iter, const Scalar& value, double scale, int zero_point); +using index_put_kernel_quantized_fn = void(*)(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate, double scale, 
int zero_point); + +DECLARE_DISPATCH(masked_fill_kernel_quantized_fn, masked_fill_kernel_quantized_stub); +DECLARE_DISPATCH(index_put_kernel_quantized_fn, index_put_kernel_quantized_stub); + + +} // native +} // at diff --git a/voice_bridge/torch/include/ATen/native/quantized/PackedParams.h b/voice_bridge/torch/include/ATen/native/quantized/PackedParams.h new file mode 100644 index 0000000000000000000000000000000000000000..64d8ec840c46464c17fd506237ca46c126d94d91 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/PackedParams.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include + +struct LinearPackedParamsBase : public torch::jit::CustomClassHolder { + virtual at::Tensor apply( + at::Tensor input, + double output_scale, + int64_t output_zero_point) = 0; + virtual at::Tensor apply_relu( + at::Tensor input, + double output_scale, + int64_t output_zero_point) = 0; + + // out variant of LinearPackedParamsBase::apply + virtual at::Tensor& apply_out( + const at::Tensor& /*input*/, + double /*output_scale*/, + int64_t /*output_zero_point*/, + at::Tensor& output) { + throw std::runtime_error( + "apply_out is not implemented for this packed " + "parameter type"); + return output; + } + + virtual at::Tensor& apply_relu_out( + const at::Tensor& /*input*/, + double /*output_scale*/, + int64_t /*output_zero_point*/, + at::Tensor& output) { + throw std::runtime_error( + "apply_relu_out is not implemented for this packed " + "parameter type"); + return output; + } + + virtual at::Tensor apply_dynamic( + at::Tensor input, + bool reduce_range = false) = 0; + virtual at::Tensor apply_dynamic_relu( + at::Tensor input, + bool reduce_range = false) = 0; + + virtual at::Tensor& apply_dynamic_out( + const at::Tensor& /* input */, + at::Tensor& output, + bool /* reduce_range */) { + throw std::runtime_error( + "apply_dynamic_out is not implemented for this packed " + "parameter type"); + return output; + } + virtual at::Tensor& apply_dynamic_relu_out( + const at::Tensor& /* input */, + at::Tensor& output, + bool /* reduce_range */) { + throw std::runtime_error( + "apply_dynamic_relu_out is not implemented for this packed " + "parameter type"); + return output; + } + + virtual std::tuple> unpack() = 0; + + virtual c10::optional bias() = 0; + + virtual void set_bias(c10::optional /*bias*/) { + throw std::runtime_error( + "set_bias is not implemented for this packed " + "parameter type"); + } +}; + +template +struct ConvPackedParamsBase : public torch::jit::CustomClassHolder { + virtual at::Tensor apply( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) = 0; + virtual at::Tensor apply_relu( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) = 0; + virtual at::Tensor apply_dynamic( + const at::Tensor& input, + bool reduce_range) = 0; + + virtual std::tuple> unpack() = 0; + + virtual torch::List stride() const = 0; + virtual torch::List padding() const = 0; + virtual torch::List output_padding() const = 0; + virtual torch::List dilation() const = 0; + virtual int64_t groups() const = 0; + virtual bool transpose() const = 0; +}; diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/BinaryOps.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/BinaryOps.h new file mode 100644 index 0000000000000000000000000000000000000000..ada78c59f95cc9c8dc229cefbda34e3ee172c2f3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/BinaryOps.h @@ -0,0 +1,8 @@ +#include + +namespace at { +namespace native { +TORCH_API Tensor 
+quantized_add(Tensor qa, Tensor qb, double scale, int64_t zero_point); +} +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/EmbeddingPackedParams.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/EmbeddingPackedParams.h new file mode 100644 index 0000000000000000000000000000000000000000..945c8edf7c75ce88aaa4a35bfa8285af8cd8ca03 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/EmbeddingPackedParams.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include + +struct EmbeddingPackedParamsBase : public torch::jit::CustomClassHolder { + virtual at::Tensor embeddingbag_byte( + const at::Tensor& indices, + const c10::optional& offsets, + bool pruned_weights, + const c10::optional& per_sample_weights_, + const c10::optional& compressed_indices_mapping, + bool include_last_offset, + bool is_embedding_op) = 0; + + virtual at::Tensor embeddingbag_4bit( + const at::Tensor& indices, + const c10::optional& offsets, + bool pruned_weights, + const c10::optional& per_sample_weights_, + const c10::optional& compressed_indices_mapping, + bool include_last_offset, + bool is_embedding_op) = 0; + + virtual at::Tensor unpack() = 0; + + virtual int64_t bit_rate() const = 0; + virtual int64_t version() const = 0; +}; diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/OnednnUtils.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/OnednnUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..fefc2426e2d44b90b8f99d49065ffa2bc2feaf44 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/OnednnUtils.h @@ -0,0 +1,399 @@ +#pragma once + +#include +#if AT_MKLDNN_ENABLED() +#include +#include +#include +#include + +using PrimitiveCacheKey = std::tuple< + double, // input_scale + int64_t, // input_zero_point + std::vector, // input_shape + double, // output_scale + int64_t, // output_zero_point + int64_t>; // OMP_number_of_threads + +enum CacheKeyIndex { + InputScale, + InputZeroPoint, + InputShape, + OutputScale, + OutputZeroPoint, + NumOfThreads, +}; + +// Base class of primitive cache +struct PrimitiveCache { + PrimitiveCacheKey key; + + bool hit(const PrimitiveCacheKey& key) { + return this->key == key; + } +}; + +using LinearParams = ideep::matmul_forward_params; +using Conv = dnnl::convolution_forward; +using ConvDesc = dnnl::convolution_forward::primitive_desc; +using ConvParams = ideep::convolution_forward_params; +using Deconv = dnnl::deconvolution_forward; +using DeconvDesc = dnnl::deconvolution_forward::primitive_desc; +using DeconvParams = ideep::deconv_forward_params; + +struct LinearPrimitiveCache : PrimitiveCache { + LinearPrimitiveCache() {} + + LinearPrimitiveCache( + const PrimitiveCacheKey& key, + const LinearParams& param) { + this->key = key; + this->param = param; + } + + LinearPrimitiveCache( + const PrimitiveCacheKey& key, + const LinearParams& param, + const ideep::tensor& bias) { + this->key = key; + this->param = param; + if (!bias.is_empty()) { + expected_bias = + bias.reorder_if_differ_in(param.pd.bias_desc(), param.bias_attr); + } + } + + LinearParams param; + ideep::tensor expected_bias; + + // For dynamic qlinear, scale and zero point + // are set at execution time. So we only need to compare + // the rest part of key. 
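+  // Reading of the key (recovered from the CacheKeyIndex enum above):
+  // hit_dynamic() compares std::get<InputShape>(key) and
+  // std::get<NumOfThreads>(key) only, because for dynamic quantization the
+  // scales and zero points arrive with each call; a cached primitive is
+  // therefore reusable whenever input shape and OMP thread count both match.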
+ bool hit_dynamic(const PrimitiveCacheKey& new_key) { + auto cached_input_shape = std::get(this->key); + auto new_input_shape = std::get(new_key); + return ( + cached_input_shape == new_input_shape && + std::get(this->key) == std::get(new_key)); + } + + LinearParams& get_param() { + return param; + } + + ideep::tensor& get_expected_bias() { + return expected_bias; + } +}; + +struct ConvPrimitiveCache : PrimitiveCache { + ConvPrimitiveCache() {} + + ConvPrimitiveCache(const PrimitiveCacheKey& key, + const ConvDesc& conv_desc, + const ideep::tensor& bias, + const ideep::attr_t bias_attr) { + this->key = key; + this->primitive_desc = conv_desc; + this->primitive = Conv(this->primitive_desc); + // Construct tensor of input zero point + ideep::tensor::desc input_zp_desc = {{1}, ideep::data_type::s32, {1}}; + this->input_zp_tensor.init(input_zp_desc, ideep::engine::cpu_engine()); + auto zp_data_ptr = reinterpret_cast(this->input_zp_tensor.get_data_handle()); + zp_data_ptr[0] = std::get(key); + // Construct expected bias + this->expected_bias = bias.reorder_if_differ_in(conv_desc.bias_desc(), bias_attr); + } + + ConvDesc primitive_desc; + Conv primitive; + ideep::tensor input_zp_tensor; + ideep::tensor expected_bias; + + inline ConvDesc& get_primitive_desc() { + return primitive_desc; + } + + inline Conv& get_primitive() { + return primitive; + } + + inline ideep::tensor& get_src_zp_tensor() { + return input_zp_tensor; + } + + inline ideep::tensor& get_bias() { + return expected_bias; + } +}; + +struct DeconvPrimitiveCache : PrimitiveCache { + DeconvPrimitiveCache() {} + + DeconvPrimitiveCache(const PrimitiveCacheKey& key, + const DeconvDesc& deconv_desc, + const ideep::tensor& bias, + const ideep::attr_t bias_attr, + const ideep::tensor& input_zero_point) { + this->key = key; + this->primitive_desc = deconv_desc; + this->primitive = Deconv(this->primitive_desc); + this->input_zp_tensor = std::move(input_zero_point); + // Construct expected bias + this->expected_bias = bias.reorder_if_differ_in(deconv_desc.bias_desc(), bias_attr); + } + + DeconvDesc primitive_desc; + Deconv primitive; + ideep::tensor input_zp_tensor; + ideep::tensor expected_bias; + + inline DeconvDesc& get_primitive_desc() { + return primitive_desc; + } + + inline Deconv& get_primitive() { + return primitive; + } + + inline ideep::tensor& get_src_zp_tensor() { + return input_zp_tensor; + } + + inline ideep::tensor& get_bias() { + return expected_bias; + } +}; + +struct PackedLinearWeightsOnednn : public LinearPackedParamsBase { + PackedLinearWeightsOnednn( + std::unique_ptr weight, + c10::optional bias, + at::Tensor orig_weight, + c10::optional orig_bias) + : weight_(std::move(weight)), + bias_(std::move(bias)), + orig_weight_(std::move(orig_weight)), + orig_bias_(std::move(orig_bias)) { + cache_initialized_flag = std::make_unique(); + } + std::unique_ptr weight_; + c10::optional bias_; + at::Tensor orig_weight_; + c10::optional orig_bias_; + + at::Tensor apply( + at::Tensor input, + double output_scale, + int64_t output_zero_point) override; + at::Tensor apply_relu( + at::Tensor input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override; + at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override; + + std::tuple> unpack() override; + + c10::optional bias() override { + return orig_bias_; + } + + static c10::intrusive_ptr prepack( + at::Tensor weight, + c10::optional bias); + + private: + LinearPrimitiveCache 
prim_cache; + std::unique_ptr cache_initialized_flag; + + template + at::Tensor apply_impl( + at::Tensor input, + double output_scale, + int64_t output_zero_point); + + template + at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range=false); + + LinearPrimitiveCache& get_cache() { + return prim_cache; + } +}; + +template +struct PackedConvWeightsOnednn : public ConvPackedParamsBase { + PackedConvWeightsOnednn( + std::unique_ptr weight, + c10::optional bias, + at::Tensor orig_weight, + c10::optional orig_bias, + torch::List stride, + torch::List padding, + torch::List output_padding, + torch::List dilation, + int64_t groups, + uint8_t transpose) + : weight_(std::move(weight)), + bias_(std::move(bias)), + orig_weight_(std::move(orig_weight)), + orig_bias_(std::move(orig_bias)), + stride_(std::move(stride)), + padding_(std::move(padding)), + output_padding_(std::move(output_padding)), + dilation_(std::move(dilation)), + groups_(groups), + transpose_(transpose) { + cache_initialized_flag = std::make_unique(); + } + + std::unique_ptr weight_; + c10::optional bias_; + at::Tensor orig_weight_; + c10::optional orig_bias_; + torch::List stride_; + torch::List padding_; + torch::List output_padding_; + torch::List dilation_; + int64_t groups_; + uint8_t transpose_; + + at::Tensor apply( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_relu( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_dynamic( + const at::Tensor& input, + bool reduce_range) override; + + std::tuple> unpack() override; + + static c10::intrusive_ptr> prepack( + at::Tensor weight, + c10::optional bias, + torch::List stride, + torch::List padding, + torch::List output_padding, + torch::List dilation, + int64_t groups, + bool transpose); + + torch::List stride() const override { + return stride_; + } + + torch::List padding() const override { + return padding_; + } + + torch::List output_padding() const override { + return output_padding_; + } + + torch::List dilation() const override { + return dilation_; + } + + int64_t groups() const override { + return groups_; + } + + bool transpose() const override { + return (bool)transpose_; + } + + private: + ConvPrimitiveCache conv_prim_cache; + DeconvPrimitiveCache deconv_prim_cache; + std::unique_ptr cache_initialized_flag; + + template + at::Tensor apply_impl( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point); + + ConvPrimitiveCache& get_conv_cache() { + assert(!transpose()); + return conv_prim_cache; + } + + DeconvPrimitiveCache& get_deconv_cache() { + assert(transpose()); + return deconv_prim_cache; + } +}; + +namespace onednn_utils { + +// Try to reorder tensor to expected desc at runtime +// Do it in a `try...catch...` manner to avoid oneDNN's errors +// TODO: Move it to third_party/ideep +static void try_reorder( + ideep::tensor& t, + const ideep::tensor::desc&& desc, + ideep::scale_t scales) { + if (t.get_desc() != desc) { + try { + t = t.reorder_if_differ_in(desc); + } catch (...) { + ideep::tensor&& plain = t.to_public(nullptr, t.get_data_type()); + t = plain.reorder_if_differ_in(desc); + } + t.set_scale(scales); + } +} + +// ONEDNN requires symmetric quantization of weight +// Use this util function to check. 
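+// Here "symmetric" means every zero point of the weight is 0: q_zero_point()
+// == 0 for per-tensor affine weights, or all q_per_channel_zero_points()
+// entries equal to 0 for per-channel affine weights. Per-channel weights of a
+// transposed convolution, and any other qscheme, are conservatively reported
+// as not symmetric.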
+static bool is_weight_symmetric_quant(
+    const at::Tensor& weight,
+    bool is_transposed_conv) {
+  bool is_symmetric = true;
+  const auto qtype = weight.qscheme();
+  if (qtype == c10::kPerTensorAffine) {
+    is_symmetric &= (weight.q_zero_point() == 0);
+  } else if (qtype == c10::kPerChannelAffine) {
+    if (is_transposed_conv) {
+      // This case is currently not supported in PyTorch
+      // but we do not want to raise an error in this util function.
+      is_symmetric = false;
+    } else {
+      auto output_channels = weight.size(0);
+      for (int i = 0; i < output_channels; ++i) {
+        auto zp = weight.q_per_channel_zero_points()[i].item<int32_t>();
+        is_symmetric &= (zp == 0);
+      }
+    }
+  } else {
+    // This case is currently not supported in PyTorch
+    // but we do not want to raise an error in this util function.
+    is_symmetric = false;
+  }
+  return is_symmetric;
+}
+
+// Check if onednn should be used w.r.t fbgemm
+static bool should_use_onednn_quant(
+    const at::Tensor& weight,
+    bool is_transposed_conv,
+    int groups,
+    torch::List<int64_t> output_padding) {
+  bool vnni_available = cpuinfo_has_x86_avx512vnni();
+  bool w_sym_quant =
+      is_weight_symmetric_quant(weight, is_transposed_conv);
+  bool opad_all_zero =
+      std::all_of(output_padding.begin(), output_padding.end(), [](int i) { return i==0; });
+  return vnni_available && (groups <= 100) && w_sym_quant && opad_all_zero;
+}
+
+} // onednn_utils
+
+#endif // #if AT_MKLDNN_ENABLED()
diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/QnnpackUtils.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/QnnpackUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..799d159114c7a3603c215b5e788fb184cb3ae82b
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/QnnpackUtils.h
@@ -0,0 +1,515 @@
+#pragma once
+
+#ifdef USE_PYTORCH_QNNPACK
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+struct QnnpackOperatorDeleter {
+  void operator()(pytorch_qnnp_operator_t op) {
+    pytorch_qnnp_delete_operator(op);
+  }
+};
+
+// PackedWeight struct for QNNPACK stores the original Weight and Bias as
+// QNNPACK currently does not support an unpack function.
+// For PyTorch Mobile, once the model is scripted and serialized we don't need
+// to call unpack, so we can save some memory by checking for this case and free
+// the original weights after packing.
+// Input scale is set to null in pre-pack step. QNNPACK needs bias quantized
+// with input scale which is available at runtime in pytorch. During runtime if
+// input scale value changes then we requantize bias with the updated scale. For
+// inference we expect the graph to be static so the input scale should not
+// change across consecutive inference calls.
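+// Background note on the re-quantization above, under the usual affine
+// convention that the int32 accumulator (and hence the bias) is scaled by
+// input_scale * weight_scale:
+//   bias_q = round(bias_fp / (input_scale * weight_scale))
+// input_scale is only known once the first input is seen, so it is held as an
+// optional and the bias is re-quantized whenever the observed value changes.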
+struct PackedLinearWeightsQnnp : public LinearPackedParamsBase { + PackedLinearWeightsQnnp( + std::unique_ptr w, + at::Tensor orig_weight, + at::Tensor bias, + c10::optional input_scale, + at::Tensor w_scales, + std::vector&& w_zps) + : w(std::move(w)), + orig_weight(std::move(orig_weight)), + bias_(at::native::mobile::allocate_padded_contiguous_if_needed( + bias, bias.suggest_memory_format())), + per_channel_(this->orig_weight.qscheme() == at::kPerChannelAffine), + input_scale(std::move(input_scale)), + w_scales(w_scales), + w_zero_points(std::move(w_zps)) {} + + std::unique_ptr w; + at::Tensor orig_weight; + at::Tensor bias_; + bool per_channel_; + c10::optional input_scale; + at::Tensor w_scales; + std::vector w_zero_points; + std::vector requantization_scales; + + at::Tensor apply( + at::Tensor input, + double output_scale, + int64_t output_zero_point) override; + at::Tensor apply_relu( + at::Tensor input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_dynamic(at::Tensor input, bool reduce_range=false) override; + at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range=false) override; + + std::tuple> unpack() override; + + c10::optional bias() override { + return bias_; + } + + static c10::intrusive_ptr prepack( + at::Tensor weight, + c10::optional bias); + + bool per_channel() const { + return per_channel_; + } + + private: + std::mutex qnnp_mutex_; + +#ifdef USE_XNNPACK + xnnpack_operator xnnp_linear_op; + + template + at::Tensor apply_impl_xnnp( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point); +#endif // USE_XNNPACK + + template + at::Tensor apply_impl( + at::Tensor input, + double output_scale, + int64_t output_zero_point); + + template + at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range); +}; + +template +struct PackedConvWeightsQnnp : public ConvPackedParamsBase { + PackedConvWeightsQnnp( + std::unique_ptr w, + at::Tensor orig_weight, + at::Tensor bias, + torch::List stride, + torch::List padding, + torch::List output_padding, + torch::List dilation, + int64_t groups, + bool transpose, + c10::optional input_scale, + std::vector kernel, + at::Tensor w_scale, + std::vector&& w_zps, + bool is_per_channel) + : w(std::move(w)), + orig_weight(std::move(orig_weight)), + bias(std::move(bias)), + stride_(std::move(stride)), + padding_(std::move(padding)), + output_padding_(std::move(output_padding)), + dilation_(std::move(dilation)), + groups_(groups), + transpose_(transpose), + is_per_channel_(is_per_channel), + input_scale(input_scale), + kernel_(std::move(kernel)), + w_scales(w_scale), + w_zero_points(std::move(w_zps)) { + const bool any_padding = std::any_of( + padding_.begin(), padding_.end(), [](const auto& e) { return e != 0; }); + const size_t kernel_size = + std::accumulate(kernel_.begin(), kernel_.end(), 1, std::multiplies<>()); + + const size_t group_input_channels = transpose + ? this->orig_weight.size(0) / groups + : this->orig_weight.size(1); + const size_t group_output_channels = transpose + ? this->orig_weight.size(1) + : this->orig_weight.size(0) / groups; + + const size_t kernel_depth = kSpatialDim == 3 ? 
kernel_[0] : 1; + const size_t kernel_height = kernel_[kSpatialDim - 2]; + const size_t kernel_width = kernel_[kSpatialDim - 1]; + + pytorch_qnnp_ukernel_type ukernel_type; + if (transpose_) { + ukernel_type = pytorch_qnnp_ukernel_type_conv; + } else { + ukernel_type = pytorch_qnnp_ukernel_type_none; + + const bool has_depthwise_dimensions = + (kSpatialDim == 2 && + ((kernel_height == 3 && kernel_width == 3) || + (kernel_height == 5 && kernel_width == 5))) || + (kSpatialDim == 3 && kernel_height == 3 && kernel_width == 3 && + kernel_depth == 3); + const bool has_depthwise_grouping = + group_input_channels == 1 && group_output_channels == 1 && groups > 1; + + if (has_depthwise_dimensions && has_depthwise_grouping) { + ukernel_type = pytorch_qnnp_ukernel_type_dwconv; + } else if ( + kernel_size == 1 && + std::all_of( + stride_.begin(), + stride_.end(), + [](const auto& e) { return e == 1; }) && + !any_padding) { + ukernel_type = group_input_channels >= SIZE_MAX + ? pytorch_qnnp_ukernel_type_xzp_gemm + : pytorch_qnnp_ukernel_type_gemm; + } else { + ukernel_type = pytorch_qnnp_ukernel_type_conv; + } + } + + if (is_per_channel && ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) { + TORCH_INTERNAL_ASSERT( + false, "Per channel quantized weights are not supported for XZP kernels"); + } + + pytorch_qnnp_operator_t convolution{nullptr}; + // Initially all the params are set to zero. + convolution = static_cast( + calloc(1, sizeof(struct pytorch_qnnp_operator))); + if (convolution == nullptr) { + TORCH_INTERNAL_ASSERT( + false, "failed to allocate %zu bytes for pytorch_qnnp_operator structure", + sizeof(struct pytorch_qnnp_operator)); + } + + convolution_op = + std::unique_ptr( + convolution); + + // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) + convolution->ukernel_type = ukernel_type; + convolution->groups = groups; + convolution->group_input_channels = group_input_channels; + convolution->group_output_channels = group_output_channels; + convolution->kernel_depth = kernel_depth; + convolution->kernel_height = kernel_height; + convolution->kernel_width = kernel_width; + convolution->stride_depth = kSpatialDim == 3 ? stride_[0] : 1; + convolution->stride_height = stride_[kSpatialDim - 2]; + convolution->stride_width = stride_[kSpatialDim - 1]; + convolution->dilation_depth = kSpatialDim == 3 ? dilation_[0] : 1; + convolution->dilation_height = dilation_[kSpatialDim - 2]; + convolution->dilation_width = dilation_[kSpatialDim - 1]; + convolution->input_padding_height = padding_[kSpatialDim - 2]; + convolution->input_padding_width = padding_[kSpatialDim - 1]; + convolution->input_padding_depth = kSpatialDim == 3 ? 
padding_[0] : 0; + convolution->per_channel = is_per_channel_; + convolution->transpose = transpose_; + + const uint32_t kr = pytorch_qnnp_params.q8conv.kr; + const size_t k_stride = (group_input_channels + (kr - 1)) & -kr; + + size_t zero_size = sizeof(uint8_t) * k_stride; + size_t zero_offset = 0; + + if (transpose_) { + convolution->adjustment_width = output_padding_[1]; + convolution->adjustment_height = output_padding_[0]; + if (group_input_channels < 8) { + zero_size += 8; + zero_offset = 8; + } + } else { + zero_buffer_size = 0; + if (any_padding) { + zero_size = 0; + zero_offset = 0; + if (ukernel_type == pytorch_qnnp_ukernel_type_dwconv) { + const uint32_t cr = pytorch_qnnp_params.q8dw9.cr; + const size_t group_stride = (groups + (cr - 1)) & -cr; + if (groups >= 8) { + zero_size = sizeof(uint8_t) * group_stride; + zero_offset = 0; + } else { + zero_size = sizeof(uint8_t) * group_stride + 8; + zero_offset = sizeof(uint8_t) * 8; + } + } else if ( + ukernel_type == pytorch_qnnp_ukernel_type_conv || + ukernel_type == pytorch_qnnp_ukernel_type_gemm) { + if (group_input_channels >= 8) { + zero_size = sizeof(uint8_t) * k_stride; + zero_offset = 0; + } else { + zero_size = sizeof(uint8_t) * k_stride + 8; + zero_offset = 8; + } + } + } + } + + // NOLINTNEXTLINE(clang-analyzer-optin.portability.UnixAPI) + void* zero_buffer = malloc(zero_size); + if (zero_buffer == nullptr) { + pytorch_qnnp_delete_operator(convolution); + pytorch_qnnp_log_error( + "failed to allocate %zu bytes for zero padding", zero_size); + } + // Need to set to input zero point + // memset(zero_buffer, input_zero_point, zero_size); + zero_buffer_size = zero_size; + convolution->zero_buffer = zero_buffer; + convolution->zero_pointer = (void*)((uintptr_t)zero_buffer + zero_offset); + } + + std::unique_ptr convolution_op; + #ifdef USE_XNNPACK + xnnpack_operator xnnp_convolution_op; + #endif // USE_XNNPACK + std::unique_ptr w; + at::Tensor orig_weight; + at::Tensor bias; + torch::List stride_; + torch::List padding_; + torch::List output_padding_; + torch::List dilation_; + int64_t groups_; + bool transpose_; + bool is_per_channel_; + c10::optional input_scale; + std::vector kernel_; + at::Tensor w_scales; + std::vector w_zero_points; + std::vector requantization_scales; + size_t zero_buffer_size; + + at::Tensor apply( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_relu( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_dynamic( + const at::Tensor& input, + bool reduce_range=false) override; + + std::tuple> unpack() override; + + static c10::intrusive_ptr> prepack( + at::Tensor weight, + c10::optional bias, + torch::List stride, + torch::List padding, + torch::List output_padding, + torch::List dilation, + int64_t groups, + bool transpose); + + torch::List stride() const override { + return stride_; + } + + torch::List padding() const override { + return padding_; + } + + torch::List output_padding() const override { + return output_padding_; + } + + torch::List dilation() const override { + return dilation_; + } + + int64_t groups() const override { + return groups_; + } + + bool transpose() const override { + return transpose_; + } + + bool per_channel() const { + return is_per_channel_; + } + + private: + std::mutex qnnp_mutex_; + template + at::Tensor apply_impl( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point); + +#ifdef USE_XNNPACK + template + at::Tensor 
apply_impl_xnnp( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point); +#endif // USE_XNNPACK +}; + +enum class Activation : uint8_t { NONE = 0, RELU = 1 }; + +#if defined(__ANDROID__) && !defined(__NDK_MAJOR__) +template +inline float Round(const float x) { + return ::nearbyintf(x); +} +inline double Round(const double x) { + return ::nearbyint(x); +} +#else +template +inline T Round(const T x) { + return std::nearbyint(x); +} +#endif + +template +inline T QuantizeValue(float scale, int32_t zero_point, float value) { + const int32_t qmin = std::numeric_limits::min(); + const int32_t qmax = std::numeric_limits::max(); + auto r = zero_point + static_cast(Round(value / scale)); + r = std::max(r, qmin); + r = std::min(r, qmax); + return static_cast(r); +} + +template +inline std::pair activationLimits( + float scale, + int32_t zero_point, + Activation Ac) { + switch (Ac) { + case Activation::NONE: + return {std::numeric_limits::min(), + std::numeric_limits::max()}; + case Activation::RELU: + return {QuantizeValue(scale, zero_point, 0.0), + std::numeric_limits::max()}; + default: +#ifdef _MSC_VER + __assume(0); +#else + __builtin_unreachable(); +#endif + } +} + +namespace at { +namespace native { +namespace qnnp_avgpool_helper { +Tensor qnnpack_avg_pool2d( + Tensor input, + IntArrayRef kernel_size, + IntArrayRef stride, + IntArrayRef padding, + bool ceil_mode, + bool count_include_pad, + c10::optional divisor_override); +} // qnnp_avgpool_helper +} // namespace native +} // namespace at + +namespace { +C10_UNUSED std::vector generate_requantization_scales( + const at::Tensor& weight_scales, + const float input_scale, + const float output_scale, + std::vector& requant_scales) { + // Since weight scale is allocated with padding + // weight_scales.numel() gives us padded num elements. + const auto num_output_channels_padded = weight_scales.numel(); + float *const weight_scales_data = weight_scales.data_ptr(); + if (static_cast(requant_scales.size()) < num_output_channels_padded) { + requant_scales.resize(num_output_channels_padded); + } + for (const auto i : c10::irange(num_output_channels_padded)) { + const auto inverse_output_scale = 1.f /output_scale; + requant_scales[i] = (weight_scales_data[i] * input_scale) * inverse_output_scale; + TORCH_CHECK( + (requant_scales[i] > 0.0f && std::isnormal(requant_scales[i])), + "failed to create op with requantization scale: ", + requant_scales[i], + ": requantization scale must be finite and positive"); + } + return requant_scales; +} + +C10_UNUSED std::pair, at::Tensor> make_zero_points_and_scales_tensor( + const at::Tensor& weight_contig, + bool transpose = false, + uint32_t groups = 1 + ) { + const int out_ch_idx = transpose ? 1 : 0; + const auto num_output_channels = weight_contig.size(out_ch_idx) * (transpose ? groups : 1); + // Add 8 to account for bufferring needed by QNNPACK. + const auto num_output_channels_padded = num_output_channels + 8; + const auto qtype = weight_contig.qscheme(); + std::vector weight_zp(num_output_channels_padded, 0); + // Adjust weight zero point, similar to weight data. 
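// Editorial worked example of the int8 -> uint8 shift applied just below: a
// per-tensor qint8 weight with q_zero_point() == -3 is stored for QNNPACK as
// uint8 with zero point (-3) + 128 == 125, while the 8 padded tail entries
// keep the 0 they were initialized with above.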
+ if (qtype == at::kPerTensorAffine) { + for (const auto i : c10::irange(num_output_channels)) { + weight_zp[i] = (uint8_t)(weight_contig.q_zero_point() + 128); + } + } else if (qtype == at::kPerChannelAffine) { + TORCH_CHECK( + weight_contig.q_per_channel_zero_points().scalar_type() == at::kLong, + "Per channel zero points dtype must be long int."); + const int64_t* per_channel_zero_points = + weight_contig.q_per_channel_zero_points().data_ptr(); + for (const auto i : c10::irange(num_output_channels)) { + weight_zp[i] = (uint8_t)(per_channel_zero_points[i] + 128); + } + } else { + TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme."); + } + at:: Tensor weight_scales = + at::empty( + {num_output_channels_padded}, + at::device(at::kCPU).dtype(at::kFloat)); + float *const weight_scales_data = weight_scales.data_ptr(); + if (qtype == at::kPerTensorAffine) { + for (const auto i : c10::irange(num_output_channels)) { + weight_scales_data[i] = weight_contig.q_scale(); + } + } else if (qtype == at::kPerChannelAffine) { + TORCH_CHECK( + weight_contig.q_per_channel_scales().scalar_type() == at::kDouble, + "Per channel scales dtype must be double."); + const double *const per_channel_scales = + weight_contig.q_per_channel_scales().data_ptr(); + for (const auto i : c10::irange(num_output_channels)) { + weight_scales_data[i] = static_cast(per_channel_scales[i]); + } + } else { + TORCH_INTERNAL_ASSERT(false, "Unsupported quantization scheme."); + } + for (const auto i : c10::irange(num_output_channels, num_output_channels_padded)) { + weight_scales_data[i] = 1.f; + } + return {weight_zp, weight_scales}; +} +} // namespace + +#endif diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/QuantUtils.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/QuantUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..f53efab900be1159bddeabf98114be8280579b96 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/QuantUtils.h @@ -0,0 +1,228 @@ +#pragma once + +#include +#include +#include +#include + +namespace quant_utils { +namespace { + float RawUint16ToFp16(unsigned short value) { + // Convert raw 16 bits half precision floating point number + // to single precision floating point number. + const unsigned short sign_bits = value >> 15; + const unsigned short exponent_bits = value >> 10 & 0x1f; + const unsigned short significand_bits = value & 0x3ff; + + const float sign = sign_bits ? -1 : 1; + const float significand = + 1 + significand_bits * 0.0009765625f; // 0.0009765625f = 0x1p-10 = 2^-10; + const float exponent = exponent_bits - 0xf; + + return sign * std::ldexp(significand, exponent); +} + +template +bool CheckAndSaturate(T max_val, T* element) { + if (*element > max_val) { + *element = max_val; + return true; + } + if (*element < -max_val) { + *element = -max_val; + return true; + } + return false; +} +} +using namespace std; +// A structure to hold quantization parameters 'scale' and 'zero_point'. +// The meaning of these values is as the constants in the quantization equation +// +// real_value = scale * (quantized_value - zero_point) +// +// In other words, 'zero_point' is the quantized value that corresponds +// to the real value 0, and 'scale' is the difference of real values +// corresponding to consecutive quantized values. +struct TensorQuantizationParams { + double scale; + std::int32_t zero_point; + int precision; +}; + +// Use fp16_min as the small scale cutoff because we don't want to use scales in +// fp16 subnormal range. 
This is to be consistent with Glow and FakeLowP +// implementation for NNPI. +constexpr float SMALL_SCALE_THRESHOLD = 6.1e-5f; + +// Following implementation should be identical to fbgemm::ChooseQuantizationParams +inline TensorQuantizationParams ChooseQuantizationParams( + float min, + float max, + int32_t qmin, + int32_t qmax, + bool preserve_sparsity = false, + bool force_scale_power_of_two = false, + bool reduce_range = false) { + TORCH_CHECK( + min <= max, + "In ChooseQuantizationParams, min should be less than or equal to max"); + + if (reduce_range) { + qmin = qmin/2; + qmax = qmax/2; + } + if (min < 0 && max > 0 && preserve_sparsity) { + int symmetric_qmin = -((qmax - qmin) / 2 + 1); + int symmetric_qmax = (qmax - qmin) / 2; + double max_scale = + std::max(fabs(min / symmetric_qmin), fabs(max / symmetric_qmax)); + min = max_scale * symmetric_qmin; + max = max_scale * symmetric_qmax; + } + + // We extend the [min, max] interval to ensure that it contains 0. + // Otherwise, we would not meet the requirement that 0 be an exactly + // representable value. + min = std::min(min, 0.f); + max = std::max(max, 0.f); + + TORCH_CHECK( + qmin < qmax, + "In ChooseQuantizationParams, qmin should be less than qmax"); + + // Use double precision for intermediate computation but use single precision + // in final number to reflect the actual number used during quantization. + double scale = (static_cast(max) - min) / (qmax - qmin); + // If scale is 0 or too small so its reciprocal is infinity, we arbitrary + // adjust the scale to 0.1 . We want to avoid scale's reciprocal being + // infinity because some of fbgemm code pre-computes scale's reciprocal to do + // multiplication instead of division in the time critical part of code. + if (float(scale) == 0.0f || std::isinf(1.0f / float(scale))) { + scale = 0.1; + } + TORCH_CHECK(scale > 0, "quantization scale should be > 0"); + + if (force_scale_power_of_two) { + if (scale < 1) { + scale = 1.0 / (1 << static_cast(floor(log(1.0 / scale) / log(2)))); + } else { + scale = 1 << static_cast(ceil(log(scale) / log(2))); + } + } + + // Cut off small scale + if (scale < SMALL_SCALE_THRESHOLD) { + float org_scale = scale; + scale = SMALL_SCALE_THRESHOLD; + // Adjust the min and max based on the new scale + if (min == 0.0f) { + max = SMALL_SCALE_THRESHOLD * (qmax - qmin); + } else if (max == 0.0f) { + min = -SMALL_SCALE_THRESHOLD * (qmax - qmin); + } else { + float amplifier = SMALL_SCALE_THRESHOLD / org_scale; + min *= amplifier; + max *= amplifier; + } + } + + // Zero-point computation. + // First the initial floating-point computation. The zero-point can be + // determined from solving an affine equation for any known pair + // (real value, corresponding quantized value). + // We know two such pairs: (rmin, qmin) and (rmax, qmax). + // The arithmetic error on the zero point computed from either pair + // will be roughly machine_epsilon * (sum of absolute values of terms) + // so we want to use the variant that adds the smaller terms. + double zero_point_from_min = qmin - min / static_cast(scale); + double zero_point_from_max = qmax - max / static_cast(scale); + double zero_point_from_min_error = + std::abs(qmin) - std::abs(min / static_cast(scale)); + double zero_point_from_max_error = + std::abs(qmax) - std::abs(max / static_cast(scale)); + double initial_zero_point = + zero_point_from_min_error < zero_point_from_max_error + ? 
zero_point_from_min + : zero_point_from_max; + + // for symmetric quantization (preserve_sparsity == true), we force zero_point + // to be a middle value between qmin and qmax. + // If either min or max is 0, then we just use 0 as zero_point. + if (min < 0 && max > 0 && preserve_sparsity) { + initial_zero_point = static_cast(qmin + qmax) / 2; + } + + // Now we need to nudge the zero point to be an integer + // (our zero points are integer, and this is motivated by the requirement + // to be able to represent the real value "0" exactly as a quantized value, + // which is required in multiple places, for example in Im2col with zero + // padding). + int32_t nudged_zero_point = 0; + if (initial_zero_point < qmin) { + nudged_zero_point = qmin; + } else if (initial_zero_point > qmax) { + nudged_zero_point = qmax; + } else { + nudged_zero_point = nearbyint(initial_zero_point); + } + + TensorQuantizationParams result; + result.scale = scale; + result.zero_point = nudged_zero_point; + return result; +} + +// This function helps to convert the Conv1D dimensions usable by the Conv2d op. +constexpr int64_t kConv1dSqueezeDim = 0; +static C10_UNUSED torch::List MakeArgForConv1d(const torch::List& arg, + int64_t base_value) { + TORCH_CHECK(arg.size() > 0, "Argument must have elements."); + torch::List result({arg.get(0), base_value}); + if (arg.size() == 1) { + result[1] = arg.get(0); + } else { + result[1] = arg.get(1); + } + result[kConv1dSqueezeDim] = base_value; + return result; +} + +// The range for using FP16 quantization of weights requires that the elements +// should be in the range of [5.96e-8, 65504]. If it is out of range, then the +// number will be saturated to max or min representable values by FP16. +inline void HandleWeightsSaturation(int64_t N, float* weight) { + const float kFp16Max = RawUint16ToFp16(0x7BFF); + bool found_out_of_range = false; + for (const auto i : c10::irange(N)) { + bool saturate = CheckAndSaturate(kFp16Max, weight + i); + if (saturate) { + found_out_of_range = true; + } + } + if (found_out_of_range) { + TORCH_WARN("FOUND weight out of range "); + } +} + +// Util function for quantizing bias. 
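// In both branches below the bias is quantized with
// scale = weight_scale * input_scale and zero_point = 0, i.e. each stored
// qint32 element is round(bias[i] / (weight_scale * input_scale)). Worked
// example: bias 0.05 with weight_scale 0.01 and input_scale 0.5 is stored as
// round(0.05 / 0.005) = 10.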
+inline at::Tensor QuantizeBias( + bool is_per_channel, + const at::Tensor& bias, + const at::Tensor& weight_contig, + double input_scale) { + at::Tensor qbias; + if (is_per_channel) { + auto bias_quant_scales = + weight_contig.q_per_channel_scales() * input_scale; + auto bias_zp = at::zeros(bias_quant_scales.sizes(), c10::kInt); + qbias = at::native::quantize_per_channel( + bias, bias_quant_scales, bias_zp, 0, c10::kQInt32); + } else { + qbias = at::native::quantize_per_tensor( + bias, weight_contig.q_scale() * input_scale, 0, c10::kQInt32); + } + return qbias; +} + +} // namespace quant_utils diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/QuantizedOps.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/QuantizedOps.h new file mode 100644 index 0000000000000000000000000000000000000000..94023b2f8e9730a37fdbe88f395547ba1d5dd9f7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/QuantizedOps.h @@ -0,0 +1,232 @@ +#include +#include +#include +#include + +namespace at { +namespace native { + +using qrelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/); +using qrelu_leaky_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/, + const Scalar& /*negval_*/); +using qgelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, GeluType /* approximate */); +using qsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, double output_scale, int64_t output_zero_point); +using qhardsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/); +using qclamp_fn = void (*)( + const at::Tensor& /*qx*/, + const Scalar& min, + const Scalar& max, + at::Tensor& /*qy*/); +using qclamp_minmax_fn = void (*)( + const at::Tensor& /*qx*/, + const Scalar& /*min or max*/, + at::Tensor& /*qy*/); +using qthreshold_fn = void (*)( + const at::Tensor& /*qx*/, + const Scalar& threshold, + const Scalar& value, + at::Tensor& /*qy*/); +using qtanh_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/); +using qelu_fn = void(*)( + const at::Tensor& /*qx*/, + const Scalar& /*alpha*/, + const Scalar& /*scale*/, + const Scalar& /*input_scale*/, + at::Tensor& /*qy*/); +using qbinary_fn = + void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Tensor& /*other*/); +using qadd_scalar_fn = + void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Scalar& other /*other*/); +using qhardswish_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/); +using qdropout_fn = void(*)( + const at::Tensor& /*qx*/, + const Scalar& /*p*/, + bool training /*training*/, + at::Tensor& /*qy*/); +using qmaxpool_2d_fn = void (*)( + const Tensor& qx, + int64_t iC, // input/output channels + int64_t iH, + int64_t iW, // input sizes + int64_t oH, + int64_t oW, // output sizes + int64_t kH, + int64_t kW, // kernel size + int64_t sH, + int64_t sW, // strides + int64_t pH, + int64_t pW, // padding + int64_t dH, + int64_t dW, // dilation + Tensor& qy); +using qadaptive_avg_pool2d_fn = void (*)( + const Tensor& qx, + Tensor& qy, + int64_t sizeB, + int64_t sizeC, + int64_t isizeH, + int64_t isizeW, + int64_t osizeH, + int64_t osizeW, + int64_t istrideB, + int64_t istrideC, + int64_t istrideH, + int64_t istrideW); +using qadaptive_avg_pool3d_fn = void (*)( + const Tensor& qx, + Tensor& qy, + int64_t sizeB, + int64_t sizeC, + int64_t isizeD, + int64_t isizeH, + int64_t isizeW, + int64_t osizeD, + int64_t osizeH, + int64_t osizeW, + int64_t istrideB, + int64_t istrideC, + int64_t istrideD, + int64_t istrideH, + int64_t istrideW); +using qavg_pool2d_fn = void (*)( + const Tensor& 
qx, + Tensor& qy, + int64_t nBatch, + int64_t nInputPlane, + int64_t inputWidth, + int64_t inputHeight, + int64_t outputWidth, + int64_t outputHeight, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + bool count_include_pad, + c10::optional divisor_override); + +using qavg_pool3d_fn = void (*)( + const Tensor& qx, + Tensor& qy, + int64_t nBatch, + int64_t nInputPlane, + int64_t inputWidth, + int64_t inputHeight, + int64_t inputDepth, + int64_t outputWidth, + int64_t outputHeight, + int64_t outputDepth, + int kW, + int kH, + int kD, + int dW, + int dH, + int dD, + int padW, + int padH, + int padD, + bool count_include_pad, + c10::optional divisor_override); + +using qupsample_bilinear2d_fn = void (*)( + Tensor& output, + const Tensor& input, + int64_t input_height, + int64_t input_width, + int64_t output_height, + int64_t output_width, + int64_t nbatch, + int64_t channels, + bool align_corners, + c10::optional scales_h, + c10::optional scales_w); + +using qcat_nhwc_fn = Tensor (*)( + const MaterializedITensorListRef& qxs, + int64_t dim, + double scale, + int64_t zero_point); +using qtopk_fn = void(*)(Tensor&, Tensor&, const Tensor&, int64_t, int64_t, bool, bool); + +using qbatch_norm_fn = void(*)(int64_t, int64_t, int64_t, int64_t, int64_t, const Tensor&, const Tensor&, const Tensor&, Tensor&); + +using qnormalize_fn = void (*)( + const Tensor& /* X */, + const Tensor& /* gamma */, + const Tensor& /* beta */, + bool /* affine_per_channel */, + int /* num_channels */, + int /* num_groups */, + int64_t /* M */, + int64_t /* N */, + double /* eps */, + Tensor* /* Y */); + +using qmean_inner_dim_fn = void (*)( + const Tensor& /* X */, + OptionalIntArrayRef /* opt_dim */, + bool /* keepdim */, + c10::optional /* opt_dtype */, + Tensor& /* Y */); + +using qstd_inner_dim_fn = void (*)( + const Tensor& /* X */, + OptionalIntArrayRef /* dim */, + optional /* unbiased */, + bool /* keepdim */, + Tensor& /* Y */); + +using qnormalize_nhwc_fn = void (*)( + const Tensor& /* X */, + const Tensor& /* gamma */, + const Tensor& /* beta */, + bool /* affine_per_channel */, + int /* num_channels */, + int /* num_groups */, + int64_t /* M */, + int64_t /* N */, + double /* eps */, + Tensor* /* Y */); + +using qprelu_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/, + const Tensor& /*qw*/); + +DECLARE_DISPATCH(qadaptive_avg_pool2d_fn, qadaptive_avg_pool2d_nhwc_stub); +DECLARE_DISPATCH(qadaptive_avg_pool3d_fn, qadaptive_avg_pool3d_ndhwc_stub); +DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_relu_stub); +DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_stub); +DECLARE_DISPATCH(qavg_pool2d_fn, qavg_pool2d_nhwc_stub); +DECLARE_DISPATCH(qavg_pool3d_fn, qavg_pool3d_nhwc_stub); +DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_relu_stub); +DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_stub); +DECLARE_DISPATCH(qbinary_fn, qadd_relu_stub); +DECLARE_DISPATCH(qbinary_fn, qadd_stub); +DECLARE_DISPATCH(qbinary_fn, qmul_relu_stub); +DECLARE_DISPATCH(qbinary_fn, qmul_stub); +DECLARE_DISPATCH(qcat_nhwc_fn, qcat_nhwc_stub); +DECLARE_DISPATCH(qcat_nhwc_fn, qcat_relu_nhwc_stub); +DECLARE_DISPATCH(qclamp_fn, qclamp_stub); +DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_min_stub); +DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_max_stub); +DECLARE_DISPATCH(qelu_fn, qelu_stub); +DECLARE_DISPATCH(qhardsigmoid_fn, qhardsigmoid_stub); +DECLARE_DISPATCH(qhardswish_fn, qhardswish_stub); +DECLARE_DISPATCH(qdropout_fn, qdropout_stub); +DECLARE_DISPATCH(qmaxpool_2d_fn, qmaxpool_2d_nhwc_stub); +DECLARE_DISPATCH(qnormalize_fn, 
quantized_normalize_stub); +DECLARE_DISPATCH(qnormalize_nhwc_fn, quantized_groupnorm_nhwc_stub); +DECLARE_DISPATCH(qrelu_fn, qrelu_stub); +DECLARE_DISPATCH(qrelu_leaky_fn, qrelu_leaky_stub); +DECLARE_DISPATCH(qgelu_fn, qgelu_stub); +DECLARE_DISPATCH(qsigmoid_fn, qsigmoid_stub); +DECLARE_DISPATCH(qtanh_fn, qtanh_stub); +DECLARE_DISPATCH(qthreshold_fn, qthreshold_stub); +DECLARE_DISPATCH(qtopk_fn, qtopk_stub); +DECLARE_DISPATCH(qupsample_bilinear2d_fn, qupsample_bilinear2d_nhwc_stub); +DECLARE_DISPATCH(qmean_inner_dim_fn, qmean_inner_dim_stub); +DECLARE_DISPATCH(qstd_inner_dim_fn, qstd_inner_dim_stub); +DECLARE_DISPATCH(qprelu_fn, qprelu_stub); + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/RuyUtils.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/RuyUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..aeb332af4ecae37dde625267b466658f0d250560 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/RuyUtils.h @@ -0,0 +1,21 @@ +#pragma once + +#ifdef USE_RUY_QMATMUL + +#include + +namespace at { +namespace native { +namespace ruy_utils { + +ruy::Context* get_ruy_context(); + +void quantize_multiplier(double scale, + int* multiplier_fixedpoint, + int* multiplier_exponent); + +} // namespace ruy_utils +} // namespace native +} // namespace at + +#endif // USE_RUY_QMATMUL diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/XnnpackUtils.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/XnnpackUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..78f325263f4fc054e8d47bdbf67cad599dc3f3f3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/XnnpackUtils.h @@ -0,0 +1,279 @@ +#pragma once + +#ifdef USE_XNNPACK +#include + +#include +#include + +using xnnpack_operator = at::native::xnnpack::Operator; + +namespace at { +namespace native { +namespace xnnp_utils { + +/* + * Return shape in the same order as the memory format + * e.g. channels_last will return NHWC instead of NCHW + */ +std::vector get_mem_format_aware_shape(const at::Tensor& in); + +/* + * Input is always int8_t, output can be [int8_t, uint8_t]. + * input + offset = output + * int8_t + 128 = uint8_t + * int8_t + 0 = int8_t + */ +template +void q8_copy_int8_weight_and_add_offset(const at::Tensor& in, at::Tensor& out); + +template +Tensor convert_conv_weights_to_channel_last_tensor( + const at::Tensor& src, + int groups, + bool transpose); + +/* + * Series of create wrapper functions to call xnn_create_[de]conv* functions. + */ +C10_ALWAYS_INLINE +enum xnn_status xnnp_create_convolution2d_nhwc( + uint32_t pad_top, + uint32_t pad_right, + uint32_t pad_bottom, + uint32_t pad_left, + uint32_t kernel_h, + uint32_t kernel_w, + uint32_t stride_h, + uint32_t stride_w, + uint32_t dilation_h, + uint32_t dilation_w, + uint32_t groups, + size_t group_input_channels, + size_t group_output_channels, + size_t ip_chan_stride, + size_t op_chan_stride, + int8_t izp, + float ip_scale, + int8_t kzp, + const float* k_scales, + const int8_t* kernel, + const int32_t* bias, + int8_t ozp, + float op_scale, + int8_t op_min, + int8_t op_max, + uint32_t flags, + xnn_operator_t* op, + bool per_channel, + bool transpose) { + /* Symmetric quantization forces kzp = 0 */ + TORCH_CHECK(!kzp, "XNNPACK Q[SC]8 conv kernels expect kernel zero point to be zero."
+ "But got: ", kzp); + + if (transpose) { + TORCH_CHECK(!per_channel, "XNNPACK Q[SC]8 does not have a per channel deconvolution!"); + return xnn_create_deconvolution2d_nhwc_qs8( + pad_top, /* uint32_t output_padding_top */ + pad_right, /* uint32_t output_padding_right */ + pad_bottom, /* uint32_t output_padding_bottom */ + pad_left, /* uint32_t output_padding_left */ + kernel_h, /* uint32_t kernel_height */ + kernel_w, /* uint32_t kernel_width */ + stride_h, /* uint32_t stride_height */ + stride_w, /* uint32_t stride_width */ + dilation_h, /* uint32_t dilation_height */ + dilation_w, /* uint32_t dilation_width */ + groups, /* uint32_t groups */ + group_input_channels, /* size_t group_input_channels */ + group_output_channels, /* size_t group_output_channels */ + ip_chan_stride, /* size_t input_pixel_stride */ + op_chan_stride, /* size_t output_pixel_stride */ + izp, /* int8_t input_zero_point */ + ip_scale, /* float input_scale */ + k_scales[0], /* float kernel_scale */ + kernel, /* const int8_t* kernel */ + bias, /* const int32_t* bias */ + ozp, /* int8_t output_zero_point */ + op_scale, /* float output_scale */ + op_min, /* int8_t output_min */ + op_max, /* int8_t output_max */ + flags, /* uint32_t flags */ + op); /* xnn_operator_t* deconvolution_op_out */ + + } + + if (!per_channel) { + return xnn_create_convolution2d_nhwc_qs8( + pad_top, /* uint32_t input_padding_top */ + pad_right, /* uint32_t input_padding_right */ + pad_bottom, /* uint32_t input_padding_bottom */ + pad_left, /* uint32_t input_padding_left */ + kernel_h, /* uint32_t kernel_height */ + kernel_w, /* uint32_t kernel_width */ + stride_h, /* uint32_t subsampling_height */ + stride_w, /* uint32_t subsampling_width */ + dilation_h, /* uint32_t dilation_height */ + dilation_w, /* uint32_t dilation_width */ + groups, /* uint32_t groups */ + group_input_channels, /* size_t group_input_channels */ + group_output_channels, /* size_t group_output_channels*/ + ip_chan_stride, /* size_t input_channel_stride */ + op_chan_stride, /* size_t output_channel_stride */ + izp, /* int8_t input_zero_point */ + ip_scale, /* float input_scale */ + k_scales[0], /* float kernel_scale */ + kernel, /* const int8_t* kernel */ + bias, /* const int32_t* bias */ + ozp, /* int8_t output_zero_point */ + op_scale, /* float output_scale */ + op_min, /* int8_t output_min */ + op_max, /* int8_t output_max */ + flags, /* uint32_t flags */ + op); /* xnn_operator_t* convolution_op_out */ + } else { /* per_channel */ + return xnn_create_convolution2d_nhwc_qc8( + pad_top, /* uint32_t input_padding_top */ + pad_right, /* uint32_t input_padding_right */ + pad_bottom, /* uint32_t input_padding_bottom */ + pad_left, /* uint32_t input_padding_left */ + kernel_h, /* uint32_t kernel_height */ + kernel_w, /* uint32_t kernel_width */ + stride_h, /* uint32_t subsampling_height */ + stride_w, /* uint32_t subsampling_width */ + dilation_h, /* uint32_t dilation_height */ + dilation_w, /* uint32_t dilation_width */ + groups, /* uint32_t groups */ + group_input_channels, /* size_t group_input_channels */ + group_output_channels, /* size_t group_output_channels*/ + ip_chan_stride, /* size_t input_channel_stride */ + op_chan_stride, /* size_t output_channel_stride */ + izp, /* int8_t input_zero_point */ + ip_scale, /* float input_scale */ + k_scales, /* const float* kernel_scale */ + kernel, /* const int8_t* kernel */ + bias, /* const int32_t* bias */ + ozp, /* int8_t output_zero_point */ + op_scale, /* float output_scale */ + op_min, /* int8_t output_min */ + op_max, /* int8_t 
output_max */ + flags, /* uint32_t flags */ + op); /* xnn_operator_t* convolution_op_out */ + } +} + +/* + * Series of setup wrapper functions to call xnn_setup_[de]conv* functions. + */ +C10_ALWAYS_INLINE +enum xnn_status xnnp_setup_convolution2d_nhwc( + xnn_operator_t op, + size_t batch, + size_t in_h, + size_t in_w, + const int8_t* inp, + int8_t* outp, + pthreadpool_t pt_pool, + bool per_channel = false, + bool transpose = false, + uint32_t adj_h = 0, + uint32_t adj_w = 0) { + if(transpose) { + TORCH_CHECK(!per_channel, "XNNPACK Q[SC]8 does not have a per channel deconvolution!"); + return xnn_setup_deconvolution2d_nhwc_qs8( + op, /* xnn_operator_t deconvolution_op */ + batch, /* size_t batch_size */ + in_h, /* size_t input_height */ + in_w, /* size_t input_width */ + adj_h, /* uint32_t adjustment_height */ + adj_w, /* uint32_t adjustment_width */ + inp, /* const int8_t* input */ + outp, /* int8_t* output */ + pt_pool); /* pthreadpool_t threadpool */ + } + + if (!per_channel) { + return xnn_setup_convolution2d_nhwc_qs8( + op, /* xnn_operator_t convolution_op */ + batch, /* size_t batch_size */ + in_h, /* size_t input_height */ + in_w, /* size_t input_width */ + inp, /* const int8_t* input */ + outp, /* int8_t* output */ + pt_pool); /* pthreadpool_t threadpool */ + } else { /* per_channel */ + return xnn_setup_convolution2d_nhwc_qc8( + op, /* xnn_operator_t convolution_op */ + batch, /* size_t batch_size */ + in_h, /* size_t input_height */ + in_w, /* size_t input_width */ + inp, /* const int8_t* input */ + outp, /* int8_t* output */ + pt_pool); /* pthreadpool_t threadpool */ + } +} + + +/* + * Series of wrapper functions to call xnn_create* and xnn_setup* + * functions for linear + */ +C10_ALWAYS_INLINE +enum xnn_status xnnp_create_fully_connected_nc( + size_t input_channels, + size_t output_channels, + size_t input_stride, + size_t output_stride, + int8_t input_zero_point, + float input_scale, + int8_t kernel_zero_point, + float kernel_scale, + const int8_t* kernel, + const int32_t* bias, + int8_t output_zero_point, + float output_scale, + int8_t output_min, + int8_t output_max, + uint32_t flags, + xnn_operator_t* fully_connected_op_out) { + /* Symmetric quantization forces kzp = 0 */ + TORCH_CHECK(!kernel_zero_point, "XNNPACK QS8 linear kernel expects kernel zero point to be zero." 
+ "But got: ", kernel_zero_point); + return xnn_create_fully_connected_nc_qs8( + input_channels, /* size_t input_channels */ + output_channels, /* size_t output_channels */ + input_stride, /* size_t input_stride */ + output_stride, /* size_t output_stride */ + input_zero_point, /* int8_t input_zero_point */ + input_scale, /* float input_scale */ + kernel_scale, /* float kernel_scale */ + kernel, /* const int8_t* kernel */ + bias, /* const int32_t* bias */ + output_zero_point, /* int8_t output_zero_point */ + output_scale, /* float output_scale */ + output_min, /* int8_t output_min */ + output_max, /* int8_t output_max */ + flags, /* uint32_t flags */ + fully_connected_op_out); /* xnn_operator_t* fully_connected_op_out */ +} + +C10_ALWAYS_INLINE +enum xnn_status xnnp_setup_fully_connected_nc( + xnn_operator_t fully_connected_op, + size_t batch_size, + const int8_t* input, + int8_t* output, + pthreadpool_t threadpool) { + return xnn_setup_fully_connected_nc_qs8( + fully_connected_op, /* xnn_operator_t fully_connected_op */ + batch_size, /* size_t batch_size */ + input, /* const int8_t* input */ + output, /* int8_t* output */ + threadpool); /* pthreadpool_t threadpool */ +} + +} // namespace xnnp_utils +} // namespace native +} // namespace at + +#endif // USE_XNNPACK diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/conv_serialization.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/conv_serialization.h new file mode 100644 index 0000000000000000000000000000000000000000..293aa50856caa12c0ec0e009035846c1c3e55c99 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/conv_serialization.h @@ -0,0 +1,415 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Convolution prepacked parameters serialization. + * + * Version 1 + * + * - Fields: + * 1. weight + * 2. bias + * 3. stride x kSpatialDim + * 4. padding x kSpatialDim + * 5. dilation x kSpatialDim + * 6. groups + * + * Version 2 + * + * - Fields: + * 0. version (string) + * 1. list of non-optional tensors + * 0: packed parameters (int16_t) + * - kSpatialDim + * - stride x kSpatialDim + * - padding x kSpatialDim + * - dilation x kSpatialDim + * - output_padding x kSpatialDim + * - groups + * - transpose (0 or 1) + * 1: weight + * 2. list of optional tensors + * 0: bias + * + * Version 3 + * + * - Fields: + * 0. version (int64_t) + * 1. list of int64_t configuration values + * - kSpatialDim + * - stride x kSpatialDim + * - padding x kSpatialDim + * - dilation x kSpatialDim + * - output_padding x kSpatialDim + * - groups + * - flags (bitmask) + * - (1 << 0) transpose (1 = yes) + * 2. list of optional tensors + * 0: None (helps with type inference) + * 1: weight (this must be present) + * 2: bias + */ + +using ConvParamsSerializationTypeV2 = std::tuple< + // version, for versions 2 and up + std::string, + // non-optional tensors + std::vector, + // optional tensors + std::vector>>; + +using ConvParamsSerializationTypeV3 = std::tuple< + // version, int for versions 3 and up + int64_t, + // configuration values + std::vector, + // optional tensors + std::vector>>; + +// Parses any historical conv packed params format into +// the current format. 
+template +ConvParamsSerializationTypeV3 parse_conv_serialized_state(c10::IValue v) { + + // determine the version based on IValue contents + int version = -1; + if (v.isTuple()) { + const auto& elements = v.toTupleRef().elements(); + if (elements.size() > 0) { + auto firstElement = elements[0]; + if (firstElement.isTensor()) { + version = 1; + } else if (firstElement.isString()) { + std::string version_str = firstElement.toStringRef(); + // note: not parsing the string to automatically handle bad + // inputs + if (version_str == "2") { + version = 2; + } + } else if (firstElement.isInt()) { + auto raw_version = firstElement.toInt(); + if (raw_version == 3) { + version = 3; + } + } + } + } + TORCH_INTERNAL_ASSERT(version != -1, "Unable to parse serialization version"); + + if (version == 1) { + // version 1 - convert to version 3 manually + + const auto& elements = v.toTupleRef().elements(); + + at::Tensor weight = elements[0].toTensor(); + c10::optional bias = elements[1].toOptional(); + torch::List stride_x_kSpatialDim = elements[2].toTensorList(); + torch::List padding_x_kSpatialDim = elements[3].toTensorList(); + torch::List dilation_x_kSpatialDim = elements[4].toTensorList(); + at::Tensor groups = elements[5].toTensor(); + + std::vector non_optional; + std::vector> optional; + + std::vector config_vals; + config_vals.push_back(kSpatialDim); + for (const auto i : c10::irange(stride_x_kSpatialDim.size())) { + auto stride = stride_x_kSpatialDim.get(i); + config_vals.push_back(stride[0].item()); + } + for (const auto i : c10::irange(padding_x_kSpatialDim.size())) { + auto padding = padding_x_kSpatialDim.get(i); + config_vals.push_back(padding[0].item()); + } + for (const auto i : c10::irange(dilation_x_kSpatialDim.size())) { + auto dilation = dilation_x_kSpatialDim.get(i); + config_vals.push_back(dilation[0].item()); + } + // output_padding does not exist in v1, so we fill in a default value + for (const auto i : c10::irange(kSpatialDim)) { + (void)i; // Suppress unused variable + config_vals.push_back(0); + } + config_vals.push_back(groups[0].item()); + // transpose does not exist in v1, so we fill in a default value + config_vals.push_back(0); + + std::vector> tensors; + tensors.emplace_back(); + tensors.emplace_back(weight); + tensors.emplace_back(bias); + + int64_t version = 3; + return std::tie(version, config_vals, tensors); + } else if (version == 2) { + // version 2 + const auto& elements = v.toTupleRef().elements(); + std::vector non_optional = elements[1].toTensorList().vec(); + std::vector> optional; + + if (elements[2].isTensorList()) { + for (const auto& elem : elements[2].toTensorList()) { + optional.emplace_back(static_cast(elem)); + } + } else { + for (const auto& elem : elements[2].toList()) { + optional.emplace_back(static_cast(elem).toOptional()); + } + } + + auto config_a = non_optional[0].accessor(); + std::vector config_vals; + config_vals.reserve(config_a.size(0)); + for (const auto i : c10::irange(config_a.size(0))) { + config_vals.emplace_back(config_a[i]); + } + + auto weight = non_optional[1]; + auto bias = optional[0]; + + std::vector> tensors; + tensors.emplace_back(); + tensors.emplace_back(weight); + tensors.emplace_back(bias); + + int64_t version = 3; + return std::tie(version, config_vals, tensors); + } else if (version == 3) { + return v.to(); + } else { + TORCH_INTERNAL_ASSERT(false, "Unexpected serialized qconv version: ", + version); + } +} + +#define QCONV_SERIALIZATION_VERSION 2 + +#if QCONV_SERIALIZATION_VERSION == 2 +using 
ConvParamsSerializationType = ConvParamsSerializationTypeV2; + +template +ConvParamsSerializationTypeV2 serialize_conv( + const c10::intrusive_ptr>& params) { + + std::string version = "2"; + std::vector non_optional; + std::vector> optional; + + // create a packed int8_t tensor for conv params + std::vector params_vec; + params_vec.push_back(kSpatialDim); + auto stride = params->stride().vec(); + params_vec.insert(params_vec.end(), stride.begin(), stride.end()); + auto padding = params->padding().vec(); + params_vec.insert(params_vec.end(), padding.begin(), padding.end()); + auto dilation = params->dilation().vec(); + params_vec.insert(params_vec.end(), dilation.begin(), dilation.end()); + auto output_padding = params->output_padding().vec(); + params_vec.insert(params_vec.end(), output_padding.begin(), + output_padding.end()); + params_vec.push_back(params->groups()); + params_vec.push_back(params->transpose()); + int64_t vec_size = params_vec.size(); + at::Tensor params_tensor = at::from_blob( + params_vec.data(), {vec_size}, + at::TensorOptions().dtype(at::kShort)) + // clone to retain ownership of the data + .clone(); + + at::Tensor weight; + c10::optional bias; + std::tie(weight, bias) = params->unpack(); + + non_optional.emplace_back(std::move(params_tensor)); + non_optional.emplace_back(std::move(weight)); + optional.emplace_back(std::move(bias)); + + return std::tie(version, non_optional, optional); +} + +#elif QCONV_SERIALIZATION_VERSION == 3 +using ConvParamsSerializationType = ConvParamsSerializationTypeV3; + +template +ConvParamsSerializationTypeV3 serialize_conv( + const c10::intrusive_ptr>& params) { + std::vector config_vals; + config_vals.push_back(kSpatialDim); + auto stride = params->stride().vec(); + config_vals.insert(config_vals.end(), stride.begin(), stride.end()); + auto padding = params->padding().vec(); + config_vals.insert(config_vals.end(), padding.begin(), padding.end()); + auto dilation = params->dilation().vec(); + config_vals.insert(config_vals.end(), dilation.begin(), dilation.end()); + auto output_padding = params->output_padding().vec(); + config_vals.insert(config_vals.end(), output_padding.begin(), + output_padding.end()); + config_vals.push_back(params->groups()); + config_vals.push_back(params->transpose()); + + at::Tensor weight; + c10::optional bias; + std::tie(weight, bias) = params->unpack(); + + std::vector> tensors; + tensors.emplace_back(); + tensors.emplace_back(weight); + tensors.emplace_back(bias); + + int64_t version = 3; + return std::tie(version, config_vals, tensors); +} + +#else +#error "Invalid qconv serialization version." 
+#endif + +template +c10::intrusive_ptr> deserialize_conv( + ConvParamsSerializationTypeV3 state) { + + int64_t version; + std::vector config_vals; + std::vector> tensors; + + std::tie(version, config_vals, tensors) = state; + TORCH_INTERNAL_ASSERT(version == 3, "Unexpected serialized qconv version: ", version); + + TORCH_CHECK(tensors.size() == 3, "Wrong number of tensors", tensors.size()); + c10::optional weight = tensors[1]; + c10::optional bias = tensors[2]; + TORCH_INTERNAL_ASSERT(weight, "Weight should always be present in serialized qconv."); + + torch::List stride, padding, output_padding, dilation; + // skip kSpatialDim + int idx = 1; + for (const auto i : c10::irange(kSpatialDim)) { + (void)i; // Suppress unused variable + stride.emplace_back(config_vals.at(idx)); + idx++; + } + for (const auto i : c10::irange(kSpatialDim)) { + (void)i; // Suppress unused variable + padding.emplace_back(config_vals.at(idx)); + idx++; + } + for (const auto i : c10::irange(kSpatialDim)) { + (void)i; // Suppress unused variable + dilation.emplace_back(config_vals.at(idx)); + idx++; + } + for (const auto i : c10::irange(kSpatialDim)) { + (void)i; // Suppress unused variable + TORCH_INTERNAL_ASSERT(idx < static_cast(config_vals.size()), + "Unexpected index = ", idx, " for config_vals of size ", + config_vals.size()); + output_padding.emplace_back(config_vals.at(idx)); + idx++; + } + int64_t groups = config_vals.at(idx); + idx++; + int64_t flags = config_vals.at(idx); + idx++; + TORCH_INTERNAL_ASSERT(idx == static_cast(config_vals.size()), + "Unexpected length of config_vals, expected ", + idx, + " got ", + config_vals.size()); + + bool transpose = flags & (1 << 0); + + int64_t other_flags = flags & ~(1 << 0); + TORCH_INTERNAL_ASSERT(other_flags == 0, "Unexpected flags set in ", flags, "."); + + auto& ctx = at::globalContext(); + +#ifdef USE_FBGEMM + if (ctx.qEngine() == at::QEngine::X86) { +#if AT_MKLDNN_ENABLED() + bool use_onednn = onednn_utils::should_use_onednn_quant( + weight.value(), transpose, groups, output_padding); + if (use_onednn) { + return PackedConvWeightsOnednn::prepack( + weight.value(), + bias, + stride, + padding, + output_padding, + dilation, + groups, + transpose + ); + } +#endif + return PackedConvWeight::prepack( + weight.value(), + bias, + stride, + padding, + output_padding, + dilation, + groups, + transpose + ); + } // x86 +#endif + +#ifdef USE_FBGEMM + if (ctx.qEngine() == at::QEngine::FBGEMM) { + return PackedConvWeight::prepack( + weight.value(), + bias, + stride, + padding, + output_padding, + dilation, + groups, + transpose + ); + } +#endif // USE_FBGEMM +#ifdef USE_PYTORCH_QNNPACK + if (ctx.qEngine() == at::QEngine::QNNPACK) { + TORCH_CHECK( + kSpatialDim == 2, + "prepack/__setstate__: QNNPACK only supports Conv2d " + "now."); + return PackedConvWeightsQnnp::prepack( + weight.value(), + bias, + stride, + padding, + output_padding, + dilation, + groups, + transpose + ); + } +#endif // USE_PYTORCH_QNNPACK +#if AT_MKLDNN_ENABLED() + if (ctx.qEngine() == at::QEngine::ONEDNN) { + return PackedConvWeightsOnednn::prepack( + weight.value(), + bias, + stride, + padding, + output_padding, + dilation, + groups, + transpose + ); + } +#endif // AT_MKLDNN_ENABLED() +TORCH_CHECK( + false, + "Didn't find engine for when deserializing ConvPackedParams: ", + toString(ctx.qEngine())); +} diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/fbgemm_utils.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/fbgemm_utils.h new file mode 100644 index 
0000000000000000000000000000000000000000..d43409231ab69ee53743f79bbc4d52c8b3610414 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/fbgemm_utils.h @@ -0,0 +1,393 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifdef USE_FBGEMM +#include +#include +#include + +// The struct for the packed weight matrix (PackBMatrix) and the corresponding +// column offsets used for the fully connect layer, which are both prepared in +// the prepacking step to save the computations in the inference. Note the +// column offsets include the sum of the B columns as well as the scalar term +// B_zero_point * K, whereas the row offsets created by +// PackAWithQuantRowOffset/PackAWithIm2Col/PackAWithRowOffset are only the sum +// of the A rows. The column offsets are needed for the asymmetric quantization +// (affine quantization) of input matrix. +// Note that in JIT mode we can think of a way to fuse col_offsets with bias. +struct TORCH_API PackedLinearWeight : public LinearPackedParamsBase { + PackedLinearWeight( + std::unique_ptr> w, + c10::optional bias, + std::vector col_offsets, + std::vector w_scale, + std::vector w_zp, + c10::QScheme q_scheme) + : w(std::move(w)), + bias_(std::move(bias)), + col_offsets(std::move(col_offsets)), + w_scale(std::move(w_scale)), + w_zp(std::move(w_zp)), + q_scheme(std::move(q_scheme)) {} + std::unique_ptr> w; + c10::optional bias_; + std::vector col_offsets; + std::vector w_scale; + std::vector w_zp; + c10::QScheme q_scheme; + + at::Tensor apply( + at::Tensor input, + double output_scale, + int64_t output_zero_point) override; + at::Tensor apply_relu( + at::Tensor input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor& apply_out( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point, + at::Tensor& output) override; + + at::Tensor& apply_relu_out( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point, + at::Tensor& output) override; + + at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false) + override; + at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false) + override; + + std::tuple> unpack() override; + + c10::optional bias() override { + return bias_; + } + + static c10::intrusive_ptr prepack( + at::Tensor weight, + c10::optional bias); + + private: + template + at::Tensor& apply_impl( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point, + at::Tensor& output); + + template + at::Tensor apply_dynamic_impl(at::Tensor input, bool reduce_range = false); +}; + +struct TORCH_API PackedLinearWeightFp16 : public LinearPackedParamsBase { + PackedLinearWeightFp16( + std::unique_ptr w, + c10::optional bias) + : w(std::move(w)), bias_(std::move(bias)) {} + + std::unique_ptr w; + c10::optional bias_; + + at::Tensor apply( + at::Tensor /*input*/, + double /*output_scale*/, + int64_t /*output_zero_point*/) override { + TORCH_INTERNAL_ASSERT(false); + } + at::Tensor apply_relu( + at::Tensor /*input*/, + double /*output_scale*/, + int64_t /*output_zero_point*/) override { + TORCH_INTERNAL_ASSERT(false); + } + + at::Tensor apply_dynamic(at::Tensor input, bool reduce_range = false) + override; + at::Tensor apply_dynamic_relu(at::Tensor input, bool reduce_range = false) + override; + + at::Tensor& apply_dynamic_out( + const at::Tensor& input, + at::Tensor& output, + bool reduce_range = false) override; + at::Tensor& apply_dynamic_relu_out( + const at::Tensor& input, + at::Tensor& output, + bool reduce_range = 
false) override; + + std::tuple> unpack() override; + + c10::optional bias() override { + return bias_; + } + + static c10::intrusive_ptr prepack( + at::Tensor weight, + c10::optional bias); + + void set_bias(c10::optional bias) override; + + private: + template + at::Tensor& apply_dynamic_impl(const at::Tensor& input, at::Tensor& output); +}; + +template +struct TORCH_API PackedConvWeight : public ConvPackedParamsBase { + PackedConvWeight( + std::unique_ptr> w, + c10::optional bias, + torch::List stride, + torch::List padding, + torch::List output_padding, + torch::List dilation, + int64_t groups, + uint8_t transpose, + std::vector col_offsets, + std::vector kernel, + std::vector w_scale, + std::vector w_zp, + c10::QScheme q_scheme) + : w(std::move(w)), + bias(std::move(bias)), + stride_(std::move(stride)), + padding_(std::move(padding)), + output_padding_(std::move(output_padding)), + dilation_(std::move(dilation)), + groups_(groups), + transpose_(transpose), + col_offsets(std::move(col_offsets)), + kernel(std::move(kernel)), + w_scale(std::move(w_scale)), + w_zp(std::move(w_zp)), + q_scheme(q_scheme) {} + + std::unique_ptr> w; + c10::optional bias; + torch::List stride_; + torch::List padding_; + torch::List output_padding_; + torch::List dilation_; + int64_t groups_; + uint8_t transpose_; + std::vector col_offsets; + std::vector kernel; + std::vector w_scale; + std::vector w_zp; + c10::QScheme q_scheme; + + at::Tensor apply( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_relu( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point) override; + + at::Tensor apply_dynamic( + const at::Tensor& input, + bool reduce_range) override; + + std::tuple> unpack() override; + + static c10::intrusive_ptr> prepack( + at::Tensor weight, + c10::optional bias, + torch::List stride, + torch::List padding, + torch::List output_padding, + torch::List dilation, + int64_t groups, + bool transpose); + + const float* GetBiasData(at::Tensor* bias); + + void GetQuantizationParams( + float act_scale, + float out_scale, + std::vector* output_multiplier_float, + std::vector* act_times_w_scale); + + torch::List stride() const override { + return stride_; + } + + torch::List padding() const override { + return padding_; + } + + torch::List output_padding() const override { + return output_padding_; + } + + torch::List dilation() const override { + return dilation_; + } + + int64_t groups() const override { + return groups_; + } + + bool transpose() const override { + return (bool)transpose_; + } + + private: + template + at::Tensor apply_impl( + const at::Tensor& input, + double output_scale, + int64_t output_zero_point); +}; + +// PackWeight: Convert the weight from uint8 to int8. +inline void convert_uint8_int8( + int len, + const uint8_t* src_uint8, + int8_t* dst_int8) { + for (const auto i : c10::irange(len)) { + dst_int8[i] = static_cast(static_cast(src_uint8[i]) - 128); + } +} + +// UnpackWeight: Convert the weight from int8 to uint8. 
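A small usage sketch for this conversion pair (buffer names are illustrative): convert_uint8_int8 above applies dst = src - 128, and convert_int8_uint8, defined just below, undoes the shift.

std::vector<uint8_t> packed_u8 = {0, 128, 255};
std::vector<int8_t> w_s8(packed_u8.size());
convert_uint8_int8(3, packed_u8.data(), w_s8.data());
// w_s8 now holds {-128, 0, 127}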
+inline void convert_int8_uint8( + int len, + const int8_t* src_int8, + uint8_t* dst_uint8) { + for (const auto i : c10::irange(len)) { + dst_uint8[i] = + static_cast(static_cast(src_int8[i]) + 128); + } +} + +namespace at { +namespace native { +namespace fbgemm_utils { + +template +fbgemm::conv_param_t MakeFbgemmConvParam( + int N, + int C, + int M, + const std::vector& image_shape, + int groups, + const std::vector& kernels, + const std::vector& strides, + const std::vector& pads, + const std::vector& dilations, + const std::vector& output_padding = std::vector(kSpatialDim, 0), + bool transposed = false); + +// TODO: Remove functions below when ChannelsLast3d is ready. +Tensor MakeStridedQTensorCPU( + const IntArrayRef& sizes, + const IntArrayRef& strides, + const TensorOptions& options, + QuantizerPtr quantizer); + +Tensor MakeEmptyAffineQuantizedChannelsLast3dTensor( + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + const TensorOptions& options, + double scale, + int64_t zero_point); + +Tensor MakeEmptyPerChannelAffineQuantizedChannelsLast3dTensor( + int64_t N, + int64_t C, + int64_t D, + int64_t H, + int64_t W, + const TensorOptions& options, + const Tensor& scales, + const Tensor& zero_points); + +Tensor ConvertToChannelsLast3dTensor(const Tensor& src); + +template +Tensor TransposeConvTensorUnpackConversion(const Tensor& src, int groups); + +template +Tensor ConvertConvWeightsToChannelLastTensor( + const at::Tensor& src, + int groups, + bool transpose); +} // namespace fbgemm_utils +} // namespace native +} // namespace at + +#endif // USE_FBGEMM + +struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase { + PackedEmbeddingBagWeight( + at::Tensor packed_w, + std::vector w_scale, + std::vector w_zp, + int64_t bit_rate, + c10::QScheme q_scheme, + int64_t version) + : packed_w(std::move(packed_w)), + w_scale(std::move(w_scale)), + w_zp(std::move(w_zp)), + bit_rate_(bit_rate), + q_scheme(q_scheme), + version_(version) { + // NOLINTNEXTLINE(clang-analyzer-cplusplus.Move) + if (!packed_w.is_contiguous()) { + packed_w = packed_w.contiguous(); + } + } + + at::Tensor packed_w; + std::vector w_scale; + std::vector w_zp; + int64_t bit_rate_; + c10::QScheme q_scheme; + int64_t version_; + + at::Tensor unpack() override; + static c10::intrusive_ptr prepack( + at::Tensor weight); + + int64_t bit_rate() const override { + return bit_rate_; + } + + int64_t version() const override { + return version_; + } + + at::Tensor embeddingbag_byte( + const at::Tensor& indices, + const c10::optional& offsets, + bool pruned_weights, + const c10::optional& per_sample_weights_, + const c10::optional& compressed_indices_mapping, + bool include_last_offset, + bool is_embedding_op) override; + + at::Tensor embeddingbag_4bit( + const at::Tensor& indices, + const c10::optional& offsets, + bool pruned_weights, + const c10::optional& per_sample_weights_, + const c10::optional& compressed_indices_mapping, + bool include_last_offset, + bool is_embedding_op) override; +}; diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/init_qnnpack.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/init_qnnpack.h new file mode 100644 index 0000000000000000000000000000000000000000..dbfb406ea55dbb50f97b1e86efb52c337af04847 --- /dev/null +++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/init_qnnpack.h @@ -0,0 +1,13 @@ +#pragma once + +#ifdef USE_PYTORCH_QNNPACK + +namespace at { +namespace native { + +void initQNNPACK(); + +} // namespace native +} // namespace at + 
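// Illustrative call pattern (editorial sketch, not part of the original
// header): QNNPACK-backed operators invoke at::native::initQNNPACK() once
// before creating any pytorch_qnnp_* operator; the initialization is guarded
// internally, so repeated calls are harmless.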
diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/init_qnnpack.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/init_qnnpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbfb406ea55dbb50f97b1e86efb52c337af04847
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/init_qnnpack.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#ifdef USE_PYTORCH_QNNPACK
+
+namespace at {
+namespace native {
+
+void initQNNPACK();
+
+} // namespace native
+} // namespace at
+
+#endif
diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/qembeddingbag.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/qembeddingbag.h
new file mode 100644
index 0000000000000000000000000000000000000000..301b025322a3f31531ae99a590cfbb7a35d9c1e6
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/qembeddingbag.h
@@ -0,0 +1,32 @@
+#include
+
+namespace at {
+namespace native {
+Tensor& embedding_bag_byte_rowwise_offsets_out(
+    Tensor& output,
+    const Tensor& weight,
+    const Tensor& indices,
+    const c10::optional<Tensor>& offsets_in,
+    const bool /* scale_grad_by_freq */,
+    const int64_t /* mode */,
+    bool pruned_weights,
+    const c10::optional<Tensor>& per_sample_weights_,
+    const c10::optional<Tensor>& compressed_indices_mapping,
+    bool include_last_offset);
+
+Tensor& embedding_bag_4bit_rowwise_offsets_out(
+    Tensor& output,
+    const Tensor& weight,
+    const Tensor& indices,
+    const c10::optional<Tensor>& offsets_in,
+    const bool /* scale_grad_by_freq */,
+    const int64_t /* mode */,
+    bool pruned_weights,
+    const c10::optional<Tensor>& per_sample_weights_,
+    const c10::optional<Tensor>& compressed_indices_mapping,
+    bool include_last_offset);
+
+Tensor& qembeddingbag_byte_unpack_out(Tensor& output, const Tensor& packed_weight);
+
+} // native
+} // at
diff --git a/voice_bridge/torch/include/ATen/native/quantized/cpu/qembeddingbag_prepack.h b/voice_bridge/torch/include/ATen/native/quantized/cpu/qembeddingbag_prepack.h
new file mode 100644
index 0000000000000000000000000000000000000000..c52cbae4f2c808548d0460d29a31ec0ed5bb42c7
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/quantized/cpu/qembeddingbag_prepack.h
@@ -0,0 +1,11 @@
+#include
+
+namespace at {
+namespace native {
+
+Tensor& qembeddingbag_byte_prepack_out(Tensor& output, const Tensor& weight);
+
+Tensor qembeddingbag_byte_prepack(const Tensor& weight);
+
+} // namespace native
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/native/verbose_wrapper.h b/voice_bridge/torch/include/ATen/native/verbose_wrapper.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c7ab363f7dd3c8e9f520d3fc3bccdb1e65fa8d6
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/verbose_wrapper.h
@@ -0,0 +1,11 @@
+#ifndef VERBOSE_WRAPPER_H
+#define VERBOSE_WRAPPER_H
+
+namespace torch {
+namespace verbose {
+int _mkl_set_verbose(int enable);
+int _mkldnn_set_verbose(int level);
+} // namespace verbose
+} // namespace torch
+
+#endif // VERBOSE_WRAPPER_H
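The rowwise-offsets entry points above take a flat indices tensor plus an offsets tensor whose entry i marks where bag i begins in indices (with include_last_offset, a trailing entry marks the end). A small sketch of that convention in plain C++:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustration of the indices/offsets convention consumed by the
// embedding_bag_*_rowwise_offsets kernels: offsets partitions the flat
// indices list into bags. Values here are made up for the demo.
int main() {
  std::vector<int64_t> indices = {4, 9, 1, 7, 7}; // rows to gather
  std::vector<int64_t> offsets = {0, 2, 5};       // bag 0 = {4,9}, bag 1 = {1,7,7}
  for (std::size_t b = 0; b + 1 < offsets.size(); ++b) {
    std::cout << "bag " << b << ": rows";
    for (int64_t i = offsets[b]; i < offsets[b + 1]; ++i)
      std::cout << ' ' << indices[i];
    std::cout << '\n';
  }
  return 0;
}
```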
diff --git a/voice_bridge/torch/include/ATen/native/vol2col.h b/voice_bridge/torch/include/ATen/native/vol2col.h
new file mode 100644
index 0000000000000000000000000000000000000000..12718a8f00afccf6450e70321f6a845a1b4793c6
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/native/vol2col.h
@@ -0,0 +1,114 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+
+template <typename T>
+static void vol2col(
+    const T* data_vol,
+    const int64_t channels,
+    const int64_t depth,
+    const int64_t height,
+    const int64_t width,
+    const int64_t depth_col,
+    const int64_t height_col,
+    const int64_t width_col,
+    const int64_t kT,
+    const int64_t kernel_height,
+    const int64_t kernel_width,
+    const int64_t pT,
+    const int64_t pH,
+    const int64_t pW,
+    const int64_t dT,
+    const int64_t dH,
+    const int64_t dW,
+    const int64_t dilationT,
+    const int64_t dilationH,
+    const int64_t dilationW,
+    T* data_col) {
+  int64_t c, t, h, w;
+  int64_t channels_col = channels * kT * kernel_height * kernel_width;
+  for (c = 0; c < channels_col; ++c) {
+    int64_t w_offset = c % kernel_width;
+    int64_t h_offset = (c / kernel_width) % kernel_height;
+    int64_t t_offset = (c / kernel_width / kernel_height) % kT;
+    int64_t c_vol = c / kT / kernel_height / kernel_width;
+    for (t = 0; t < depth_col; ++t) {
+      int64_t t_pad = t * dT - pT + t_offset * dilationT;
+      for (h = 0; h < height_col; ++h) {
+        int64_t h_pad = h * dH - pH + h_offset * dilationH;
+        for (w = 0; w < width_col; ++w) {
+          int64_t w_pad = w * dW - pW + w_offset * dilationW;
+          if (t_pad >= 0 && t_pad < depth && h_pad >= 0 && h_pad < height &&
+              w_pad >= 0 && w_pad < width)
+            data_col[((c * depth_col + t) * height_col + h) * width_col + w] =
+                data_vol
+                    [((c_vol * depth + t_pad) * height + h_pad) * width +
+                     w_pad];
+          else
+            data_col[((c * depth_col + t) * height_col + h) * width_col + w] =
+                0;
+        }
+      }
+    }
+  }
+}
+
+template <typename T>
+static void col2vol(
+    const T* data_col,
+    const int64_t channels,
+    const int64_t depth,
+    const int64_t height,
+    const int64_t width,
+    const int64_t out_depth,
+    const int64_t out_height,
+    const int64_t out_width,
+    const int64_t kT,
+    const int64_t kernel_height,
+    const int64_t kernel_width,
+    const int64_t pT,
+    const int64_t pH,
+    const int64_t pW,
+    const int64_t dT,
+    const int64_t dH,
+    const int64_t dW,
+    const int64_t dilationT,
+    const int64_t dilationH,
+    const int64_t dilationW,
+    T* data_vol) {
+  int64_t c, t, h, w;
+  memset(data_vol, 0, sizeof(T) * depth * height * width * channels);
+  int64_t depth_col = out_depth;
+  int64_t height_col = out_height;
+  int64_t width_col = out_width;
+  int64_t channels_col = channels * kT * kernel_height * kernel_width;
+  for (c = 0; c < channels_col; ++c) {
+    int64_t w_offset = c % kernel_width;
+    int64_t h_offset = (c / kernel_width) % kernel_height;
+    int64_t t_offset = (c / kernel_width / kernel_height) % kT;
+    int64_t c_vol = c / kT / kernel_height / kernel_width;
+    for (t = 0; t < depth_col; ++t) {
+      int64_t t_pad = t * dT - pT + t_offset * dilationT;
+      for (h = 0; h < height_col; ++h) {
+        int64_t h_pad = h * dH - pH + h_offset * dilationH;
+        for (w = 0; w < width_col; ++w) {
+          int64_t w_pad = w * dW - pW + w_offset * dilationW;
+          if (t_pad >= 0 && t_pad < depth && h_pad >= 0 && h_pad < height &&
+              w_pad >= 0 && w_pad < width)
+            data_vol
+                [((c_vol * depth + t_pad) * height + h_pad) * width + w_pad] +=
+                data_col
+                    [((c * depth_col + t) * height_col + h) * width_col + w];
+        }
+      }
+    }
+  }
+}
+
+} // namespace native
+} // namespace at
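vol2col above unfolds every kT x kH x kW receptive field of a 3D volume into one column of data_col so that a 3D convolution reduces to a matrix multiply; col2vol scatter-adds the columns back for the gradient. A tiny usage sketch, assuming this vendored header and its ATen dependencies are on the include path:

```cpp
#include <array>
// Assumed reachable via the vendored include tree shown in this diff.
#include <ATen/native/vol2col.h>

int main() {
  // One 2x2x2 volume, one channel; 2x2x2 kernel, stride 1, no padding,
  // dilation 1 -> exactly one output position, so data_col has
  // channels * kT*kH*kW = 8 rows and depth_col*height_col*width_col = 1
  // column: each kernel tap becomes its own row.
  std::array<float, 8> vol = {0, 1, 2, 3, 4, 5, 6, 7};
  std::array<float, 8> col{};
  at::native::vol2col<float>(
      vol.data(),
      /*channels=*/1, /*depth=*/2, /*height=*/2, /*width=*/2,
      /*depth_col=*/1, /*height_col=*/1, /*width_col=*/1,
      /*kT=*/2, /*kernel_height=*/2, /*kernel_width=*/2,
      /*pT=*/0, /*pH=*/0, /*pW=*/0,
      /*dT=*/1, /*dH=*/1, /*dW=*/1,
      /*dilationT=*/1, /*dilationH=*/1, /*dilationW=*/1,
      col.data());
  // col now equals {0,1,2,3,4,5,6,7}: the volume in row-major tap order.
  return 0;
}
```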
diff --git a/voice_bridge/torch/include/ATen/ops/abs.h b/voice_bridge/torch/include/ATen/ops/abs.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e6916d1548cf9984b2e41bbcc5af5d6a79c6db6
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ops/abs.h
@@ -0,0 +1,45 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::abs(Tensor self) -> Tensor
+inline at::Tensor abs(const at::Tensor & self) {
+    return at::_ops::abs::call(self);
+}
+
+// aten::abs_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & abs_(at::Tensor & self) {
+    return at::_ops::abs_::call(self);
+}
+
+// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::abs_out::call(self, out);
+}
+
+// aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::abs_out::call(self, out);
+}
+
+}
diff --git a/voice_bridge/torch/include/ATen/ops/abs_compositeexplicitautograd_dispatch.h b/voice_bridge/torch/include/ATen/ops/abs_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..525396d407c731a5137f91f307b58e6d620e097f
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ops/abs_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor abs(const at::Tensor & self);
+TORCH_API at::Tensor & abs_(at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ops/abs_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/abs_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0257e11bd0ce2b1d4712d6cd0c4b018110457583
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ops/abs_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ops/abs_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/abs_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..00e5a7b1c264ce43bca5e01ecbe88a563d14c8e0
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ops/abs_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ops/abs_native.h b/voice_bridge/torch/include/ATen/ops/abs_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..12c35067ab384b62d0fd22e1d59b36f3c393547b
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ops/abs_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor abs(const at::Tensor & self);
+TORCH_API at::Tensor & abs_(at::Tensor & self);
+TORCH_API at::Tensor & abs_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor abs_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & abs_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & abs_sparse_(at::Tensor & self);
+TORCH_API at::Tensor abs_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & abs_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & abs_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/voice_bridge/torch/include/ATen/ops/abs_ops.h b/voice_bridge/torch/include/ATen/ops/abs_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb0a3bcf28c239d269e2cef4acbc1b20c1fe81b2
--- /dev/null
+++ b/voice_bridge/torch/include/ATen/ops/abs_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API abs {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API abs_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API abs_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs.out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/absolute.h b/voice_bridge/torch/include/ATen/ops/absolute.h new file mode 100644 index 0000000000000000000000000000000000000000..3ccad9826edf3cca9c8c523d5f37aac33024efcd --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/absolute.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::absolute(Tensor self) -> Tensor +inline at::Tensor absolute(const at::Tensor & self) { + return at::_ops::absolute::call(self); +} + +// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::absolute_out::call(self, out); +} + +// aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::absolute_out::call(self, out); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h b/voice_bridge/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e42fb8193b250136b6f6b5b96c95ac7a0a1b7834 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/absolute_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor absolute(const at::Tensor & self); +TORCH_API at::Tensor & absolute_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & absolute_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & absolute_(at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/absolute_native.h b/voice_bridge/torch/include/ATen/ops/absolute_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a2ae237bba787b66a9f92eff4ec1995308c84a2d --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/absolute_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor absolute(const at::Tensor & self); +TORCH_API at::Tensor & absolute_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & absolute_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/absolute_ops.h b/voice_bridge/torch/include/ATen/ops/absolute_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d310c4ceef4b812fd9670bb0d570fb3c2003c575 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/absolute_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API absolute { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API absolute_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API absolute_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::absolute") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "absolute.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/acos.h b/voice_bridge/torch/include/ATen/ops/acos.h new file mode 100644 index 0000000000000000000000000000000000000000..b9b366a8ebd5b7c7e7c2ddcac37ba1351722d8b6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::acos(Tensor self) -> Tensor +inline at::Tensor acos(const at::Tensor & self) { + return at::_ops::acos::call(self); +} + +// aten::acos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & acos_(at::Tensor & self) { + return at::_ops::acos_::call(self); +} + +// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::acos_out::call(self, out); +} + +// aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::acos_out::call(self, out); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a18c200a82da74105a022caddeec7f9508359c45 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor acos(const at::Tensor & self); +TORCH_API at::Tensor & acos_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acos_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/acos_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5f98c953afb1bb664c888f33ad8c4e948409345e --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor acos(const at::Tensor & self); +TORCH_API at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & acos_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acos_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/acos_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..69b6f73e8566b7b889ba56626dbb7bb41fa415ef --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor acos(const at::Tensor & self); +TORCH_API at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & acos_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acos_meta.h b/voice_bridge/torch/include/ATen/ops/acos_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..5f4b4b4e8035047fa48f4b634b3ea4b453f7b8fe --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_acos : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acos_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/acos_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..923be85b5d4e77dc613f230790fd123a9e79c539 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor acos(const at::Tensor & self); +TORCH_API at::Tensor & acos_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & acos_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & acos_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acos_native.h b/voice_bridge/torch/include/ATen/ops/acos_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1dbf39f8a4565bc4fa43caace5661a3631f34614 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_acos_out : public at::meta::structured_acos { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acos_ops.h b/voice_bridge/torch/include/ATen/ops/acos_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..986f166cc893c0a15f85b9397b5393d778bf480c --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acos_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API acos { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acos") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acos(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API acos_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acos_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acos_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API acos_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acos") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acos.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/acosh.h b/voice_bridge/torch/include/ATen/ops/acosh.h new file mode 100644 index 0000000000000000000000000000000000000000..851cb29b383dfdc1c5e9f493db36e2cb14bf87a9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh.h @@ -0,0 +1,45 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::acosh(Tensor self) -> Tensor +inline at::Tensor acosh(const at::Tensor & self) { + return at::_ops::acosh::call(self); +} + +// aten::acosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & acosh_(at::Tensor & self) { + return at::_ops::acosh_::call(self); +} + +// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & acosh_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::acosh_out::call(self, out); +} + +// aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & acosh_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::acosh_out::call(self, out); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..893ba418cde8351a4662feb90b5481cf9bfa277c --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor acosh(const at::Tensor & self); +TORCH_API at::Tensor & acosh_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acosh_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/acosh_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ae4983cc9b3e8d29b47009639e4b2e99d9278333 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor acosh(const at::Tensor & self); +TORCH_API at::Tensor & acosh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & acosh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & acosh_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acosh_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/acosh_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..043a1c9bb8abc6aba75c9dfde78fd831bdcc7d5a --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor acosh(const at::Tensor & self); +TORCH_API at::Tensor & acosh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & acosh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & acosh_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acosh_meta.h b/voice_bridge/torch/include/ATen/ops/acosh_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..9e80ee6a2e53acabcb2ffe6e878a997f17ac5b82 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_acosh : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acosh_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/acosh_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0f3c52a1e5ac31edcb83e150e3f8bf95b4b96633 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor acosh(const at::Tensor & self); +TORCH_API at::Tensor & acosh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & acosh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & acosh_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acosh_native.h b/voice_bridge/torch/include/ATen/ops/acosh_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bd58403bc4378afaa8df8b720b75a634660d5dcb --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_acosh_out : public at::meta::structured_acosh { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/acosh_ops.h b/voice_bridge/torch/include/ATen/ops/acosh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..574217ea22005352e027d94a892dd6cf9eab649a --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/acosh_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API acosh { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acosh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acosh(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API acosh_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acosh_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acosh_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API acosh_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acosh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acosh.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d.h new file mode 100644 index 0000000000000000000000000000000000000000..4561b19b04878e9a1aa6ae75781d1fc046f357a5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor +inline at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool1d::call(self, output_size); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e84837bf96056eab70a53f18ab8eebadc1bd5f8e --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..902a06deada90dbf1e5021705f390ae3e78c361c --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor adaptive_avg_pool1d(const at::Tensor & self, at::IntArrayRef output_size); +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..73f537de500cb75126ac6b6d71df3a701346c7ac --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool1d_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_avg_pool1d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool1d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d.h new file mode 100644 index 0000000000000000000000000000000000000000..26e6596ce4cc08bb7b07060a89b6c3d7cf7eec5f --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d.h @@ -0,0 +1,55 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRef(output_size), out); +} + +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool2d_out::call(self, c10::fromIntArrayRef(output_size), out); +} + +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d_out::call(self, output_size, out); +} + +// aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool2d_out::call(self, output_size, out); +} + +// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor +inline at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d::call(self, c10::fromIntArrayRef(output_size)); +} + +// aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor +inline at::Tensor adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size) { + return at::_ops::adaptive_avg_pool2d::call(self, output_size); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..93b04891e650fa468be661ac36820ce3caa9f141 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c9a4fe2b177167fc3517555c5993275e4b4d6bb6 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8b7db4ff42e70872c8a361d974018677adee0172 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..ac37410439c051dc81c231c8ed2688fc552ceef4 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor adaptive_avg_pool2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool2d_out_cpu(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & adaptive_avg_pool2d_out_cuda(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & mkldnn_adaptive_avg_pool2d_out_stub(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..eac5ea58c518f4dbf8ef07e41129edc396cc4953 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool2d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// 
@generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_avg_pool2d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); +}; + +struct TORCH_API adaptive_avg_pool2d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..56aa4a7b41796d5fe1a9f96412d25bb0c1c5e54e --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out); +} + +// aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out) { + return at::_ops::adaptive_avg_pool3d_out::call(self, output_size, out); +} + +// aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor +inline at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_avg_pool3d::call(self, output_size); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..89d784cd67777143839b3402cbc7bb3178890169 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self) { + return at::_ops::adaptive_avg_pool3d_backward_grad_input::call(grad_output, self, grad_input); +} + +// aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input) { + return at::_ops::adaptive_avg_pool3d_backward_grad_input::call(grad_output, self, grad_input); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0bdc3b57dbdc573db11947d8c9292e26fd3e3dd7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..19743d58631af08944eef29a372472e2a180a648 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self); +TORCH_API at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..0eaf1492a857399a66187db290fa8863223fc5fe --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out_cpu(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); +TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out_cuda(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5f528f214f115d06ad722f21f0fdf36aba93f4ee --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_avg_pool3d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d0330cb68d986a60c62330822ca13223ec2bf989 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9b63d4019fad36ebe1802b8820bff3c3c5cb440e --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
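// [editor's note] Sketch (not generated code) of how the _ops struct above is used: the public
// C++ shims forward to at::_ops::<name>::call, which enters the dispatcher; redispatch re-enters
// it with an explicit DispatchKeySet and is normally reserved for kernels and autograd internals.
inline at::Tensor & example_backward_grad_input_call(const at::Tensor & grad_output,
                                                     const at::Tensor & self,
                                                     at::Tensor & grad_input) {
  // Equivalent to at::adaptive_avg_pool3d_backward_outf(grad_output, self, grad_input):
  return at::_ops::adaptive_avg_pool3d_backward_grad_input::call(grad_output, self, grad_input);
}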
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2c2a31bef056f0724c9fd6b69ba89a653899837b --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..973abcdf1efd6048ca54ebffd409a83d30c6ca2f --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor & adaptive_avg_pool3d_out_cpu(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & adaptive_avg_pool3d_out_cuda(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor & adaptive_avg_pool3d_out_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e655b6c278d8d7c81a1bb17e09ddca22c468adb9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_avg_pool3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_avg_pool3d_out { + using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out); +}; + +struct TORCH_API adaptive_avg_pool3d { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_avg_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef output_size); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d.h new file mode 100644 index 0000000000000000000000000000000000000000..365370f05de4f1f6f6eb40026ec694f1e5360c02 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool1d::call(self, output_size); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..05fe922a69a6c7a24ff0587d77b0c982d70aca61 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
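// [editor's note] Sketch (not generated code): the max-pool variants return both the pooled
// values and the argmax indices, so the tuple return unpacks naturally with C++17 structured
// bindings. Shapes are illustrative.
inline void example_adaptive_max_pool1d_usage() {
  at::Tensor input = at::randn({2, 3, 16});                      // N, C, L
  auto [output, indices] = at::adaptive_max_pool1d(input, {4});  // output: {2, 3, 4}
  (void)output; (void)indices;  // indices feed the matching backward/unpool-style ops
}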
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dfb45a94ccf8386df4288e91340f73aa67ff29ba --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size); +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..007d4c1b90af81317186dd13bedcc413d6367c69 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool1d_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_max_pool1d { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool1d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)") + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef output_size); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d.h new file mode 100644 index 0000000000000000000000000000000000000000..fd5b24f1d442bf60b42a43e106df00c891cea211 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool2d_out::call(self, output_size, out, indices); +} + +// aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!)
indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_out::call(self, output_size, out, indices); +} + +// aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool2d::call(self, output_size); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..b17a97e7636c4d25a95e59bd34004709f3891020 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output, self, indices, grad_input); +} + +// aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::adaptive_max_pool2d_backward_grad_input::call(grad_output, self, indices, grad_input); +} + +// aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor +inline at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8bc988555d4df387f3c54e96f577e88c87f38ac0 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca344b5ebe18466e734d683f09dda1cf55b87576 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d2464ca09e1416b25cd3beec54c4533c7d3451d5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..f573dc40819b67370a65837dc77b3eb9bb60327f --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_adaptive_max_pool2d_backward : public at::impl::MetaBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +}; + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dc0e2c2c95a5a685421a3fdbea4121a2075f118d --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
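// [editor's note] Sketch (not generated code): the meta:: entry points declared around here
// compute only output metadata (sizes/dtype), never data. One way to exercise that path is with
// "meta"-device tensors, which carry shapes but no storage; assumes a build with meta kernels.
inline void example_meta_shape_inference() {
  auto opts = at::TensorOptions().device(at::kMeta);
  at::Tensor grad_out = at::empty({2, 3, 4, 4}, opts);
  at::Tensor input    = at::empty({2, 3, 8, 8}, opts);
  at::Tensor indices  = at::empty({2, 3, 4, 4}, opts.dtype(at::kLong));
  at::Tensor grad_in  = at::adaptive_max_pool2d_backward(grad_out, input, indices);
  // grad_in.sizes() == {2, 3, 8, 8}; no real allocation or data access occurred.
  (void)grad_in;
}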
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor adaptive_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b4516bf8904e1553e9cce2b806919e35ee9343ba --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_adaptive_max_pool2d_backward_out_cpu : public at::meta::structured_adaptive_max_pool2d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input); +}; +struct TORCH_API structured_adaptive_max_pool2d_backward_out_cuda : public at::meta::structured_adaptive_max_pool2d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..1d5976942a8760fa00b269000a1ad3b7d45605f8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_max_pool2d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API adaptive_max_pool2d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7d5ec49e102c5aad2f38169383f551aaf4ec0e8f --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size); + +} // namespace compositeexplicitautogradnonfunctional
} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..61bb2f393fb4f3fc7af940efa3ae4ea983983734 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c18a5e143f144dad091e90230e93668e662681c3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_meta.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..215891fb78f2d629f6a5af4398a06f4a8b25adab --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_adaptive_max_pool2d : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef output_size); +}; + +} // namespace meta
} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f38ef3f53c5cdf8ee361c43897e2ca5aa78c9c6a --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
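// [editor's note] Sketch (not generated code): the at::cpu:: / at::cuda:: declarations above
// bind directly to the backend kernel, skipping dispatcher overhead; they are only appropriate
// when every argument already lives on that backend and no autograd handling is needed.
inline void example_cpu_direct_call() {
  at::Tensor input = at::randn({1, 3, 8, 8});  // CPU tensor
  auto [output, indices] = at::cpu::adaptive_max_pool2d(input, {2, 2});
  (void)output; (void)indices;
}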
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..86e691af45088836e54babde6d08e2215cf8c789 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_adaptive_max_pool2d_out_cpu : public at::meta::structured_adaptive_max_pool2d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +struct TORCH_API structured_adaptive_max_pool2d_out_cuda : public at::meta::structured_adaptive_max_pool2d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2d581e0802bf5270b23981f7c682106e85b9e536 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool2d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_max_pool2d_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!)
indices) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); +}; + +struct TORCH_API adaptive_max_pool2d { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)") + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef output_size); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d.h new file mode 100644 index 0000000000000000000000000000000000000000..bfa5f528efa90d758590f80042de7dac686205b9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices); +} + +// aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) +inline ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_out::call(self, output_size, out, indices); +} + +// aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size) { + return at::_ops::adaptive_max_pool3d::call(self, output_size); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..2967ff7d8c775db092f29b0d7960491057a4c7b3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input); +} + +// aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input) { + return at::_ops::adaptive_max_pool3d_backward_grad_input::call(grad_output, self, indices, grad_input); +} + +// aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor +inline at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) { + return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dbfdd08cbe32b7d2a5b0325b31558abdf5928bc2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b8a54daf60841981383e559fdfbd50d8c49ef71a --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
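// [editor's note] Sketch (not generated code): the grad_input overloads above write into a
// caller-provided buffer, which lets callers (and autograd internals) reuse allocations.
inline at::Tensor example_pool3d_backward(const at::Tensor & grad_output,
                                          const at::Tensor & input,
                                          const at::Tensor & indices) {
  at::Tensor grad_input = at::zeros_like(input);
  at::adaptive_max_pool3d_backward_out(grad_input, grad_output, input, indices);
  return grad_input;  // same values as at::adaptive_max_pool3d_backward(grad_output, input, indices)
}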
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..57c0d6e74969cdbd57373b088e9dd2fcc9b22354 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_meta.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..c95a0e3d2305340e7647f64c10d24938ae39d815 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_adaptive_max_pool3d_backward : public at::impl::MetaBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +}; + +} // namespace meta
} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..90f1538cbe1ef51fd4400f8e493ce7784541f5a8 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// 
Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor adaptive_max_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +TORCH_API at::Tensor & adaptive_max_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..da3538f6581cd86387e9d75df2c3dff61bd5eac9 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_adaptive_max_pool3d_backward_out_cpu : public at::meta::structured_adaptive_max_pool3d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input); +}; +struct TORCH_API structured_adaptive_max_pool3d_backward_out_cuda : public at::meta::structured_adaptive_max_pool3d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..23f2f1d6c90e4f59b438df4432edabe5169ade37 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_max_pool3d_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input); +}; + +struct TORCH_API adaptive_max_pool3d_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fb791ade54edefa89b33384498c10e2f819f80d5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size); + +} // namespace compositeexplicitautogradnonfunctional
} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4ca942ce0a005a46c4c49f7e3b793faaacc7b660 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b44a051c075db291286ad9e291f67ad7d092fc84 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + +} // namespace cuda
} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_meta.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..d99cae2da8a555fb2d4b5df5e52f46267f2662b5 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_adaptive_max_pool3d : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef output_size); +}; + +} // namespace meta
} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..39c6f984d3eba1778f46b1366f84741963cd6a0d --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace meta { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_native.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..73bf982e94ddf290832d396d2a0c4c4d457a9004 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_adaptive_max_pool3d_out_cpu : public at::meta::structured_adaptive_max_pool3d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +struct TORCH_API structured_adaptive_max_pool3d_out_cuda : public at::meta::structured_adaptive_max_pool3d { +void impl(const at::Tensor & self, at::IntArrayRef output_size, const at::Tensor & out, const at::Tensor & indices); +}; +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_ops.h b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a4dd3bfe5635f18a68b2a93117c1b0fb8fd2ad26 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/adaptive_max_pool3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API adaptive_max_pool3d_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, at::IntArrayRef, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!)
indices) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); + static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices); +}; + +struct TORCH_API adaptive_max_pool3d { + using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)") + static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, at::IntArrayRef output_size); + static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef output_size); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/add.h b/voice_bridge/torch/include/ATen/ops/add.h new file mode 100644 index 0000000000000000000000000000000000000000..bb18002f725e7bff89c63597b0376ca95ae64720 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/add.h @@ -0,0 +1,55 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +inline at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add_Tensor::call(self, other, alpha); +} + +// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::add_out::call(self, other, alpha, out); +} + +// aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::add_out::call(self, other, alpha, out); +} + +// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +inline at::Tensor add(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add_Scalar::call(self, other, alpha); +} + +// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) { + return at::_ops::add_Scalar_out::call(self, other, alpha, out); +} + +// aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & add_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::add_Scalar_out::call(self, other, alpha, out); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/addbmm.h b/voice_bridge/torch/include/ATen/ops/addbmm.h new file mode 100644 index 0000000000000000000000000000000000000000..76a29d9a076e444e5d4af8275d0a762211d984c3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addbmm.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out); +} + +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out); +} + +// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/addbmm_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/addbmm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..37f7c864c2ae13e0fe3b5d63e3b021b5a2af823f --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addbmm_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
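// Hedged usage sketch of the add/addbmm functions declared above, with
// arbitrary shapes: add computes self + alpha*other, and addbmm computes
// beta*self + alpha*sum_b(batch1[b] @ batch2[b]). As elsewhere, _out takes
// the result tensor first while _outf keeps it last in schema order.
#include <ATen/ATen.h>
at::Tensor a = at::ones({2, 3});
at::Tensor b = at::ones({2, 3});
at::Tensor c = at::add(a, b, /*alpha=*/2);             // every element is 3
at::Tensor m  = at::zeros({3, 5});                     // (n, p)
at::Tensor b1 = at::randn({4, 3, 2});                  // (b, n, m)
at::Tensor b2 = at::randn({4, 2, 5});                  // (b, m, p)
at::Tensor r  = at::addbmm(m, b1, b2, /*beta=*/1, /*alpha=*/1);  // (3, 5)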
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addbmm_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/addbmm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9149d75e1d5c56f5c1aa0c3ee9e38e0cdb598256 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addbmm_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addbmm_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/addbmm_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4491fdd247be48c7d52b96254591a22014c558da --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addbmm_meta_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addbmm_native.h b/voice_bridge/torch/include/ATen/ops/addbmm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b8d828727fbac53b7bba1a82d13fa747e484c957 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addbmm_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addbmm_out(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addbmm_ops.h b/voice_bridge/torch/include/ATen/ops/addbmm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c094f1b32272fe3e61b2aca512f980bfb828b671 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addbmm_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API addbmm_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addbmm_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); +}; + +struct TORCH_API addbmm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addbmm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +}; + +struct TORCH_API addbmm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addbmm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv.h b/voice_bridge/torch/include/ATen/ops/addcdiv.h new file mode 100644 index 0000000000000000000000000000000000000000..0dfde6f97a15d6e40232760be023fd81f1c715cb --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out); +} + +// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) { + return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out); +} + +// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +inline at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcdiv::call(self, tensor1, tensor2, value); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5126018e0a0d21e11f418cde969253e7b66ce5c7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv_cpu_dispatch.h b/voice_bridge/torch/include/ATen/ops/addcdiv_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ad933aa205ae5be495d8df2166ec176fcf56880 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
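// Hedged numeric sketch of the addcdiv schema above, which computes
// self + value * tensor1 / tensor2 elementwise; the operands are arbitrary.
#include <ATen/ATen.h>
at::Tensor t  = at::zeros({3});
at::Tensor t1 = at::arange(1, 4, at::kFloat);           // {1, 2, 3}
at::Tensor t2 = at::full({3}, 2.0);                     // {2, 2, 2}
at::Tensor r  = at::addcdiv(t, t1, t2, /*value=*/0.5);  // {0.25, 0.50, 0.75}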
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); + +} // namespace cpu +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv_cuda_dispatch.h b/voice_bridge/torch/include/ATen/ops/addcdiv_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d5253848a8ebbe0e16d11e1ec1e6b14c1c79c949 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); + +} // namespace cuda +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv_meta.h b/voice_bridge/torch/include/ATen/ops/addcdiv_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..714b89bfb8c0d6f99ae82fe42308af7d0c99d90c --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_addcdiv : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value); +}; + +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv_meta_dispatch.h b/voice_bridge/torch/include/ATen/ops/addcdiv_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cccb6190b5c1a61c1b8f0c38a87618f545ad83c7 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes 
that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & addcdiv_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); + +} // namespace meta +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv_native.h b/voice_bridge/torch/include/ATen/ops/addcdiv_native.h new file mode 100644 index 0000000000000000000000000000000000000000..578cf7d73b56de517af031db160113bbf79d5a8e --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_addcdiv_out : public at::meta::structured_addcdiv { +void impl(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/voice_bridge/torch/include/ATen/ops/addcdiv_ops.h b/voice_bridge/torch/include/ATen/ops/addcdiv_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9f1f14aa9fcebe5cbd02646ae697817abb580125 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcdiv_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API addcdiv_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addcdiv") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out); +}; + +struct TORCH_API addcdiv { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addcdiv") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value); +}; + +struct TORCH_API addcdiv_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::addcdiv_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value); +}; + +}} // namespace at::_ops diff --git a/voice_bridge/torch/include/ATen/ops/addcmul.h b/voice_bridge/torch/include/ATen/ops/addcmul.h new file mode 100644 index 0000000000000000000000000000000000000000..97ff72a12906a7f9a78c9806d721f84c55800a74 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcmul.h @@ -0,0 +1,40 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcmul_out::call(self, tensor1, tensor2, value, out); +} + +// aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) { + return at::_ops::addcmul_out::call(self, tensor1, tensor2, value, out); +} + +// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +inline at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) { + return at::_ops::addcmul::call(self, tensor1, tensor2, value); +} + +} diff --git a/voice_bridge/torch/include/ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h b/voice_bridge/torch/include/ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..29d73942a462dc5fff9c7793e87231f0b81742e2 --- /dev/null +++ b/voice_bridge/torch/include/ATen/ops/addcmul_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); +TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/voice_bridge/torch/include/ATen/quantized/QTensorImpl.h b/voice_bridge/torch/include/ATen/quantized/QTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..127fa78de12d16fadf15ec9971f5f77112ffe580 --- /dev/null +++ b/voice_bridge/torch/include/ATen/quantized/QTensorImpl.h @@ -0,0 +1,125 @@ +#pragma once + +#include +#include +#include + +namespace at { + +/** + * QTensorImpl is a TensorImpl for Quantized Tensors, it stores Quantizer which + * specifies the quantization scheme and parameters, for more information please + * see ATen/quantized/Quantizer.h + * + * We'll use QTensor in code or documentation to refer to a Tensor with QTensorImpl. + */ +struct TORCH_API QTensorImpl : public c10::TensorImpl { + public: + QTensorImpl( + Storage&& storage, + DispatchKeySet key_set, + const caffe2::TypeMeta data_type, + QuantizerPtr quantizer); + + // See Note [Enum ImplType] + QTensorImpl( + ImplType type, + Storage&& storage, + DispatchKeySet key_set, + const caffe2::TypeMeta data_type, + QuantizerPtr quantizer); + + + // TODO: Expose in PyTorch Frontend + QuantizerPtr quantizer() { + return quantizer_; + } + + void set_quantizer_(QuantizerPtr quantizer) { + quantizer_ = quantizer; + } + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. 
+ */ + c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive<QTensorImpl>( + Storage(storage()), key_set(), data_type_, quantizer_); + copy_tensor_metadata( + /*src_impl=*/this, + /*dest_impl=*/impl.get(), + /*version_counter=*/version_counter, + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + impl->refresh_contiguous(); + return impl; + } + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const override { + auto impl = c10::make_intrusive<QTensorImpl>( + Storage(storage()), key_set(), data_type_, quantizer_); + copy_tensor_metadata( + /*src_impl=*/this, + /*dest_impl=*/impl.get(), + /*version_counter=*/std::move(version_counter), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); + impl->refresh_numel(); + impl->refresh_contiguous(); + return impl; + } + + /** + * Shallow-copies data from another TensorImpl into this TensorImpl. + * + * For why this function doesn't check this TensorImpl's `allow_tensor_metadata_change_`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override { + AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set())); + auto q_impl = static_cast<const QTensorImpl*>(impl.get()); + copy_tensor_metadata( + /*src_impl=*/q_impl, + /*dest_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + refresh_numel(); + refresh_contiguous(); + } + + private: + QuantizerPtr quantizer_; + + const char* tensorimpl_type_name() const override; + + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / storage_offset) + * from one TensorImpl to another TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const QTensorImpl* src_q_impl, + QTensorImpl* dest_q_impl, + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) { + TensorImpl::copy_tensor_metadata(src_q_impl, dest_q_impl, version_counter, allow_tensor_metadata_change); + + // QTensorImpl-specific fields. + dest_q_impl->quantizer_ = src_q_impl->quantizer_; + } +}; + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/quantized/Quantizer.h b/voice_bridge/torch/include/ATen/quantized/Quantizer.h new file mode 100644 index 0000000000000000000000000000000000000000..05bd39b71223a047a834ad1c5a978e4616328ce3 --- /dev/null +++ b/voice_bridge/torch/include/ATen/quantized/Quantizer.h @@ -0,0 +1,278 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +namespace at { + +/** + * UnknownQuantizer is a placeholder quantizer for functions that implement + * quantization in a two step process. First a tensor is allocated but with + * unknown quantizer, and then the quantization kernel decides what the final + * quantizer will be.
+ */ +struct TORCH_API UnknownQuantizer : public Quantizer { + explicit UnknownQuantizer(ScalarType scalar_type) + : Quantizer(scalar_type) {} + + Tensor quantize(const Tensor& tensor) override; + Tensor dequantize(const Tensor& qtensor) override; + Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override; + QScheme qscheme() const override; + bool equalTo(QuantizerPtr other) const override; +}; + +/** + * UniformQuantizer is the parent class for all uniform quantizers. + * These quantization schemes map float values uniformly to + * the quantized values. For example, the affine quantizer is + * the most commonly used scheme in this category. + */ +struct TORCH_API UniformQuantizer : public Quantizer { + explicit UniformQuantizer(ScalarType scalar_type) : Quantizer(scalar_type) {} +}; + +/** + * NonUniformQuantizer is the parent class for all non-uniform quantizers. + * These quantization schemes may map float values non-uniformly to the quantized + * values. K-means quantization is a representative example in this category. + */ +struct TORCH_API NonUniformQuantizer : public Quantizer { + explicit NonUniformQuantizer(ScalarType scalar_type) : Quantizer(scalar_type) {} +}; + +// There is also StochasticQuantizer which is uniform but not affine + +/** + * AffineQuantizer uses an affine transformation to do quantization. + * + * For quantize: + * Y = clamp(round(X / scale + zero_point), min, max) + * For dequantize: + * X = (Y - zero_point) * scale + */ +struct TORCH_API AffineQuantizer : public UniformQuantizer { + explicit AffineQuantizer(ScalarType scalar_type) : UniformQuantizer(scalar_type) {} +}; + +// Note that we will not have a Symmetric Quantizer in the backend, to reduce +// complications in the quantized kernel implementation. + +/** + * PerTensorAffineQuantizer stores a scale and a zero_point, which is used for + * all the values in the Tensor. + */ +struct TORCH_API PerTensorAffineQuantizer : public AffineQuantizer { + explicit PerTensorAffineQuantizer(ScalarType scalar_type, double scale, int64_t zero_point) + : AffineQuantizer(scalar_type), + scale_(scale), + zero_point_(zero_point) {} + + Tensor quantize(const Tensor& tensor) override; + Tensor dequantize(const Tensor& qtensor) override; + Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override; + + QScheme qscheme() const override { + return kPerTensorAffine; + } + + double scale() const { + return scale_; + } + + int64_t zero_point() const { + return zero_point_; + } + + bool equalTo(QuantizerPtr other) const override { + if (!other.get() || other->qscheme() != kPerTensorAffine) { + return false; + } + auto* other_per_tensor_affine = + static_cast<PerTensorAffineQuantizer*>(other.get()); + return scalar_type() == other_per_tensor_affine->scalar_type() && + scale() == other_per_tensor_affine->scale() && + zero_point() == other_per_tensor_affine->zero_point(); + } + + private: + const double scale_; + // We use int64_t for consistency with Python + const int64_t zero_point_; +}; + +/** + * PerChannelAffineQuantizer is the same as PerTensorAffineQuantizer + * except that we have an independent scale and zero_point parameter + * for each channel. + * + * Also note that per-channel quantization is mostly applied to output channels + * of weights, since per-input-channel weight quantization or per-channel + * quantization of activations can't be efficiently supported in most processors, + * since it requires each multiplication result within a single + * dot-product to have a different scale.
+ */ +struct TORCH_API PerChannelAffineQuantizer : public AffineQuantizer { + explicit PerChannelAffineQuantizer( + ScalarType scalar_type, + Tensor scales, + Tensor zero_points, + int64_t axis) + : AffineQuantizer(scalar_type), + scales_(scales), + zero_points_(zero_points), + axis_(axis) {} + + QScheme qscheme() const override { + return kPerChannelAffine; + } + + Tensor scales() const { + return scales_; + } + + Tensor zero_points() const { + return zero_points_; + } + + int64_t axis() const { + return axis_; + } + + Tensor quantize(const Tensor& tensor) override; + Tensor dequantize(const Tensor& qtensor) override; + Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override; + + bool equalTo(QuantizerPtr other) const override { + if (!other.get() || other->qscheme() != kPerChannelAffine) { + return false; + } + auto* other_per_channel_affine = + static_cast<PerChannelAffineQuantizer*>(other.get()); + return scalar_type() == other_per_channel_affine->scalar_type() && + scales().equal(other_per_channel_affine->scales()) && + zero_points().equal(other_per_channel_affine->zero_points()) && + axis() == other_per_channel_affine->axis(); + } + + protected: + Tensor scales_; + Tensor zero_points_; + const int64_t axis_; +}; + +/** + * PerChannelAffineFloatQParamsQuantizer is the same as PerChannelAffineQuantizer + * except that it expects both scale and zero point to be floating point values. + * + * This quantizer uses the kPerChannelAffineFloatQParams qscheme which is a variant of + * kPerChannelAffine. + * + * The quantize equation in this case looks like: + * Xq = (Xf - zero_point) * inv_scale, where inv_scale = 1.0/scale + * + * Note: Usage of a floating point zero point is useful in cases where 0 doesn't need to + * be exactly represented in the quantized space. We can get additional precision by + * using floating point values for the zero point. + */ +struct TORCH_API PerChannelAffineFloatQParamsQuantizer : public PerChannelAffineQuantizer { + explicit PerChannelAffineFloatQParamsQuantizer( + ScalarType scalar_type, + Tensor scales, + Tensor zero_points, + int64_t axis) + : PerChannelAffineQuantizer(scalar_type, + scales, + zero_points, + axis) {} + + QScheme qscheme() const override { + return kPerChannelAffineFloatQParams; + } + + Tensor quantize(const Tensor& tensor) override; + Tensor dequantize(const Tensor& qtensor) override; + Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override; + + bool equalTo(QuantizerPtr other) const override { + if (!other.get() || other->qscheme() != kPerChannelAffineFloatQParams) { + return false; + } + auto* other_per_channel_float_qparams = + static_cast<PerChannelAffineFloatQParamsQuantizer*>(other.get()); + return scalar_type() == other_per_channel_float_qparams->scalar_type() && + scales().equal(other_per_channel_float_qparams->scales()) && + zero_points().equal(other_per_channel_float_qparams->zero_points()) && + axis() == other_per_channel_float_qparams->axis(); + } +}; + +// This is an internal utility function for getting at the QTensorImpl; +// you should only use this for writing low level +// setters/getters for QTensorImpl fields; otherwise, you should use +// the low level setters/getters that were implemented using this. +// This may be called repeatedly, so make sure it's pretty cheap.
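// A worked numeric sketch of the affine (de)quantization contract documented
// above, assuming a signed int8 range [-128, 127]; the scale and zero_point
// here are hypothetical values, not defaults taken from this header.
#include <algorithm>
#include <cmath>
#include <cstdint>
double scale = 0.1;
int64_t zero_point = 10;
double x = 1.23;
// quantize: Y = clamp(round(X / scale + zero_point), min, max)
int64_t y = std::clamp<int64_t>(
    static_cast<int64_t>(std::nearbyint(x / scale + zero_point)), -128, 127);
// y == 22; dequantize: X = (Y - zero_point) * scale recovers x approximately:
double x_hat = (y - zero_point) * scale;  // 1.2, i.e. quantization error 0.03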
+TORCH_API QTensorImpl* get_qtensorimpl(const TensorBase& self); + +// double and int64_t are because of the native function API, we only have these +// argument types right now in native functions +TORCH_API QuantizerPtr +make_per_tensor_affine_quantizer( + double scale, int64_t zero_point, ScalarType scalar_type); + +TORCH_API QuantizerPtr make_per_channel_affine_quantizer( + const Tensor& scales, + const Tensor& zero_points, + int64_t axis, + ScalarType scalar_type); + +TORCH_API QuantizerPtr make_unknown_quantizer(ScalarType scalar_type); + +// Create a Quantized Tensor given arguments for normal Tensor and a quantizer +TORCH_API Tensor new_qtensor( + IntArrayRef sizes, + const TensorOptions& options, + QuantizerPtr quantizer); + +TORCH_API void set_quantizer_(const Tensor& self, ConstQuantizerPtr quantizer); + +TORCH_API Tensor from_blob_quantized_per_tensor_affine( + void* data, + IntArrayRef sizes, + IntArrayRef strides, + std::function deleter, + const float scale, + const int64_t zeroPoint, + const TensorOptions& options); + +TORCH_API Tensor from_blob_quantized_per_tensor_affine( + void* data, + IntArrayRef sizes, + std::function deleter, + const float scale, + const int64_t zeroPoint, + const TensorOptions& options); + +TORCH_API Tensor from_blob_quantized_per_channel_affine( + void* data, + IntArrayRef sizes, + std::function deleter, + const Tensor& scales, + const Tensor& zero_points, + const int64_t axis, + const TensorOptions& options); + +} // namespace at diff --git a/voice_bridge/torch/include/ATen/record_function.h b/voice_bridge/torch/include/ATen/record_function.h new file mode 100644 index 0000000000000000000000000000000000000000..d0f68371357bdaf8455f3ef957432b433955cdc4 --- /dev/null +++ b/voice_bridge/torch/include/ATen/record_function.h @@ -0,0 +1,701 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { +class TORCH_API OperatorHandle; +} + +namespace at { + +// Kind of record function scope; +enum class C10_API_ENUM RecordScope : uint8_t { + // c10/ATen ops, autograd nodes + FUNCTION = 0, + // Functions/nodes called from the autograd + BACKWARD_FUNCTION, + // TorchScript functions, methods + TORCHSCRIPT_FUNCTION, + // Kernel Function dtype Tag + KERNEL_FUNCTION_DTYPE, + // Torchbind custom class, + CUSTOM_CLASS, + // Generic Build Feature + BUILD_FEATURE, + // Kernel Function dtype Tag + LITE_INTERPRETER, + // User defined scope (e.g. 
with record_function()) + USER_SCOPE, + // Scopes for static runtime, a specialized TorchScript interpreter + STATIC_RUNTIME_OP, + STATIC_RUNTIME_MODEL, + NUM_SCOPES, // must be the last in the list +}; + +} // namespace at + +namespace std { +template <> +struct hash { + size_t operator()(const at::RecordScope& sc) const { + return static_cast(sc); + } +}; +} // namespace std + +namespace at { + +struct TORCH_API StringView { + StringView() : StringView(nullptr) {} + explicit StringView(const char* str_ptr) + : owned_str_ptr_(nullptr), str_ptr_(str_ptr) {} + explicit StringView(std::string str) + : owned_str_ptr_(std::make_shared(std::move(str))), + str_ptr_(owned_str_ptr_->c_str()) {} + + const char* str() const { + return str_ptr_; + } + + friend std::ostream& operator<<(std::ostream& os, const StringView& dt) { + os << dt.str(); + return os; + } + + friend bool operator==(const StringView& lhs, const StringView& rhs) { + return strcmp(lhs.str(), rhs.str()) == 0; + } + + friend bool operator!=(const StringView& lhs, const StringView& rhs) { + return !(lhs == rhs); + } + + private: + std::shared_ptr owned_str_ptr_; + const char* str_ptr_; +}; + +// Soft limit on the number of callbacks to use; +constexpr std::size_t kSoftLimitCallbacks = 4; + +// An abstract base class for various observer contexts that can be attached to +// the RecordFunction. +struct ObserverContext { + virtual ~ObserverContext() {} + + protected: + ObserverContext() {} +}; + +typedef c10::SmallVector CallbackHandles; +typedef c10::SmallVector, kSoftLimitCallbacks> + ObserverContextList; +typedef uint64_t RecordFunctionHandle; +struct RecordFunction; + +// +// PyTorch callbacks/observers API: +// + +/** + * RecordFunctionCallback represents a pair of callbacks to be used with + * RecordFunction, members: + * start, end - the callbacks to run when entering and exiting the scope; + * optionally, the start callback may return an ObserverContext which will + * be passed to the end callback, use appropriate constructor accordingly. + * needs_inputs - whether the callbacks need the inputs passed from the + * observed function/range; NOTE: passing the inputs incurs an additional + * overhead; sampling_probability - if not 1.0, then the callback is + * probabilistically sampled to run; NOTE: start and end callbacks always run as + * a pair and are sampled together; scopes - types of scopes to execute the + * callbacks on (see RecordScope); passing empty set means the callbacks will be + * executed for all possible scope types should_run - optional function that + * returns whether this callback should run; overwrites the effect of setting + * sampling_probability + */ +class TORCH_API RecordFunctionCallback { + public: + using StartCallback = + std::unique_ptr (*)(const RecordFunction&); + using EndCallback = void (*)(const RecordFunction&, ObserverContext*); + + // This interface supports observers that require passing an ObserverContext + // between start and end callbacks. 
+ explicit RecordFunctionCallback( + StartCallback start, + EndCallback end = nullptr) + : start_(start), end_(end) { + scopes_.fill(true); + } + + RecordFunctionCallback& needsInputs(bool needs_inputs) { + needs_inputs_ = needs_inputs; + return *this; + } + + RecordFunctionCallback& needsOutputs(bool needs_outputs) { + needs_outputs_ = needs_outputs; + return *this; + } + + RecordFunctionCallback& needsIds(bool needs_ids) { + needs_ids_ = needs_ids; + return *this; + } + + RecordFunctionCallback& samplingProb(double sampling_prob) { + TORCH_CHECK( + sampling_prob >= 0.0 && sampling_prob <= 1.0, + "Invalid sampling probability"); + sampling_prob_ = sampling_prob; + return *this; + } + + RecordFunctionCallback& scopes( + const std::unordered_set>& scopes) { + if (!scopes.empty()) { + scopes_.fill(false); + for (auto sc : scopes) { + scopes_[static_cast(sc)] = true; + } + } else { + scopes_.fill(true); + } + return *this; + } + + bool needsInputs() const { + return needs_inputs_; + } + + bool needsOutputs() const { + return needs_outputs_; + } + + bool needsIds() const { + return needs_ids_; + } + + double samplingProb() const { + return sampling_prob_; + } + + bool checkScope(RecordScope sc) const { + return scopes_[(size_t)sc]; + } + + StartCallback start() const { + return start_; + } + + EndCallback end() const { + return end_; + } + + private: + StartCallback start_; + EndCallback end_; + double sampling_prob_ = 1.0; + std::array(RecordScope::NUM_SCOPES)> scopes_ = {}; + bool needs_inputs_ = false; + bool needs_outputs_ = false; + bool needs_ids_ = false; +}; + +// Notes: +// - two types of callbacks are provided: thread local and global +// - thread local callbacks are added/removed only for the given thread +// and are stored locally for each thread and separately from the list +// of the global callbacks +// - global callbacks are stored in a single per process list and are +// invoked by every RecordFunction, in addition to the thread local +// callbacks specific to the given thread +// - we allow the added callbacks to be sampled, by specifying a sampling +// probability for each callback pair, if the start callback is +// not picked to run, the corresponding end callback won't be called +// - a typical use case for the global callbacks is passive monitoring +// in the background (e.g. fleet-wide monitoring), without focusing on +// the specific piece of code +// - in contrast, thread local callbacks are enabled locally, on demand, +// for the specific piece of code (range) and are not sampled +// - a typical use case for thread local callbacks is profiler and code +// execution tracer +// - note, thread local callbacks are automatically propagated with +// ThreadLocalState across JIT continuations and async tasks (at::launch) + +typedef uint64_t CallbackHandle; + +constexpr CallbackHandle INVALID_CALLBACK_HANDLE{0}; + +// It is unnecessary to use atomic operations for enabling +// thread-local function callbacks. Moreover, it prevents saving to +// ThreadLocalState because std::atomic is non-copyable. +struct RecordFunctionCallbacksEntry { + RecordFunctionCallbacksEntry(RecordFunctionCallback&& cb, CallbackHandle h) + : callback_(cb), handle_(h) {} + + RecordFunctionCallback callback_; + bool enabled_{true}; + CallbackHandle handle_; +}; + +// Holds pairs (callbacks, unique_id) +using RecordFunctionCallbacks = std::vector; + +// Generated by the callback managers to determine which functions to run. 
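// Hedged sketch of registering an observer with the callback API above;
// onEnter/onExit and installObserver are hypothetical names, and per the
// notes above a global callback would normally be added during program
// initialization.
#include <cstdio>
std::unique_ptr<at::ObserverContext> onEnter(const at::RecordFunction& fn) {
  std::printf("enter %s\n", fn.name());
  return nullptr;  // no per-invocation context carried to the end callback
}
void onExit(const at::RecordFunction& fn, at::ObserverContext* /*ctx*/) {
  std::printf("exit %s\n", fn.name());
}
void installObserver() {  // hypothetical init hook
  at::CallbackHandle h = at::addGlobalCallback(
      at::RecordFunctionCallback(&onEnter, &onExit)
          .samplingProb(1.0)
          .scopes({at::RecordScope::FUNCTION}));
  (void)h;  // keep the handle if the callback should be removed later
}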
+struct StepCallbacks { + StepCallbacks() = default; + StepCallbacks(uint64_t thread_id, RecordScope scope) + : thread_id_{thread_id}, scope_{scope} {} + + bool empty() const { + return callbacks_.empty(); + } + + struct StartEndPair { + RecordFunctionCallback::StartCallback start_; + RecordFunctionCallback::EndCallback end_; + }; + + using StartEndPairs = c10::SmallVector; + + StartEndPairs callbacks_; + uint64_t thread_id_{0}; + RecordScope scope_{RecordScope::FUNCTION}; + bool needs_inputs_{false}; + bool needs_outputs_{false}; + bool needs_ids_{false}; +}; + +struct TORCH_API RecordFunction { + // Default constructor is used with before function called afterwards: + // scope - record scope that this function tracks + // pre_sampled - whether this RecordFunction was already pre-sampled with + // kLowProb probability + explicit RecordFunction(RecordScope scope = RecordScope::FUNCTION); + explicit RecordFunction(StepCallbacks&& step_callbacks); + + template + void before( + F fn, + c10::ArrayRef args, + int64_t current_sequence_nr = -1) { + if (!isActive()) { + return; + } + inputs_ = args; + before(fn, current_sequence_nr); + } + + template + void before( + F fn, + const std::vector* args, + int64_t current_sequence_nr = -1) { + before( + std::move(fn), + c10::ArrayRef(args->data(), args->size()), + current_sequence_nr); + } + + // Destructor calls end callbacks + virtual ~RecordFunction(); + + RecordFunction(const RecordFunction&) = delete; + RecordFunction& operator=(const RecordFunction&) = delete; + + const char* name() const; + + int64_t seqNr() const { + return sequence_nr_; + } + + c10::ArrayRef inputs() const { +#ifndef NDEBUG + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + inputs_valid_, "Called inputs() outside RecordFunction start callback"); +#endif + return inputs_; + } + + const std::vector& outputs() const { + return outputs_; + } + + void setOutputs(std::vector&& outputs) { + outputs_ = std::move(outputs); + } + + void setOutputs(c10::ArrayRef outputs) { + outputs_ = outputs.vec(); + } + + size_t num_inputs() const; + size_t num_outputs() const; + + // Retrieves the thread_id that this RecordFunction ran start callbacks with. + // Useful for writing thread safe end callbacks that may be potentially + // executed in a different thread (async ops) + uint64_t threadId() const { + return step_callbacks_.thread_id_; + } + + // For backward functions - thread id of the corresponding forward function, + // or zero otherwise; + // used alongside with sequence number to correlate backward functions with + // the forward ones + uint64_t forwardThreadId() const { + return fwd_thread_id_; + } + + void setForwardThreadId(uint64_t thread_id) { + fwd_thread_id_ = thread_id; + } + + RecordScope scope() const { + return step_callbacks_.scope_; + } + + // Returns logical thread_id for the current thread + static uint64_t currentThreadId(); + + // Internal functions, do not use directly; + // used in python's context manager + + // before functions initialize RecordFunction members and call + // start callbacks + using schema_ref_t = std::reference_wrapper; + void before(const char* name, int64_t sequence_nr = -1); + void before(std::string name, int64_t sequence_nr = -1); + void before(schema_ref_t schema, int64_t sequence_nr = -1); + + // Sets node ID for distributed profiling + static void setDefaultNodeId(int64_t defaultNodeId); + // Gets node ID for distributed profiling + static int64_t getDefaultNodeId(); + + // Calls end callbacks. 
After end(), accessors will no longer provide useful + // results. + void end(); + + // Internal-only, used only to force async event for distributed events + // profiling. + void _setAsync(); + + // Returns whether this RecordFunction corresponds to an async event or not. + bool isAsync() const; + + // Internal-only, used to denote out variant used for Static Runtime execution + void _setStaticRuntimeOutVariant(); + bool isStaticRuntimeOutVariant() const; + + RecordFunctionHandle handle() const { + return handle_; + } + + c10::optional<c10::OperatorName> operator_name() const; + + // This method returns a copy of the FunctionSchema and can be expensive. + c10::optional<c10::FunctionSchema> operator_schema() const; + + void setHandle(RecordFunctionHandle handle) { + handle_ = handle; + } + + // Whether this RecordFunction runs any callbacks. + bool isActive() const { + return !step_callbacks_.empty(); + } + + bool needsInputs() const { + return step_callbacks_.needs_inputs_; + } + + bool needsOutputs() const { + return step_callbacks_.needs_outputs_; + } + + int64_t debugHandle() const { + return debug_handle_; + } + + void setDebugHandle(int64_t debug_handle) { + debug_handle_ = debug_handle; + } + + void invalidateInputs() { +#ifndef NDEBUG + inputs_valid_ = false; +#endif + } + + private: + void runStartCallbacks(); + + StepCallbacks step_callbacks_; + + // In cases when RecordFunction might be active but we chose not to + // use the observers (e.g. operator is not observed), this boolean + // flag is used to check whether the start callbacks were called + bool called_start_callbacks_ = false; + +#ifndef NDEBUG + bool inputs_valid_ = false; +#endif + + // Stores various ObserverContext objects with event metadata for callbacks. + ObserverContextList ctx_; + + c10::variant<std::string, schema_ref_t> fn_; + + int64_t sequence_nr_ = -1; + c10::ArrayRef<const c10::IValue> inputs_; + std::vector<c10::IValue> outputs_; + + // For backward functions - thread id of the forward function + uint64_t fwd_thread_id_ = 0; + + // Unique id for this RecordFunction, used in callbacks to track start + // and end of ranges + RecordFunctionHandle handle_{0}; + + // Whether this record_function corresponds to an async event or not. Async + // events can complete in different threads or follow a future-like pattern + // of use. + bool is_async_{false}; + + // Debug handles are used for lazy annotation of module hierarchy + // and callstack. + // This is specifically useful for mobile runtime, where generated + // debug handles can be lazily symbolicated using debug information + int64_t debug_handle_{-1}; + + // Whether this RecordFunction is used for an out variant run with + // Static Runtime + bool is_static_runtime_out_variant_{false}; +}; + +TORCH_API StepCallbacks getStepCallbacks(RecordScope scope); + +TORCH_API c10::optional<StepCallbacks> getStepCallbacksUnlessEmpty( + RecordScope scope); + +namespace detail { +template <typename Inputs, typename F, typename... Args> +void record_function_with_scope( + RecordFunction& guard, + F fn, + const Inputs& inputs, + Args&&... args) { + if (guard.needsInputs()) { + guard.before( + fn, + c10::ArrayRef<const c10::IValue>(inputs.data(), inputs.size()), + std::forward<Args>(args)...); + } else { + guard.before(fn, std::forward<Args>(args)...); + } +} + +template <typename Inputs, typename F, typename... Args> +void record_function_with_scope_and_debug_handle( + RecordFunction& guard, + F fn, + int64_t debug_handle, + const Inputs& inputs, + Args&&...
args) { + guard.setDebugHandle(debug_handle); + if (guard.needsInputs()) { + guard.before( + fn, + c10::ArrayRef<const c10::IValue>(inputs.data(), inputs.size()), + std::forward<Args>(args)...); + } else { + guard.before(fn, std::forward<Args>(args)...); + } +} + +template <typename F, typename... Args> +void record_function_with_scope( + RecordFunction& guard, + F fn, + c10::ArrayRef<const c10::IValue> inputs, + Args&&... args) { + return record_function_with_scope< + c10::ArrayRef<const c10::IValue>, + F, + Args...>(guard, std::move(fn), inputs, std::forward<Args>(args)...); +} + +template <typename F, typename... Args> +void record_function_with_scope_and_debug_handle( + RecordFunction& guard, + F fn, + int64_t debug_handle, + c10::ArrayRef<const c10::IValue> inputs, + Args&&... args) { + return record_function_with_scope_and_debug_handle< + c10::ArrayRef<const c10::IValue>, + F, + Args...>( + guard, std::move(fn), debug_handle, inputs, std::forward<Args>(args)...); +} + +} // namespace detail + +// optional argument - function's seq_no +#define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) \ + at::RecordFunction guard(scope); \ + if (guard.isActive()) { \ + ::at::detail::record_function_with_scope( \ + guard, fn, inputs, ##__VA_ARGS__); \ + } + +#define RECORD_FUNCTION(fn, inputs, ...) \ + RECORD_FUNCTION_WITH_SCOPE( \ + at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) + +#define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) \ + RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) + +// Custom user scopes in C++; similar to Python's 'with record_function("..."):' +#define RECORD_USER_SCOPE(fn) \ + RECORD_FUNCTION_WITH_SCOPE( \ + at::RecordScope::USER_SCOPE, fn, c10::ArrayRef<const c10::IValue>{}) + +// RECORD_USER_SCOPE with inputs +#define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) \ + RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) + +// Helper macro to pass in a debug handle that is used to +// post-process events +#define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( \ + scope, fn, debug_handle, inputs, ...) \ + at::RecordFunction guard(scope); \ + if (guard.isActive()) { \ + ::at::detail::record_function_with_scope_and_debug_handle( \ + guard, fn, debug_handle, inputs, ##__VA_ARGS__); \ + } + +// Helper macros to record LITE INTERPRETER scope events with debug handles +#define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( \ + fn, debug_handle, inputs) \ + RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( \ + at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) + +/** + * addThreadLocalCallback adds a thread local callback to run with + * RecordFunction, returns handle to use with removeThreadLocalCallback + */ +TORCH_API CallbackHandle addThreadLocalCallback(RecordFunctionCallback cb); + +/** + * hasThreadLocalCallbacks returns whether there are callbacks registered + * with addThreadLocalCallback + */ +TORCH_API bool hasThreadLocalCallbacks(); + +/** + * clearThreadLocalCallbacks removes all thread local callbacks + */ +TORCH_API void clearThreadLocalCallbacks(); + +/** + * addGlobalCallback adds a global callback to run with RecordFunction: + * + * WARNING: not thread safe; typically it should be called + * only during the program initialization + */ +TORCH_API CallbackHandle addGlobalCallback(RecordFunctionCallback cb); + +/** + * removeCallback removes a callback given the handle returned by + * addThreadLocalCallback or addGlobalCallback; + * + * WARNING: not thread safe; no other code can run simultaneously + */ +TORCH_API void removeCallback(CallbackHandle handle); + +/** + * Prevent the given callback from executing. If handle is invalid, + * does nothing.
+ */ +TORCH_API void disableCallback(CallbackHandle handle); + +/** + * Allow the given callback, previously disabled with disableCallback, to + * execute again. If handle is invalid, does nothing. + */ +TORCH_API void reenableCallback(CallbackHandle handle); + +/** + * hasGlobalCallbacks returns whether there are global callbacks + * registered with addGlobalCallback + */ +TORCH_API bool hasGlobalCallbacks(); + +/** + * clearGlobalCallbacks removes all global callbacks + */ +TORCH_API void clearGlobalCallbacks(); + +// for both thread local and global callbacks +TORCH_API bool hasCallbacks(); +TORCH_API void clearCallbacks(); + +/** + * enableRecordFunction enables RecordFunction thread locally + */ +TORCH_API void enableRecordFunction(bool enable = true); + +/** + * isRecordFunctionEnabled returns whether RecordFunction + * is enabled thread locally + */ +TORCH_API bool isRecordFunctionEnabled(); + +class TORCH_API RecordFunctionGuard { + public: + explicit RecordFunctionGuard(bool is_enabled = true) + : prev_value_(isRecordFunctionEnabled()) { + enableRecordFunction(is_enabled); + } + + virtual ~RecordFunctionGuard() { + enableRecordFunction(prev_value_); + } + + private: + bool prev_value_ = false; +}; + +class TORCH_API DisableRecordFunctionGuard : public RecordFunctionGuard { + public: + DisableRecordFunctionGuard() : RecordFunctionGuard(false) {} + virtual ~DisableRecordFunctionGuard() {} +}; + +struct TORCH_API RecordFunctionTLS { + // Thread local vector of callbacks, holds pairs (callbacks, unique_id); + // must be sorted in increasing handle order + RecordFunctionCallbacks sorted_tls_callbacks_; + + bool tls_record_function_enabled_ = true; +}; + +TORCH_API const RecordFunctionTLS& get_record_function_tls_(); + +TORCH_API void set_record_function_tls_(const RecordFunctionTLS& tls); + +TORCH_API void set_record_function_seed_for_testing(uint32_t seed); + +} // namespace at diff --git a/voice_bridge/torch/include/c10/core/Allocator.h b/voice_bridge/torch/include/c10/core/Allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..3ea27fcb892650b2a7b791a760f97505068ebe98 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/Allocator.h @@ -0,0 +1,269 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +namespace c10 { + +// A DataPtr is a unique pointer (with an attached deleter and some +// context for the deleter) to some memory, which also records which +// device its data is for. +// +// nullptr DataPtrs can still have a nontrivial device; this allows +// us to treat zero-size allocations uniformly with non-zero allocations.
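A minimal sketch of that last point (illustrative only, not part of the vendored header): even a null DataPtr, as defined just below, can carry a concrete device, which is what lets zero-size allocations be handled uniformly with real ones.

#include <c10/core/Allocator.h>

void zero_size_allocation_example() {
  // No memory attached, but the DataPtr still reports CUDA device 0.
  c10::DataPtr empty(nullptr, c10::Device(c10::DeviceType::CUDA, 0));
  bool has_data = static_cast<bool>(empty); // false: nullptr payload
  c10::Device dev = empty.device();         // still Device(CUDA, 0)
  (void)has_data;
  (void)dev;
}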
+// +class C10_API DataPtr { + private: + c10::detail::UniqueVoidPtr ptr_; + Device device_; + + public: + // Choice of CPU here is arbitrary; if there's an "undefined" device + // we could use that too + DataPtr() : ptr_(), device_(DeviceType::CPU) {} + DataPtr(void* data, Device device) : ptr_(data), device_(device) {} + DataPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter, Device device) + : ptr_(data, ctx, ctx_deleter), device_(device) {} + void* operator->() const { + return ptr_.get(); + } + void clear() { + ptr_.clear(); + } + void* get() const { + return ptr_.get(); + } + void* get_context() const { + return ptr_.get_context(); + } + void* release_context() { + return ptr_.release_context(); + } + std::unique_ptr&& move_context() { + return ptr_.move_context(); + } + operator bool() const { + return static_cast(ptr_); + } + template + T* cast_context(DeleterFnPtr expected_deleter) const { + return ptr_.cast_context(expected_deleter); + } + DeleterFnPtr get_deleter() const { + return ptr_.get_deleter(); + } + /** + * Compare the deleter in a DataPtr to expected_deleter. + * If it matches, replace the deleter with new_deleter + * and return true; otherwise, does nothing and returns + * false. + * + * In general, it is not safe to unconditionally set the + * deleter on a DataPtr, because you don't know what + * the deleter is, and thus will have a hard time properly + * disposing of the deleter without storing the original + * deleter (this is difficult to do, because DeleterFnPtr + * is not a closure, and because the context on DataPtr is + * only a single word, you generally don't have enough + * space to store both the original deleter and its context). + * However, in some cases, you know /exactly/ what the deleter + * is, and you have a new deleter that manually wraps + * the old one. In this case, you can safely swap the deleter + * after asserting that the deleters line up. + * + * What are the requirements on new_deleter? It must still + * properly dispose of the void* pointer passed in as its argument, + * where void* is whatever the context of the original deleter + * is. So in general, you expect the new deleter to look something + * like this: + * + * [](void* ptr) { + * some_new_stuff(ptr); + * get_orig_allocator()->raw_deleter(ptr); + * } + * + * Note that it won't work to close over the original + * allocator; you don't have enough space to do that! Also, + * it's unsafe to assume that the passed in pointer in + * question is the memory pointer in question; it might not + * be; be sure to read the source code of the Allocator + * in question to confirm this. + */ + C10_NODISCARD bool compare_exchange_deleter( + DeleterFnPtr expected_deleter, + DeleterFnPtr new_deleter) { + return ptr_.compare_exchange_deleter(expected_deleter, new_deleter); + } + Device device() const { + return device_; + } + // Unsafely mutates the device on a DataPtr. Under normal use, + // you should never actually need to call this function. 
+ // We need this for the implementation of the hack detailed + // in Note [Masquerading as CUDA] + void unsafe_set_device(Device device) { + device_ = device; + } +}; + +// NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a +// CPU nullptr + +inline bool operator==(const DataPtr& dp, std::nullptr_t) noexcept { + return !dp; +} +inline bool operator==(std::nullptr_t, const DataPtr& dp) noexcept { + return !dp; +} +inline bool operator!=(const DataPtr& dp, std::nullptr_t) noexcept { + return dp; +} +inline bool operator!=(std::nullptr_t, const DataPtr& dp) noexcept { + return dp; +} + +// Note [raw_allocate/raw_deallocate and Thrust] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Thrust's support for custom allocators requires us to write something +// like this: +// +// class ThrustAllocator { +// char* allocate(size_t); +// void deallocate(char*, size_t); +// }; +// +// This is not good for our unique_ptr based allocator interface, as +// there is no way to get to the context when we free. +// +// However, in some cases the context is exactly the same as +// the data pointer. In this case, we can support the "raw" +// allocate and deallocate interface. This is what +// raw_deleter signifies. By default, it returns a nullptr, which means that +// the raw interface is not implemented. Be sure to implement it whenever +// possible, or the raw interface will be incorrectly reported as unsupported, +// when it is actually possible. + +struct C10_API Allocator { + virtual ~Allocator() = default; + + virtual DataPtr allocate(size_t n) const = 0; + + // If this returns a non-null pointer, it means that allocate() + // is guaranteed to return a unique_ptr with this deleter attached; + // it means the raw_allocate and raw_deallocate APIs are safe to use. + // This function MUST always return the same BoundDeleter. + virtual DeleterFnPtr raw_deleter() const { + return nullptr; + } + void* raw_allocate(size_t n) { + auto dptr = allocate(n); + AT_ASSERT(dptr.get() == dptr.get_context()); + return dptr.release_context(); + } + void raw_deallocate(void* ptr) { + auto d = raw_deleter(); + AT_ASSERT(d); + d(ptr); + } +}; + +// This context is used to generate DataPtr which have arbitrary +// std::function deleters associated with them. In some user facing +// functions, we give a (user-friendly) interface for constructing +// tensors from external data which take an arbitrary std::function +// deleter. Grep for InefficientStdFunctionContext to find these +// occurrences. +// +// This context is inefficient because we have to do a dynamic +// allocation of InefficientStdFunctionContext, on top of the dynamic +// allocation which is implied by std::function itself. +struct C10_API InefficientStdFunctionContext { + std::unique_ptr<void, std::function<void(void*)>> ptr_; + InefficientStdFunctionContext( + std::unique_ptr<void, std::function<void(void*)>>&& ptr) + : ptr_(std::move(ptr)) {} + static DataPtr makeDataPtr( + void* ptr, + const std::function<void(void*)>& deleter, + Device device); +}; + +/** Set the allocator for DeviceType `t`. The passed in allocator pointer is + * expected to have static lifetime; this function does NOT take ownership + * of the raw pointer. (The reason for this is to prevent existing pointers + * to an allocator of a particular device from being invalidated when + * SetAllocator is called.) + * + * Also note that this is not thread-safe, and we assume this function will + * only be called during initialization.
+ * + * The 'priority' flag is introduced when we want to overwrite the default + * allocator, since the allocators are set statically. The default priority + * is 0, which means the lowest. Only higher or equal priority can overwrite + * existing ones. + */ +C10_API void SetAllocator(DeviceType t, Allocator* alloc, uint8_t priority = 0); +C10_API Allocator* GetAllocator(const DeviceType& t); + +template +struct AllocatorRegisterer { + explicit AllocatorRegisterer(Allocator* alloc) { + SetAllocator(t, alloc); + } +}; + +#define REGISTER_ALLOCATOR(t, f) \ + namespace { \ + static c10::AllocatorRegisterer g_allocator_d(f); \ + } + +// An interface for reporting thread local memory usage +// per device +struct C10_API MemoryReportingInfoBase : public c10::DebugInfoBase { + MemoryReportingInfoBase(); + virtual ~MemoryReportingInfoBase() {} + + /** + * alloc_size corresponds to the size of the ptr. + * + * total_allocated corresponds to total allocated memory. + * + * total_reserved corresponds to total size of memory pool, both used and + * unused, if applicable. + */ + virtual void reportMemoryUsage( + void* ptr, + int64_t alloc_size, + int64_t total_allocated, + int64_t total_reserved, + Device device) = 0; + + virtual void reportOutOfMemory( + int64_t alloc_size, + int64_t total_allocated, + int64_t total_reserved, + Device device); + + virtual bool memoryProfilingEnabled() const = 0; +}; + +C10_API bool memoryProfilingEnabled(); +C10_API void reportMemoryUsageToProfiler( + void* ptr, + int64_t alloc_size, + int64_t total_allocated, + int64_t total_reserved, + Device device); + +C10_API void reportOutOfMemoryToProfiler( + int64_t alloc_size, + int64_t total_allocated, + int64_t total_reserved, + Device device); + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/AutogradState.h b/voice_bridge/torch/include/c10/core/AutogradState.h new file mode 100644 index 0000000000000000000000000000000000000000..a1d13a42891da77d88ca940e07d648289c05a80a --- /dev/null +++ b/voice_bridge/torch/include/c10/core/AutogradState.h @@ -0,0 +1,50 @@ +#pragma once + +#include + +#include + +namespace c10 { + +// Structure used to pack all the thread local boolean +// flags used by autograd +struct C10_API AutogradState { + static AutogradState& get_tls_state(); + static void set_tls_state(AutogradState state); + + AutogradState(bool grad_mode, bool inference_mode, bool fw_grad_mode) + : grad_mode_(grad_mode), + inference_mode_(inference_mode), + fw_grad_mode_(fw_grad_mode) {} + + void set_grad_mode(bool enabled) { + grad_mode_ = enabled; + } + + void set_fw_grad_mode(bool enabled) { + fw_grad_mode_ = enabled; + } + + void set_inference_mode(bool enabled) { + inference_mode_ = enabled; + } + + bool get_grad_mode() const { + return grad_mode_; + } + + bool get_fw_grad_mode() const { + return fw_grad_mode_; + } + + bool get_inference_mode() const { + return inference_mode_; + } + + private: + bool grad_mode_ : 1; + bool inference_mode_ : 1; + bool fw_grad_mode_ : 1; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/Backend.h b/voice_bridge/torch/include/c10/core/Backend.h new file mode 100644 index 0000000000000000000000000000000000000000..331130879138d2db5af74f132757a5db5a5eec50 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/Backend.h @@ -0,0 +1,329 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace c10 { + +/** + * This legacy enum class defines the set of backends supported by old school, + * code generated Type-based ATen. 
A "backend" in this sense roughly + * corresponds to the cartesian product of (device type, layout), but restricted + * only to combinations which we actually have kernels for. Backend does NOT + * include dtype. + * + * The reason we are sunsetting this enum class is because it doesn't allow for + * open registration; e.g., if you want to add SparseXLA, you'd have to + * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is + * the replacement for Backend which supports open registration. + * + * NB: The concept of 'Backend' here disagrees with the notion of backend + * exposed to users in torch.backends. Backend here is something like "CPU" + * or "SparseCUDA"; backend in torch.backends is something like "MKL" or + * "CUDNN". + */ +enum class Backend { + CPU, + CUDA, + HIP, + VE, + FPGA, + IPU, + XPU, + SparseCPU, + SparseCUDA, + SparseCsrCPU, + SparseCsrCUDA, + SparseHIP, + SparseVE, + SparseXPU, + ORT, + XLA, + Vulkan, + Metal, + Meta, + QuantizedCPU, + QuantizedCUDA, + QuantizedXPU, + Undefined, + MkldnnCPU, + MPS, + HPU, + Lazy, + PrivateUse1, + NumOptions +}; + +static inline Backend dispatchKeyToBackend(DispatchKey t) { + if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) { + return Backend::CPU; + } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) { + return Backend::CUDA; + } else if (t == DispatchKey::HIP) { + return Backend::HIP; + } else if (t == DispatchKey::VE) { + return Backend::VE; + } else if (t == DispatchKey::FPGA) { + return Backend::FPGA; + } else if (t == DispatchKey::ORT) { + return Backend::ORT; + } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) { + return Backend::XLA; + } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) { + return Backend::Lazy; + } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) { + return Backend::MPS; + } else if (t == DispatchKey::Vulkan) { + return Backend::Vulkan; + } else if (t == DispatchKey::Metal) { + return Backend::Metal; + } else if (t == DispatchKey::Meta) { + return Backend::Meta; + } else if (t == DispatchKey::SparseCPU) { + return Backend::SparseCPU; + } else if (t == DispatchKey::SparseCUDA) { + return Backend::SparseCUDA; + } else if (t == DispatchKey::SparseHIP) { + return Backend::SparseHIP; + } else if (t == DispatchKey::SparseVE) { + return Backend::SparseVE; + } else if (t == DispatchKey::SparseCsrCPU) { + return Backend::SparseCsrCPU; + } else if (t == DispatchKey::SparseCsrCUDA) { + return Backend::SparseCsrCUDA; + } else if (t == DispatchKey::MkldnnCPU) { + return Backend::MkldnnCPU; + } else if (t == DispatchKey::QuantizedCPU) { + return Backend::QuantizedCPU; + } else if (t == DispatchKey::QuantizedCUDA) { + return Backend::QuantizedCUDA; + } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) { + return Backend::IPU; + } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) { + return Backend::XPU; + } else if (t == DispatchKey::SparseXPU) { + return Backend::SparseXPU; + } else if (t == DispatchKey::QuantizedXPU) { + return Backend::QuantizedXPU; + } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) { + return Backend::HPU; + } else if (t == DispatchKey::PrivateUse1) { + return Backend::PrivateUse1; + } else if (t == DispatchKey::Undefined) { + return Backend::Undefined; + } else { + TORCH_CHECK(false, "Unrecognized tensor type ID: ", t); + } +} + +static inline DispatchKey backendToDispatchKey(Backend b) { + switch (b) { + case Backend::CPU: + return DispatchKey::CPU; + case 
Backend::CUDA: + return DispatchKey::CUDA; + case Backend::HIP: + return DispatchKey::HIP; + case Backend::VE: + return DispatchKey::VE; + case Backend::FPGA: + return DispatchKey::FPGA; + case Backend::ORT: + return DispatchKey::ORT; + case Backend::XLA: + return DispatchKey::XLA; + case Backend::Lazy: + return DispatchKey::Lazy; + case Backend::IPU: + return DispatchKey::IPU; + case Backend::XPU: + return DispatchKey::XPU; + case Backend::SparseXPU: + return DispatchKey::SparseXPU; + case Backend::SparseCPU: + return DispatchKey::SparseCPU; + case Backend::SparseCUDA: + return DispatchKey::SparseCUDA; + case Backend::SparseHIP: + return DispatchKey::SparseHIP; + case Backend::SparseVE: + return DispatchKey::SparseVE; + case Backend::SparseCsrCPU: + return DispatchKey::SparseCsrCPU; + case Backend::SparseCsrCUDA: + return DispatchKey::SparseCsrCUDA; + case Backend::MkldnnCPU: + return DispatchKey::MkldnnCPU; + case Backend::Vulkan: + return DispatchKey::Vulkan; + case Backend::Metal: + return DispatchKey::Metal; + case Backend::Meta: + return DispatchKey::Meta; + case Backend::QuantizedCPU: + return DispatchKey::QuantizedCPU; + case Backend::QuantizedCUDA: + return DispatchKey::QuantizedCUDA; + case Backend::Undefined: + return DispatchKey::Undefined; + case Backend::MPS: + return DispatchKey::MPS; + case Backend::HPU: + return DispatchKey::HPU; + case Backend::PrivateUse1: + return DispatchKey::PrivateUse1; + default: + throw std::runtime_error("Unknown backend"); + } +} + +static inline DeviceType backendToDeviceType(Backend b) { + switch (b) { + case Backend::CPU: + return DeviceType::CPU; + case Backend::CUDA: + return DeviceType::CUDA; + case Backend::HIP: + return DeviceType::HIP; + case Backend::VE: + return DeviceType::VE; + case Backend::FPGA: + return DeviceType::FPGA; + case Backend::ORT: + return DeviceType::ORT; + case Backend::XLA: + return DeviceType::XLA; + case Backend::Lazy: + return DeviceType::Lazy; + case Backend::SparseCPU: + return DeviceType::CPU; + case Backend::SparseCUDA: + return DeviceType::CUDA; + case Backend::SparseHIP: + return DeviceType::HIP; + case Backend::SparseVE: + return DeviceType::VE; + case Backend::SparseCsrCPU: + return DeviceType::CPU; + case Backend::SparseCsrCUDA: + return DeviceType::CUDA; + case Backend::IPU: + return DeviceType::IPU; + case Backend::XPU: + case Backend::SparseXPU: + case Backend::QuantizedXPU: + return DeviceType::XPU; + case Backend::MkldnnCPU: + case Backend::QuantizedCPU: + return DeviceType::CPU; + case Backend::QuantizedCUDA: + return DeviceType::CUDA; + case Backend::Vulkan: + return DeviceType::Vulkan; + case Backend::Metal: + return DeviceType::Metal; + case Backend::Meta: + return DeviceType::Meta; + case Backend::MPS: + return DeviceType::MPS; + case Backend::HPU: + return DeviceType::HPU; + case Backend::PrivateUse1: + return DeviceType::PrivateUse1; + case Backend::Undefined: + TORCH_CHECK(false, "Undefined backend is not a valid device type"); + default: + TORCH_CHECK(false, "Unknown backend"); + } +} + +// TODO: This probably shouldn't actually be static inline +static inline const char* toString(Backend b) { + switch (b) { + case Backend::CPU: + return "CPU"; + case Backend::CUDA: + return "CUDA"; + case Backend::HIP: + return "HIP"; + case Backend::VE: + return "VE"; + case Backend::FPGA: + return "FPGA"; + case Backend::XPU: + return "XPU"; + case Backend::IPU: + return "IPU"; + case Backend::ORT: + return "ORT"; + case Backend::XLA: + return "XLA"; + case Backend::Lazy: + return "Lazy"; + case 
Backend::MPS: + return "MPS"; + case Backend::SparseCPU: + return "SparseCPU"; + case Backend::SparseCUDA: + return "SparseCUDA"; + case Backend::SparseHIP: + return "SparseHIP"; + case Backend::SparseVE: + return "SparseVE"; + case Backend::SparseXPU: + return "SparseXPU"; + case Backend::SparseCsrCPU: + return "SparseCsrCPU"; + case Backend::SparseCsrCUDA: + return "SparseCsrCUDA"; + case Backend::MkldnnCPU: + return "MkldnnCPU"; + case Backend::Vulkan: + return "Vulkan"; + case Backend::Metal: + return "Metal"; + case Backend::Meta: + return "Meta"; + case Backend::QuantizedCPU: + return "QuantizedCPU"; + case Backend::QuantizedCUDA: + return "QuantizedCUDA"; + case Backend::QuantizedXPU: + return "QuantizedXPU"; + case Backend::HPU: + return "HPU"; + case Backend::PrivateUse1: + return "PrivateUseOne"; + default: + return "UNKNOWN_BACKEND"; + } +} + +static inline bool isSparse(Backend b) { + switch (b) { + case Backend::SparseXPU: + case Backend::SparseCPU: + case Backend::SparseCUDA: + case Backend::SparseHIP: + case Backend::SparseVE: + return true; + default: + return false; + } +} + +static inline bool isSparseCsr(Backend b) { + switch (b) { + case Backend::SparseCsrCPU: + case Backend::SparseCsrCUDA: + return true; + default: + return false; + } +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/CPUAllocator.h b/voice_bridge/torch/include/c10/core/CPUAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..a899401298180c6127922cffb8beda32d5c9ac59 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/CPUAllocator.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +#include +#include // legacy, update dependents to include this directly +#include + +// TODO: rename to c10 +C10_DECLARE_bool(caffe2_report_cpu_memory_usage); + +namespace c10 { + +using MemoryDeleter = void (*)(void*); + +// A helper function that is basically doing nothing. +C10_API void NoDelete(void*); + +// A simple struct that is used to report C10's memory allocation, +// deallocation status and out-of-memory events to the profiler +class C10_API ProfiledCPUMemoryReporter { + public: + ProfiledCPUMemoryReporter() {} + void New(void* ptr, size_t nbytes); + void OutOfMemory(size_t nbytes); + void Delete(void* ptr); + + private: + std::mutex mutex_; + std::unordered_map size_table_; + size_t allocated_ = 0; + size_t log_cnt_ = 0; +}; + +C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter(); + +// Get the CPU Allocator. +C10_API at::Allocator* GetCPUAllocator(); +// Sets the CPU allocator to the given allocator: the caller gives away the +// ownership of the pointer. +C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0); + +// Get the Default CPU Allocator +C10_API at::Allocator* GetDefaultCPUAllocator(); + +// Get the Default Mobile CPU Allocator +C10_API at::Allocator* GetDefaultMobileCPUAllocator(); + +// The CPUCachingAllocator is experimental and might disappear in the future. +// The only place that uses it is in StaticRuntime. 
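As a concrete illustration of the c10::Allocator interface from Allocator.h above, here is a minimal custom CPU allocator sketch (illustrative only; MyCpuAllocator is a hypothetical name and error handling is omitted). Because the data pointer doubles as the deleter context, the raw_allocate/raw_deallocate interface stays usable:

#include <cstdlib>
#include <c10/core/Allocator.h>

struct MyCpuAllocator final : c10::Allocator {
  static void deleter(void* ptr) {
    std::free(ptr);
  }
  c10::DataPtr allocate(size_t n) const override {
    void* data = std::malloc(n);
    // data is also the deleter context, so get() == get_context() holds,
    // which is exactly what raw_allocate() asserts.
    return {data, data, &deleter, c10::Device(c10::DeviceType::CPU)};
  }
  c10::DeleterFnPtr raw_deleter() const override {
    return &deleter;
  }
};

Such an allocator could then be installed with c10::SetCPUAllocator(new MyCpuAllocator(), /*priority=*/1); per the declarations above, the caller gives away ownership and the pointer is expected to stay alive for the lifetime of the program.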
+// Set the CPU Caching Allocator +C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0); +// Get the CPU Caching Allocator +C10_API Allocator* GetCPUCachingAllocator(); + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/CompileTimeFunctionPointer.h b/voice_bridge/torch/include/c10/core/CompileTimeFunctionPointer.h new file mode 100644 index 0000000000000000000000000000000000000000..6314e3e77082918fa2529d71ecdb9f886dcabc7e --- /dev/null +++ b/voice_bridge/torch/include/c10/core/CompileTimeFunctionPointer.h @@ -0,0 +1,56 @@ +#pragma once + +#include + +namespace c10 { + +/** + * Represent a function pointer as a C++ type. + * This allows using the function pointer as a type + * in a template; calling it from inside the template + * allows the compiler to inline the call because it + * knows the function pointer at compile time. + * + * Example 1: + * int add(int a, int b) {return a + b;} + * using Add = TORCH_FN_TYPE(add); + * template <class Func> struct Executor { + * int execute(int a, int b) { + * return Func::func_ptr()(a, b); + * } + * }; + * Executor<Add> executor; + * EXPECT_EQ(3, executor.execute(1, 2)); + * + * Example 2: + * int add(int a, int b) {return a + b;} + * template <class Func> int execute(Func, int a, int b) { + * return Func::func_ptr()(a, b); + * } + * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2)); + */ +template <class FuncType_, FuncType_* func_ptr_> +struct CompileTimeFunctionPointer final { + static_assert( + guts::is_function_type<FuncType_>::value, + "TORCH_FN can only wrap function types."); + using FuncType = FuncType_; + + static constexpr FuncType* func_ptr() { + return func_ptr_; + } +}; + +template <class T> +struct is_compile_time_function_pointer : std::false_type {}; +template <class FuncType, FuncType* func_ptr> +struct is_compile_time_function_pointer< + CompileTimeFunctionPointer<FuncType, func_ptr>> : std::true_type {}; + +} // namespace c10 + +#define TORCH_FN_TYPE(func) \ + ::c10::CompileTimeFunctionPointer< \ + std::remove_pointer_t<std::remove_reference_t<decltype(func)>>, \ + func> +#define TORCH_FN(func) TORCH_FN_TYPE(func)() diff --git a/voice_bridge/torch/include/c10/core/CopyBytes.h b/voice_bridge/torch/include/c10/core/CopyBytes.h new file mode 100644 index 0000000000000000000000000000000000000000..c49763f69dc34c7dedeceb8f388b609ad2380bed --- /dev/null +++ b/voice_bridge/torch/include/c10/core/CopyBytes.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +namespace c10 { + +using CopyBytesFunction = void (*)( + size_t nbytes, + const void* src, + Device src_device, + void* dst, + Device dst_device); + +struct C10_API _CopyBytesFunctionRegisterer { + _CopyBytesFunctionRegisterer( + DeviceType from, + DeviceType to, + CopyBytesFunction func_sync, + CopyBytesFunction func_async = nullptr); +}; + +#define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) \ + namespace { \ + static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \ + g_copy_function)(from, to, __VA_ARGS__); \ + } + +/* + * WARNING: Implementations for this function are currently registered from + * ATen and caffe2, not yet from c10. Don't use this unless either ATen + * or caffe2 is present as well. + * We can't move them yet, because the CUDA implementations aren't unified yet + * between ATen and caffe2. + * We're planning to move the implementations into c10/backend/xxx + * to make c10 self contained again.
+ */ +C10_API void CopyBytes( + size_t nbytes, + const void* src, + Device src_device, + void* dst, + Device dst_device, + bool async); +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/DefaultDtype.h b/voice_bridge/torch/include/c10/core/DefaultDtype.h new file mode 100644 index 0000000000000000000000000000000000000000..f2f95c0da157f1f9e4948b823d6e65e32a906010 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/DefaultDtype.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace caffe2 { +class TypeMeta; +} // namespace caffe2 + +namespace c10 { +C10_API void set_default_dtype(caffe2::TypeMeta dtype); +C10_API const caffe2::TypeMeta get_default_dtype(); +C10_API ScalarType get_default_dtype_as_scalartype(); +C10_API const caffe2::TypeMeta get_default_complex_dtype(); +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/DefaultTensorOptions.h b/voice_bridge/torch/include/c10/core/DefaultTensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..36af26578473a6395644e06f8c0b91b3cbfb0b6b --- /dev/null +++ b/voice_bridge/torch/include/c10/core/DefaultTensorOptions.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +struct TensorOptions; + +/// Like TensorOptions, but all fields are guaranteed to be filled. +struct DefaultTensorOptions { + DefaultTensorOptions() = default; + + caffe2::TypeMeta dtype() const noexcept { + return dtype_; + } + Device device() const noexcept { + return device_; + } + Layout layout() const noexcept { + return layout_; + } + bool requires_grad() const noexcept { + return requires_grad_; + } + + // Defined in TensorOptions.h + inline DefaultTensorOptions& merge(const TensorOptions& options); + + private: + caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 64-bit + Device device_ = at::kCPU; // 32-bit + Layout layout_ = at::kStrided; // 8-bit + bool requires_grad_ = false; // 8-bit +}; + +inline const DefaultTensorOptions& getDefaultTensorOptions() { + static const auto options = DefaultTensorOptions(); + return options; +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/Device.h b/voice_bridge/torch/include/c10/core/Device.h new file mode 100644 index 0000000000000000000000000000000000000000..d53ab38ff9cb951d4b809d4c34a26dcc91d657fc --- /dev/null +++ b/voice_bridge/torch/include/c10/core/Device.h @@ -0,0 +1,205 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { + +/// An index representing a specific device; e.g., the 1 in GPU 1. +/// A DeviceIndex is not independently meaningful without knowing +/// the DeviceType it is associated with; try to use Device rather than +/// DeviceIndex directly. +using DeviceIndex = int8_t; + +/// Represents a compute device on which a tensor is located. A device is +/// uniquely identified by a type, which specifies the type of machine it is +/// (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the +/// specific compute device when there is more than one of a certain type. The +/// device index is optional, and in its defaulted state represents (abstractly) +/// "the current device". Further, there are two constraints on the value of the +/// device index, if one is explicitly stored: +/// 1. A negative index represents the current device, a non-negative index +/// represents a specific, concrete device, +/// 2. When the device type is CPU, the device index must be zero.
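A usage sketch of the Device semantics just described (illustrative only, not part of the header); the string constructor it uses is documented on the struct below:

#include <c10/core/Device.h>

void device_examples() {
  c10::Device cpu(c10::DeviceType::CPU);       // index -1: the "current" device
  c10::Device cuda1(c10::DeviceType::CUDA, 1); // a concrete CUDA ordinal
  c10::Device parsed("cuda:1");                // string form of the same device
  bool same = (cuda1 == parsed);               // true: type and index match
  (void)cpu;
  (void)same;
}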
+struct C10_API Device final { + using Type = DeviceType; + + /// Constructs a new `Device` from a `DeviceType` and an optional device + /// index. + /* implicit */ Device(DeviceType type, DeviceIndex index = -1) + : type_(type), index_(index) { + validate(); + } + + /// Constructs a `Device` from a string description, for convenience. + /// The string supplied must follow the following schema: + /// `(cpu|cuda)[:<device-index>]` + /// where `cpu` or `cuda` specifies the device type, and + /// `:<device-index>` optionally specifies a device index. + /* implicit */ Device(const std::string& device_string); + + /// Returns true if the type and index of this `Device` match those of + /// `other`. + bool operator==(const Device& other) const noexcept { + return this->type_ == other.type_ && this->index_ == other.index_; + } + + /// Returns true if the type or index of this `Device` differs from that of + /// `other`. + bool operator!=(const Device& other) const noexcept { + return !(*this == other); + } + + /// Sets the device index. + void set_index(DeviceIndex index) { + index_ = index; + } + + /// Returns the type of device this is. + DeviceType type() const noexcept { + return type_; + } + + /// Returns the optional index. + DeviceIndex index() const noexcept { + return index_; + } + + /// Returns true if the device has a non-default index. + bool has_index() const noexcept { + return index_ != -1; + } + + /// Return true if the device is of CUDA type. + bool is_cuda() const noexcept { + return type_ == DeviceType::CUDA; + } + + /// Return true if the device is of MPS type. + bool is_mps() const noexcept { + return type_ == DeviceType::MPS; + } + + /// Return true if the device is of HIP type. + bool is_hip() const noexcept { + return type_ == DeviceType::HIP; + } + + /// Return true if the device is of VE type. + bool is_ve() const noexcept { + return type_ == DeviceType::VE; + } + + /// Return true if the device is of XPU type. + bool is_xpu() const noexcept { + return type_ == DeviceType::XPU; + } + + /// Return true if the device is of IPU type. + bool is_ipu() const noexcept { + return type_ == DeviceType::IPU; + } + + /// Return true if the device is of XLA type. + bool is_xla() const noexcept { + return type_ == DeviceType::XLA; + } + + /// Return true if the device is of HPU type. + bool is_hpu() const noexcept { + return type_ == DeviceType::HPU; + } + + /// Return true if the device is of Lazy type. + bool is_lazy() const noexcept { + return type_ == DeviceType::Lazy; + } + + /// Return true if the device is of Vulkan type. + bool is_vulkan() const noexcept { + return type_ == DeviceType::Vulkan; + } + + /// Return true if the device is of Metal type. + bool is_metal() const noexcept { + return type_ == DeviceType::Metal; + } + + /// Return true if the device is of ORT type. + bool is_ort() const noexcept { + return type_ == DeviceType::ORT; + } + + /// Return true if the device is of META type. + bool is_meta() const noexcept { + return type_ == DeviceType::Meta; + } + + /// Return true if the device is of CPU type. + bool is_cpu() const noexcept { + return type_ == DeviceType::CPU; + } + + /// Return true if the device supports arbitrary strides. + bool supports_as_strided() const noexcept { + return type_ != DeviceType::IPU && type_ != DeviceType::XLA && + type_ != DeviceType::Lazy; + } + + /// Same string as returned from operator<<.
+ std::string str() const; + + private: + DeviceType type_; + DeviceIndex index_ = -1; + void validate() { + // Removing these checks in release builds noticeably improves + // performance in micro-benchmarks. + // This is safe to do, because backends that use the DeviceIndex + // have a later check when we actually try to switch to that device. + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + index_ == -1 || index_ >= 0, + "Device index must be -1 or non-negative, got ", + (int)index_); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + !is_cpu() || index_ <= 0, + "CPU device index must be -1 or zero, got ", + (int)index_); + } +}; + +C10_API std::ostream& operator<<(std::ostream& stream, const Device& device); + +} // namespace c10 + +namespace std { +template <> +struct hash { + size_t operator()(c10::Device d) const noexcept { + // Are you here because this static assert failed? Make sure you ensure + // that the bitmasking code below is updated accordingly! + static_assert(sizeof(c10::DeviceType) == 1, "DeviceType is not 8-bit"); + static_assert(sizeof(c10::DeviceIndex) == 1, "DeviceIndex is not 8-bit"); + // Note [Hazard when concatenating signed integers] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // We must first convert to a same-sized unsigned type, before promoting to + // the result type, to prevent sign extension when any of the values is -1. + // If sign extension occurs, you'll clobber all of the values in the MSB + // half of the resulting integer. + // + // Technically, by C/C++ integer promotion rules, we only need one of the + // uint32_t casts to the result type, but we put in both for explicitness's + // sake. + uint32_t bits = static_cast(static_cast(d.type())) + << 16 | + static_cast(static_cast(d.index())); + return std::hash{}(bits); + } +}; +} // namespace std diff --git a/voice_bridge/torch/include/c10/core/DeviceArray.h b/voice_bridge/torch/include/c10/core/DeviceArray.h new file mode 100644 index 0000000000000000000000000000000000000000..0d4f77da6c203363e4f8118b7d45e4e70af19684 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/DeviceArray.h @@ -0,0 +1,24 @@ +#include + +namespace c10 { + +template +class DeviceArray { + public: + DeviceArray(c10::Allocator& allocator, size_t size) + : data_ptr_(allocator.allocate(size * sizeof(T))) { + static_assert(std::is_trivial::value, "T must be a trivial type"); + TORCH_INTERNAL_ASSERT( + 0 == (reinterpret_cast(data_ptr_.get()) % alignof(T)), + "c10::DeviceArray: Allocated memory is not aligned for this data type"); + } + + T* get() { + return static_cast(data_ptr_.get()); + } + + private: + c10::DataPtr data_ptr_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/DeviceGuard.h b/voice_bridge/torch/include/c10/core/DeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..ed627f03171dc6527dbaec6e1986471d62c0f611 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/DeviceGuard.h @@ -0,0 +1,195 @@ +#pragma once + +#include + +namespace c10 { + +/// RAII guard that sets a certain default device in its constructor, and +/// changes it back to the device that was originally active upon destruction. +/// +/// The device is always reset to the one that was active at the time of +/// construction of the guard. Even if you `set_device` after construction, the +/// destructor will still reset the device to the one that was active at +/// construction time. +/// +/// This device guard does NOT have an uninitialized state; it is guaranteed +/// to reset a device on exit. 
If you are in a situation where you *might* +/// want to setup a guard (i.e., are looking for the moral equivalent +/// of optional), see OptionalDeviceGuard. +class DeviceGuard { + public: + /// No default constructor; see Note [Omitted default constructor from RAII] + explicit DeviceGuard() = delete; + + /// Set the current device to the passed Device. + explicit DeviceGuard(Device device) : guard_(device) {} + + /// This constructor is for testing only. + explicit DeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + DeviceGuard(const DeviceGuard&) = delete; + DeviceGuard& operator=(const DeviceGuard&) = delete; + + /// Move is disallowed, as DeviceGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + DeviceGuard(DeviceGuard&& other) = delete; + DeviceGuard& operator=(DeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + /// + /// TODO: The consistency check here is inconsistent with StreamGuard's + /// behavior with set_stream, where a stream on a different device than + /// the original one isn't an error; we just reset the stream and then + /// switch devices. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// This method is for testing only. + void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Sets the device index to the given one. The device type is inferred + /// from the original device type the guard was constructed with. + void set_index(DeviceIndex index) { + guard_.set_index(index); + } + + /// Returns the device that was set at the time the guard was constructed. + Device original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device. + Device current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineDeviceGuard guard_; +}; + +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * Morally, a OptionalDeviceGuard is equivalent to optional, but + * with extra constructors and methods as appropriate. + * + * Besides its obvious use (optionally applying a DeviceGuard), + * OptionalDeviceGuard is often also used for the following idiom: + * + * OptionalDeviceGuard g; + * for (const auto& t : tensors) { + * g.set_device(t.device()); + * do_something_with(t); + * } + * + * This usage is marginally more efficient than constructing a DeviceGuard every + * iteration of the for loop, as it avoids an unnecessary device reset. + * + * Unlike DeviceGuard, a OptionalDeviceGuard may be uninitialized. This occurs + * when you use the nullary constructor, or pass a nullopt to the constructor. + * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the + * original device was and they do not reset on destruction. This is why + * original_device() and current_device() return optional rather than + * Device (as they do in DeviceGuard), and also is why we didn't just + * provide OptionalDeviceGuard by default and hide DeviceGuard from users. 
+ * + * The semantics of an OptionalDeviceGuard are exactly explained by thinking + * of it as an optional. In particular, an initialized + * OptionalDeviceGuard doesn't restore device to its value at construction; it + * restores device to its value *at initialization*. So if you have the + * program: + * + * setDevice(1); + * OptionalDeviceGuard g; + * setDevice(2); + * g.reset_device(Device(DeviceType::CUDA, 3)); // initializes! + * + * On destruction, g will reset device to 2, rather than 1. + * + * An uninitialized OptionalDeviceGuard is distinct from a (initialized) + * DeviceGuard whose original_device_ and current_device_ match, since the + * DeviceGuard will still reset the device to original_device_. + */ +class OptionalDeviceGuard { + public: + /// Create an uninitialized guard. Set the guard later using reset_device. + explicit OptionalDeviceGuard() : guard_() {} + + /// Initialize the guard, setting the current device to the passed Device. + explicit OptionalDeviceGuard(Device device) : guard_(device) {} + + /// Initialize the guard if a Device is passed; otherwise leave the + /// guard uninitialized. + explicit OptionalDeviceGuard(optional device) : guard_(device) {} + + /// Constructor for testing only. + explicit OptionalDeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + OptionalDeviceGuard(const OptionalDeviceGuard&) = delete; + OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete; + + /// Move is disallowed + /// See Note [Explicit initialization of optional fields] + /// and // Note [Move construction for RAII guards is tricky] + /// for rationale. + OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete; + OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// For testing only + void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Returns the device that was set at the time the guard was constructed. + optional original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via reset_device. + optional current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineOptionalDeviceGuard guard_; +}; + +// Note [Whither the DeviceGuard boilerplate] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Design note: in principle, we could avoid these wrappers using: +// +// using DeviceGuard = impl::InlineDeviceGuard; +// using OptionalDeviceGuard = +// impl::InlineOptionalDeviceGuard; +// +// But the error messages are worse, and our users can't just look at the +// header file to find out what's going on. Furthermore, for specializations +// like CUDAStreamGuard, it can be profitable to replace some interfaces with +// refined types (e.g., return CUDAStream instead of Stream). So, we eat +// the boilerplate and write out the API explicitly. 
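A usage sketch of the two guards documented above (illustrative only; the loop mirrors the idiom from the OptionalDeviceGuard comment):

#include <vector>
#include <c10/core/DeviceGuard.h>

void run_on(c10::Device target) {
  c10::DeviceGuard g(target); // switches now, restores the old device on exit
  // ... launch work on `target` here ...
}

void run_per_device(const std::vector<c10::Device>& devices) {
  c10::OptionalDeviceGuard g; // uninitialized: nothing to restore yet
  for (const auto& d : devices) {
    g.reset_device(d); // the first call initializes the guard
    // ... work on device d ...
  }
  // On destruction, g restores the device that was current when the guard
  // was first initialized, as explained above.
}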
+ +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/DeviceType.h b/voice_bridge/torch/include/c10/core/DeviceType.h new file mode 100644 index 0000000000000000000000000000000000000000..000ad331828b02535c3fca50a098b9d864198ec5 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/DeviceType.h @@ -0,0 +1,111 @@ +#pragma once + +// This is directly synchronized with caffe2/proto/caffe2.proto, but +// doesn't require me to figure out how to get Protobuf headers into +// ATen/core (which would require a lot more build system hacking.) +// If you modify me, keep me synchronized with that file. + +#include + +#include +#include + +namespace c10 { + +// These contain all device types that also have a BackendComponent +// and therefore participate in per-backend functionality dispatch keys. +// This is most backends except PrivateUse2 and PrivateUse3 +#define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \ + _(CPU, extra) \ + _(CUDA, extra) \ + _(HIP, extra) \ + _(XLA, extra) \ + _(MPS, extra) \ + _(IPU, extra) \ + _(XPU, extra) \ + _(HPU, extra) \ + _(VE, extra) \ + _(Lazy, extra) \ + _(Meta, extra) \ + _(PrivateUse1, extra) + +enum class DeviceType : int8_t { + CPU = 0, + CUDA = 1, // CUDA. + MKLDNN = 2, // Reserved for explicit MKLDNN + OPENGL = 3, // OpenGL + OPENCL = 4, // OpenCL + IDEEP = 5, // IDEEP. + HIP = 6, // AMD HIP + FPGA = 7, // FPGA + ORT = 8, // ONNX Runtime / Microsoft + XLA = 9, // XLA / TPU + Vulkan = 10, // Vulkan + Metal = 11, // Metal + XPU = 12, // XPU + MPS = 13, // MPS + Meta = 14, // Meta (tensors with no data) + HPU = 15, // HPU / HABANA + VE = 16, // SX-Aurora / NEC + Lazy = 17, // Lazy Tensors + IPU = 18, // Graphcore IPU + PrivateUse1 = 19, // PrivateUse1 device + // NB: If you add more devices: + // - Change the implementations of DeviceTypeName and isValidDeviceType + // in DeviceType.cpp + // - Change the number below + COMPILE_TIME_MAX_DEVICE_TYPES = 20, +}; + +constexpr DeviceType kCPU = DeviceType::CPU; +constexpr DeviceType kCUDA = DeviceType::CUDA; +constexpr DeviceType kHIP = DeviceType::HIP; +constexpr DeviceType kFPGA = DeviceType::FPGA; +constexpr DeviceType kORT = DeviceType::ORT; +constexpr DeviceType kXLA = DeviceType::XLA; +constexpr DeviceType kMPS = DeviceType::MPS; +constexpr DeviceType kMeta = DeviceType::Meta; +constexpr DeviceType kVulkan = DeviceType::Vulkan; +constexpr DeviceType kMetal = DeviceType::Metal; +constexpr DeviceType kXPU = DeviceType::XPU; +constexpr DeviceType kHPU = DeviceType::HPU; +constexpr DeviceType kVE = DeviceType::VE; +constexpr DeviceType kLazy = DeviceType::Lazy; +constexpr DeviceType kIPU = DeviceType::IPU; +constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1; + +// define explicit int constant +constexpr int COMPILE_TIME_MAX_DEVICE_TYPES = + static_cast<int>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES); + +static_assert( + COMPILE_TIME_MAX_DEVICE_TYPES <= 20, + "Hey! You seem to be adding a lot of new DeviceTypes. The intent was " + "for this constant to reflect the actual number of DeviceTypes we support " + "in PyTorch; it's important that this number is not too large as we " + "use this to allocate stack arrays in some places in our code. If you " + "are indeed just adding the 20th device type, feel free to change " + "the check to 32; but if you are adding some sort of extensible device " + "types registration, please be aware that you are affecting code that " + "thinks this number is small.
Try auditing uses of this constant."); + +C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false); + +C10_API bool isValidDeviceType(DeviceType d); + +C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type); + +} // namespace c10 + +namespace std { +template <> +struct hash { + std::size_t operator()(c10::DeviceType k) const { + return std::hash()(static_cast(k)); + } +}; +} // namespace std + +namespace torch { +using c10::DeviceType; +} diff --git a/voice_bridge/torch/include/c10/core/DispatchKey.h b/voice_bridge/torch/include/c10/core/DispatchKey.h new file mode 100644 index 0000000000000000000000000000000000000000..536a15a24a0b965e5429faf0b615eea0e8256e3f --- /dev/null +++ b/voice_bridge/torch/include/c10/core/DispatchKey.h @@ -0,0 +1,704 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +// Semantically, each value of BackendComponent identifies a "backend" for our +// dispatch. Some functionalities that we may dispatch to are allowed to +// register different handlers for each backend. The BackendComponent is then +// used to figure out which backend implementation to dispatch to. + +// In implementation terms, the backend component identifies a specific "bit" in +// a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom +// ~12 "BackendComponent" bits, while the remaining upper bits are assigned to +// functionalities. When we encounter a functionality bit that is known to be +// customizeable per-backend, then we also look at the lower BackendComponent +// bits and take the highest bit to determine which backend's implementation to +// use. + +// WARNING! If you add a new backend component to the end of this list, +// make sure you update PrivateUse3Bit. (But you shouldn't: private use +// keys should have higher precedence than all built-in keys) + +#define C10_FORALL_BACKEND_COMPONENTS(_, extra) \ + _(CPU, extra) \ + _(CUDA, extra) \ + _(HIP, extra) \ + _(XLA, extra) \ + _(MPS, extra) \ + _(IPU, extra) \ + _(XPU, extra) \ + _(HPU, extra) \ + _(VE, extra) \ + _(Lazy, extra) \ + _(Meta, extra) \ + _(PrivateUse1, extra) \ + _(PrivateUse2, extra) \ + _(PrivateUse3, extra) + +// WARNING! If we add a new per-backend functionality key that has higher +// priority than Autograd, then make sure you update EndOfRuntimeBackendKeys + +#define C10_FORALL_FUNCTIONALITY_KEYS(_) \ + _(Dense, ) \ + _(Quantized, Quantized) \ + _(Sparse, Sparse) \ + _(NestedTensor, NestedTensor) \ + _(AutogradFunctionality, Autograd) + +enum class BackendComponent : uint8_t { + + // A "backend" is colloquially used to refer to handlers for dispatch + // which actually implement the numerics of an operation in question. + // + // Due to the nature of the enum, these backends are specified in + // an ordered way, but for most backends this order is not semantically + // meaningful (e.g., it's valid to reorder these backends without changing + // semantics). The only situation when backend ordering is meaningful + // is when the backend participates in multiple dispatch with another + // backend; e.g., CPU and CUDA (cuda must have higher priority). + + // These keys don't correspond to individual kernels. + // Instead, they represent the backends that are allowed to override specific + // pieces of functionality: + // - dense kernels (e.g. DispatchKey::CPU) + // - sparse kernels (e.g. DispatchKey::SparseCPU) + // - quantized kernels (e.g. DispatchKey::QuantizedCPU) + // - autograd kernels (e.g. 
DispatchKey::AutogradCPU) + // We reserve space in the runtime operator table for this full cross product + // of + // [backends in this enum] x [keys below that are explicitly marked as having + // per-backend functionality] + // + // A meta tensor is a tensor without any data associated with it. (They + // have also colloquially been referred to as tensors on the "null" device). + // A meta tensor can be used to dry run operators without actually doing any + // computation, e.g., add on two meta tensors would give you another meta + // tensor with the output shape and dtype, but wouldn't actually add anything. + + InvalidBit = 0, +#define DEFINE_BACKEND_COMPONENT(n, _) n##Bit, + C10_FORALL_BACKEND_COMPONENTS(DEFINE_BACKEND_COMPONENT, unused) +#undef DEFINE_BACKEND_COMPONENT + + // Define an alias to represent end of backend dispatch keys. + // If you add new backend keys after PrivateUse3, please also update it here. + EndOfBackendKeys = PrivateUse3Bit, +}; + +// Semantically, a dispatch key identifies a possible "level" in our +// dispatch, for which a handler may be registered. Each handler corresponds +// to a type of functionality. +// +// In implementation terms, the dispatch key identifies a specific "bit" in a +// DispatchKeySet. Higher bit indexes get handled by dispatching first (because +// we "count leading zeros" when we extract the highest priority dispatch +// key.) +// +// Note [DispatchKey Classification] +// This enum actually contains several types of keys, which are explained +// in more detail further down: +// (1) non-customizable backends (e.g. FPGA) +// (2) non-customizable functionalities (e.g. Functionalize) +// (3) functionalized that are customizable per backend (e.g. Dense, Sparse, +// AutogradFunctionality) (4) per-backend instances of customizable +// functionalities (e.g. CPU, SparseCPU, AutogradCPU) (5) alias keys (e.g. +// CompositeImplicitAutograd) +// +// Of the categories above, it's important to note: +// (a) which keys are assigned individual bits in a DispatchKeySet +// (b) which keys are assigned individual slots in the runtime operator table +// ("Runtime keys") +// +// (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet. +// (1), (2) and (4) all get their own dedicated slots in the runtime operator +// table. + +// See Note [DispatchKeySet Internal Representation] for more details. +// +// NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py +enum class DispatchKey : uint16_t { + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // This is not a "real" functionality, but it exists to give us a "nullopt" + // element we can return for cases when a DispatchKeySet contains no elements. + // You can think a more semantically accurate definition of DispatchKey is: + // + // using DispatchKey = optional + // + // and Undefined == nullopt. We didn't actually represent + // it this way because optional would take two + // words, when DispatchKey fits in eight bits. + + Undefined = 0, + + // Define an alias for Undefined to represent CatchAll (long term + // this will get eliminated, but for now it's convenient) + CatchAll = Undefined, + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ // + // Every value in the enum (up to EndOfFunctionalityKeys) + // corresponds to an individual "functionality" that can be dispatched to. 
+ // This is represented in the DispatchKeySet by assigning each of these enum + // values + // to each of the remaining (64 - len(BackendComponent)) bits. + // + // Most of these functionalities have a single handler assigned to them, + // making them "runtime keys". + // That map to a single slot in the runtime operator table. + // + // A few functionalities are allowed to be customizable per backend. + // See [Note: Per-Backend Functionality Dispatch Keys] for details. + + // See [Note: Per-Backend Functionality Dispatch Keys] + Dense, + + // Below are non-extensible backends. + // These are backends that currently don't have their own overrides for + // Autograd/Sparse/Quantized kernels, + // and we therefore don't waste space in the runtime operator table allocating + // space for them. + // If any of these backends ever need to customize, e.g., Autograd, then we'll + // need to add a DispatchKey::*Bit for them. + + // TODO: put this in BackendComponents + FPGA, // Xilinx support lives out of tree at + // https://gitlab.com/pytorch-complex/vitis_kernels + + // TODO: put this in BackendComponents + // ONNX Runtime, lives out of tree at https://github.com/pytorch/ort and + // https://github.com/microsoft/onnxruntime, and is also used to test general + // backend/extension machinery in the core. cf: + // - test/cpp_extensions/ort_extension.cpp + // - test/test_torch.py + // - aten/src/ATen/test/extension_backend_test.cpp + ORT, + + Vulkan, // TODO: put this in BackendComponents + Metal, // TODO: put this in BackendComponents + + // See [Note: Per-Backend Functionality Dispatch Keys] + Quantized, + + // This backend is to support custom RNGs; it lets you go + // to a different kernel if you pass in a generator that is not a + // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this + // key: + // 1) set it as a second parameter of at::Generator constructor call in + // the user-defined PRNG class. + // 2) use it as a dispatch key while registering custom kernels + // (templatized kernels specialized for user-defined PRNG class) + // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp + CustomRNGKeyId, + + // TODO: Make Mkldnn a functionality key, so we can give it Meta + // support + // Here are backends which specify more specialized operators + // based on the layout of the tensor. Note that the sparse backends + // are one case where ordering matters: sparse multi-dispatches with + // the corresponding dense tensors, and must be handled before them. + MkldnnCPU, // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp + // NB: not to be confused with MKLDNN, which is Caffe2 only + + // See [Note: Per-Backend Functionality Dispatch Keys] + Sparse, + + // TODO: Make SparseCsr a functionality key + SparseCsrCPU, + SparseCsrCUDA, + + NestedTensor, + + // In some situations, it is not immediately obvious what the correct + // backend for function is, because the function in question doesn't + // have any "tensor" arguments. In this case, a BackendSelect function + // can be registered to implement the custom determination of the + // correct backend. + BackendSelect, + + Python, + + // Out-of-core key for Fake Tensor in torchdistx. + // See https://pytorch.org/torchdistx/latest/fake_tensor.html + // TODO: delete this in favor of Python-implemented fake tensor + Fake, + // See Note [Out-of-tree vmap+grad prototype]. 
+  // is to insert code after the "autograd subsystem" runs, so this key should
+  // be directly after ADInplaceOrView and all of the autograd keys.
+  FuncTorchDynamicLayerBackMode,
+
+  // Alias and mutation removal.
+  // If some backends want to opt into only alias removal or only mutation
+  // removal,
+  // we can consider adding separate keys dedicated to those individual passes.
+  // See Note [Functionalization Pass In Core] for details.
+  Functionalize,
+
+  // The named dispatch key is set for any tensors with named dimensions.
+  // Although we have a dispatch key for named tensors, for historical reasons,
+  // this dispatch key doesn't do any of the substantive functionality for named
+  // tensor (though, hypothetically, it could!) At the moment, it's just
+  // responsible for letting us give good error messages when operations
+  // don't support named tensors.
+  //
+  // NB: If you ever consider moving named tensor functionality into
+  // this dispatch key, note that it might be necessary to add another dispatch
+  // key that triggers before composite operators, in case a composite operator
+  // has named dimension propagation that doesn't match that of its
+  // constituent parts.
+  // TODO: delete this once torchdim lands in functorch
+  Named,
+
+  // The Conjugate dispatch key is set for any tensors that need to perform
+  // conjugation
+  // This is implemented at a dispatch level right before any backends run
+  Conjugate,
+
+  // The Negative dispatch key is set for any tensors that need to perform
+  // negation
+  // This is implemented at a dispatch level right before any backends run
+  Negative,
+
+  ZeroTensor, // registered at build/aten/src/ATen/RegisterZeroTensor.cpp
+
+  // Note [ADInplaceOrView key]
+  // ADInplaceOrView key is used by inplace or view ops to register a kernel
+  // that does additional setup for future autograd computation.
+  //
+  // 1. For inplace ops this kernel does a version bump
+  // 2. For view ops this kernel does `as_view` setup where we properly set up
+  //    DifferentiableViewMeta on the view tensors.
+  //
+  // For other ops it's a fallthrough kernel since there's no extra
+  // work to do.
+  //
+  // Note [Dream: skip VariableType kernel when requires_grad=false]
+  //
+  // In an ideal world where we can skip VariableType kernel for inputs
+  // with requires_grad=false, instead of a fallthrough kernel, we'll
+  // register a kernel shown below to all functional ops as well:
+  // torch::Tensor my_functional_op(...) {
+  //   {
+  //     // Note for every op in VariableType, you need to go through
+  //     // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the
+  //     // key to TLS excluded set. If you don't go through it at all,
+  //     // inplace/view ops called through `at::` inside your backend
+  //     // kernel will dispatch to ADInplaceOrView kernels and do a lot
+  //     // of extra work.
+  //     at::AutoDispatchBelowADInplaceOrView guard;
+  //     at::redispatch::my_functional_op(...);
+  //   }
+  // }
+  // But this work is currently blocked since it adds an extra dispatch
+  // for all ops and it's non-trivial overhead at the model level (a few
+  // percent). Thus our current approach takes advantage of the fact that every
+  // kernel goes through the VariableType kernel first and pulls the
+  // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops
+  // up to the `VariableType` kernel. Thus we only add the extra dispatch
+  // to view/inplace ops to minimize its perf impact on real models.
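+  //
+  // As a concrete sketch of the fallthrough mechanism described above (the
+  // TORCH_LIBRARY_IMPL / makeFallthrough registration API is the standard
+  // one; treat the exact placement here as illustrative): a fallback that
+  // makes dispatch skip straight past this key for ops with no extra
+  // inplace/view bookkeeping looks like
+  //
+  //   TORCH_LIBRARY_IMPL(_, ADInplaceOrView, m) {
+  //     m.fallback(torch::CppFunction::makeFallthrough());
+  //   }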
+  ADInplaceOrView,
+  // Note [Alias Dispatch Key : Autograd]
+  // All backends are oblivious to autograd; autograd is handled as a
+  // layer which happens on top of all backends. It inspects the autograd
+  // metadata of all inputs, determines what autograd metadata should be
+  // constructed by the output, and otherwise defers to the backend to
+  // actually do the numeric computation. Autograd contains
+  // the bulk of this logic.
+
+  // Autograd is now an alias dispatch key which by default maps to all
+  // backend-specific autograd keys.
+  // Backend-specific keys allow backends to override the default kernel
+  // registered to the Autograd key as needed.
+  // For example, XLA wants to define autograd for einsum directly.
+  // Registering a custom autograd implementation at the XLA key won't work
+  // because we process Autograd before XLA. This key has higher priority and
+  // gets processed first. You generally should NOT redispatch after handling
+  // autograd here (since that would result in execution of the Autograd
+  // operator, which you're trying to skip). In AutogradXLA implementations,
+  // you are responsible for handling autograd yourself, or deferring to other
+  // operators which support autograd.
+
+  // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and
+  // reserved user-defined backends. All other in-tree backends share the
+  // AutogradOther key. We can add specific autograd keys for those backends
+  // upon request.
+  AutogradOther,
+
+  // See [Note: Per-Backend Functionality Dispatch Keys]
+  AutogradFunctionality,
+
+  // NestedTensor is an example of something that isn't a "real backend"
+  // (because it mostly consists of redispatching kernels)
+  // but it would like to override autograd functionality in C++.
+  // We can handle cases like this by adding an extra functionality key
+  // exclusively for handling autograd for NestedTensor.
+  // lives out of tree at
+  // https://github.com/pytorch/nestedtensor
+  AutogradNestedTensor,
+
+  Tracer,
+
+  // TODO: make Autocast a functionality key
+  // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed
+  // and inputs are saved for backward in the post-autocast type.
+  AutocastCPU,
+  AutocastXPU,
+  // Naughtily, AutocastCUDA is also being used for XLA. In the terminal state,
+  // it probably should get its own Autocast key
+  AutocastCUDA,
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  // There are a number of alternative modes which we may want to handle before
+  // autograd; for example, error checking, tracing, profiling or vmap. They
+  // go here.
+
+  FuncTorchBatched, // See Note [Out-of-tree vmap+grad prototype]
+  FuncTorchVmapMode, // See Note [Out-of-tree vmap+grad prototype]
+
+  // This is the dispatch key for BatchedTensorImpl, which is used to implement
+  // batching rules for vmap.
+  Batched,
+
+  // When we are inside a vmap, all tensors dispatch on this key.
+  // See Note: [DispatchKey::VmapMode usage] for more details.
+  VmapMode,
+
+  FuncTorchGradWrapper, // See Note [Out-of-tree vmap+grad prototype]
+
+  // Out-of-core key for Deferred Module Initialization in torchdistx.
+  // See https://pytorch.org/torchdistx/latest/deferred_init.html
+  DeferredInit,
+
+  // Used by Python key logic to know the set of tls on entry to the dispatcher
+  // This kernel assumes it is the top-most non-functorch-related DispatchKey.
+  // If you add a key above, make sure to update the fallback implementation for
+  // this.
+  PythonTLSSnapshot,
+
+  // This key should be at the very top of the dispatcher
+  FuncTorchDynamicLayerFrontMode, // See Note [Out-of-tree vmap+grad prototype]
+
+  // TESTING: This is intended to be a generic testing tensor type id.
+  // Don't use it for anything real; its only acceptable use is within a single
+  // process test. Use it by creating a TensorImpl with this DispatchKey, and
+  // then registering operators to operate on this type id. See
+  // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example.
+  TESTING_ONLY_GenericWrapper,
+
+  // TESTING: This is intended to be a generic testing tensor type id.
+  // Don't use it for anything real; its only acceptable use is within a single
+  // process test. Use it by toggling the mode on and off via
+  // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators
+  // to operate on this type id. See
+  // aten/src/ATen/core/dispatch/backend_fallback_test.cpp
+  // for a usage example.
+  TESTING_ONLY_GenericMode,
+
+  // This is a bypass that allows you to skip running the C++ dispatcher
+  // entirely
+  PythonDispatcher,
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  EndOfFunctionalityKeys, // End of functionality keys.
+
+// ~~~~~~~~~~~~~~ "Dense" Per-Backend Dispatch keys ~~~~~~~~~~~~~~~~~~~~ //
+// Here are backends which you think of as traditionally specifying
+// how to implement operations on some device.
+
+#define DEFINE_PER_BACKEND_KEYS_FOR_BACKEND(n, prefix) prefix##n,
+
+#define DEFINE_PER_BACKEND_KEYS(fullname, prefix)      \
+  StartOf##fullname##Backends,                         \
+      C10_FORALL_BACKEND_COMPONENTS(                   \
+          DEFINE_PER_BACKEND_KEYS_FOR_BACKEND, prefix) \
+          EndOf##fullname##Backends = prefix##PrivateUse3,
+
+  C10_FORALL_FUNCTIONALITY_KEYS(DEFINE_PER_BACKEND_KEYS)
+
+#undef DEFINE_PER_BACKEND_KEYS
+#undef DEFINE_PER_BACKEND_KEYS_FOR_BACKEND
+
+  EndOfRuntimeBackendKeys = EndOfAutogradFunctionalityBackends,
+
+  // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  // Note [Alias Dispatch Keys]
+  // Alias dispatch keys are synthetic dispatch keys which map to multiple
+  // runtime dispatch keys. Alias keys have precedence, but they are always
+  // lower precedence than runtime keys. You can register a kernel to an
+  // alias key, and the kernel might be populated to the mapped runtime keys
+  // during dispatch table computation.
+  // If a runtime dispatch key has multiple kernels from alias keys, the
+  // winning kernel is chosen based on the precedence of the alias keys (but
+  // runtime keys always have precedence over alias keys).
+  // Alias keys won't be directly called during runtime.
+
+  // See Note [Alias Dispatch Key : Autograd]
+  Autograd,
+  CompositeImplicitAutograd, // registered at
+  // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp
+  CompositeImplicitAutogradNestedTensor, // registered at
+  // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp
+  CompositeExplicitAutograd, // registered at
+  // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
+  // See Note [CompositeExplicitAutogradNonFunctional Key]
+  CompositeExplicitAutogradNonFunctional, // registered at
+  // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
+
+  // Define an alias key to represent end of alias dispatch keys.
+  // If you add new alias keys after Autograd, please also update it here.
+  StartOfAliasKeys = Autograd,
+  EndOfAliasKeys = CompositeExplicitAutogradNonFunctional, //
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  // The aliases exist for backwards compatibility reasons; they shouldn't
+  // be used
+  CPUTensorId = CPU,
+  CUDATensorId = CUDA,
+  DefaultBackend = CompositeExplicitAutograd,
+  PrivateUse1_PreAutograd = AutogradPrivateUse1,
+  PrivateUse2_PreAutograd = AutogradPrivateUse2,
+  PrivateUse3_PreAutograd = AutogradPrivateUse3,
+  Autocast = AutocastCUDA,
+};
+
+// Note [Private use DispatchKey]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Private use tensor IDs are preallocated tensor type IDs for use in user
+// applications. Similar to private use fields in HTTP, they can be used
+// by end users for experimental or private applications, without needing
+// to "standardize" the tensor ID (which would be done by submitting a PR
+// to PyTorch to add your type ID).
+//
+// Private use tensor IDs are appropriate to use if you want to experiment
+// with adding a new tensor type (without having to patch PyTorch first) or
+// have a private, non-distributed application that needs to make use of a
+// new tensor type. Private use tensor IDs are NOT appropriate to use for
+// libraries intended to be distributed to further users: please contact
+// the PyTorch developers to get a type ID registered in this case.
+//
+// We provide two classes of private use tensor id: regular DispatchKeys
+// and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend"
+// DispatchKeys; if you were adding support for a new type of accelerator, you
+// would use a backend DispatchKey, and ideally automatically reuse
+// AutogradOther definitions already defined in PyTorch. AutogradPrivateUse
+// DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for
+// tensors that compose multiple internal tensors, and for cases when the
+// built-in autograd formulas for operators are not appropriate.
+
+static_assert(
+    (static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) +
+     static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys)) <= 64,
+    "The BackendComponent and DispatchKey enums (below EndOfFunctionalityKeys)"
+    " both map to backend and functionality bits"
+    " into a 64-bit bitmask; you must have less than 64 total entries between them");
+
+// Check if a DispatchKey is an alias mapping to other runtime keys.
+constexpr bool isAliasDispatchKey(DispatchKey k) {
+  return k >= DispatchKey::StartOfAliasKeys && k <= DispatchKey::EndOfAliasKeys;
+}
+
+// [Note: Per-Backend Functionality Dispatch Keys]
+// Check if a DispatchKey is a per-backend functionality key
+// Any functionalities that can be customized per-backend should be added here.
+// These keys correspond to functionalities that can be customized individually
+// per backend. While they only take up one bit in the `DispatchKeySet` bitset,
+// they map to (# backends) slots in the operator table.
+// Each of these keys also has a separate set of "runtime keys" in the dispatch
+// key enum, per backend, which *do* map to the individual operator table slots.
+// For example, the "Sparse" key maps to an individual bit in the
+// DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc all map to individual
+// slots in the runtime operator table.
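+// To make the cross product concrete (a sketch; all of the keys shown exist
+// in the enums above): combining the "Sparse" functionality bit with the CPU
+// backend bit yields the runtime key SparseCPU, which owns one
+// operator-table slot:
+//
+//   auto ks = DispatchKeySet(DispatchKey::Sparse) |
+//       DispatchKeySet(BackendComponent::CPUBit);
+//   ks.has(DispatchKey::SparseCPU); // true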
+
+constexpr bool isPerBackendFunctionalityKey(DispatchKey k) {
+  if (k == DispatchKey::Dense || k == DispatchKey::Quantized ||
+      k == DispatchKey::Sparse || k == DispatchKey::AutogradFunctionality ||
+      k == DispatchKey::NestedTensor) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Note that this includes Undefined in the total count.
+// BUT EndOfFunctionalityKeys is its own (placeholder) key.
+// e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3.
+// In the above example, there are 3 total functionality keys.
+constexpr uint8_t num_functionality_keys =
+    static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys);
+
+constexpr uint8_t num_backends =
+    static_cast<uint8_t>(BackendComponent::EndOfBackendKeys);
+
+// Note [No More Than 16 Backends]
+// Search for this note to find places in the code where the "no more than 16
+// backends" invariant is baked in.
+static_assert(
+    static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) <= 16,
+    "BackendComponent currently only supports <= 16 backends. If we really need to extend this, \
+there are a few places where this invariant is baked in");
+
+constexpr uint8_t numPerBackendFunctionalityKeys() {
+  uint8_t count = 0;
+  for (uint8_t k = 0; k <= num_functionality_keys; ++k) {
+    if (isPerBackendFunctionalityKey(static_cast<DispatchKey>(k)))
+      ++count;
+  }
+  return count;
+}
+
+#if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
+// See [Note: Trimmed Mobile Dispatch Keys]
+constexpr uint16_t num_runtime_entries = 8;
+#else
+constexpr uint16_t num_runtime_entries = num_functionality_keys +
+    (numPerBackendFunctionalityKeys() * (num_backends - 1));
+#endif
+
+// See Note [No More Than 16 Backends]
+constexpr uint16_t full_backend_mask =
+    (static_cast<uint16_t>(1) << num_backends) - 1;
+
+C10_API const char* toString(DispatchKey);
+C10_API const char* toString(BackendComponent);
+C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
+C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
+
+C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);
+
+// Parses a string into a dispatch key.
+// If the string cannot be correctly parsed, throws an exception.
+C10_API c10::DispatchKey parseDispatchKey(const std::string& k);
+
+// These are some convenience identifiers for dispatch keys which are
+// shorter to type than their long counterparts. Note that some of these
+// dispatch keys directly correspond to DeviceType; and most APIs that
+// accept DispatchKey also accept DeviceType; e.g.,
+// torch::dispatch(torch::kCPU, ...) is also valid.
+constexpr DispatchKey kAutograd = DispatchKey::Autograd;
+
+// See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
+// This function relies on the invariant that the dispatch keys between
+// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
+// in the same order as `BackendComponent`.
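+// For example (a sketch; both keys exist above):
+//   toBackendComponent(DispatchKey::SparseCPU) == BackendComponent::CPUBit
+//   toBackendComponent(DispatchKey::AutogradCUDA) == BackendComponent::CUDABit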
+constexpr BackendComponent toBackendComponent(DispatchKey k) {
+  if (k >= DispatchKey::StartOfDenseBackends &&
+      k <= DispatchKey::EndOfDenseBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfDenseBackends));
+  } else if (
+      k >= DispatchKey::StartOfQuantizedBackends &&
+      k <= DispatchKey::EndOfQuantizedBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends));
+  } else if (
+      k >= DispatchKey::StartOfSparseBackends &&
+      k <= DispatchKey::EndOfSparseBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfSparseBackends));
+  } else if (
+      k >= DispatchKey::StartOfNestedTensorBackends &&
+      k <= DispatchKey::EndOfNestedTensorBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends));
+  } else if (
+      k >= DispatchKey::StartOfAutogradFunctionalityBackends &&
+      k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(
+            DispatchKey::StartOfAutogradFunctionalityBackends));
+  } else {
+    return BackendComponent::InvalidBit;
+  }
+}
+
+constexpr DispatchKey toFunctionalityKey(DispatchKey k) {
+  if (k <= DispatchKey::EndOfFunctionalityKeys) {
+    return k;
+  } else if (k <= DispatchKey::EndOfDenseBackends) {
+    return DispatchKey::Dense;
+  } else if (k <= DispatchKey::EndOfQuantizedBackends) {
+    return DispatchKey::Quantized;
+  } else if (k <= DispatchKey::EndOfSparseBackends) {
+    return DispatchKey::Sparse;
+  } else if (k <= DispatchKey::EndOfNestedTensorBackends) {
+    return DispatchKey::NestedTensor;
+  } else if (k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
+    return DispatchKey::AutogradFunctionality;
+  } else {
+    return DispatchKey::Undefined;
+  }
+}
+
+BackendComponent toBackendComponent(DeviceType device_type);
+
+// Given (DispatchKey::Dense, BackendComponent::CUDABit), returns
+// DispatchKey::CUDA.
+// See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
+// This function relies on the invariant that the dispatch keys between
+// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
+// in the same order as `BackendComponent`.
+constexpr DispatchKey toRuntimePerBackendFunctionalityKey(
+    DispatchKey functionality_k,
+    BackendComponent backend_k) {
+  if (functionality_k == DispatchKey::Dense) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfDenseBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::Sparse) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfSparseBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::Quantized) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::NestedTensor) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::AutogradFunctionality) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(
+            DispatchKey::StartOfAutogradFunctionalityBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  return DispatchKey::Undefined;
+}
+
+} // namespace c10
+
+namespace torch {
+// Expose the constant, but not the TYPE (DispatchKey is an implementation
+// detail!)
+using c10::kAutograd;
+} // namespace torch
+
+// NB: You really shouldn't use this instance; this enum is guaranteed
+// to be pretty small so a regular array should be acceptable.
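+// For example, a sketch of the suggested alternative (the table type is
+// hypothetical): index a plain array by the key's integer value instead of
+// hashing:
+//
+//   std::array<Handler, c10::num_runtime_entries> table;
+//   table[c10::getDispatchTableIndexForDispatchKey(k)] = ...;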
+namespace std {
+template <>
+struct hash<c10::DispatchKey> {
+  typedef size_t result_type;
+  typedef c10::DispatchKey argument_type;
+
+  size_t operator()(c10::DispatchKey x) const {
+    return static_cast<size_t>(x);
+  }
+};
+} // namespace std
diff --git a/voice_bridge/torch/include/c10/core/DispatchKeySet.h b/voice_bridge/torch/include/c10/core/DispatchKeySet.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf07bb316113d86eec49776723b93036844e1593
--- /dev/null
+++ b/voice_bridge/torch/include/c10/core/DispatchKeySet.h
@@ -0,0 +1,901 @@
+#pragma once
+#include <c10/core/DispatchKey.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+#include <c10/util/Metaprogramming.h>
+#include <c10/util/llvmMathExtras.h>
+
+namespace c10 {
+
+struct FunctionalityOffsetAndMask {
+  // empty constructor shouldn't be used; only needed to initialize
+  // the array before populating it.
+  FunctionalityOffsetAndMask() {}
+  FunctionalityOffsetAndMask(uint16_t offset, uint16_t mask)
+      : offset(offset), mask(mask) {}
+  // This needs to be big enough to cover the size of the operator table.
+  uint16_t offset;
+  // See Note [No More Than 16 Backends]
+  // This mask needs to be big enough to mask all of the backend bits.
+  // We probably don't ever want to have more than 16 backend bits, so uint16_t
+  // should be enough.
+  uint16_t mask;
+};
+static_assert(
+    c10::num_runtime_entries < 65536,
+    "The dispatcher currently only supports up to 2^16 runtime entries");
+
+C10_API std::array<FunctionalityOffsetAndMask, num_functionality_keys>
+initializeFunctionalityOffsetsAndMasks();
+
+C10_ALWAYS_INLINE static const std::
+    array<FunctionalityOffsetAndMask, num_functionality_keys>&
+    offsetsAndMasks() {
+  static auto offsets_and_masks_ = initializeFunctionalityOffsetsAndMasks();
+  return offsets_and_masks_;
+}
+
+// A representation of a set of DispatchKeys. A DispatchKeySet contains both
+// "functionality" bits and "backend bits", and every tensor holds its own
+// DispatchKeySet. The Dispatcher implements multiple dispatch by grabbing the
+// keyset on every input tensor, or'ing them together, and dispatching to a
+// specific piece of functionality. The functionality bits are *ordered*. When
+// multiple functionality bits are set, we use the highest priority
+// functionality. Similarly, multiple backend bits can theoretically be set if
+// you call an operator with multiple tensors from different devices (e.g. CPU
+// and CUDA), although support for mixed device dispatch is limited (the only
+// kernels that gracefully handle mixed device inputs for now are cuda kernels
+// that take in a scalar cpu tensor).
+
+// A representation of a set of DispatchKeys. A tensor may have multiple
+// tensor type ids, e.g., a Variable tensor can also be a CPU tensor; the
+// DispatchKeySet specifies what type ids apply. The internal representation is
+// as a 64-bit bit set (this means only 64 tensor type ids are supported).
+//
+// As mentioned above, DispatchKeys are ordered; thus, we can ask questions like
+// "what is the highest priority DispatchKey in the set"? (The set itself is
+// not ordered; two sets with the same ids will always have the ids ordered in
+// the same way.)
+//
+// Note [DispatchKeySet Internal Representation]
+// Internally, dispatch keys are packed into 64-bit DispatchKeySet objects
+// that get passed around at runtime.
+// However, there isn't necessarily a 1-to-1 mapping between bits in the keyset
+// and individual dispatch keys.
+//
+// First: why do we have this distinction, and why not map every dispatch key
+// directly to a bit? This is mostly because we have several types of
+// functionalities that different backends would like to customize. For example,
+// we have:
+// - "Dense": CPU, CUDA, XLA, ... (~12 keys)
+// - "Sparse": SparseCPU, SparseCUDA, ...
+// - "Quantized": QuantizedCPU, QuantizedCUDA, QuantizedXLA, ...
+// - "Autograd": AutogradCPU, AutogradCUDA, AutogradXLA, ...
+// The problem is that the total number of keys grows quadratically with [#
+// backends] x [# functionalities], making it very difficult to map each key
+// directly to a bit in a bitset without dramatically increasing the size of the
+// bitset over time.
+//
+// The two enums (BackendComponent and DispatchKey) can be divided roughly into
+// 5 categories.
+//
+// (1) "Building block" keys
+//    (a) backends: Everything in the BackendComponent enum (e.g. CPUBit,
+//        CUDABit)
+//    (b) functionalities: (per-backend) functionality-bit DispatchKeys
+//        (e.g. AutogradFunctionality, Sparse, Dense)
+// (2) "Runtime" keys
+//    (a) "non-customizable backends" (e.g. FPGA)
+//    (b) "non-customizable functionalities" (e.g. Functionalize)
+//    (c) "per-backend instances of customizable functionalities" (e.g. CPU,
+//        SparseCPU, AutogradCPU)
+// (3) "Alias" DispatchKeys (see Note [Alias Dispatch Keys])
+//
+// (1) Building block keys always correspond to individual bits in a
+// DispatchKeySet. They can also be combined in a DispatchKeySet to form actual
+// runtime keys. e.g.
+//     auto dense_cpu_ks = DispatchKeySet({DispatchKey::CPUBit,
+//     DispatchKey::Dense});
+//     // The keyset has the runtime dense-cpu key.
+//     dense_cpu_ks.has(DispatchKey::CPU);
+//     // And it contains the building block keys too.
+//     dense_cpu_ks.has(DispatchKey::CPUBit);
+//     dense_cpu_ks.has(DispatchKey::Dense);
+//
+// Not every backend and not every functionality counts as a "building block
+// key". This is mostly to give us more levers to pull in the design space.
+// Backend keys and functionality keys that count as "building blocks" will
+// contribute to a full cross product of functionality that can be overridden.
+//
+// For example, right now we have at least 12 "backend" building blocks (CPU,
+// CUDA, XLA, ...) and at least 4 "functionality" building blocks (Dense,
+// Sparse, Quantized, AutogradFunctionality, ...). These keys together allow
+// every dispatcher operator to be customized in up to 12*4 different ways. Each
+// of those requires a slot in the operator table of every dispatcher operator.
+// Not every piece of functionality necessarily needs to be customizable
+// per-backend, and not every backend necessarily needs to be able to customize
+// every type of functionality.
+//
+//
+// (2) Every runtime key corresponds directly to a slot in an operator's runtime
+// dispatch table, and you can directly register kernels to a runtime dispatch
+// key.
+//
+// For per-backend functionalities like "Dense" or "AutogradFunctionality",
+// you can think of the corresponding runtime dispatch keys as "instances" of
+// that functionality, per backend. E.g. "CPU", "CUDA", "XLA", etc. are all
+// runtime instances of the "Dense" building block key.
+
+// (2a) and (2b) are represented identically in the DispatchKeySet logic:
+// - backend-agnostic functionalities (e.g. FuncTorchBatched) are NOT
+//   customizable per backend.
+//   In order to do so, we'd need to promote it to a per-backend functionality
+//   "building block" key.
+// - non-customizable backends (e.g. FPGA) can NOT customize existing
+//   functionality like Sparse, Autograd, etc.
+//   In order to do so, we'd need to promote it to a backend "building block"
+//   key.
+//
+// In both cases, these keys directly correspond to runtime slots in the
+// operator table.
+//
+//
+// (3) "Alias" keys
+// See Note [Alias Dispatch Keys]
+//
+// Final note: for anyone making future changes to the Dispatcher +
+// DispatchKeySet internals, there's a closed PR with a basic
+// python-implementation of the Dispatcher that might be useful in quickly
+// testing out and validating changes. See it at
+// https://github.com/pytorch/pytorch/pull/68743
+
+// An undefined tensor is one with an empty tensor type set.
+class DispatchKeySet final {
+ public:
+  enum Full { FULL };
+  enum FullAfter { FULL_AFTER };
+  enum Raw { RAW };
+
+  // NB: default constructor representation as zero is MANDATORY as
+  // use of DispatchKeySet in TLS requires this.
+  constexpr DispatchKeySet() : repr_(0) {}
+
+  constexpr DispatchKeySet(Full)
+      : repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {}
+
+  constexpr DispatchKeySet(FullAfter, DispatchKey t)
+      // LSB after t are OK, but not t itself.
+      // "functionalities" have a notion of ordering (e.g. Autograd > Sparse >
+      // Quantized > Dense). But backends don't really have an ordering.
+      // Therefore, we're enforcing that FullAfter can only be used on
+      // "functionality" keys.
+      : repr_(
+            (1ULL
+             << (num_backends + static_cast<uint8_t>(toFunctionalityKey(t)) -
+                 1)) -
+            1) {
+    *this = add(DispatchKey::PythonDispatcher);
+  }
+
+  // Public version of DispatchKeySet(uint64_t) API; external users
+  // must be explicit when they do this!
+  constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
+
+  constexpr explicit DispatchKeySet(BackendComponent k) {
+    if (k == BackendComponent::InvalidBit) {
+      repr_ = 0;
+    } else {
+      repr_ = 1ULL << (static_cast<uint8_t>(k) - 1);
+    }
+  }
+
+  constexpr explicit DispatchKeySet(DispatchKey k) {
+    if (k == DispatchKey::Undefined) {
+      // Case 1: handle Undefined specifically
+      repr_ = 0;
+    } else if (k <= DispatchKey::EndOfFunctionalityKeys) {
+      // Case 2: handle "functionality-only" keys
+      // These keys have a functionality bit set, but no backend bits
+      // These can technically be either:
+      // - valid runtime keys (e.g. DispatchKey::AutogradOther,
+      //   DispatchKey::FuncTorchBatched, etc)
+      // - "building block" keys that aren't actual runtime keys (e.g.
+      //   DispatchKey::Dense or Sparse)
+      uint64_t functionality_val = 1ULL
+          << (num_backends + static_cast<uint8_t>(k) - 1);
+      repr_ = functionality_val;
+    } else if (k <= DispatchKey::EndOfRuntimeBackendKeys) {
+      // Case 3: "runtime" keys that have a functionality bit AND a backend bit.
+      // First compute which bit to flip for the functionality.
+      auto functionality_k = toFunctionalityKey(k);
+      // The - 1 is because Undefined is technically a "functionality" that
+      // doesn't show up in the bitset. So e.g. Dense is technically the second
+      // functionality, but the lowest functionality bit.
+      uint64_t functionality_val = 1ULL
+          << (num_backends + static_cast<uint8_t>(functionality_k) - 1);
+
+      // then compute which bit to flip for the backend
+      // Case 4a: handle the runtime instances of "per-backend functionality"
+      // keys For example, given DispatchKey::CPU, we should set:
+      // - the Dense functionality bit
+      // - the CPUBit backend bit
+      // first compute which bit to flip for the backend
+      auto backend_k = toBackendComponent(k);
+      uint64_t backend_val = backend_k == BackendComponent::InvalidBit
+          ? 0
+          : 1ULL << (static_cast<uint8_t>(backend_k) - 1);
+      repr_ = functionality_val + backend_val;
+    } else {
+      // At this point, we should have covered every case except for alias keys.
+      // Technically it would be possible to add alias dispatch keys to a
+      // DispatchKeySet, but the semantics are a little confusing and this
+      // currently isn't needed anywhere.
+      repr_ = 0;
+    }
+  }
+
+  constexpr uint64_t keys_to_repr(std::initializer_list<DispatchKey> ks) {
+    uint64_t repr = 0;
+    for (auto k : ks) {
+      repr |= DispatchKeySet(k).repr_;
+    }
+    return repr;
+  }
+
+  constexpr uint64_t backend_bits_to_repr(
+      std::initializer_list<BackendComponent> ks) {
+    uint64_t repr = 0;
+    for (auto k : ks) {
+      repr |= DispatchKeySet(k).repr_;
+    }
+    return repr;
+  }
+
+  explicit constexpr DispatchKeySet(std::initializer_list<DispatchKey> ks)
+      : repr_(keys_to_repr(ks)) {}
+
+  explicit constexpr DispatchKeySet(std::initializer_list<BackendComponent> ks)
+      // Note: for some reason, putting this logic directly in the constructor
+      // appears to fail to compile on CUDA 10.1.
+      // See an example internal failure at
+      // https://www.internalfb.com/intern/skycastle/run/76561193669136035/artifact/actionlog.76561193742069401.stderr
+      : repr_(backend_bits_to_repr(ks)) {}
+
+  // Test if a DispatchKey is in the set
+  inline bool has(DispatchKey t) const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(t != DispatchKey::Undefined);
+    return has_all(DispatchKeySet(t));
+  }
+  constexpr bool has_backend(BackendComponent t) const {
+    return has_all(DispatchKeySet(t));
+  }
+
+  // Given a DispatchKeySet of functionality keys and (potentially) backend
+  // keys, tests if all of them are in the current set.
+  constexpr bool has_all(DispatchKeySet ks) const {
+    return static_cast<bool>((repr_ & ks.repr_) == ks.repr_);
+  }
+
+  // Given a DispatchKeySet of functionality keys and (potentially) backend
+  // keys, tests if any of them are in the current set. This could technically
+  // be pretty easily implemented using has(). It is strictly a perf
+  // optimization though. There are many places in the code base where we want
+  // to test for multiple functionality keys together. HOWEVER, runtime
+  // per-backend functionality keys aren't allowed to be used with this
+  // function, because you can end up with weird results. e.g.
+  // DispatchKeySet(DispatchKey::AutogradCPU).has_any(DispatchKeySet(DispatchKey::CPU))
+  // would return true.
+  inline bool has_any(DispatchKeySet ks) const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        // Either there are no backend bits in the input keyset
+        ((ks.repr_ & full_backend_mask) == 0) ||
+        // or there are no per-backend-functionality bits
+        // See [Note: Per-Backend Functionality Dispatch Keys]
+        ((ks &
+          DispatchKeySet({
+              DispatchKey::Dense,
+              DispatchKey::Quantized,
+              DispatchKey::Sparse,
+              DispatchKey::AutogradFunctionality,
+          })
+              .repr_) == 0));
+    return static_cast<bool>((repr_ & ks.repr_) != 0);
+  }
+  // Test if DispatchKeySet is a superset of ks.
+  bool isSupersetOf(DispatchKeySet ks) const {
+    return (repr_ & ks.repr_) == ks.repr_;
+  }
+  // Perform set union
+  constexpr DispatchKeySet operator|(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ | other.repr_);
+  }
+  // Perform set intersection
+  constexpr DispatchKeySet operator&(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ & other.repr_);
+  }
+  // Compute the set difference self - other,
+  // but ONLY for the functionality keys.
+  // Any backend bits set on self will remain unchanged.
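+  // For example (a sketch; both keysets are built from keys in this header):
+  //   auto ks = DispatchKeySet({DispatchKey::CPU, DispatchKey::AutogradCPU});
+  //   auto diff = ks - DispatchKeySet(DispatchKey::AutogradFunctionality);
+  //   // diff still has the CPU backend bit and the Dense bit (so
+  //   // diff.has(DispatchKey::CPU) is true); only the AutogradFunctionality
+  //   // functionality bit was removed.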
+  // See Note [Removing keys from DispatchKeySet Only Affects Functionality
+  // Keys]
+  constexpr DispatchKeySet operator-(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ & (full_backend_mask | ~other.repr_));
+  }
+
+  // Compute self ^ other
+  constexpr DispatchKeySet operator^(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ ^ other.repr_);
+  }
+  bool operator==(DispatchKeySet other) const {
+    return repr_ == other.repr_;
+  }
+  bool operator!=(DispatchKeySet other) const {
+    return repr_ != other.repr_;
+  }
+  // Add a DispatchKey to the DispatchKey set. Does NOT mutate,
+  // returns the extended DispatchKeySet!
+  C10_NODISCARD constexpr DispatchKeySet add(DispatchKey t) const {
+    return *this | DispatchKeySet(t);
+  }
+  C10_NODISCARD constexpr DispatchKeySet add(DispatchKeySet ks) const {
+    return *this | ks;
+  }
+
+  // Remove a DispatchKey from the DispatchKey set.
+  // This is generally not an operation you should be doing
+  // (it's used to implement the printing overload, operator<<)
+  //
+  // Note [Removing keys from DispatchKeySet Only Affects Functionality Keys]
+  // Only functionality bits are allowed to be removed from a keyset.
+  // For now, we're only allowing removal of "functionality bits" from the
+  // keyset, which is specifically needed by the fallthrough key calculation
+  // logic. Why is removing backend bits problematic? Consider this example:
+  //
+  // DispatchKeySet([DispatchKey.CPU, DispatchKey.AutogradCUDA,
+  // DispatchKey.CUDA]).remove(DispatchKey.AutogradCUDA)
+  // DispatchKeySet([DispatchKey.CPU,
+  // DispatchKey.AutogradCUDA]).remove(DispatchKey.AutogradCUDA)
+  //
+  // What do we want to happen?
+  // Technically, we'd like it to be true that after removal,
+  // the first keyset still has the CUDA dispatch key while the second doesn't.
+  // Unfortunately there's no way to represent that, because the two keysets are
+  // represented the same way internally:
+  //   functionality bits: Autograd, Dense
+  //   backend bits: CPU, CUDA
+  //
+  // Instead, remove(DispatchKey.AutogradCPU) will only remove the "Autograd"
+  // bit from the bitset.
+  C10_NODISCARD constexpr DispatchKeySet remove(DispatchKey t) const {
+    return DispatchKeySet(
+        repr_ & ~(DispatchKeySet(t).repr_ & ~full_backend_mask));
+  }
+  // You're allowed to remove a backend bit from a DispatchKeySet,
+  // but you have to be explicit about it (remove_backend() instead of
+  // remove()).
+  constexpr DispatchKeySet remove_backend(BackendComponent b) const {
+    return DispatchKeySet(repr_ & ~(DispatchKeySet(b).repr_));
+  }
+  // Is the set empty? (AKA undefined tensor)
+  bool empty() const {
+    return repr_ == 0;
+  }
+  uint64_t raw_repr() {
+    return repr_;
+  }
+
+  DispatchKey highestFunctionalityKey() const {
+    auto functionality_idx = indexOfHighestBit();
+    // This means that none of the functionality bits were set.
+    if (functionality_idx < num_backends)
+      return DispatchKey::Undefined;
+    // The first num_backend bits in the keyset don't correspond to real
+    // dispatch keys.
+    return static_cast<DispatchKey>(functionality_idx - num_backends);
+  }
+
+  // This is similar to toBackendComponent(DispatchKey), but less restrictive.
+  // toBackendComponent() errors out if the key that it was passed has no
+  // backend bits, which is useful for error checking. We need a version of that
+  // here that can also handle "fake" backends like FPGA, because they need to
+  // map to the AutogradOther key. For those backends, we return
+  // BackendComponent::InvalidBit.
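+  // For example (a sketch):
+  //   DispatchKeySet(DispatchKey::SparseCPU).highestBackendKey()
+  //       == BackendComponent::CPUBit
+  //   DispatchKeySet(DispatchKey::FPGA).highestBackendKey()
+  //       == BackendComponent::InvalidBit  // FPGA sets no backend bits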
+  BackendComponent highestBackendKey() const {
+    // mask to mask out functionality bits
+    auto backend_idx =
+        DispatchKeySet(repr_ & full_backend_mask).indexOfHighestBit();
+    // all zeros across the backend bits means that no backend bits are set.
+    if (backend_idx == 0)
+      return BackendComponent::InvalidBit;
+    return static_cast<BackendComponent>(backend_idx);
+  }
+
+  // returns the DispatchKey of highest priority in the set.
+  DispatchKey highestPriorityTypeId() const {
+    auto functionality_k = highestFunctionalityKey();
+    if (isPerBackendFunctionalityKey(functionality_k)) {
+      return toRuntimePerBackendFunctionalityKey(
+          functionality_k, highestBackendKey());
+    }
+    return functionality_k;
+  }
+
+  // Returns the index of the most-significant bit in the keyset.
+  // This is used as part of the calculation into the operator table to get:
+  // - the highest "functionality" bit in the keyset.
+  // - the highest "backend" bit in the keyset.
+  uint8_t indexOfHighestBit() const {
+    return 64 - llvm::countLeadingZeros(repr_);
+  }
+
+#if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
+  // [Note: Trimmed Mobile Dispatch Keys]
+  /**
+   * The method below maps the dispatch key in the enum DispatchKey to an
+   * integer index in the dispatchTable_ array in OperatorEntry. The array
+   * is trimmed for mobile to reduce peak memory usage since it's
+   * unnecessary to reserve additional space for dispatch keys that will
+   * never be used on mobile.
+   */
+  int getDispatchTableIndexForDispatchKeySet() const {
+    auto dk = highestPriorityTypeId();
+    switch (dk) {
+      case DispatchKey::Undefined:
+        return 0;
+      case DispatchKey::CPU:
+        return 1;
+      case DispatchKey::QuantizedCPU:
+        return 2;
+      case DispatchKey::SparseCPU:
+        return 3;
+      case DispatchKey::BackendSelect:
+        return 4;
+      case DispatchKey::ADInplaceOrView:
+        return 5;
+      case DispatchKey::AutogradOther:
+        return 6;
+      case DispatchKey::AutogradCPU:
+        return 7;
+      default:
+        return -1;
+    }
+  }
+#else
+  // returns the index in the operator table of the highest priority key in the
+  // keyset. Note that we could in theory implement this using
+  // highestPriorityTypeId(), but this code is very hotpath and we can do it
+  // faster without it.
+  int getDispatchTableIndexForDispatchKeySet() const {
+    auto functionality_idx =
+        DispatchKeySet(repr_ >> num_backends).indexOfHighestBit();
+    auto offset_and_mask = offsetsAndMasks()[functionality_idx];
+    // Mask the functionality bits out first, then right-shift by 1.
+    // right-shifting by 1 because everything is zero-indexed.
+    // E.g. 000001 (CPU) should give us an offset of 0, 000010 (CUDA) should
+    // give us an offset of 1, etc.
+    auto backend_idx =
+        DispatchKeySet((repr_ & offset_and_mask.mask) >> 1).indexOfHighestBit();
+    return offset_and_mask.offset + backend_idx;
+  }
+#endif
+
+  // returns the "index" of the highest priority backend in the keyset.
+  // This is pretty similar to highestBackendKey(), but:
+  // - It's hotpath code (part of the runtime bitset calculation)
+  // - It returns an integer index, not an enum value
+  // - Everything is shifted to the right by 1.
+  //   BackendComponent::InvalidBit is technically the lowest enum value,
+  //   but it isn't included in the runtime table. So CPUBit = 1, CUDABit = 2,
+  //   etc.
+  uint64_t getBackendIndex() const {
+    return DispatchKeySet((repr_ & full_backend_mask) >> 1).indexOfHighestBit();
+  }
+
+ private:
+  constexpr DispatchKeySet(uint64_t repr) : repr_(repr) {}
+  uint64_t repr_ = 0;
+
+ public:
+  // STL iterator for DispatchKeySet. Iterates through all runtime DispatchKeys
+  // in the set. The iterator is only invalidated by the destruction of the
+  // underlying DispatchKeySet as the iterator stores a pointer to the raw
+  // representation of the DispatchKeySet. Note: When we encounter a per-backend
+  // functionality (e.g. Dense or Sparse), we will iterate through EVERY backend
+  // in the keyset, for that functionality. For example, if the next
+  // functionality key to iterate over is Autograd, and the backend bits in the
+  // keyset correspond to [BackendComponent::CPUBit, BackendComponent::CUDABit],
+  // then the next two keys we return will be DispatchKey::AutogradCPU,
+  // DispatchKey::AutogradCUDA (CPU first because it has lower precedence than
+  // CUDA in DispatchKey.h).
+  class iterator {
+   public:
+    using self_type = iterator;
+    using iterator_category = std::input_iterator_tag;
+    using value_type = DispatchKey;
+    using difference_type = ptrdiff_t;
+    using reference = value_type&;
+    using pointer = value_type*;
+    // final mask value should mask out the entire keyset
+    static const uint8_t end_iter_mask_val =
+        num_backends + num_functionality_keys;
+    // final key value should be the last DispatchKey
+    static const uint8_t end_iter_key_val = num_functionality_keys;
+
+    // current_dispatchkey_idx_ will iterate through all functionality bits.
+    // current_backendcomponent_idx_ will iterate through all backend bits.
+    explicit iterator(
+        const uint64_t* data_ptr,
+        uint8_t next_functionality = num_backends,
+        uint8_t next_backend = 0)
+        : data_ptr_(data_ptr),
+          next_functionality_(next_functionality),
+          next_backend_(next_backend),
+          // These are in an invalid state at construction time, and set by the
+          // first increment call
+          current_dispatchkey_idx_(end_iter_key_val),
+          current_backendcomponent_idx_(end_iter_key_val) {
+      // Go to the first key in the set
+      TORCH_INTERNAL_ASSERT(
+          next_functionality_ >= num_backends,
+          "num_backends=",
+          static_cast<uint32_t>(num_backends),
+          "next_functionality_=",
+          static_cast<uint32_t>(next_functionality_));
+      ++(*this);
+    }
+
+    C10_API self_type& operator++();
+
+    self_type operator++(int) {
+      self_type previous_iterator = *this;
+      ++(*this);
+      return previous_iterator;
+    }
+
+    bool operator==(const self_type& rhs) const {
+      return next_functionality_ == rhs.next_functionality_ &&
+          current_dispatchkey_idx_ == rhs.current_dispatchkey_idx_ &&
+          next_backend_ == rhs.next_backend_ &&
+          current_backendcomponent_idx_ == rhs.current_backendcomponent_idx_;
+    }
+    bool operator!=(const self_type& rhs) const {
+      return next_functionality_ != rhs.next_functionality_ ||
+          current_dispatchkey_idx_ != rhs.current_dispatchkey_idx_ ||
+          next_backend_ != rhs.next_backend_ ||
+          current_backendcomponent_idx_ != rhs.current_backendcomponent_idx_;
+    }
+    DispatchKey operator*() const {
+      auto functionality_key =
+          static_cast<DispatchKey>(current_dispatchkey_idx_);
+      if (isPerBackendFunctionalityKey(functionality_key)) {
+        auto next_key = toRuntimePerBackendFunctionalityKey(
+            functionality_key,
+            static_cast<BackendComponent>(current_backendcomponent_idx_));
+        // We expect all of the Dense, Sparse, Quantized, and Autograd keys to
+        // be ordered the same way with respect to their backends
+        TORCH_INTERNAL_ASSERT(
+            toBackendComponent(next_key) ==
+                static_cast<BackendComponent>(current_backendcomponent_idx_),
+            "Tried to map functionality key ",
+            toString(functionality_key),
+            " and backend bit ",
+            toString(
+                static_cast<BackendComponent>(current_backendcomponent_idx_)),
+            " to a runtime key, but ended up with ",
+            toString(next_key),
+            ". This can happen if the order of the backend dispatch keys in DispatchKey.h isn't consistent.",
+            " Please double check that enum for inconsistencies.");
+        return next_key;
+      } else {
+        return functionality_key;
+      }
+    }
+
+   private:
+    const uint64_t* data_ptr_;
+    uint8_t next_functionality_;
+    uint8_t next_backend_;
+    uint8_t current_dispatchkey_idx_;
+    uint8_t current_backendcomponent_idx_;
+  };
+
+ public:
+  // Returns iterator to the first key in the set. If no keys are in the
+  // set, then will return the end iterator.
+  iterator begin() const {
+    return iterator(&repr_);
+  }
+
+  // We do not need to iterate beyond EndOfFunctionalityKeys so we will treat
+  // this as the end iterator.
+  iterator end() const {
+    return iterator(&repr_, iterator::end_iter_mask_val);
+  }
+};
+
+C10_API std::string toString(DispatchKeySet);
+C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
+
+C10_API inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
+  return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
+}
+
+// Alias key DispatchKey::Autograd maps to
+// (autograd_dispatch_keyset x full_backend_mask)
+// NB: keys in this set also get associated with CompositeImplicitAutograd
+//
+// Note [autograd_dispatch_keyset Does Not Include Backend Bits]
+// We don't want to include any backend bits (BackendComponent::CPUBit, etc)
+// directly in autograd_dispatch_keyset.
+// Why? keysets like autograd_dispatch_keyset are commonly used to remove
+// autograd keys from a DispatchKeySet throughout the code base. However, you
+// are only allowed to remove functionality bits from a keyset, not backend
+// bits. See Note [Removing keys from DispatchKeySet Only Affects Functionality
+// Keys] for details. To be consistent and avoid confusion, we're explicitly
+// setting up autograd_dispatch_keyset to not have any backend bits.
+constexpr DispatchKeySet autograd_dispatch_keyset = DispatchKeySet({
+    DispatchKey::AutogradFunctionality,
+    DispatchKey::AutogradOther,
+    DispatchKey::AutogradNestedTensor,
+});
+
+constexpr DispatchKeySet autocast_dispatch_keyset = DispatchKeySet({
+    DispatchKey::AutocastCPU,
+    DispatchKey::AutocastCUDA,
+    DispatchKey::AutocastXPU,
+});
+
+// See Note [TLS Initialization]
+constexpr DispatchKeySet default_included_set = DispatchKeySet({
+    DispatchKey::BackendSelect,
+    DispatchKey::ADInplaceOrView,
+});
+
+constexpr DispatchKeySet default_excluded_set = DispatchKeySet({
+    DispatchKey::AutocastCPU,
+    DispatchKey::AutocastCUDA,
+    DispatchKey::AutocastXPU,
+});
+
+constexpr DispatchKeySet autograd_dispatch_keyset_with_ADInplaceOrView =
+    autograd_dispatch_keyset | DispatchKeySet(DispatchKey::ADInplaceOrView);
+
+constexpr DispatchKeySet python_ks = DispatchKeySet({
+    DispatchKey::Python,
+    DispatchKey::PythonTLSSnapshot,
+});
+
+constexpr DispatchKeySet sparse_ks = DispatchKeySet(DispatchKey::Sparse);
+
+constexpr DispatchKeySet sparse_csr_ks =
+    DispatchKeySet({DispatchKey::SparseCsrCPU, DispatchKey::SparseCsrCUDA});
+
+constexpr DispatchKeySet mkldnn_ks = DispatchKeySet(DispatchKey::MkldnnCPU);
+
+// backend dispatch keys that map to DispatchKey::AutogradOther
+// NB: keys in this set also get associated with CompositeImplicitAutograd
+constexpr DispatchKeySet autogradother_backends =
+    DispatchKeySet(
+        // HIP and VE aren't in this list: they now have their own backend bits
+        // which means that they can now have their own Autograd keys.
+        // Technically, HIP will now redispatch to its own custom AutogradHIP
+        // slot in the runtime table.
+        {DispatchKey::FPGA,
+         DispatchKey::ORT,
+         DispatchKey::Vulkan,
+         DispatchKey::Metal,
+         DispatchKey::SparseCsrCPU,
+         DispatchKey::SparseCsrCUDA,
+         DispatchKey::CustomRNGKeyId,
+         DispatchKey::MkldnnCPU,
+         // Sparse and Quantized backends also live here.
+         DispatchKey::Sparse,
+         DispatchKey::Quantized})
+    // Including the backend bits because this keyset is used during op
+    // registration, which requires looping over all runtime autogradother
+    // backend keys.
+    | DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
+
+// The set of dispatch keys that come after autograd
+// n.b. this relies on the fact that AutogradOther is currently the lowest
+// Autograd key
+constexpr DispatchKeySet after_autograd_keyset =
+    DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::AutogradOther);
+
+// The set of dispatch keys that come after ADInplaceOrView
+constexpr DispatchKeySet after_ADInplaceOrView_keyset = DispatchKeySet(
+    DispatchKeySet::FULL_AFTER,
+    c10::DispatchKey::ADInplaceOrView);
+
+// The set of dispatch keys that come after Functionalize
+constexpr DispatchKeySet after_func_keyset =
+    DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::Functionalize)
+        .remove(
+            // NOTE: we also need to remove ADInplaceOrView from the keyset when
+            // redispatching after the func kernels. This is because we're not
+            // calling the same op; we originally called an inplace op, and now
+            // we aren't. The original key calculation figured out which keys
+            // were Fallthrough based on the inplace op. That means that it did
+            // not include the ADInPlaceOrView kernel as a fallthrough key.
+            // However, we WANT the ADInPlaceOrView kernel to be ignored now
+            // that we're calling an out-of-place op. Re-invoking
+            // Dispatcher::call would re-run the Fallthrough key calculation and
+            // get us that, but at::redispatch is more performant. We can get
+            // away with it by explicitly removing the key here.
+            c10::DispatchKey::ADInplaceOrView);
+
+constexpr DispatchKeySet backend_bitset_mask =
+    DispatchKeySet(DispatchKeySet::RAW, (1ULL << num_backends) - 1);
+
+constexpr auto inplace_or_view_ks =
+    DispatchKeySet(DispatchKey::ADInplaceOrView);
+constexpr auto autograd_cpu_ks = DispatchKeySet(DispatchKey::AutogradCPU);
+constexpr auto autograd_ipu_ks = DispatchKeySet(DispatchKey::AutogradIPU);
+constexpr auto autograd_xpu_ks = DispatchKeySet(DispatchKey::AutogradXPU);
+constexpr auto autograd_cuda_ks = DispatchKeySet(DispatchKey::AutogradCUDA);
+constexpr auto autograd_xla_ks = DispatchKeySet(DispatchKey::AutogradXLA);
+constexpr auto autograd_lazy_ks = DispatchKeySet(DispatchKey::AutogradLazy);
+constexpr auto autograd_meta_ks = DispatchKeySet(DispatchKey::AutogradMeta);
+constexpr auto autograd_mps_ks = DispatchKeySet(DispatchKey::AutogradMPS);
+constexpr auto autograd_hpu_ks = DispatchKeySet(DispatchKey::AutogradHPU);
+constexpr auto autograd_privateuse1_ks =
+    DispatchKeySet(DispatchKey::AutogradPrivateUse1);
+constexpr auto autograd_privateuse2_ks =
+    DispatchKeySet(DispatchKey::AutogradPrivateUse2);
+constexpr auto autograd_privateuse3_ks =
+    DispatchKeySet(DispatchKey::AutogradPrivateUse3);
+constexpr auto autograd_other_ks = DispatchKeySet(DispatchKey::AutogradOther);
+constexpr auto autograd_nested =
+    DispatchKeySet(DispatchKey::AutogradNestedTensor);
+// keyset corresponding to functorch keys that have their own dedicated
+// TensorImpl subclass.
+constexpr auto functorch_transforms_ks = DispatchKeySet(
+    {DispatchKey::FuncTorchBatched,
+     DispatchKey::FuncTorchVmapMode,
+     DispatchKey::Batched,
+     DispatchKey::VmapMode,
+     DispatchKey::FuncTorchGradWrapper});
+
+// This keyset has:
+// (1) the functionality bits corresponding to backends (dense, sparse,
+//     quantized)
+// (2) all of the backend bits set
+constexpr DispatchKeySet backend_functionality_keys =
+    DispatchKeySet({
+        DispatchKey::Dense,
+        DispatchKey::Quantized,
+        DispatchKey::Sparse,
+    }) |
+    DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
+
+struct OpTableOffsetAndMask {
+  uint16_t offset;
+  uint16_t backend_mask;
+};
+
+static_assert(
+    num_backends <= 16,
+    "Right now we expect the number of backends not to exceed 16. In the (unlikely) event"
+    " that this changes, the size of OpTableOffsetAndMask::backend_mask needs to be increased too.");
+
+// true if t is a backend dispatch key
+C10_API bool isBackendDispatchKey(DispatchKey t);
+
+// Resolve alias dispatch key to DispatchKeySet if applicable
+C10_API DispatchKeySet getRuntimeDispatchKeySet(DispatchKey t);
+
+// Resolve alias dispatch key to DispatchKeySet if applicable,
+// and check if k is a part of that set
+C10_API bool runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k);
+
+// Returns a DispatchKeySet of all backend keys mapped to Autograd dispatch key
+// t, DispatchKeySet is empty if t is not an alias of DispatchKey::Autograd.
+C10_API DispatchKeySet getBackendKeySetFromAutograd(DispatchKey t);
+
+// Returns a DispatchKeySet of autograd related keys mapped to backend.
+// for a given backend key, use the associated autograd key.
+// for non-backend keys, use AutogradOther as a default.
+// Note: it's convenient and fast to return a default here rather than (say)
+// returning an optional<DispatchKeySet>, or throwing. But it makes callers
+// responsible for either a) enforcing the invariant that only backend keys
+// be passed as arguments, or b) interpreting our return value carefully.
+inline DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t) {
+  switch (t) {
+    case BackendComponent::CPUBit:
+      return inplace_or_view_ks | autograd_cpu_ks;
+    case BackendComponent::IPUBit:
+      return inplace_or_view_ks | autograd_ipu_ks;
+    case BackendComponent::XPUBit:
+      return inplace_or_view_ks | autograd_xpu_ks;
+    case BackendComponent::CUDABit:
+      return inplace_or_view_ks | autograd_cuda_ks;
+    case BackendComponent::XLABit:
+      return inplace_or_view_ks | autograd_xla_ks;
+    case BackendComponent::LazyBit:
+      return inplace_or_view_ks | autograd_lazy_ks;
+    case BackendComponent::MetaBit:
+      return inplace_or_view_ks | autograd_meta_ks;
+    case BackendComponent::MPSBit:
+      return inplace_or_view_ks | autograd_mps_ks;
+    case BackendComponent::HPUBit:
+      return inplace_or_view_ks | autograd_hpu_ks;
+    case BackendComponent::PrivateUse1Bit:
+      return inplace_or_view_ks | autograd_privateuse1_ks;
+    case BackendComponent::PrivateUse2Bit:
+      return inplace_or_view_ks | autograd_privateuse2_ks;
+    case BackendComponent::PrivateUse3Bit:
+      return inplace_or_view_ks | autograd_privateuse3_ks;
+    default:
+      return inplace_or_view_ks | autograd_other_ks;
+  }
+}
+
+// Returns a DispatchKeySet of autocast related keys mapped to backend.
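+// For example (a sketch): CPUBit maps to {AutocastCPU}; CUDABit (and, because
+// autocast currently reuses the CUDA key there, XLABit) maps to
+// {AutocastCUDA}; backends without autocast support map to the empty keyset.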
+inline DispatchKeySet getAutocastRelatedKeySetFromBackend(BackendComponent t) {
+  constexpr auto autocast_cpu_ks = DispatchKeySet(DispatchKey::AutocastCPU);
+  constexpr auto autocast_xpu_ks = DispatchKeySet(DispatchKey::AutocastXPU);
+  constexpr auto autocast_cuda_ks = DispatchKeySet(DispatchKey::AutocastCUDA);
+  switch (t) {
+    case BackendComponent::CPUBit:
+      return autocast_cpu_ks;
+    case BackendComponent::XPUBit:
+      return autocast_xpu_ks;
+    case BackendComponent::CUDABit:
+    case BackendComponent::XLABit:
+      return autocast_cuda_ks;
+    default:
+      return DispatchKeySet();
+  }
+}
+
+// returns the "backend" DispatchKey of highest priority in the set.
+// This is basically like highestBackendKey(), except that we have some
+// "functionality" bits that correspond to backends (Sparse, Quantized)
+inline DispatchKey highestPriorityBackendTypeId(DispatchKeySet ks) {
+  return (ks & backend_functionality_keys).highestPriorityTypeId();
+}
+
+// This API exists because we have a use case for checking
+// getRuntimeDispatchKeySet(alias).has(DispatchKey::Undefined)
+// in OperatorEntry.cpp but we disallow it in has() API.
+C10_API bool isIncludedInAlias(DispatchKey k, DispatchKey alias);
+
+// Historically, every tensor only had a single DispatchKey, and it was always
+// something like CPU, and there wasn't any of this business where TLS
+// could cause the DispatchKey of a tensor to change. But we still have some
+// legacy code that is still using DispatchKey for things like instanceof
+// checks; if at all possible, refactor the code to stop using DispatchKey in
+// those cases.
+static inline DispatchKey legacyExtractDispatchKey(DispatchKeySet s) {
+  // NB: If you add any extra keys that can be stored in TensorImpl on
+  // top of existing "backend" keys like CPU/CUDA, you need to add it
+  // here. At the moment, autograd keys and ADInplaceOrView key need this
+  // treatment;
+  return (s - autograd_dispatch_keyset_with_ADInplaceOrView -
+          autocast_dispatch_keyset -
+          DispatchKeySet({DispatchKey::PythonTLSSnapshot, DispatchKey::Python}))
+      .highestPriorityTypeId();
+}
+
+template <class T>
+using is_not_DispatchKeySet = guts::negation<std::is_same<DispatchKeySet, T>>;
+
+// Given a function type, constructs a function_traits type that drops the first
+// parameter type if the first parameter is of type DispatchKeySet. NB:
+// DispatchKeySet is currently explicitly hidden from JIT (mainly to avoid
+// pushing unnecessary arguments on the stack - see Note [ Plumbing Keys Through
+// the Dispatcher] for details). If at any point in the future we need to expose
+// this type to JIT, revisit the usage of this type alias.
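+// For example (a sketch): given FuncType = void(DispatchKeySet, int),
+// remove_DispatchKeySet_arg_from_func<FuncType> describes the signature
+// void(int); a FuncType without a leading DispatchKeySet parameter is left
+// unchanged.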
+template <class FuncType> +using remove_DispatchKeySet_arg_from_func = guts::make_function_traits_t< + typename guts::infer_function_traits_t<FuncType>::return_type, + typename std::conditional_t< + std::is_same< + DispatchKeySet, + typename guts::typelist::head_with_default_t< + void, + typename guts::infer_function_traits_t< + FuncType>::parameter_types>>::value, + guts::typelist::drop_if_nonempty_t< + typename guts::infer_function_traits_t<FuncType>::parameter_types, + 1>, + typename guts::infer_function_traits_t<FuncType>::parameter_types>>; +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/DynamicCast.h b/voice_bridge/torch/include/c10/core/DynamicCast.h new file mode 100644 index 0000000000000000000000000000000000000000..0a2664ad90ecacb785667ff3b9f0a183e59a2b92 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/DynamicCast.h @@ -0,0 +1,119 @@ +#pragma once + +#include <c10/core/ScalarType.h> +#include <c10/macros/Macros.h> +#include <c10/util/Load.h> +#include <c10/util/TypeCast.h> + +namespace c10 { + +// Dynamic type casting utils: +// - fetch_and_cast +// - cast_and_store +// +// fetch_and_cast fetches a value with the dynamic type specified by a ScalarType +// from a void pointer and casts it to a static type. +// +// cast_and_store casts a statically typed value into the dynamic type specified +// by a ScalarType, and stores it into a void pointer. +// +// NOTE: +// +// Dynamic casting allows us to support type promotion without blowing up +// the combination space: For example, without dynamic cast, in order to +// implement `add_` with type promotion, we would need something like +// +// AT_DISPATCH_ALL_TYPES(output.dtype(), +// AT_DISPATCH_ALL_TYPES(input1.dtype(), +// AT_DISPATCH_ALL_TYPES(input2.dtype(), +// [](arg0_t a, arg1_t b) -> out_t { return a + b; } +// ) +// ) +// ) +// +// If we support N dtypes, the above code would generate the a+b kernel for +// all the N * N * N different supported types; the compilation time and +// binary size would become horrible. +// +// Dynamic casting might sound like a bad idea in terms of performance. +// Especially if you ever do it in a loop, you are going to do a billion tests. +// But in practice it is not as bad as it might look: +// +// - on CPU, this is a branch that always has the same outcome, therefore +// hopefully the branch predictor could do the job pretty well +// - on GPU, these branches will not diverge, so we could still have the same +// warp executing the same line of code +// - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to +// check an integer does not hurt the performance much because the ALUs would +// wait for load instructions anyway. +// +// For the discussion and benchmark, refer to: +// - https://github.com/pytorch/pytorch/pull/28343 +// - https://github.com/pytorch/pytorch/pull/28344 +// - https://github.com/pytorch/pytorch/pull/28345 +// + +#ifdef C10_HOST_DEVICE +#define ERROR_UNSUPPORTED_CAST CUDA_KERNEL_ASSERT(false); +#else +#define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type"); +#endif + +// Fetch a value with dynamic type src_type from ptr, and cast it to static type +// dest_t. +#define FETCH_AND_CAST_CASE(type, scalartype) \ + case ScalarType::scalartype: \ + return c10::convert<dest_t>(c10::load<type>(ptr)); + +template <typename dest_t> +C10_HOST_DEVICE inline dest_t fetch_and_cast( + const ScalarType src_type, + const void* ptr) { + switch (src_type) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(FETCH_AND_CAST_CASE) + default: + ERROR_UNSUPPORTED_CAST + } + return dest_t(0); // just to avoid compiler warning +} + +// Cast a value with static type src_t into dynamic dest_type, and store it to +// ptr.
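+// +// A usage sketch (editor's illustration, not part of the original header; the +// buffer and values are made up): +// double buf; +// // store the float 1.5f into buf, converting to buf's dynamic type (double) +// cast_and_store<float>(ScalarType::Double, &buf, 1.5f); +// // fetch buf back as a float, converting from its dynamic double type +// float x = fetch_and_cast<float>(ScalarType::Double, &buf);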
+#define CAST_AND_STORE_CASE(type, scalartype) \ + case ScalarType::scalartype: \ + *(type*)ptr = c10::convert(value); \ + return; +template +C10_HOST_DEVICE inline void cast_and_store( + const ScalarType dest_type, + void* ptr, + src_t value) { + switch (dest_type) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(CAST_AND_STORE_CASE) + default:; + } + ERROR_UNSUPPORTED_CAST +} + +#define DEFINE_UNCASTABLE(T, scalartype_) \ + template <> \ + C10_HOST_DEVICE inline T fetch_and_cast( \ + const ScalarType src_type, const void* ptr) { \ + CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); \ + return c10::load(ptr); \ + } \ + template <> \ + C10_HOST_DEVICE inline void cast_and_store( \ + const ScalarType dest_type, void* ptr, T value) { \ + CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \ + *(T*)ptr = value; \ + } + +AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE) + +#undef FETCH_AND_CAST_CASE +#undef CAST_AND_STORE_CASE +#undef DEFINE_UNCASTABLE +#undef ERROR_UNSUPPORTED_CAST + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/Event.h b/voice_bridge/torch/include/c10/core/Event.h new file mode 100644 index 0000000000000000000000000000000000000000..d1d82646d410c32626fcddf34321fb41b1b76634 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/Event.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +namespace c10 { + +/** + * A backend-generic movable, not copyable, not thread-safe event. + * + * The design of this event follows that of CUDA and HIP events. These events + * are recorded and waited on by streams and can be rerecorded to, + * each rerecording essentially creating a new version of the event. + * For example, if (in CPU time), stream X is asked to record E, + * stream Y waits on E, and stream X is asked to record E again, then Y will + * wait for X to finish the first call to record and not the second, because + * it's waiting on the first version of event E, not the second. + * Querying an event only returns the status of its most recent version. + * + * Backend-generic events are implemented by this class and + * impl::InlineEvent. In addition to these events there are also + * some backend-specific events, like ATen's CUDAEvent. Each of these + * classes has its own use. + * + * impl::InlineEvent<...> or a backend-specific event should be + * preferred when the backend is known at compile time and known to + * be compiled. Backend-specific events may have additional functionality. + * + * This Event should be used if a particular backend may not be available, + * or the backend required is not known at compile time. + * + * These generic events are built on top of DeviceGuardImpls, analogous + * to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls," + * is no longer entirely accurate, as these classes implement the + * backend-specific logic for a generic backend interface. + * + * See DeviceGuardImplInterface.h for a list of all supported flags. 
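+ * + * A usage sketch (editor's illustration, not from the original header; + * `producer` and `consumer` are assumed to be c10::Stream objects on the + * same CUDA device): + * + * c10::Event ev(c10::DeviceType::CUDA); + * ev.record(producer); // version 1 of ev is enqueued on `producer` + * ev.block(consumer); // `consumer` waits until version 1 is recorded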
+ */ + +struct Event final { + // Constructors + Event() = delete; + Event( + const DeviceType _device_type, + const EventFlag _flag = EventFlag::PYTORCH_DEFAULT) + : impl_{_device_type, _flag} {} + + // Copy constructor and copy assignment operator (deleted) + Event(const Event&) = delete; + Event& operator=(const Event&) = delete; + + // Move constructor and move assignment operator + Event(Event&& other) : impl_{std::move(other.impl_)} {} + Event& operator=(Event&& other) { + impl_.swap(std::move(other.impl_)); + return *this; + } + + // Destructor + ~Event() = default; + + // Getters + Device device() const noexcept { + return Device(device_type(), device_index()); + } + DeviceType device_type() const noexcept { + return impl_.device_type(); + } + DeviceIndex device_index() const noexcept { + return impl_.device_index(); + } + EventFlag flag() const noexcept { + return impl_.flag(); + } + bool was_marked_for_recording() const noexcept { + return impl_.was_marked_for_recording(); + } + + /** + * Calls record() if and only if record() has never been called for this + * event. Note: because Event is not thread-safe, recordOnce() may call + * record() multiple times if called from multiple threads. + */ + void recordOnce(const Stream& stream) { + impl_.recordOnce(stream); + } + + /** + * Increments the event's version and enqueues a job with this version + * in the stream's work queue. When the stream processes that job, + * it notifies all streams waiting on / blocked by that version of the + * event to continue, and marks that version as recorded. + */ + void record(const Stream& stream) { + impl_.record(stream); + } + + /** + * Does nothing if the event has not been scheduled to be recorded. + * If the event was previously enqueued to be recorded, a command + * to wait for the version of the event that exists at the time of this call + * is inserted in the stream's work queue. + * When the stream reaches this command it will stop processing + * additional commands until that version of the event is marked as recorded. + */ + void block(const Stream& stream) const { + impl_.block(stream); + } + + /** + * Returns true if (and only if) + * (1) the event has never been scheduled to be recorded, or + * (2) the current version is marked as recorded. + * Returns false otherwise. + */ + bool query() const { + return impl_.query(); + } + + private: + impl::InlineEvent<impl::VirtualGuardImpl> impl_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/GeneratorImpl.h b/voice_bridge/torch/include/c10/core/GeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..389bd6271403b74a8b0c9b7e74247afd6dafe442 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/GeneratorImpl.h @@ -0,0 +1,110 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/** + * Note [Generator] + * ~~~~~~~~~~~~~~~~ + * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm + * to generate a seemingly random sequence of numbers that may later be used + * in creating a random distribution. Such an engine almost always maintains a + * state and requires a seed to start off the creation of random numbers. + * Oftentimes, users have found it beneficial to be able to explicitly create, + * retain, and destroy PRNG states and also be able to have control over the + * seed value. + * + * A Generator in ATen gives users the ability to read, write and modify a PRNG + * engine.
For instance, it does so by letting users seed a PRNG engine, fork + * the state of the engine, etc. + * + * By default, there is one generator per device, and a device's generator is + * lazily created. A user can use the torch.Generator() api to create their own + * generator. Currently torch.Generator() can only create a CPUGeneratorImpl. + */ + +/** + * Note [Acquire lock when using random generators] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Generator and its derived classes are NOT thread-safe. Please note that most + * of the places where we have inserted locking for generators are historically + * based, and we haven't actually checked that everything is truly thread safe + * (and it probably isn't). Please use the public mutex_ when using any methods + * from these classes, except for the read-only methods. You can learn about the + * usage by looking into the unittests (aten/src/ATen/cpu_generator_test.cpp) + * and other places where we have used lock_guard. + * + * TODO: Look into changing the threading semantics of Generators in ATen (e.g., + * making them non-thread safe and instead making the generator state + * splittable, to accommodate forks into other threads). + */ + +namespace c10 { + +// The default seed is selected to be a large number +// with good distribution of 0s and 1s in bit representation +constexpr uint64_t default_rng_seed_val = 67280421310721; + +struct C10_API GeneratorImpl : public c10::intrusive_ptr_target { + // Constructors + GeneratorImpl(Device device_in, DispatchKeySet key_set); + + // Delete all copy and move assignment in favor of clone() + // method + GeneratorImpl(const GeneratorImpl& other) = delete; + GeneratorImpl(GeneratorImpl&& other) = delete; + GeneratorImpl& operator=(const GeneratorImpl& other) = delete; + + virtual ~GeneratorImpl() = default; + c10::intrusive_ptr clone() const; + + // Common methods for all generators + virtual void set_current_seed(uint64_t seed) = 0; + virtual uint64_t current_seed() const = 0; + virtual uint64_t seed() = 0; + virtual void set_state(const c10::TensorImpl& new_state) = 0; + virtual c10::intrusive_ptr get_state() const = 0; + Device device() const; + + // See Note [Acquire lock when using random generators] + std::mutex mutex_; + + DispatchKeySet key_set() const { + return key_set_; + } + + inline void set_pyobj(PyObject* pyobj) noexcept { + pyobj_ = pyobj; + } + + inline PyObject* pyobj() const noexcept { + return pyobj_; + } + + protected: + Device device_; + DispatchKeySet key_set_; + PyObject* pyobj_ = nullptr; + + virtual GeneratorImpl* clone_impl() const = 0; +}; + +namespace detail { + +TORCH_API uint64_t getNonDeterministicRandom(bool is_cuda = false); + +} // namespace detail + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/GradMode.h b/voice_bridge/torch/include/c10/core/GradMode.h new file mode 100644 index 0000000000000000000000000000000000000000..d83ff6d0d0d3b66e4becf5f2ba01440fb70d5f3c --- /dev/null +++ b/voice_bridge/torch/include/c10/core/GradMode.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +namespace c10 { + +struct TORCH_API GradMode { + static bool is_enabled(); + static void set_enabled(bool enabled); +}; + +// A RAII, thread local (!) guard that enables or disables grad mode upon +// construction, and sets it back to the original value upon destruction. 
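+// +// For example (editor's sketch): +// { +// c10::AutoGradMode no_grad_guard(/*enabled=*/false); +// // gradient recording is disabled within this scope +// } // the previous grad mode is restored here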
+struct TORCH_API AutoGradMode { + AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) { + GradMode::set_enabled(enabled); + } + ~AutoGradMode() { + GradMode::set_enabled(prev_mode); + } + bool prev_mode; +}; + +// A RAII, thread local (!) guard that stops future operations from building +// gradients. +struct TORCH_API NoGradGuard : public AutoGradMode { + NoGradGuard() : AutoGradMode(/*enabled=*/false) {} +}; + +// A RAII, thread local (!) guard that enables or disables forward grad mode +// upon construction, and sets it back to the original value upon destruction. +struct TORCH_API AutoFwGradMode { + AutoFwGradMode(bool enabled) + : prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) { + AutogradState::get_tls_state().set_fw_grad_mode(enabled); + } + ~AutoFwGradMode() { + AutogradState::get_tls_state().set_fw_grad_mode(prev_mode); + } + bool prev_mode; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/InferenceMode.h b/voice_bridge/torch/include/c10/core/InferenceMode.h new file mode 100644 index 0000000000000000000000000000000000000000..704c43b522c6d8eafa5df9be00273d67f6e25197 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/InferenceMode.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +// A RAII, thread local (!) guard that enables or disables inference mode upon +// construction, and sets it back to the original value upon destruction. +struct TORCH_API InferenceMode { + // Note [Expected TLS state in InferenceMode]: + // InferenceMode: ADInplaceOrView not in + // raw_local_dispatch_key_set.included(), + // Autograd in raw_local_dispatch_key_set.excluded() + // GradMode is disabled. + // NormalMode: ADInplaceOrView in raw_local_dispatch_key_set.included(), + // Autograd not in raw_local_dispatch_key_set.excluded() + // GradMode is enabled by default unless toggled manually + // through other APIs, e.g. NoGradGuard. + // + // Invariant: + // - ADInplaceOrView is never in the excluded set + // - Autograd is never in the included set + // - Setting InferenceMode will set GradMode accordingly, but not vice versa. + // + // 1. Why do we put ADInplaceOrView in the included set outside InferenceMode? + // + // In-place updates to inference tensors outside InferenceMode are not + // allowed. See Note [Inplace update inference tensor] for more details. + // Without going through the ADInplaceOrView kernel, we cannot throw an + // error for the `inference_tensor.add_(1)` case. + // + // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode? + // + // For example: + // torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true); + // torch::Tensor k = a + 2; + // { + // c10::InferenceMode guard(true); + // k.add_(2); + // } + // `k.add_(2)` still needs to go through the ADInplaceOrView kernel so that + // it's prepared for future autograd. + // + // 3. Why does setting InferenceMode also set GradMode? + // + // This is required since InferenceMode is a faster and more restrictive + // version of NoGradGuard. All runtime checks using GradMode::is_enabled() + // are applicable to InferenceMode as well, e.g. + // `tensorTypeInCurrentExecutionContext` in interpreter.cpp.
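+ // + // In TLS terms, the constructor below performs the following (editor's + // summary of the code that follows, not upstream documentation): + // enabled: included -= ADInplaceOrView; excluded |= autograd keys; + // grad / fw-grad modes off, inference mode on + // disabled: included += ADInplaceOrView; excluded -= autograd keys; + // grad / fw-grad modes on, inference mode off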
+ InferenceMode(bool enabled = true) + : prev_mode(AutogradState::get_tls_state()), + prev_keyset(c10::impl::tls_local_dispatch_key_set()) { + // Enabling inference mode means disabling grad modes + // And disabling inference mode means enabling grad modes + AutogradState::set_tls_state(AutogradState( + /* grad_mode */ !enabled, + /* inference_mode */ enabled, + /* fw_grad_mode */ !enabled)); + DispatchKeySet included = enabled + ? prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView) + : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView); + DispatchKeySet excluded = enabled + ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset) + : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset); + c10::impl::PODLocalDispatchKeySet cur_keyset; + cur_keyset.set_included(included); + cur_keyset.set_excluded(excluded); + c10::impl::_force_tls_local_dispatch_key_set(cur_keyset); + } + + ~InferenceMode() { + AutogradState::set_tls_state(prev_mode); + c10::impl::_force_tls_local_dispatch_key_set(prev_keyset); + } + static bool is_enabled(); + + private: + AutogradState prev_mode; + c10::impl::LocalDispatchKeySet prev_keyset; +}; +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/Layout.h b/voice_bridge/torch/include/c10/core/Layout.h new file mode 100644 index 0000000000000000000000000000000000000000..0ac72439b7f0682f323d4015091fd2d9ec0a29cf --- /dev/null +++ b/voice_bridge/torch/include/c10/core/Layout.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include + +#include + +namespace c10 { +enum class Layout : int8_t { + Strided, + Sparse, + SparseCsr, + Mkldnn, + SparseCsc, + SparseBsr, + SparseBsc, + NumOptions +}; + +constexpr auto kStrided = Layout::Strided; +constexpr auto kSparse = Layout::Sparse; +constexpr auto kSparseCsr = Layout::SparseCsr; +constexpr auto kMkldnn = Layout::Mkldnn; +constexpr auto kSparseCsc = Layout::SparseCsc; +constexpr auto kSparseBsr = Layout::SparseBsr; +constexpr auto kSparseBsc = Layout::SparseBsc; + +inline Layout layout_from_backend(Backend backend) { + switch (backend) { + case Backend::SparseCPU: + case Backend::SparseCUDA: + case Backend::SparseHIP: + case Backend::SparseVE: + case Backend::SparseXPU: + return Layout::Sparse; + case Backend::MkldnnCPU: + return Layout::Mkldnn; + case Backend::SparseCsrCPU: + case Backend::SparseCsrCUDA: + TORCH_CHECK( + false, + "Cannot map Backend SparseCsrCPU|SparseCsrCUDA to a unique layout."); + default: + return Layout::Strided; + } +} + +inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) { + switch (layout) { + case at::kStrided: + return stream << "Strided"; + case at::kSparse: + return stream << "Sparse"; + case at::kSparseCsr: + return stream << "SparseCsr"; + case at::kSparseCsc: + return stream << "SparseCsc"; + case at::kSparseBsr: + return stream << "SparseBsr"; + case at::kSparseBsc: + return stream << "SparseBsc"; + case at::kMkldnn: + return stream << "Mkldnn"; + default: + TORCH_CHECK(false, "Unknown layout"); + } +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/MemoryFormat.h b/voice_bridge/torch/include/c10/core/MemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..b1363033ca4728a22581625d21ae0a6165d8ff3b --- /dev/null +++ b/voice_bridge/torch/include/c10/core/MemoryFormat.h @@ -0,0 +1,277 @@ +#pragma once + +#include +#include +#include + +#include + +// Memory format is not the property of a Tensor. 
It is the way to tell an +// operator how the result should be organized in memory and nothing more. That +// means memory format should never be used as return value for any tensor state +// interrogation functions (internally and externally). +// +// Possible options are: +// Preserve: +// If any of the input tensors is in channels_last format, operator output +// should be in channels_last format +// +// Contiguous: +// Regardless of input tensors format, the output should be contiguous +// Tensor. +// +// ChannelsLast: +// Regardless of input tensors format, the output should be in channels_last +// format. + +namespace c10 { +enum class MemoryFormat : int8_t { + Contiguous, + Preserve, + ChannelsLast, + ChannelsLast3d, + NumOptions +}; + +// If you are seeing this, it means that this call site was not checked if +// the memory format could be preserved, and it was switched to old default +// behaviour of contiguous +#define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format() + +inline MemoryFormat get_contiguous_memory_format() { + return MemoryFormat::Contiguous; +} + +inline std::ostream& operator<<( + std::ostream& stream, + at::MemoryFormat memory_format) { + switch (memory_format) { + case MemoryFormat::Preserve: + return stream << "Preserve"; + case MemoryFormat::Contiguous: + return stream << "Contiguous"; + case MemoryFormat::ChannelsLast: + return stream << "ChannelsLast"; + case MemoryFormat::ChannelsLast3d: + return stream << "ChannelsLast3d"; + default: + TORCH_CHECK(false, "Unknown memory format ", memory_format); + } +} + +// Note: Hardcoded the channel last stride indices here to get better +// performance +inline std::vector get_channels_last_strides_2d(IntArrayRef sizes) { + std::vector strides(sizes.size()); + switch (sizes.size()) { + case 4: + strides[1] = 1; + strides[3] = sizes[1]; + strides[2] = strides[3] * sizes[3]; + strides[0] = strides[2] * sizes[2]; + return strides; + case 3: + strides[0] = 1; + strides[2] = sizes[0]; + strides[1] = strides[2] * sizes[2]; + return strides; + default: + TORCH_INTERNAL_ASSERT( + false, "ChannelsLast2d doesn't support size ", sizes.size()); + } +} + +inline std::vector get_channels_last_strides_3d(IntArrayRef sizes) { + std::vector strides(sizes.size()); + switch (sizes.size()) { + case 5: + strides[1] = 1; + strides[4] = sizes[1]; + strides[3] = strides[4] * sizes[4]; + strides[2] = strides[3] * sizes[3]; + strides[0] = strides[2] * sizes[2]; + return strides; + case 4: + strides[0] = 1; + strides[3] = sizes[0]; + strides[2] = strides[3] * sizes[3]; + strides[1] = strides[2] * sizes[2]; + return strides; + default: + TORCH_INTERNAL_ASSERT( + false, "ChannelsLast3d doesn't support size ", sizes.size()); + } +} + +// NOTE: +// Below are Helper functions for is_channels_last_strides_xd. +// 1. Please do not combine these helper functions, each helper function handles +// exactly one case of sizes + memory_format, by doing this, the strides indices +// will be a constant array and we can access it using constant index number, +// the compiler will fully unroll the loop on strides indices to gain a better +// performance. +// 2. No error check in helper function, caller ensures the correctness of the +// input +// 3. All helper functions have similar comments, only 1st helper function is +// commented here. +template +inline bool is_channels_last_strides_2d_s4( + const ArrayRef sizes, + const ArrayRef strides) { + T min = 0; + // special case for trivial C dimension. 
default to NCHW + if (strides[1] == 0) { + return false; + } + // loop strides indices + for (auto& d : {1, 3, 2, 0}) { + if (sizes[d] == 0) { + return false; + } + if (strides[d] < min) { + return false; + } + // Fallback to NCHW as default layout for ambiguous cases + // This is the flaw of implicit memory_format from strides. + // N111 tensor with identical strides for size 1 dimension; + // Two cases could lead us here: + // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1]) + // b. N11W contiguous Tensor sliced on the W-dimension. + // ([N,1,1,1]@[W,W,W,W]) + if (d == 0 && min == strides[1]) { + return false; + } + // This is necessary to: + // 1. distinguish the memory_format of N1H1; + // [H, 1, 1, 1] channels_last stride + // [H, H, 1, 1] contiguous stride + // 2. permutation of 1C1W: + // [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3) + // [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last + min = strides[d]; + if (sizes[d] > 1) { + min *= sizes[d]; + } + } + return true; +} + +template <typename T> +inline bool is_channels_last_strides_3d_s5( + const ArrayRef<T> sizes, + const ArrayRef<T> strides) { + T min = 0; + if (strides[1] == 0) { + return false; + } + for (auto& d : {1, 4, 3, 2, 0}) { + if (sizes[d] == 0) { + return false; + } + if (strides[d] < min) { + return false; + } + if (d == 0 && min == strides[1]) { + return false; + } + min = strides[d]; + if (sizes[d] > 1) { + min *= sizes[d]; + } + } + return true; +} + +// Note [Ambiguous is_channels_last_strides_xd] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// The flaw of carrying memory_format implicitly through strides is very hard +// to WAR properly. issue #24090 +// Without the history of permutation, we can't infer the memory_format of a +// tensor from the snapshot of its size & stride +// e.g. +// +// 1. We can NOT specify the memory_format of N111 tensor through strides in a +// meaningful way; +// +// 2. Two paths that end up with identical size/stride +// N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W] +// NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C] +// So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer +// the memory_format of the original tensor. +// +// Due to the limitations, our temporary WAR `is_channels_last_strides` makes a +// best effort to infer whether the original memory_format of a tensor is +// at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered +// by their importance): +// 1. Ensure that normal shape manipulation does not accidentally change the +// MemoryFormat of an existing tensor. +// 2. Allow users to mark tensors with MemoryFormat::ChannelsLast; +// +// The function does so by checking the strides of the tensor, including the +// strides of size-1 dimensions, although conventionally PyTorch imposes no +// restriction on trivial strides (strides for size-1 dimensions). +// +// Note that this approach is a compromise. We did not solve the problem +// completely. In many cases we will not be able to infer the correct memory +// format. +// The implementation of `is_channels_last_strides` serves these objectives: +// MemoryFormat::ChannelsLast has to be explicitly opted into (no accidental +// conversion); best effort is made to maintain the ChannelsLast flag. +// +// Because this is not a bulletproof solution, through testing +// (aten/src/ATen/test/memory_format_test.cpp) +// a. we ensure that the common tasks are supported; +// b. we identify the corner cases where the implementation compromises.
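+// +// A concrete example (editor's illustration): a contiguous NCHW tensor of size +// [2, 3, 4, 5] has strides [60, 20, 5, 1], while its channels_last counterpart +// has strides [60, 1, 15, 3] - which is what get_channels_last_strides_2d +// returns for these sizes, and what is_channels_last_strides_2d_s4 accepts.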
+// +// By the time accumulated permutation is enabled to replace implicit +// memory_format through strides, we should update our tests and fix the +// issues they expose. +// +// We use Channels Last 2d as an example above. +// This is a general problem for all the is_channels_last_strides_xd +// implementations. Please check the helper functions +// (is_channels_last_strides_*d_s*) for more details. + +template <typename T> +inline bool is_channels_last_strides_2d( + const ArrayRef<T> sizes, + const ArrayRef<T> strides) { + switch (sizes.size()) { + case 4: + return is_channels_last_strides_2d_s4(sizes, strides); + case 3: + // TODO dim == 3 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +template <typename T> +inline bool is_channels_last_strides_3d( + const ArrayRef<T> sizes, + const ArrayRef<T> strides) { + switch (sizes.size()) { + case 5: + return is_channels_last_strides_3d_s5(sizes, strides); + case 4: + // TODO dim == 4 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +inline bool is_channels_last_strides_2d( + const IntArrayRef sizes, + const IntArrayRef strides) { + return is_channels_last_strides_2d<int64_t>(sizes, strides); +} + +inline bool is_channels_last_strides_3d( + const IntArrayRef sizes, + const IntArrayRef strides) { + return is_channels_last_strides_3d<int64_t>(sizes, strides); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/OptionalRef.h b/voice_bridge/torch/include/c10/core/OptionalRef.h new file mode 100644 index 0000000000000000000000000000000000000000..c8743e6d55b558ef3e4c201448a319885bfe52fe --- /dev/null +++ b/voice_bridge/torch/include/c10/core/OptionalRef.h @@ -0,0 +1,31 @@ +#pragma once + +namespace c10 { + +template <typename T> +class OptionalRef { + public: + OptionalRef() : data_(nullptr) {} + OptionalRef(const T* data) : data_(data) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_); + } + OptionalRef(const T& data) : data_(&data) {} + + bool has_value() const { + return data_ != nullptr; + } + + const T& get() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_); + return *data_; + } + + operator bool() const { + return has_value(); + } + + private: + const T* data_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/PyHandleCache.h b/voice_bridge/torch/include/c10/core/PyHandleCache.h new file mode 100644 index 0000000000000000000000000000000000000000..351c038132a21767cdf4bbaffc263dbca949aa91 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/PyHandleCache.h @@ -0,0 +1,75 @@ +#pragma once + +#include <c10/core/impl/PyInterpreter.h> +#include <c10/macros/Macros.h> +#include <c10/util/python_stub.h> + +#include <atomic> + +namespace c10 { + +// A PyHandleCache represents a cached pointer from a C++ object to +// a Python object that represents that object analogously in Python. +// Upon a cache hit, the relevant object can be retrieved after a test +// and then a memory load. Two conditions must hold to be able to use this +// class: +// +// - This must truly be a cache; e.g., the caller must be able to produce +// the object some other way if the cache misses. +// +// - This must truly be a handle; e.g., the Python object referenced by +// this class must have static lifetime. This means we don't have to +// maintain strong ownership or deallocate the object when the C++ object +// dies. Static lifetime is a good idea in conjunction with the cache, +// since if you are producing a fresh object on miss you won't be +// maintaining object identity.
If you need bidirectional ownership, +// you will want to factor out the pattern in TensorImpl with +// resurrection. +// +// This cache is expected to not improve perf under torchdeploy, as one +// interpreter will fill up the cache, and all the interpreters will be +// unable to use the slot. A potential improvement is to have multiple +// slots (one per interpreter), which will work in deployment scenarios +// where there is a stable, fixed number of interpreters. You can also store +// the relevant state in the Python library, rather than in the non-Python +// library (although in many cases, this is not convenient, as there may +// not be a way to conveniently index based on the object). +class PyHandleCache { + public: + PyHandleCache() : pyinterpreter_(nullptr), data_(nullptr) {} + + // Attempt to fetch the pointer from the cache, if the PyInterpreter + // matches. If it doesn't exist, or the cache entry is not valid, + // use slow_accessor to get the real pointer value and return that + // (possibly writing it to the cache, if the cache entry is + // available.) + template <typename F> + PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor) + const { + // Note [Memory ordering on Python interpreter tag] + impl::PyInterpreter* interpreter = + pyinterpreter_.load(std::memory_order_acquire); + if (C10_LIKELY(interpreter == self_interpreter)) { + return data_; + } else if (interpreter == nullptr) { + auto* r = slow_accessor(); + impl::PyInterpreter* expected = nullptr; + // attempt to claim this cache entry with the specified interpreter tag + if (pyinterpreter_.compare_exchange_strong( + expected, self_interpreter, std::memory_order_acq_rel)) { + data_ = r; + } + // This shouldn't be possible, as you should be GIL protected + TORCH_INTERNAL_ASSERT(expected != self_interpreter); + return r; + } else { + return slow_accessor(); + } + } + + private: + mutable std::atomic<impl::PyInterpreter*> pyinterpreter_; + mutable PyObject* data_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/QEngine.h b/voice_bridge/torch/include/c10/core/QEngine.h new file mode 100644 index 0000000000000000000000000000000000000000..71eb4b34ac9e11938eb45b86dca83cbe1a27acfa --- /dev/null +++ b/voice_bridge/torch/include/c10/core/QEngine.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +/** + * QEngine is an enum that is used to select the engine to run quantized ops.
+ * Keep this enum in sync with get_qengine_id() in + * torch/backends/quantized/__init__.py + */ +enum class QEngine : uint8_t { + NoQEngine = 0, + FBGEMM = 1, + QNNPACK = 2, + ONEDNN = 3, + X86 = 4, +}; + +constexpr auto kNoQEngine = QEngine::NoQEngine; +constexpr auto kFBGEMM = QEngine::FBGEMM; +constexpr auto kQNNPACK = QEngine::QNNPACK; +constexpr auto kONEDNN = QEngine::ONEDNN; +constexpr auto kX86 = QEngine::X86; + +inline std::string toString(QEngine qengine) { + switch (qengine) { + case kNoQEngine: + return "NoQEngine"; + case kFBGEMM: + return "FBGEMM"; + case kQNNPACK: + return "QNNPACK"; + case kONEDNN: + return "ONEDNN"; + case kX86: + return "X86"; + default: + TORCH_CHECK( + false, "Unrecognized Quantized Engine: ", static_cast<int>(qengine)); + } +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/QScheme.h b/voice_bridge/torch/include/c10/core/QScheme.h new file mode 100644 index 0000000000000000000000000000000000000000..957618d74fc5e2129fc312f72db05f2860bfe62a --- /dev/null +++ b/voice_bridge/torch/include/c10/core/QScheme.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include + +namespace c10 { + +/** + * QScheme is an enum that specifies the type of quantization. This has a + * one-to-one correspondence with Quantizer. + * Please refer to ATen/quantized/Quantizer.h to see the Quantizer classes. + * Keep this file in sync with torch/nn/_qscheme.py + */ +enum class QScheme : uint8_t { + PER_TENSOR_AFFINE = 0, + PER_CHANNEL_AFFINE = 1, + PER_TENSOR_SYMMETRIC = 2, + PER_CHANNEL_SYMMETRIC = 3, + PER_CHANNEL_AFFINE_FLOAT_QPARAMS = 4, + COMPILE_TIME_NUM_QSCHEMES = 5, +}; + +constexpr auto kPerTensorAffine = QScheme::PER_TENSOR_AFFINE; +constexpr auto kPerChannelAffine = QScheme::PER_CHANNEL_AFFINE; +constexpr auto kPerTensorSymmetric = QScheme::PER_TENSOR_SYMMETRIC; +constexpr auto kPerChannelSymmetric = QScheme::PER_CHANNEL_SYMMETRIC; +constexpr auto kPerChannelAffineFloatQParams = + QScheme::PER_CHANNEL_AFFINE_FLOAT_QPARAMS; +constexpr int COMPILE_TIME_NUM_QSCHEMES = + static_cast<int>(QScheme::COMPILE_TIME_NUM_QSCHEMES); + +inline std::string toString(QScheme qscheme) { + switch (qscheme) { + case kPerTensorAffine: + return "per_tensor_affine"; + case kPerChannelAffine: + return "per_channel_affine"; + case kPerTensorSymmetric: + return "per_tensor_symmetric"; + case kPerChannelSymmetric: + return "per_channel_symmetric"; + case kPerChannelAffineFloatQParams: + return "per_channel_affine_float_qparams"; + default: + TORCH_CHECK(false, "Unrecognized qscheme: ", static_cast<int>(qscheme)); + } +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/SafePyObject.h b/voice_bridge/torch/include/c10/core/SafePyObject.h new file mode 100644 index 0000000000000000000000000000000000000000..f9ecb9c4de6d172146fe0be6e560e0cd18f3a46e --- /dev/null +++ b/voice_bridge/torch/include/c10/core/SafePyObject.h @@ -0,0 +1,70 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +// This is a safe owning holder for a PyObject, akin to pybind11's +// py::object, with two major differences: +// +// - It is in c10/core; i.e., you can use this type in contexts where +// you do not have a libpython dependency +// +// - It is multi-interpreter safe (a la torchdeploy); when you fetch +// the underlying PyObject* you are required to specify what the current +// interpreter context is and we will check that you match it. +// +// It is INVALID to store a reference to a Tensor object in this way; +// you should just use TensorImpl directly in that case!
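+// +// A usage sketch (editor's illustration; `obj` is a PyObject* whose reference +// we give up, and `interp` is the current c10::impl::PyInterpreter*): +// c10::SafePyObject holder(obj, interp); // steals the reference to obj +// PyObject* raw = holder.ptr(interp); // interpreter-checked access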
+struct C10_API SafePyObject { + // Steals a reference to data + SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) + : data_(data), pyinterpreter_(pyinterpreter) {} + + // In principle this could be copyable if we add an incref to PyInterpreter + // but for now it's easier to just disallow it. + SafePyObject(SafePyObject const&) = delete; + SafePyObject& operator=(SafePyObject const&) = delete; + + ~SafePyObject() { + (*pyinterpreter_)->decref(data_, /*is_tensor*/ false); + } + + c10::impl::PyInterpreter& pyinterpreter() const { + return *pyinterpreter_; + } + PyObject* ptr(const c10::impl::PyInterpreter*) const; + + private: + PyObject* data_; + c10::impl::PyInterpreter* pyinterpreter_; +}; + +// Like SafePyObject, but non-owning. Good for references to global PyObjects +// that will be leaked on interpreter exit. You get a copy constructor/assign +// this way. +struct C10_API SafePyHandle { + SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {} + SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) + : data_(data), pyinterpreter_(pyinterpreter) {} + + c10::impl::PyInterpreter& pyinterpreter() const { + return *pyinterpreter_; + } + PyObject* ptr(const c10::impl::PyInterpreter*) const; + void reset() { + data_ = nullptr; + pyinterpreter_ = nullptr; + } + operator bool() { + return data_; + } + + private: + PyObject* data_; + c10::impl::PyInterpreter* pyinterpreter_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/Scalar.h b/voice_bridge/torch/include/c10/core/Scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..b9ca897c4a4e22fc9ab2a4f2f36fe2f5ade5e613 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/Scalar.h @@ -0,0 +1,342 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +/** + * Scalar represents a 0-dimensional tensor which contains a single element. + * Unlike a tensor, numeric literals (in C++) are implicitly convertible to + * Scalar (which is why, for example, we provide both add(Tensor) and + * add(Scalar) overloads for many operations). It may also be used in + * circumstances where you statically know a tensor is 0-dim and single size, + * but don't know its type. + */ +class C10_API Scalar { + public: + Scalar() : Scalar(int64_t(0)) {} + + void destroy() { + if (Tag::HAS_si == tag || Tag::HAS_sd == tag) { + raw::intrusive_ptr::decref(v.p); + v.p = nullptr; + } + } + + ~Scalar() { + destroy(); + } + +#define DEFINE_IMPLICIT_CTOR(type, name) \ + Scalar(type vv) : Scalar(vv, true) {} + + AT_FORALL_SCALAR_TYPES_AND3(Half, BFloat16, ComplexHalf, DEFINE_IMPLICIT_CTOR) + AT_FORALL_COMPLEX_TYPES(DEFINE_IMPLICIT_CTOR) + +#undef DEFINE_IMPLICIT_CTOR + + // Value* is both implicitly convertible to SymbolicVariable and bool which + // causes ambiguity error. Specialized constructor for bool resolves this + // problem. 
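+ // For example (editor's illustration): + // c10::Scalar a = 3; // integral literal, stored with Tag::HAS_i + // c10::Scalar b = 3.5; // floating literal, stored with Tag::HAS_d + // c10::Scalar c = true; // bool goes through the constructor below (HAS_b) + // int64_t i = a.toLong(); // accessor generated by DEFINE_ACCESSOR below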
+ template < + typename T, + typename std::enable_if::value, bool>::type* = + nullptr> + Scalar(T vv) : tag(Tag::HAS_b) { + v.i = convert(vv); + } + +#define DEFINE_ACCESSOR(type, name) \ + type to##name() const { \ + if (Tag::HAS_d == tag) { \ + return checked_convert(v.d, #type); \ + } else if (Tag::HAS_z == tag) { \ + return checked_convert>(v.z, #type); \ + } \ + if (Tag::HAS_b == tag) { \ + return checked_convert(v.i, #type); \ + } else if (Tag::HAS_i == tag) { \ + return checked_convert(v.i, #type); \ + } else if (Tag::HAS_si == tag) { \ + TORCH_CHECK(false, "tried to get " #name " out of SymInt") \ + } else if (Tag::HAS_sd == tag) { \ + TORCH_CHECK(false, "tried to get " #name " out of SymFloat") \ + } \ + TORCH_CHECK(false) \ + } + + // TODO: Support ComplexHalf accessor + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ACCESSOR) + +#undef DEFINE_ACCESSOR + + SymInt toSymInt() const { + if (Tag::HAS_si == tag) { + return c10::SymInt::toSymInt(intrusive_ptr::reclaim_copy( + static_cast(v.p))); + } else { + return toLong(); + } + } + + SymFloat toSymFloat() const { + if (Tag::HAS_sd == tag) { + return c10::SymFloat::toSymFloat( + intrusive_ptr::reclaim_copy( + static_cast(v.p))); + } else { + return toLong(); + } + } + + // also support scalar.to(); + // Deleted for unsupported types, but specialized below for supported types + template + T to() const = delete; + + // audit uses of data_ptr + const void* data_ptr() const { + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return static_cast(&v); + } + + bool isFloatingPoint() const { + return Tag::HAS_d == tag || Tag::HAS_sd == tag; + } + + C10_DEPRECATED_MESSAGE( + "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.") + bool isIntegral() const { + return Tag::HAS_i == tag || Tag::HAS_si == tag; + } + bool isIntegral(bool includeBool) const { + return Tag::HAS_i == tag || Tag::HAS_si == tag || + (includeBool && isBoolean()); + } + + bool isComplex() const { + return Tag::HAS_z == tag; + } + bool isBoolean() const { + return Tag::HAS_b == tag; + } + + // you probably don't actually want these; they're mostly for testing + bool isSymInt() const { + return Tag::HAS_si == tag; + } + bool isSymFloat() const { + return Tag::HAS_sd == tag; + } + + bool isSymbolic() const { + return Tag::HAS_si == tag || Tag::HAS_sd == tag; + } + + C10_ALWAYS_INLINE Scalar& operator=(Scalar&& other) { + if (&other == this) { + return *this; + } + + destroy(); + moveFrom(std::move(other)); + return *this; + } + + C10_ALWAYS_INLINE Scalar& operator=(const Scalar& other) { + if (&other == this) { + return *this; + } + + *this = Scalar(other); + return *this; + } + + Scalar operator-() const; + Scalar conj() const; + Scalar log() const; + + template < + typename T, + typename std::enable_if::value, int>::type = 0> + bool equal(T num) const { + if (isComplex()) { + TORCH_INTERNAL_ASSERT(!isSymbolic()); + auto val = v.z; + return (val.real() == num) && (val.imag() == T()); + } else if (isFloatingPoint()) { + TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality"); + return v.d == num; + } else if (isIntegral(/*includeBool=*/false)) { + TORCH_CHECK(!isSymbolic(), "NYI SymInt equality"); + return v.i == num; + } else if (isBoolean()) { + // boolean scalar does not equal to a non boolean value + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return false; + } else { + TORCH_INTERNAL_ASSERT(false); + } + } + + template < + typename T, + typename std::enable_if::value, int>::type = 0> + bool equal(T num) const { + if (isComplex()) { + 
TORCH_INTERNAL_ASSERT(!isSymbolic()); + return v.z == num; + } else if (isFloatingPoint()) { + TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality"); + return (v.d == num.real()) && (num.imag() == T()); + } else if (isIntegral(/*includeBool=*/false)) { + TORCH_CHECK(!isSymbolic(), "NYI SymInt equality"); + return (v.i == num.real()) && (num.imag() == T()); + } else if (isBoolean()) { + // boolean scalar does not equal to a non boolean value + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return false; + } else { + TORCH_INTERNAL_ASSERT(false); + } + } + + bool equal(bool num) const { + if (isBoolean()) { + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return static_cast(v.i) == num; + } else { + return false; + } + } + + ScalarType type() const { + if (isComplex()) { + return ScalarType::ComplexDouble; + } else if (isFloatingPoint()) { + return ScalarType::Double; + } else if (isIntegral(/*includeBool=*/false)) { + return ScalarType::Long; + } else if (isBoolean()) { + return ScalarType::Bool; + } else { + throw std::runtime_error("Unknown scalar type."); + } + } + + Scalar(Scalar&& rhs) noexcept : tag(rhs.tag) { + moveFrom(std::move(rhs)); + } + + Scalar(const Scalar& rhs) : tag(rhs.tag), v(rhs.v) { + if (isSymbolic()) { + c10::raw::intrusive_ptr::incref(v.p); + } + } + + Scalar(c10::SymInt si) { + if (si.is_symbolic()) { + tag = Tag::HAS_si; + v.p = std::move(si).release(); + } else { + tag = Tag::HAS_i; + v.i = si.as_int_unchecked(); + } + } + + Scalar(c10::SymFloat sd) { + if (sd.is_symbolic()) { + tag = Tag::HAS_sd; + v.p = std::move(sd).release(); + } else { + tag = Tag::HAS_d; + v.d = sd.as_float_unchecked(); + } + } + + // We can't set v in the initializer list using the + // syntax v{ .member = ... } because it doesn't work on MSVC + private: + enum class Tag { HAS_d, HAS_i, HAS_z, HAS_b, HAS_sd, HAS_si }; + + // NB: assumes that self has already been cleared + C10_ALWAYS_INLINE void moveFrom(Scalar&& rhs) noexcept { + v = rhs.v; + tag = rhs.tag; + if (rhs.tag == Tag::HAS_si || rhs.tag == Tag::HAS_sd) { + // Move out of scalar + rhs.tag = Tag::HAS_i; + rhs.v.i = 0; + } + } + + Tag tag; + + union v_t { + double d; + int64_t i; + c10::complex z; + c10::intrusive_ptr_target* p; + v_t() {} // default constructor + } v; + + template < + typename T, + typename std::enable_if< + std::is_integral::value && !std::is_same::value, + bool>::type* = nullptr> + Scalar(T vv, bool) : tag(Tag::HAS_i) { + v.i = convert(vv); + } + + template < + typename T, + typename std::enable_if< + !std::is_integral::value && !c10::is_complex::value, + bool>::type* = nullptr> + Scalar(T vv, bool) : tag(Tag::HAS_d) { + v.d = convert(vv); + } + + template < + typename T, + typename std::enable_if::value, bool>::type* = nullptr> + Scalar(T vv, bool) : tag(Tag::HAS_z) { + v.z = convert(vv); + } +}; + +using OptionalScalarRef = c10::OptionalRef; + +// define the scalar.to() specializations +#define DEFINE_TO(T, name) \ + template <> \ + inline T Scalar::to() const { \ + return to##name(); \ + } +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_TO) +#undef DEFINE_TO + +} // namespace c10 + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/voice_bridge/torch/include/c10/core/ScalarType.h b/voice_bridge/torch/include/c10/core/ScalarType.h new file mode 100644 index 0000000000000000000000000000000000000000..51de905def9c1547ca4168906d5579a12717fa7d --- /dev/null +++ b/voice_bridge/torch/include/c10/core/ScalarType.h @@ -0,0 +1,462 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include 
+#include +#include + +namespace c10 { + +// For the macros below: +// NB: If you want to macro some code for all non-QInt scalar types (i.e. types +// with complete information), you probably want one of the +// AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND +// macros below, which are designed to behave similarly to the Dispatch macros +// with the same name. + +// NB: Order matters for this macro; it is relied upon in +// _promoteTypesLookup and the serialization format. +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \ + _(uint8_t, Byte) /* 0 */ \ + _(int8_t, Char) /* 1 */ \ + _(int16_t, Short) /* 2 */ \ + _(int, Int) /* 3 */ \ + _(int64_t, Long) /* 4 */ \ + _(at::Half, Half) /* 5 */ \ + _(float, Float) /* 6 */ \ + _(double, Double) /* 7 */ \ + _(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \ + _(c10::complex<float>, ComplexFloat) /* 9 */ \ + _(c10::complex<double>, ComplexDouble) /* 10 */ \ + _(bool, Bool) /* 11 */ \ + _(c10::qint8, QInt8) /* 12 */ \ + _(c10::quint8, QUInt8) /* 13 */ \ + _(c10::qint32, QInt32) /* 14 */ \ + _(at::BFloat16, BFloat16) /* 15 */ \ + _(c10::quint4x2, QUInt4x2) /* 16 */ \ + _(c10::quint2x4, QUInt2x4) /* 17 */ + +// If you want to support ComplexHalf for real, add ComplexHalf +// into this macro (and change the name). But beware: convert() +// doesn't work for all the conversions you need... +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(at::Half, Half) \ + _(float, Float) \ + _(double, Double) \ + _(c10::complex<float>, ComplexFloat) \ + _(c10::complex<double>, ComplexDouble) \ + _(bool, Bool) \ + _(at::BFloat16, BFloat16) + +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(at::Half, Half) \ + _(float, Float) \ + _(double, Double) \ + _(c10::complex<c10::Half>, ComplexHalf) \ + _(c10::complex<float>, ComplexFloat) \ + _(c10::complex<double>, ComplexDouble) \ + _(bool, Bool) \ + _(at::BFloat16, BFloat16) + +enum class ScalarType : int8_t { +#define DEFINE_ENUM(_1, n) n, + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ENUM) +#undef DEFINE_ENUM + Undefined, + NumOptions +}; + +constexpr uint16_t NumScalarTypes = + static_cast<uint16_t>(ScalarType::NumOptions); + +namespace impl { + +// These are used to map ScalarTypes to C++ types. + +template <c10::ScalarType N> +struct ScalarTypeToCPPType; + +#define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \ + template <> \ + struct ScalarTypeToCPPType<c10::ScalarType::scalar_type> { \ + using type = cpp_type; \ + \ + /* This is a workaround for the CUDA bug which prevents */ \ + /* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \ + /* ambiguous reference which can't be resolved. For some reason it */ \ + /* can't pick between at::detail and at::cuda::detail. */ \ + /* For repro example, please see: */ \ + /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \ + /* TODO: remove once the bug is fixed.
*/ \ + static type t; \ + }; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType) + +#undef SPECIALIZE_ScalarTypeToCPPType + +template +using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType::type; + +} // namespace impl + +template +struct CppTypeToScalarType; + +#define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \ + template <> \ + struct CppTypeToScalarType \ + : std:: \ + integral_constant { \ + }; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType) + +#undef SPECIALIZE_CppTypeToScalarType + +#define AT_FORALL_INT_TYPES(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) + +#define AT_FORALL_SCALAR_TYPES(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) + +#define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE>::t), \ + SCALARTYPE) + +#define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) + +#define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) + +#define AT_FORALL_QINT_TYPES(_) \ + _(c10::qint8, QInt8) \ + _(c10::quint8, QUInt8) \ + _(c10::qint32, QInt32) \ + _(c10::quint4x2, QUInt4x2) \ + _(c10::quint2x4, QUInt2x4) + +#define AT_FORALL_COMPLEX_TYPES(_) \ + _(c10::complex, ComplexFloat) \ + _(c10::complex, ComplexDouble) + +#define DEFINE_CONSTANT(_, name) \ + constexpr ScalarType k##name = ScalarType::name; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT) +#undef DEFINE_CONSTANT + +static inline const char* toString(ScalarType t) { +#define DEFINE_CASE(_, name) \ + case ScalarType::name: \ + return #name; + + switch (t) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE) + default: + return "UNKNOWN_SCALAR"; + } +#undef DEFINE_CASE +} + +static inline size_t elementSize(ScalarType t) { +#define CASE_ELEMENTSIZE_CASE(ctype, name) \ + case ScalarType::name: \ + return sizeof(ctype); + + switch (t) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE) + default: + TORCH_CHECK(false, "Unknown ScalarType"); + } +#undef CASE_ELEMENTSIZE_CASE +} + +C10_DEPRECATED_MESSAGE( + "isIntegralType is deprecated. 
Please use the overload with 'includeBool' parameter instead.") +static inline bool isIntegralType(ScalarType t) { + return ( + t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int || + t == ScalarType::Long || t == ScalarType::Short); +} + +static inline bool isIntegralType(ScalarType t, bool includeBool) { + bool isIntegral = + (t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int || + t == ScalarType::Long || t == ScalarType::Short); + + return includeBool ? isIntegral || (t == ScalarType::Bool) : isIntegral; +} + +static inline bool isFloatingType(ScalarType t) { + return ( + t == ScalarType::Double || t == ScalarType::Float || + t == ScalarType::Half || t == ScalarType::BFloat16); +} + +static inline bool isComplexType(ScalarType t) { + return ( + t == ScalarType::ComplexHalf || t == ScalarType::ComplexFloat || + t == ScalarType::ComplexDouble); +} + +static inline bool isQIntType(ScalarType t) { + // Don't forget to extend this when adding new QInt types + return t == ScalarType::QInt8 || t == ScalarType::QUInt8 || + t == ScalarType::QInt32 || t == ScalarType::QUInt4x2 || + t == ScalarType::QUInt2x4; +} + +static inline ScalarType toQIntType(ScalarType t) { + switch (t) { + case ScalarType::Byte: + return ScalarType::QUInt8; + case ScalarType::Char: + return ScalarType::QInt8; + case ScalarType::Int: + return ScalarType::QInt32; + default: + return t; + } +} + +static inline ScalarType toUnderlying(ScalarType t) { + switch (t) { + case ScalarType::QUInt8: + return ScalarType::Byte; + case ScalarType::QInt8: + return ScalarType::Char; + case ScalarType::QInt32: + return ScalarType::Int; + case ScalarType::QUInt4x2: + return ScalarType::Byte; + case ScalarType::QUInt2x4: + return ScalarType::Byte; + default: + return t; + } +} + +static inline bool isSignedType(ScalarType t) { + TORCH_CHECK(!isQIntType(t), "isSignedType not supported for quantized types"); +#define CASE_SIGNED(ctype, name) \ + case ScalarType::name: \ + return std::numeric_limits::is_signed; + + switch (t) { + case ScalarType::ComplexHalf: + case ScalarType::ComplexFloat: + case ScalarType::ComplexDouble: + return true; + AT_FORALL_SCALAR_TYPES_AND3(Half, Bool, BFloat16, CASE_SIGNED) + default: + TORCH_CHECK(false, "Unknown ScalarType"); + } +#undef CASE_SIGNED +} + +static inline bool isUnderlying(ScalarType type, ScalarType qtype) { + return type == toUnderlying(qtype); +} + +static inline ScalarType toRealValueType(ScalarType t) { + switch (t) { + case ScalarType::ComplexHalf: + return ScalarType::Half; + case ScalarType::ComplexFloat: + return ScalarType::Float; + case ScalarType::ComplexDouble: + return ScalarType::Double; + default: + return t; + } +} + +static inline ScalarType toComplexType(ScalarType t) { + switch (t) { + case ScalarType::BFloat16: + // BFloat16 has range equivalent to Float, + // so we map it to ComplexFloat. + return ScalarType::ComplexFloat; + case ScalarType::Half: + return ScalarType::ComplexHalf; + case ScalarType::Float: + return ScalarType::ComplexFloat; + case ScalarType::Double: + return ScalarType::ComplexDouble; + case ScalarType::ComplexHalf: + return ScalarType::ComplexHalf; + case ScalarType::ComplexFloat: + return ScalarType::ComplexFloat; + case ScalarType::ComplexDouble: + return ScalarType::ComplexDouble; + default: + TORCH_CHECK(false, "Unknown Complex ScalarType for ", t); + } +} + +// see tensor_attributes.rst for detailed explanation and examples +// of casting rules. 
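+// +// For example (editor's illustration of the rules implemented below): +// canCast(ScalarType::Int, ScalarType::Float) -> true +// canCast(ScalarType::Float, ScalarType::Int) -> false (float -> integral) +// canCast(ScalarType::ComplexFloat, ScalarType::Float) -> false (complex -> non-complex) +// canCast(ScalarType::Long, ScalarType::Bool) -> false (nothing casts to Bool)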
+static inline bool canCast(const ScalarType from, const ScalarType to) { + // We disallow complex -> non complex, e.g., float_tensor *= complex is + // disallowed. + if (isComplexType(from) && !isComplexType(to)) { + return false; + } + // We disallow float -> integral, e.g., int_tensor *= float is disallowed. + if (isFloatingType(from) && isIntegralType(to, false)) { + return false; + } + + // Treat bool as a distinct "category," to be consistent with type promotion + // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same + // category as `bool_tensor`, we would not promote. Differing categories + // implies `bool_tensor += 5` is disallowed. + // + // NB: numpy distinguishes "unsigned" as a category to get the desired + // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because: + // * We don't want the performance hit of checking the runtime sign of + // Scalars. + // * `uint8_tensor + 5 -> int64_tensor` would be undesirable. + if (from != ScalarType::Bool && to == ScalarType::Bool) { + return false; + } + return true; +} + +static inline ScalarType promoteTypes(ScalarType a, ScalarType b) { + // This is generated according to NumPy's promote_types + constexpr auto u1 = ScalarType::Byte; + constexpr auto i1 = ScalarType::Char; + constexpr auto i2 = ScalarType::Short; + constexpr auto i4 = ScalarType::Int; + constexpr auto i8 = ScalarType::Long; + constexpr auto f2 = ScalarType::Half; + constexpr auto f4 = ScalarType::Float; + constexpr auto f8 = ScalarType::Double; + constexpr auto c2 = ScalarType::ComplexHalf; + constexpr auto c4 = ScalarType::ComplexFloat; + constexpr auto c8 = ScalarType::ComplexDouble; + constexpr auto b1 = ScalarType::Bool; + constexpr auto bf = ScalarType::BFloat16; + constexpr auto ud = ScalarType::Undefined; + if (a == ud || b == ud) { + return ScalarType::Undefined; + } + + // For QInt types, we only allow exact match + if (isQIntType(a) && a == b) { + return a; + } + + if (isQIntType(a) || isQIntType(b)) { + TORCH_CHECK( + false, + "promoteTypes with quantized numbers is not handled yet; figure out what the correct rules should be, offending types: ", + toString(a), + " ", + toString(b)); + } + + // this matrix has to be consistent with + // AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS undefined is used where we + // are not sure about the correct value for type promotion. 
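+ // For example (editor's illustration, reading off the table below): + // promoteTypes(kByte, kChar) == kShort (u1 x i1 -> i2) + // promoteTypes(kHalf, kBFloat16) == kFloat (f2 x bf -> f4) + // promoteTypes(kBool, kByte) == kByte (b1 x u1 -> u1)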
+ static constexpr ScalarType _promoteTypesLookup[static_cast( + ScalarType::NumOptions)][static_cast(ScalarType::NumOptions)] = { + /* u1 i1 i2 i4 i8 f2 f4 f8 c2 c4 c8 b1 q1 q2 q3 bf*/ + /* u1 */ {u1, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, u1, ud, ud, ud, bf}, + /* i1 */ {i2, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, i1, ud, ud, ud, bf}, + /* i2 */ {i2, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, i2, ud, ud, ud, bf}, + /* i4 */ {i4, i4, i4, i4, i8, f2, f4, f8, c2, c4, c8, i4, ud, ud, ud, bf}, + /* i8 */ {i8, i8, i8, i8, i8, f2, f4, f8, c2, c4, c8, i8, ud, ud, ud, bf}, + /* f2 */ {f2, f2, f2, f2, f2, f2, f4, f8, c2, c4, c8, f2, ud, ud, ud, f4}, + /* f4 */ {f4, f4, f4, f4, f4, f4, f4, f8, c4, c4, c8, f4, ud, ud, ud, f4}, + /* f8 */ {f8, f8, f8, f8, f8, f8, f8, f8, c8, c8, c8, f8, ud, ud, ud, f8}, + /* c2 */ {c2, c2, c2, c2, c2, c2, c4, c8, c2, c4, c8, c2, ud, ud, ud, c4}, + /* c4 */ {c4, c4, c4, c4, c4, c4, c4, c8, c4, c4, c8, c4, ud, ud, ud, c4}, + /* c8 */ {c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, ud, ud, ud, c8}, + /* b1 */ {u1, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, b1, ud, ud, ud, bf}, + /* q1 */ {ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud}, + /* q2 */ {ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud}, + /* q3 */ {ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud}, + /* bf */ {bf, bf, bf, bf, bf, f4, f4, f8, c4, c4, c8, bf, ud, ud, ud, bf}, + }; + return _promoteTypesLookup[static_cast(a)][static_cast(b)]; +} + +inline std::ostream& operator<<( + std::ostream& stream, + at::ScalarType scalar_type) { + return stream << toString(scalar_type); +} + +#define AT_FORAUTOCAST_SCALAR_TYPES(_) \ + _(half, Half) /* 0 */ \ + _(bfloat16, BFloat16) /* 1 */ + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/ScalarTypeToTypeMeta.h b/voice_bridge/torch/include/c10/core/ScalarTypeToTypeMeta.h new file mode 100644 index 0000000000000000000000000000000000000000..910e0d24b0a3d8199fb74ebcbeaf2b111a03305b --- /dev/null +++ b/voice_bridge/torch/include/c10/core/ScalarTypeToTypeMeta.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include + +// these just expose TypeMeta/ScalarType bridge functions in c10 +// TODO move to typeid.h (or codemod away) when TypeMeta et al +// are moved from caffe2 to c10 (see note at top of typeid.h) + +namespace c10 { + +/** + * convert ScalarType enum values to TypeMeta handles + */ +static inline caffe2::TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type) { + return caffe2::TypeMeta::fromScalarType(scalar_type); +} + +/** + * convert TypeMeta handles to ScalarType enum values + */ +static inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) { + return dtype.toScalarType(); +} + +/** + * typeMetaToScalarType(), lifted to optional + */ +static inline optional optTypeMetaToScalarType( + optional type_meta) { + if (!type_meta.has_value()) { + return c10::nullopt; + } + return type_meta->toScalarType(); +} + +/** + * convenience: equality across TypeMeta/ScalarType conversion + */ +static inline bool operator==(ScalarType t, caffe2::TypeMeta m) { + return m.isScalarType(t); +} + +static inline bool operator==(caffe2::TypeMeta m, ScalarType t) { + return t == m; +} + +static inline bool operator!=(ScalarType t, caffe2::TypeMeta m) { + return !(t == m); +} + +static inline bool operator!=(caffe2::TypeMeta m, ScalarType t) { + return !(t == m); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/Storage.h b/voice_bridge/torch/include/c10/core/Storage.h new file mode 100644 
index 0000000000000000000000000000000000000000..a89a0039fdfe631b83584b242cbd64156cbf8ad7
--- /dev/null
+++ b/voice_bridge/torch/include/c10/core/Storage.h
@@ -0,0 +1,184 @@
+#pragma once
+
+#include
+
+namespace c10 {
+
+struct C10_API Storage {
+ public:
+  struct use_byte_size_t {};
+
+  Storage() {}
+  Storage(c10::intrusive_ptr<StorageImpl> ptr)
+      : storage_impl_(std::move(ptr)) {}
+
+  // Allocates memory buffer using given allocator and creates a storage with it
+  Storage(
+      use_byte_size_t /*use_byte_size*/,
+      SymInt size_bytes,
+      Allocator* allocator = nullptr,
+      bool resizable = false)
+      : storage_impl_(c10::make_intrusive<StorageImpl>(
+            StorageImpl::use_byte_size_t(),
+            size_bytes,
+            allocator,
+            resizable)) {}
+
+  // Creates storage with pre-allocated memory buffer. Allocator is given for
+  // potential future reallocations, however it can be nullptr if the storage
+  // is non-resizable
+  Storage(
+      use_byte_size_t /*use_byte_size*/,
+      size_t size_bytes,
+      at::DataPtr data_ptr,
+      at::Allocator* allocator = nullptr,
+      bool resizable = false)
+      : storage_impl_(c10::make_intrusive<StorageImpl>(
+            StorageImpl::use_byte_size_t(),
+            size_bytes,
+            std::move(data_ptr),
+            allocator,
+            resizable)) {}
+
+  // Legacy constructor for partially initialized (dtype or memory) storages
+  // that can be temporarily created with Caffe2 APIs. See the note on top of
+  // TensorImpl.h for details.
+  static Storage create_legacy(at::Device device) {
+    auto allocator = GetAllocator(device.type());
+    return Storage(c10::make_intrusive<StorageImpl>(
+        StorageImpl::use_byte_size_t(),
+        0,
+        allocator->allocate(0), // materialize a non-default Device.
+        allocator,
+        true));
+  }
+
+  // Mimic create_legacy, but without requiring a newly-created StorageImpl.
+  void reset_legacy() {
+    TORCH_CHECK(resizable() && allocator());
+    set_nbytes(0);
+    set_data_ptr_noswap(allocator()->allocate(0));
+  }
+
+  template <typename T>
+  T* data() const {
+    return storage_impl_->data<T>();
+  }
+
+  template <typename T>
+  T* unsafe_data() const {
+    return storage_impl_->unsafe_data<T>();
+  }
+
+  // TODO: remove later
+  void set_nbytes(size_t size_bytes) const {
+    storage_impl_.get()->set_nbytes(size_bytes);
+  }
+
+  void set_nbytes(c10::SymInt size_bytes) const {
+    storage_impl_.get()->set_nbytes(size_bytes);
+  }
+
+  bool resizable() const {
+    return storage_impl_->resizable();
+  }
+
+  size_t nbytes() const {
+    return storage_impl_->nbytes();
+  }
+
+  SymInt sym_nbytes() const {
+    return storage_impl_->sym_nbytes();
+  }
+  // get() use here is to get const-correctness
+
+  void* data() const {
+    return storage_impl_.get()->data();
+  }
+
+  at::DataPtr& data_ptr() {
+    return storage_impl_->data_ptr();
+  }
+
+  const at::DataPtr& data_ptr() const {
+    return storage_impl_->data_ptr();
+  }
+
+  // Returns the previous data_ptr
+  at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) const {
+    return storage_impl_.get()->set_data_ptr(std::move(data_ptr));
+  }
+
+  void set_data_ptr_noswap(at::DataPtr&& data_ptr) const {
+    return storage_impl_.get()->set_data_ptr_noswap(std::move(data_ptr));
+  }
+
+  DeviceType device_type() const {
+    return storage_impl_->device_type();
+  }
+
+  at::Allocator* allocator() const {
+    return storage_impl_.get()->allocator();
+  }
+
+  at::Device device() const {
+    return storage_impl_->device();
+  }
+
+  StorageImpl* unsafeReleaseStorageImpl() {
+    return storage_impl_.release();
+  }
+
+  StorageImpl* unsafeGetStorageImpl() const noexcept {
+    return storage_impl_.get();
+  }
+
+  c10::weak_intrusive_ptr<StorageImpl> getWeakStorageImpl() const {
+    return c10::weak_intrusive_ptr<StorageImpl>(storage_impl_);
+  }
+
+  operator bool() const {
+ return storage_impl_; + } + + size_t use_count() const { + return storage_impl_.use_count(); + } + + inline bool unique() const { + return storage_impl_.unique(); + } + + bool is_alias_of(const Storage& other) const { + return storage_impl_ == other.storage_impl_; + } + + void UniqueStorageShareExternalPointer( + void* src, + size_t capacity, + DeleterFnPtr d = nullptr) { + if (!storage_impl_.unique()) { + TORCH_CHECK( + false, + "UniqueStorageShareExternalPointer can only be called when use_count == 1"); + } + storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d); + } + + void UniqueStorageShareExternalPointer( + at::DataPtr&& data_ptr, + size_t capacity) { + if (!storage_impl_.unique()) { + TORCH_CHECK( + false, + "UniqueStorageShareExternalPointer can only be called when use_count == 1"); + } + storage_impl_->UniqueStorageShareExternalPointer( + std::move(data_ptr), capacity); + } + + protected: + c10::intrusive_ptr storage_impl_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/StorageImpl.h b/voice_bridge/torch/include/c10/core/StorageImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..bbf0803842537f2ab080657bbe13e09c4604ce7b --- /dev/null +++ b/voice_bridge/torch/include/c10/core/StorageImpl.h @@ -0,0 +1,226 @@ +#pragma once + +#include +#include +#include + +#include + +namespace c10 { + +// A storage represents the underlying backing data buffer for a +// tensor. This concept was inherited from the original Torch7 +// codebase; we'd kind of like to get rid of the concept +// (see https://github.com/pytorch/pytorch/issues/14797) but +// it's hard work and no one has gotten around to doing it. +// +// NB: storage is supposed to uniquely own a data pointer; e.g., +// two non-null data pointers alias if and only if they are from +// the same storage. Technically you can violate this invariant +// (e.g., you can create a non-owning StorageImpl with at::from_blob) +// but a lot of things won't work correctly, including: +// +// - An ordinary deleter on such a storage is wrong, because normal deleters +// assume unique ownership, but if you have two storages at the same data, +// that implies there is some sort of shared ownership. 
So your deleter would +// have to actually be internally doing some sort of refcount thing +// - Deepcopy in Python side relies on storage equality and not data pointer +// equality; so if there are two separate storages pointing to the same data, +// the data will actually get duplicated in that case (one data ptr before, +// two data ptrs after) +// - Version counts won't work correctly, because we do all VC tracking at the +// level of storages (unless you explicitly disconnect the VC with detach); +// mutation because data pointers are the same are totally untracked +struct C10_API StorageImpl : public c10::intrusive_ptr_target { + public: + struct use_byte_size_t {}; + + StorageImpl( + use_byte_size_t /*use_byte_size*/, + SymInt size_bytes, + at::DataPtr data_ptr, + at::Allocator* allocator, + bool resizable) + : data_ptr_(std::move(data_ptr)), + size_bytes_(std::move(size_bytes)), + size_bytes_is_symbolic_(size_bytes_.is_symbolic()), + resizable_(resizable), + received_cuda_(false), + allocator_(allocator) { + if (resizable) { + TORCH_INTERNAL_ASSERT( + allocator_, "For resizable storage, allocator must be provided"); + } + } + + StorageImpl( + use_byte_size_t /*use_byte_size*/, + SymInt size_bytes, + at::Allocator* allocator, + bool resizable) + : StorageImpl( + use_byte_size_t(), + size_bytes, + size_bytes.is_symbolic() + ? allocator->allocate(0) + : allocator->allocate(size_bytes.as_int_unchecked()), + allocator, + resizable) {} + + StorageImpl& operator=(StorageImpl&& other) = default; + StorageImpl& operator=(const StorageImpl&) = delete; + StorageImpl() = delete; + StorageImpl(StorageImpl&& other) = default; + StorageImpl(const StorageImpl&) = delete; + ~StorageImpl() override = default; + + void reset() { + data_ptr_.clear(); + size_bytes_ = 0; + size_bytes_is_symbolic_ = false; + } + + template + inline T* data() const { + return unsafe_data(); + } + + template + inline T* unsafe_data() const { + return static_cast(this->data_ptr_.get()); + } + + // Destructor doesn't call release_resources because it's + // unnecessary; don't forget to change that if needed! 
+ void release_resources() override { + data_ptr_.clear(); + } + + size_t nbytes() const { + TORCH_CHECK(!size_bytes_is_symbolic_); + return size_bytes_.as_int_unchecked(); + } + + SymInt sym_nbytes() const { + return size_bytes_; + } + + // TODO: remove later + void set_nbytes(size_t size_bytes) { + size_bytes_ = size_bytes; + size_bytes_is_symbolic_ = false; + } + + void set_nbytes(c10::SymInt size_bytes) { + size_bytes_ = size_bytes; + } + + bool resizable() const { + return resizable_; + }; + + at::DataPtr& data_ptr() { + return data_ptr_; + }; + + const at::DataPtr& data_ptr() const { + return data_ptr_; + }; + + // Returns the previous data_ptr + at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) { + at::DataPtr old_data_ptr(std::move(data_ptr_)); + data_ptr_ = std::move(data_ptr); + return old_data_ptr; + }; + + void set_data_ptr_noswap(at::DataPtr&& data_ptr) { + data_ptr_ = std::move(data_ptr); + } + + // TODO: Return const ptr eventually if possible + void* data() { + return data_ptr_.get(); + } + + void* data() const { + return data_ptr_.get(); + } + + at::DeviceType device_type() const { + return data_ptr_.device().type(); + } + + at::Allocator* allocator() { + return allocator_; + } + + const at::Allocator* allocator() const { + return allocator_; + }; + + // You generally shouldn't use this method, but it is occasionally + // useful if you want to override how a tensor will be reallocated, + // after it was already allocated (and its initial allocator was + // set) + void set_allocator(at::Allocator* allocator) { + allocator_ = allocator; + } + + Device device() const { + return data_ptr_.device(); + } + + void set_resizable(bool resizable) { + if (resizable) { + // We need an allocator to be resizable + AT_ASSERT(allocator_); + } + resizable_ = resizable; + } + + /** + * Can only be called when use_count is 1 + */ + void UniqueStorageShareExternalPointer( + void* src, + size_t size_bytes, + DeleterFnPtr d = nullptr) { + UniqueStorageShareExternalPointer( + at::DataPtr(src, src, d, data_ptr_.device()), size_bytes); + } + + /** + * Can only be called when use_count is 1 + */ + void UniqueStorageShareExternalPointer( + at::DataPtr&& data_ptr, + size_t size_bytes) { + data_ptr_ = std::move(data_ptr); + size_bytes_ = size_bytes; + size_bytes_is_symbolic_ = false; + allocator_ = nullptr; + resizable_ = false; + } + + // This method can be used only after storage construction and cannot be used + // to modify storage status + void set_received_cuda(bool received_cuda) { + received_cuda_ = received_cuda; + } + + bool received_cuda() { + return received_cuda_; + } + + private: + DataPtr data_ptr_; + SymInt size_bytes_; + bool size_bytes_is_symbolic_; + bool resizable_; + // Identifies that Storage was received from another process and doesn't have + // local to process cuda memory allocation + bool received_cuda_; + Allocator* allocator_; +}; +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/Stream.h b/voice_bridge/torch/include/c10/core/Stream.h new file mode 100644 index 0000000000000000000000000000000000000000..d0abcb1e212691479dca183665d66c3df86b3554 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/Stream.h @@ -0,0 +1,185 @@ +#pragma once + +#include + +namespace c10 { + +/// An index representing a specific stream. A StreamId is not independently +/// meaningful without knowing the Device it is associated with; try to +/// use Stream rather than StreamId directly. 
+/// +/// StreamIds are opaque; they are assigned by some DeviceType-specific +/// numbering system which is not visible to the user. HOWEVER, we +/// guarantee that StreamId 0 is always a valid stream, and corresponds +/// to some sort of "default" stream. +using StreamId = int64_t; + +// NB: I decided not to call the above StreamIndex to avoid confusion with +// DeviceIndex. This way, you access device index with index(), and stream id +// with id() + +/** + * A stream is a software mechanism used to synchronize launched kernels + * without requiring explicit synchronizations between kernels. The basic + * model is that every kernel launch is associated with a stream: every + * kernel on the same stream is implicitly synchronized so that if I launch + * kernels A and B on the same stream, A is guaranteed to finish before B + * launches. If I want B to run concurrently with A, I must schedule + * it on a different stream. + * + * The Stream class is a backend agnostic value class representing a stream + * which I may schedule a kernel on. Every stream is associated with a device, + * which is recorded in stream, which is used to avoid confusion about which + * device a stream refers to. + * + * Streams are explicitly thread-safe, in the sense that it is OK to pass + * a Stream from one thread to another, and kernels queued from two different + * threads will still get serialized appropriately. (Of course, the + * time when the kernels get queued is undetermined unless you synchronize + * host side ;) + * + * Stream does NOT have a default constructor. Streams are for expert + * users; if you want to use Streams, we're going to assume you know + * how to deal with C++ template error messages if you try to + * resize() a vector of Streams. + * + * Known instances of streams in backends: + * + * - cudaStream_t (CUDA) + * - hipStream_t (HIP) + * - cl_command_queue (OpenCL) (NB: Caffe2's existing OpenCL integration + * does NOT support command queues.) + * + * Because this class is device agnostic, it cannot provide backend-specific + * functionality (e.g., get the cudaStream_t of a CUDA stream.) There are + * wrapper classes which provide this functionality, e.g., CUDAStream. + */ +class C10_API Stream final { + private: + Device device_; + StreamId id_; + + public: + enum Unsafe { UNSAFE }; + enum Default { DEFAULT }; + + /// Unsafely construct a stream from a Device and a StreamId. In + /// general, only specific implementations of streams for a + /// backend should manufacture Stream directly in this way; other users + /// should use the provided APIs to get a stream. In particular, + /// we don't require backends to give any guarantees about non-zero + /// StreamIds; they are welcome to allocate in whatever way they like. + explicit Stream(Unsafe, Device device, StreamId id) + : device_(device), id_(id) {} + + /// Construct the default stream of a Device. The default stream is + /// NOT the same as the current stream; default stream is a fixed stream + /// that never changes, whereas the current stream may be changed by + /// StreamGuard. 
+  explicit Stream(Default, Device device) : device_(device), id_(0) {}
+
+  bool operator==(const Stream& other) const noexcept {
+    return this->device_ == other.device_ && this->id_ == other.id_;
+  }
+  bool operator!=(const Stream& other) const noexcept {
+    return !(*this == other);
+  }
+
+  Device device() const noexcept {
+    return device_;
+  }
+  DeviceType device_type() const noexcept {
+    return device_.type();
+  }
+  DeviceIndex device_index() const noexcept {
+    return device_.index();
+  }
+  StreamId id() const noexcept {
+    return id_;
+  }
+
+  // Enqueues a wait instruction in the stream's work queue.
+  // This instruction is a no-op unless the event is marked
+  // for recording. In that case the stream stops processing
+  // until the event is recorded.
+  template <typename T>
+  void wait(const T& event) const {
+    event.block(*this);
+  }
+
+  // Return whether all asynchronous work previously enqueued on this stream
+  // has completed running on the device.
+  bool query() const;
+
+  // Wait (by blocking the calling thread) until all asynchronous work enqueued
+  // on this stream has completed running on the device.
+  void synchronize() const;
+
+  // The purpose of this function is to more conveniently permit binding
+  // of Stream to and from Python. Without packing, I have to setup a whole
+  // class with two fields (device and stream id); with packing I can just
+  // store a single uint64_t.
+  //
+  // The particular way we pack streams into a uint64_t is considered an
+  // implementation detail and should not be relied upon.
+  uint64_t pack() const noexcept {
+    // Are you here because this static assert failed? Make sure you ensure
+    // that the bitmasking code below is updated accordingly!
+    static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit");
+    static_assert(sizeof(DeviceIndex) == 1, "DeviceIndex is not 8-bit");
+    static_assert(sizeof(StreamId) == 8, "StreamId is not 64-bit");
+    // Concat these together into a 64-bit integer
+    // See Note [Hazard when concatenating signed integers]
+    uint64_t bits = static_cast<uint64_t>(static_cast<uint8_t>(device_type()))
+            << 56 |
+        static_cast<uint64_t>(static_cast<uint8_t>(device_index())) << 48 |
+        // Remove the sign extension part of the 64-bit address because
+        // the id might be used to hold a pointer.
+        (static_cast<uint64_t>(id()) & ((1ull << 48) - 1));
+    TORCH_INTERNAL_ASSERT(
+        static_cast<DeviceIndex>((bits >> 48) & 0xFFull) == device_index(),
+        "DeviceIndex is not correctly packed");
+    TORCH_INTERNAL_ASSERT(
+        static_cast<DeviceType>(bits >> 56) == device_type(),
+        "DeviceType is not correctly packed");
+    // Re-extend the sign of stream_id for checking
+    uint64_t mask = (1ull << 47);
+    TORCH_INTERNAL_ASSERT(
+        static_cast<StreamId>(((bits & 0xFFFFFFFFFFFFull) ^ mask) - mask) ==
+            id(),
+        "StreamId is not correctly packed");
+    return bits;
+  }
+
+  static Stream unpack(uint64_t bits) {
+    // Re-extend the sign of stream_id
+    uint64_t mask = (1ull << 47);
+    const auto stream_id =
+        (static_cast<int64_t>(bits & 0xFFFFFFFFFFFFull) ^ mask) - mask;
+    bits >>= 48;
+    const auto device_index = static_cast<DeviceIndex>(bits & 0xFFull);
+    bits >>= 8;
+    const auto device_type = static_cast<DeviceType>(bits);
+    TORCH_CHECK(isValidDeviceType(device_type));
+    // Unfortunately, we can't check if the StreamId is valid here; it
+    // will be checked upon first use.
+    return Stream(UNSAFE, Device(device_type, device_index), stream_id);
+  }
+
+  // I decided NOT to provide setters on this class, because really,
+  // why would you change the device of a stream? Just construct
+  // it correctly from the beginning dude.
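// Illustrative round trip of pack()/unpack() (a sketch, not part of the
// upstream header; the values are made up):
//
//   Stream s(Stream::UNSAFE, Device(DeviceType::CPU, 0), /*id=*/3);
//   Stream t = Stream::unpack(s.pack());
//   TORCH_INTERNAL_ASSERT(t == s); // device type, index and id all survive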
+}; + +C10_API std::ostream& operator<<(std::ostream& stream, const Stream& s); + +} // namespace c10 + +namespace std { +template <> +struct hash { + size_t operator()(c10::Stream s) const noexcept { + return std::hash{}(s.pack()); + } +}; +} // namespace std diff --git a/voice_bridge/torch/include/c10/core/StreamGuard.h b/voice_bridge/torch/include/c10/core/StreamGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..8a4116f80f0aecbdf3353121ce284d77464ab9c9 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/StreamGuard.h @@ -0,0 +1,165 @@ +#pragma once + +#include + +namespace c10 { + +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * Use of StreamGuard is HIGHLY discouraged in operator definitions. In + * a single operator, you probably don't know enough about the global + * state of the world to profitably decide how to set streams. Let + * the caller handle this appropriately, and just use the current stream + * in your operator code. + * + * This StreamGuard does NOT have an uninitialized state; it is guaranteed + * to reset the stream and device on exit. If you are in a situation + * where you *might* want to setup a stream guard, see OptionalStreamGuard. + */ +struct StreamGuard { + /// No default constructor, see Note [Omitted default constructor from RAII] + explicit StreamGuard() = delete; + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + explicit StreamGuard(Stream stream) : guard_(stream) {} + + /// Copy is disallowed + StreamGuard(const StreamGuard&) = delete; + StreamGuard& operator=(const StreamGuard&) = delete; + + /// Move is disallowed, as StreamGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + StreamGuard(StreamGuard&& other) = delete; + StreamGuard& operator=(StreamGuard&& other) = delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// + /// NOTE: this implementation may skip some stream/device setting if + /// it can prove that it is unnecessary. + /// + /// WARNING: reset_stream does NOT preserve previously set streams on + /// different devices. If you need to set streams on multiple devices + /// on , use MultiStreamGuard instead. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the stream that was set at the time the guard was constructed. + Stream original_stream() const { + return guard_.original_stream(); + } + + /// Returns the most recent stream that was set using this device guard, + /// either from construction, or via set_stream. + Stream current_stream() const { + return guard_.current_stream(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return guard_.current_device(); + } + + /// Returns the device that was set at the most recent reset_stream(), + /// or otherwise the device at construction time. 
+ Device original_device() const { + return guard_.original_device(); + } + + private: + c10::impl::InlineStreamGuard guard_; +}; + +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See OptionalDeviceGuard for more guidance on how to use this class. + */ +struct OptionalStreamGuard { + /// Create an uninitialized guard. + explicit OptionalStreamGuard() : guard_() {} + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + explicit OptionalStreamGuard(Stream stream) : guard_(stream) {} + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream, + /// if the passed stream is not nullopt. + explicit OptionalStreamGuard(optional stream_opt) + : guard_(stream_opt) {} + + /// Copy is disallowed + OptionalStreamGuard(const OptionalStreamGuard&) = delete; + OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + OptionalStreamGuard(OptionalStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// Initializes the guard if it was not previously initialized. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the stream that was set at the time the guard was most recently + /// initialized, or nullopt if the guard is uninitialized. + optional original_stream() const { + return guard_.original_stream(); + } + + /// Returns the most recent stream that was set using this stream guard, + /// either from construction, or via reset_stream, if the guard is + /// initialized, or nullopt if the guard is uninitialized. + optional current_stream() const { + return guard_.current_stream(); + } + + /// Restore the original device and stream, resetting this guard to + /// uninitialized state. + void reset() { + guard_.reset(); + } + + private: + c10::impl::InlineOptionalStreamGuard guard_; +}; + +/** + * A MultiStreamGuard is an RAII class that sets the current streams of a set of + * devices all at once, and resets them to their original values on destruction. + */ +struct MultiStreamGuard { + /// Set the current streams to the passed streams on each of their respective + /// devices. 
+ explicit MultiStreamGuard(ArrayRef streams) : guard_(streams) {} + + /// Copy is disallowed + MultiStreamGuard(const MultiStreamGuard&) = delete; + MultiStreamGuard& operator=(const MultiStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + MultiStreamGuard(MultiStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete; + + private: + c10::impl::InlineMultiStreamGuard guard_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/SymFloat.h b/voice_bridge/torch/include/c10/core/SymFloat.h new file mode 100644 index 0000000000000000000000000000000000000000..92abb81ea2a220260cfb23a8637d6ccbba2b8a14 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/SymFloat.h @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +// NB: this is actually double precision; we're using the Python naming here +class C10_API SymFloat { + public: + /*implicit*/ SymFloat(double d) : data_(d){}; + SymFloat(SymFloatNode ptr) + : data_(std::numeric_limits::quiet_NaN()), ptr_(std::move(ptr)){}; + SymFloat() : data_(0.0) {} + + SymFloatNodeImpl* toSymFloatNodeImplUnowned() const { + return ptr_.get(); + } + + SymFloatNodeImpl* release() && { + return std::move(ptr_).release(); + } + + SymFloatNode toSymFloatNodeImpl() const; + static c10::SymFloat toSymFloat(SymFloatNode sin); + + double expect_float() const { + TORCH_CHECK(!is_symbolic()); + return data_; + } + + SymFloat operator+(SymFloat) const; + SymFloat operator-(SymFloat) const; + SymFloat operator*(SymFloat) const; + SymFloat operator/(SymFloat) const; + + // N.B. It's important to keep this definition in the header + // as we expect if checks to be folded for mobile builds + // where `is_symbolic` is always false + C10_ALWAYS_INLINE bool is_symbolic() const { + return ptr_; + } + + double as_float_unchecked() const { + return data_; + } + + private: + // TODO: optimize to union + double data_; + SymFloatNode ptr_; +}; + +C10_API std::ostream& operator<<(std::ostream& os, SymFloat s); +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/SymFloatNodeImpl.h b/voice_bridge/torch/include/c10/core/SymFloatNodeImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..7305be9dd96d6835ef3e68e79fbbc3e8c8c50848 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/SymFloatNodeImpl.h @@ -0,0 +1,72 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace c10 { + +class SymIntNodeImpl; +using SymIntNode = c10::intrusive_ptr; + +class SymFloat; +class SymFloatNodeImpl; +using SymFloatNode = c10::intrusive_ptr; + +class C10_API SymFloatNodeImpl : public c10::intrusive_ptr_target { + public: + c10::SymFloat toSymFloat(); + virtual ~SymFloatNodeImpl(){}; + + template + c10::intrusive_ptr dyn_cast() const { + return c10::intrusive_ptr::reclaim_copy(dynamic_cast(this)); + } + + virtual SymFloatNode wrap(double num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymFloatNode add(const SymFloatNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymFloatNode sub(const SymFloatNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymFloatNode mul(const SymFloatNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymFloatNode truediv(const SymFloatNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymFloatNode eq(const SymFloatNode& other) { + TORCH_CHECK(false, "NYI"); + }; + 
virtual SymFloatNode ne(const SymFloatNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymFloatNode gt(const SymFloatNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymFloatNode lt(const SymFloatNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymFloatNode le(const SymFloatNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymFloatNode ge(const SymFloatNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode ceil();
+  virtual std::string str() {
+    TORCH_CHECK(false, "NYI");
+  };
+  std::ostream& operator<<(std::ostream& os) {
+    os << str();
+    return os;
+  };
+};
+
+} // namespace c10
diff --git a/voice_bridge/torch/include/c10/core/SymInt.h b/voice_bridge/torch/include/c10/core/SymInt.h
new file mode 100644
index 0000000000000000000000000000000000000000..00c51c89bc2924da0a0d0d9dd6fa762e3643d418
--- /dev/null
+++ b/voice_bridge/torch/include/c10/core/SymInt.h
@@ -0,0 +1,246 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace c10 {
+
+class SymFloat;
+
+// `SymInt` is a C++ wrapper class around an int64_t data_ field, and is used
+// to represent concrete dimension values.
+//
+// `SymInt` is also a data type in PyTorch that can be used in function schemas
+// to enable tracing.
+//
+// `SymInt` is introduced to enable tracing arithmetic
+// operations on symbolic integers (e.g. sizes). Tracing symbolic sizes will
+// allow LTC and AOTAutograd to represent dynamic shapes in expression graphs
+// faithfully without baking in concrete dimension values.
+//
+// To trace the operations, SymInt will overload arithmetic operators (e.g. +,
+// -, *) and will provide overloads taking SymInt for commonly used math
+// functions.
+//
+// SymInt will be extended to represent a union structure Union[int64_t,
+// SymIntNodeImpl*] which will be implemented as a single packed int64_t field
+// named data_.
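// A minimal, self-contained model of the packed representation described
// above (illustrative only; the real masks and accessors live in the class
// below):
//
//   constexpr uint64_t kMask = 1ULL << 63 | 1ULL << 62;
//   constexpr uint64_t kIsSym = 1ULL << 63;
//   inline bool model_is_symbolic(int64_t data) {
//     // The reserved 0b10... bit pattern tags a SymIntNodeImpl*; plain
//     // non-negative ints (0b0...) and small negative ints (0b11...) pass
//     // through unchanged, so they need no conversion at all.
//     return (static_cast<uint64_t>(data) & kMask) == kIsSym;
//   }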
+ +#ifdef C10_MOBILE +#define SKIP_IS_SYMBOLIC_ON_MOBILE(_) \ + do { \ + } while (0) +#else +#define SKIP_IS_SYMBOLIC_ON_MOBILE(X) TORCH_CHECK(X) +#endif + +class C10_API SymInt { + public: + enum Unchecked { + UNCHECKED, + }; + + /*implicit*/ SymInt(int64_t d) : data_(d) { + SKIP_IS_SYMBOLIC_ON_MOBILE(!is_symbolic()); + }; + SymInt() : data_(0) {} + + // unchecked c-tor accepting raw `data_` + // One appropriate use for this is when you are constructing a symint + // in a situation where you know it is non-negative (or, if it is negative, + // the negative value is -1; i.e., not user controlled) + SymInt(Unchecked, int64_t d) : data_(d) {} + + // TODO: these implementations are not optimal because they allocate a + // temporary and then use the move constructor/assignment + SymInt(const SymInt& s) : data_(0) { + if (s.is_symbolic()) { + *this = SymInt::toSymInt(s.toSymIntNodeImpl()); + } else { + data_ = s.data_; + } + } + SymInt(SymInt&& s) : data_(s.data_) { + s.data_ = 0; + } + + SymInt& operator=(const SymInt& s) { + if (this != &s) { + if (s.is_symbolic()) { + *this = SymInt::toSymInt(s.toSymIntNodeImpl()); + } else { + data_ = s.data_; + } + } + return *this; + } + SymInt& operator=(SymInt&& s) { + if (this != &s) { + release_(); // release the current SymIntNode if any + data_ = s.data_; + if (s.is_symbolic()) + s.data_ = 0; + }; + return *this; + } + + SymInt clone() const { +#ifndef C10_MOBILE + if (is_symbolic()) { + return toSymIntNodeImplUnowned()->clone()->toSymInt(); + } +#else + TORCH_INTERNAL_ASSERT(!is_symbolic()); +#endif + return *this; + } + +#ifndef C10_MOBILE + SymIntNodeImpl* toSymIntNodeImplUnowned() const { + uint64_t unextended_bits = static_cast(data_) & ~MASK; + uint64_t sign_bit_mask = 1ULL << (62 - 1); + // https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c + uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask; + return static_cast( + reinterpret_cast(static_cast(extended_bits))); + } + + void release_() { + if (is_symbolic()) { + SymIntNode::reclaim(toSymIntNodeImplUnowned()); // steal + } + } + + SymIntNodeImpl* release() && { + TORCH_INTERNAL_ASSERT(is_symbolic()); + auto* r = toSymIntNodeImplUnowned(); + data_ = 0; // transfer ownership + return r; + } +#else + void release_() {} + + SymIntNodeImpl* release() && { + TORCH_INTERNAL_ASSERT(false); + } +#endif + + SymIntNode toSymIntNodeImpl() const; + static c10::SymInt toSymInt(SymIntNode sin); + + ~SymInt() { + release_(); + } + + // Require the int to be non-symbolic, and if it is symbolic raise an + // error. This is safe to use for C++ code that doesn't work for symbolic + // shapes, and you don't have time to fix it immediately, as if we + // try to trigger the path in C++ you'll appropriately get an error + int64_t expect_int() const { + SKIP_IS_SYMBOLIC_ON_MOBILE(!is_symbolic()); + return data_; + } + + // Insert a guard for the int to be its concrete value, and then return + // that value. This operation always works, even if the int is symbolic, + // so long as we know what the underlying value is (e.g., this won't work + // if you call it on the size of nonzero output). Don't blindly put this + // everywhere; you can cause overspecialization of PyTorch programs with + // this method. + // + // It should be called as guard_int(__FILE__, __LINE__). The file and line + // number can be used to diagnose overspecialization. + int64_t guard_int(const char* file, int64_t line) const; + + // N.B. 
It's important to keep this definition in the header
+  // as we expect the `if` checks to be folded for mobile builds
+  // where `is_symbolic` is always false
+  C10_ALWAYS_INLINE bool is_symbolic() const {
+#ifdef C10_MOBILE
+    return false;
+#else
+    return (MASK & static_cast<uint64_t>(this->data_)) == IS_SYM;
+#endif
+  }
+
+  SymInt operator+(SymInt sci) const;
+  SymInt operator-(SymInt sci) const;
+  SymInt operator*(SymInt sci) const;
+  SymInt operator/(SymInt sci) const;
+  SymInt operator%(SymInt sci) const;
+  bool operator==(SymInt sci) const;
+  bool operator!=(SymInt p2) const;
+  bool operator<(SymInt sci) const;
+  bool operator<=(SymInt sci) const;
+  bool operator>(SymInt sci) const;
+  bool operator>=(SymInt sci) const;
+  void operator*=(SymInt sci);
+  void operator+=(SymInt sci);
+
+  SymInt operator*(int64_t sci) const;
+  bool operator<(int64_t sci) const;
+  bool operator==(int64_t sci) const;
+  bool operator!=(int64_t sci) const;
+  bool operator<=(int64_t sci) const;
+  bool operator>(int64_t sci) const;
+  bool operator>=(int64_t sci) const;
+
+  operator SymFloat() const;
+
+  int64_t as_int_unchecked() const {
+    return data_;
+  }
+
+  // Return whether the integer is representable as a SymInt.
+  static bool check_range(int64_t i) {
+    return i > MIN_INT;
+  }
+
+ private:
+  // Constraints on the internal representation:
+  // - Should represent positive and small negative ints
+  // - No conversion necessary for operations on ints.
+  // - Must represent valid 64-bit pointers
+  //
+  // So, the scheme is to reserve large negative numbers:
+  // - 0b0.... means we are a positive int (following two's complement)
+  // - 0b11... means we are a negative int (following two's complement)
+  // - 0b10... means we are a pointer. This means that
+  //   [-2^63, -2^62-1] are not representable as ints.
+  //   We don't actually need all of this space, as the top 16 bits
+  //   aren't used for anything on x86_64.
+  static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62;
+  static constexpr uint64_t IS_SYM = 1ULL << 63;
+  // Since we use the top two bits to determine whether something is symbolic,
+  // we cannot represent symbolic indices that are large enough to use those
+  // bits. This will probably never happen.
+  static constexpr uint64_t MAX_SYM_IDX = 1ULL << 62;
+  // Since 0b10... is reserved for symbolic indices, any integers lower than
+  // this value would collide with our representation.
+  static constexpr int64_t MIN_INT = -1LL & static_cast<int64_t>(~(1ULL << 62));
+  int64_t data_;
+};
+
+#undef SKIP_IS_SYMBOLIC_ON_MOBILE
+
+/// Product of a list of SymInt; accumulates into the c10::SymInt expression
+template <
+    typename C,
+    typename std::enable_if<
+        std::is_same<typename C::value_type, c10::SymInt>::value,
+        int>::type = 0>
+inline c10::SymInt multiply_integers(const C& container) {
+  return std::accumulate(
+      container.begin(),
+      container.end(),
+      c10::SymInt(1),
+      [](c10::SymInt a, c10::SymInt b) { return a * b; });
+}
+
+C10_API std::ostream& operator<<(std::ostream& os, SymInt s);
+} // namespace c10
diff --git a/voice_bridge/torch/include/c10/core/SymIntArrayRef.h b/voice_bridge/torch/include/c10/core/SymIntArrayRef.h
new file mode 100644
index 0000000000000000000000000000000000000000..f39f2ac2af814a07e8a400c8b095285750452556
--- /dev/null
+++ b/voice_bridge/torch/include/c10/core/SymIntArrayRef.h
@@ -0,0 +1,43 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+namespace c10 {
+using SymIntArrayRef = ArrayRef<SymInt>;
+
+TORCH_API at::IntArrayRef asIntArrayRefSlow(c10::SymIntArrayRef ar);
+TORCH_API at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar);
+TORCH_API c10::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
+    c10::SymIntArrayRef ar);
+
+// Prefer using a more semantic constructor, like
+// fromIntArrayRefKnownNonNegative
+inline SymIntArrayRef fromIntArrayRefUnchecked(IntArrayRef array_ref) {
+  return SymIntArrayRef(
+      reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
+}
+
+inline SymIntArrayRef fromIntArrayRefKnownNonNegative(IntArrayRef array_ref) {
+  return fromIntArrayRefUnchecked(array_ref);
+}
+
+inline SymIntArrayRef fromIntArrayRef(IntArrayRef array_ref) {
+  for (size_t i = 0; i < array_ref.size(); ++i) {
+    TORCH_CHECK(
+        SymInt::check_range(array_ref[i]),
+        "IntArrayRef contains an int that cannot be represented as a SymInt: ",
+        array_ref[i]);
+  }
+  return SymIntArrayRef(
+      reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
+}
+
+} // namespace c10
diff --git a/voice_bridge/torch/include/c10/core/SymIntNodeImpl.h b/voice_bridge/torch/include/c10/core/SymIntNodeImpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..3386ce1f970ab5d83e5805ec831c2429eca66e05
--- /dev/null
+++ b/voice_bridge/torch/include/c10/core/SymIntNodeImpl.h
@@ -0,0 +1,93 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace c10 {
+
+class SymInt;
+class SymIntNodeImpl;
+
+class C10_API SymIntNodeImpl : public c10::intrusive_ptr_target {
+ public:
+  c10::SymInt toSymInt();
+  virtual ~SymIntNodeImpl(){};
+
+  template <typename T>
+  c10::intrusive_ptr<T> dyn_cast() const {
+    return c10::intrusive_ptr<T>::reclaim_copy(dynamic_cast<T*>(this));
+  }
+
+  // these could be pure virtual when we implement LTC versions
+  virtual SymIntNode add(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode sub(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode mul(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymFloatNode truediv(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode floordiv(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode mod(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode eq(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode ne(const SymIntNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymIntNode gt(const SymIntNode&
other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymIntNode lt(const SymIntNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymIntNode le(const SymIntNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymIntNode ge(const SymIntNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymIntNode ceil() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymIntNode clone() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymFloatNode sym_float() { + TORCH_CHECK(false, "NYI"); + } + virtual SymIntNode wrap(int64_t num) { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t guard_int(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t int_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool bool_() { + TORCH_CHECK(false, "NYI"); + }; + virtual std::string str() { + TORCH_CHECK(false, "NYI"); + }; + std::ostream& operator<<(std::ostream& os) { + os << str(); + return os; + }; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/TensorImpl.h b/voice_bridge/torch/include/c10/core/TensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..f110b0e9fa460eb99ddf8c28b5864dc1eda80b75 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/TensorImpl.h @@ -0,0 +1,3215 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// A global boolean variable to control whether we free memory when a Tensor +// is shrunk to a smaller size. As a result, a Tensor is always going to +// keep the memory allocated for its maximum capacity reshaped to so far. +// +// This parameter is respected "upper-case" methods which call Resize() +// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_ +// or ShrinkTo, both of which guarantee to never to free memory. +C10_DECLARE_bool(caffe2_keep_on_shrink); + +// Since we can have high variance in blob memory allocated across different +// inputs in the same run, we will shrink the blob only if the memory gain +// is larger than this flag in bytes. This only applies to functions which +// respect caffe2_keep_on_shrink. +C10_DECLARE_int64(caffe2_max_keep_on_shrink_memory); + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace at { +class Tensor; +class TensorBase; +} // namespace at + +namespace c10 { +class Scalar; +struct Storage; +} // namespace c10 + +namespace c10 { + +/** + * A utility function to convert vector to vector. 
+ */ +inline std::vector ToVectorint64_t(const ArrayRef& src) { + return std::vector(src.begin(), src.end()); +} + +/** + * Return product of all dimensions starting from k + */ +inline int64_t size_from_dim_(int k, IntArrayRef dims) { + int64_t r = 1; + for (const auto i : c10::irange(k, dims.size())) { + r *= dims[i]; + } + return r; +} + +// Product of all dims up to k (not including dims[k]) +inline int64_t size_to_dim_(int k, IntArrayRef dims) { + TORCH_CHECK((unsigned)k <= dims.size()); + int64_t r = 1; + for (const auto i : c10::irange(k)) { + r *= dims[i]; + } + return r; +} + +// Product of all dims between k and l (not including dims[k] and dims[l]) +inline int64_t size_between_dim_(int k, int l, IntArrayRef dims) { + TORCH_CHECK((unsigned)l < dims.size() && (unsigned)k < dims.size()); + int64_t r = 1; + if (k < l) { + for (int i = k + 1; i < l; ++i) { + r *= dims[i]; + } + } else { + for (int i = l + 1; i < k; ++i) { + r *= dims[i]; + } + } + return r; +} + +// Wrap around axis_index if it is negative, s.t., -1 is the last dim +inline int canonical_axis_index_(int axis_index, int ndims) { + TORCH_CHECK(axis_index >= -ndims); + TORCH_CHECK(axis_index < ndims); + if (axis_index < 0) { + return axis_index + ndims; + } + return axis_index; +} + +using PlacementDtor = void (*)(void*, size_t); + +/* + * A Context that will call extra placement deleter during + * deconstruction. + * + * Accept a already constructed DataPtr and store it as member + * during destruction, we'll call extra deleter on the underlying + * data pointer before the DataPtr is destructed. + * `data_ptr_` owns the memory. + */ +struct C10_API PlacementDeleteContext { + DataPtr data_ptr_; + PlacementDtor placement_dtor_; + size_t size_; + PlacementDeleteContext( + DataPtr&& data_ptr, + PlacementDtor placement_dtor, + size_t size) + : data_ptr_(std::move(data_ptr)), + placement_dtor_(placement_dtor), + size_(size) {} + static DataPtr makeDataPtr( + DataPtr&& data_ptr, + PlacementDtor placement_dtor, + size_t size, + Device device); + ~PlacementDeleteContext() { + placement_dtor_(data_ptr_.get(), size_); + // original memory will be freed when data_ptr_ is destructed + } +}; + +struct TensorImpl; + +struct C10_API AutogradMetaInterface { + virtual void set_requires_grad( + bool requires_grad, + at::TensorImpl* self_impl) = 0; + virtual bool requires_grad() const = 0; + virtual at::Tensor& mutable_grad() = 0; + virtual const at::Tensor& grad() const = 0; + virtual const at::Tensor& fw_grad(uint64_t level, const at::TensorBase& self) + const = 0; + virtual void set_fw_grad( + const at::TensorBase& new_grad, + const at::TensorBase& self, + uint64_t level, + bool is_inplace_op) = 0; + virtual ~AutogradMetaInterface(); +}; + +namespace impl { + +// Unfortunately, the definition of AutogradMeta lives in a separate +// compilation unit than TensorImpl (libtorch.so versus libc10.so) +// which means that we cannot construct an AutogradMeta from TensorImpl, +// not even from the cpp file. So we have to indirect it through a factory +// function which will be initialized when we load libtorch.so. + +struct C10_API AutogradMetaFactory { + virtual ~AutogradMetaFactory() = default; + virtual std::unique_ptr make() const = 0; + // This method is the dumbest method. But I don't have access + // to Tensor (not TensorImpl) which is undefined in this header. 
+ virtual const at::Tensor& undefined_tensor() const = 0; +}; + +C10_API void SetAutogradMetaFactory(AutogradMetaFactory* factory); +C10_API AutogradMetaFactory* GetAutogradMetaFactory(); + +struct C10_API AutogradMetaFactoryRegisterer { + explicit AutogradMetaFactoryRegisterer(AutogradMetaFactory* factory) { + SetAutogradMetaFactory(factory); + } +}; + +// PyInterpreterStatus describes what the state of its interpreter tag +// is, relative to the thread currently holding the GIL. +enum class PyInterpreterStatus { + // We just allocated the Tensor, it hasn't escaped to other threads, + // we know that it definitely hasn't been tagged to be associated + // with an interpreter. + DEFINITELY_UNINITIALIZED, + // We queried the interpreter field and it looked uninitialized. But + // another thread may have raced with us to tag it with some other + // interpreter id. So we will have to do a CEX to make sure we can + // actually nab it. + MAYBE_UNINITIALIZED, + // We queried the interpreter field and it was tagged to belong to us. + // This means we have sole write access (as we hold the GIL for this + // interpreter) + TAGGED_BY_US, + // Someone else tagged this. We can't use this TensorImpl from Python. + TAGGED_BY_OTHER, +}; + +} // namespace impl + +struct C10_API NamedTensorMetaInterface { + virtual ~NamedTensorMetaInterface(){}; + virtual std::unique_ptr clone() const { + TORCH_INTERNAL_ASSERT( + false, "Not implemented: NamedTensorMetaInterface::clone"); + }; + virtual int64_t slow_dim() const { + TORCH_INTERNAL_ASSERT( + false, "Not implemented: NamedTensorMetaInterface::slow_dim"); + }; +}; + +template +using strong_bool = strong:: + type; + +// For ease of copy pasting +#if 0 +is_contiguous +is_channels_last_contiguous +is_channels_last_3d_contiguous +is_channels_last +is_channels_last_3d +is_non_overlapping_and_dense +#endif + +using bool_is_contiguous = strong_bool; +using bool_is_channels_last_contiguous = + strong_bool; +using bool_is_channels_last_3d_contiguous = + strong_bool; +using bool_is_channels_last = strong_bool; +using bool_is_channels_last_3d = strong_bool; +using bool_is_non_overlapping_and_dense = + strong_bool; + +struct C10_API ExtraMeta { + SymDimVector sizes_ = {0}; + SymDimVector strides_ = {1}; + SymInt numel_ = 1; + SymInt storage_offset_ = 0; + // TODO: make these all SymBool + bool_is_contiguous is_contiguous_{true}; + bool_is_channels_last_contiguous is_channels_last_contiguous_{false}; + bool_is_channels_last_3d_contiguous is_channels_last_3d_contiguous_{false}; + bool_is_channels_last is_channels_last_{false}; + bool_is_channels_last_3d is_channels_last_3d_{false}; + bool_is_non_overlapping_and_dense is_non_overlapping_and_dense_{true}; + std::unique_ptr named_tensor_meta_ = nullptr; + + ExtraMeta() {} + + ExtraMeta( + SymDimVector sizes, + SymDimVector strides, + SymInt numel, + SymInt storage_offset, + bool_is_contiguous is_contiguous, + bool_is_channels_last_contiguous is_channels_last_contiguous, + bool_is_channels_last_3d_contiguous is_channels_last_3d_contiguous, + bool_is_channels_last is_channels_last, + bool_is_channels_last_3d is_channels_last_3d, + bool_is_non_overlapping_and_dense is_non_overlapping_and_dense, + std::unique_ptr named_tensor_meta) + : sizes_(std::move(sizes)), + strides_(std::move(strides)), + numel_(std::move(numel)), + storage_offset_(std::move(storage_offset)), + is_contiguous_(is_contiguous), + is_channels_last_contiguous_(is_channels_last_contiguous), + is_channels_last_3d_contiguous_(is_channels_last_3d_contiguous), + 
is_channels_last_(is_channels_last),
+        is_channels_last_3d_(is_channels_last_3d),
+        is_non_overlapping_and_dense_(is_non_overlapping_and_dense),
+        named_tensor_meta_(std::move(named_tensor_meta)) {}
+
+  std::unique_ptr<ExtraMeta> clone() const {
+    return std::make_unique<ExtraMeta>(
+        sizes_,
+        strides_,
+        numel_,
+        storage_offset_,
+        is_contiguous_,
+        is_channels_last_contiguous_,
+        is_channels_last_3d_contiguous_,
+        is_channels_last_,
+        is_channels_last_3d_,
+        is_non_overlapping_and_dense_,
+        named_tensor_meta_ ? named_tensor_meta_->clone() : nullptr);
+  }
+};
+
+// NOTE [ Version Counter Sharing ]
+//
+// Every Tensor has a version counter. Version counters are incremented
+// whenever the data or size of a tensor changes through in-place Variable
+// operations. Version counters are used to detect modifications to saved
+// variables which would result in incorrect gradient calculations. Version
+// counters may be shared between Variables:
+//
+// 1. A view shares the version counter of the base Variable,
+// 2. `x.detach()` shares the version counter of `x`,
+// 3. Unpacked saved variables share the version counter of the source.
+//
+// Version counters are not shared in these scenarios:
+//
+// 1. When we replace a `Variable`'s underlying `Tensor` by calling
+// `set_data(...)`,
+// 2. `x.data` does not share the version counter of `x`. (See discussion at
+// https://github.com/pytorch/pytorch/issues/5396)
+//
+// Question: Why do we put the version counter in TensorImpl instead of
+// AutogradMeta?
+//
+// Answer: After the Variable/Tensor merge, a tensor will not have AutogradMeta
+// when its `requires_grad_` is false, but when we use this tensor in the
+// forward pass of a function that requires saving this tensor for backward, we
+// need to keep track of this tensor's version to make sure it's always valid
+// in the autograd graph.
+//
+// To achieve this goal, we put the version counter in TensorImpl instead of
+// AutogradMeta, and have it always be available. This allows us to have the
+// optimization of not carrying AutogradMeta when a tensor doesn't require
+// gradient.
+//
+// A hypothetical alternative way to achieve this goal is to initialize
+// AutogradMeta and create the version counter for the non-requires-grad tensor
+// only when it's saved for backward. However, since saving a tensor for
+// backward happens in the forward pass, and our invariant is that forward pass
+// needs to be thread-safe, lazy-initializing AutogradMeta when saving a tensor
+// can introduce race conditions when we are running the forward pass in
+// multi-thread scenarios, thus making the forward pass not thread-safe
+// anymore, which breaks the invariant.
+struct C10_API VariableVersion {
+ private:
+  struct VersionCounter : intrusive_ptr_target {
+    VersionCounter(uint32_t version) : version_(version) {}
+    std::atomic<uint32_t> version_;
+  };
+  c10::intrusive_ptr<VersionCounter> version_counter_;
+
+ public:
+  // Note [Disabled VariableVersion]
+  // VariableVersion struct has an intrusive_ptr pointing to a VersionCounter
+  // struct with an atomic variable. Thus `VariableVersion(/*version=*/0)` is
+  // not as cheap as we expected. In some cases constructing a VariableVersion
+  // with version 0 is not necessary so we add a cheap constructor which
+  // doesn't allocate the intrusive_ptr.
+  // Example use cases are:
+  //  - Inference tensors don't track version counter, so they'll just always
+  //    have disabled VariableVersion.
+  //  - In SavedVariable class we override version_counter_ inside its
+  //    constructor so that we can use the cheap constructor there.
+  enum Disabled { DISABLED };
+  // It's okay to return true even for inference tensor which
+  // doesn't have version counter enabled.
+  // We want to be permissive here since in many cases (e.g. make_variable)
+  // we can std::move a TensorImpl if there's no other uses which saves us
+  // an additional TensorImpl allocation.
+  bool unique() const {
+    return version_counter_ ? 1 == version_counter_.use_count() : true;
+  }
+  // NOTE: As of C++11 and 14, default-constructing a std::atomic variable
+  // leaves it in a persistently undefined state. See
+  // https://cplusplus.github.io/LWG/issue2334.
+  VariableVersion(uint32_t version)
+      : version_counter_(c10::make_intrusive<VersionCounter>(version)) {}
+  VariableVersion(Disabled = DISABLED) {}
+
+  bool enabled() const {
+    return version_counter_;
+  }
+
+  // Note [Inplace update inference tensor]
+  // 1. Inplace update to inference tensor is forbidden in normal mode.
+  // For example:
+  //   inference_tensor.copy_(normal_tensor_requires_grad)
+  // This inplace makes inference_tensor have requires_grad=True and
+  // have a grad_fn. This is bad because views of `inference_tensor`
+  // created in InferenceMode won't be able to know the grad_fn since
+  // their ViewMeta were not recorded. To match NoGradMode behavior
+  // that "inplace update to a view created in NoGradMode raises an error",
+  // we just ban inplace update to inference tensor since we can't tell
+  // if an inference tensor is a view created in InferenceMode.
+  //
+  // Note that views of a normal tensor created in InferenceMode have proper
+  // ViewMeta so that they're aware of the grad_fn correctly.
+  //
+  // 2. Inplace update to inference tensor in inference mode doesn't bump
+  // version counter.
+  //    * It either doesn't call bump() by skipping ADInplaceOrView kernel,
+  //      - e.g. inference_tensor.add_(1)
+  //    * or bump() is a no-op for inference tensor.
+  //      - e.g. inference_tensor.add_(normal_tensor)
+  void bump() {
+    // TODO: Replace the link to the documentation once it's available.
+    TORCH_CHECK(
+        version_counter_ || InferenceMode::is_enabled(),
+        "Inplace update to inference tensor outside InferenceMode is not allowed. "
+        "You can make a clone to get a normal tensor before doing inplace update. "
+        "See https://github.com/pytorch/rfcs/pull/17 for more details.");
+    if (version_counter_) {
+      ++version_counter_->version_;
+    }
+  }
+
+  // Inference tensor doesn't have version counter so it shouldn't be
+  // accessed.
+  uint32_t current_version() const {
+    TORCH_CHECK(
+        version_counter_, "Inference tensors do not track version counter.");
+    return version_counter_->version_;
+  }
+};
+
+// Forward declaration of TensorImpl needed for forward declaration of
+// C10_TensorImpl_Size_Check_Dummy_Class
+struct C10_API TensorImpl;
+
+// Forward declaration needed because TensorImpl needs to be friends with
+// C10_TensorImpl_Size_Check_Dummy_Class in order to check the size
+// of its private fields.
+template <
+    size_t cplusplus,
+    size_t clang_ver_major,
+    size_t gcc_ver,
+    size_t gcc_ver_minor,
+    size_t nvcc,
+    size_t cuda_version,
+    size_t cuda_version_major,
+    size_t ptr_size>
+class C10_TensorImpl_Size_Check_Dummy_Class;
+
+/**
+ * NOTE: Some TensorImpl methods are small and not overridden in the
+ * PyTorch codebase itself, but may theoretically need to be
+ * overridden by third-party TensorImpl subclasses.
This macro allows + * users that need maximum performance and don't need these extension + * points to disable them with a build-time flag. (In particular, + * XLA's XLATensorImpl currently overrides these methods, so we can't + * enable this flag by default.) + */ +#ifdef C10_DISABLE_TENSORIMPL_EXTENSIBILITY +#define TENSORIMPL_MAYBE_VIRTUAL +#else +#define TENSORIMPL_MAYBE_VIRTUAL virtual +#endif + +/** + * The low-level representation of a tensor, which contains a pointer + * to a storage (which contains the actual data) and metadata (e.g., sizes and + * strides) describing this particular view of the data as a tensor. + * + * Some basic characteristics about our in-memory representation of + * tensors: + * + * - It contains a pointer to a storage struct (Storage/StorageImpl) + * which contains the pointer to the actual data and records the + * data type and device of the view. This allows multiple tensors + * to alias the same underlying data, which allows to efficiently + * implement differing *views* on a tensor. + * + * - The tensor struct itself records view-specific metadata about + * the tensor, e.g., sizes, strides and offset into storage. + * Each view of a storage can have a different size or offset. + * + * - This class is intrusively refcounted. It is refcounted so that + * we can support prompt deallocation of large tensors; it is + * intrusively refcounted so that we can still perform reference + * counted operations on raw pointers, which is often more convenient + * when passing tensors across language boundaries. + * + * - For backwards-compatibility reasons, a tensor may be in an + * uninitialized state. A tensor may be uninitialized in the following + * two ways: + * + * - A tensor may be DTYPE UNINITIALIZED. A tensor of this + * form has an uninitialized dtype. This situation most + * frequently arises when a user writes Tensor x(CPU). The dtype + * is subsequently initialized when mutable_data() is + * invoked for the first time. + * + * - A tensor may be STORAGE UNINITIALIZED. A tensor of this form + * has non-zero size, but has a storage with a null data pointer. + * This situation most frequently arises when a user calls + * Resize() or FreeMemory(). This is because Caffe2 historically + * does lazy allocation: allocation of data doesn't occur until + * mutable_data() is invoked. A tensor with zero size is + * always storage initialized, because no allocation is necessary + * in this case. + * + * All combinations of these two uninitialized states are possible. + * Consider the following transcript in idiomatic Caffe2 API: + * + * Tensor x(CPU); // x is storage-initialized, dtype-UNINITIALIZED + * x.Resize(4); // x is storage-UNINITIALIZED, dtype-UNINITIALIZED + * x.mutable_data(); // x is storage-initialized, dtype-initialized + * x.FreeMemory(); // x is storage-UNINITIALIZED, dtype-initialized. + * + * All other fields on tensor are always initialized. In particular, + * size is always valid. (Historically, a tensor declared as Tensor x(CPU) + * also had uninitialized size, encoded as numel == -1, but we have now + * decided to default to zero size, resulting in numel == 0). + * + * Uninitialized storages MUST be uniquely owned, to keep our model + * simple. Thus, we will reject operations which could cause an + * uninitialized storage to become shared (or a shared storage to + * become uninitialized, e.g., from FreeMemory). 
+ * + * In practice, tensors which are storage-UNINITIALIZED and + * dtype-UNINITIALIZED are *extremely* ephemeral: essentially, + * after you do a Resize(), you basically always call mutable_data() + * immediately afterwards. Most functions are not designed to + * work if given a storage-UNINITIALIZED, dtype-UNINITIALIZED tensor. + * + * We intend to eliminate all uninitialized states, so that every + * tensor is fully initialized in all fields. Please do not write new code + * that depends on these uninitialized states. + */ +struct C10_API TensorImpl : public c10::intrusive_ptr_target { + TensorImpl() = delete; + virtual ~TensorImpl() override; + // Note [Enum ImplType] + // This enum is temporary. In the followup refactor we should + // think about how to specialize TensorImpl creation for view + // tensors. Currently we only special case its key_set_ but + // there's also potential to share version_counter_ directly + // without creating first and then override in as_view. + enum ImplType { VIEW }; + + /** + * Construct a 1-dim 0-size tensor backed by the given storage. + */ + TensorImpl( + Storage&& storage, + DispatchKeySet, + const caffe2::TypeMeta data_type); + + // See Note [Enum ImplType] + TensorImpl( + ImplType, + Storage&& storage, + DispatchKeySet, + const caffe2::TypeMeta data_type); + + /** + * Construct a 1-dim 0 size tensor that doesn't have a storage. + */ + TensorImpl( + DispatchKeySet, + const caffe2::TypeMeta data_type, + c10::optional device_opt); + + // Legacy constructors so I don't have to go update call sites. + // TODO: When Variable is added, delete these constructors + TensorImpl( + Storage&& storage, + DispatchKey dispatch_key, + const caffe2::TypeMeta data_type) + : TensorImpl( + std::move(storage), + DispatchKeySet(dispatch_key), + data_type) {} + TensorImpl( + DispatchKey dispatch_key, + const caffe2::TypeMeta data_type, + c10::optional device_opt) + : TensorImpl(DispatchKeySet(dispatch_key), data_type, device_opt) {} + + private: + // This constructor is private, because the data_type is redundant with + // storage. Still, we pass it in separately because it's easier to write + // the initializer list if we're not worried about storage being moved out + // from under us. + TensorImpl( + Storage&& storage, + DispatchKeySet, + const caffe2::TypeMeta data_type, + c10::optional); + + public: + TensorImpl(const TensorImpl&) = delete; + TensorImpl& operator=(const TensorImpl&) = delete; + TensorImpl(TensorImpl&&) = delete; + TensorImpl& operator=(TensorImpl&&) = delete; + + /** + * Release (decref) storage, and any other external allocations. This + * override is for `intrusive_ptr_target` and is used to implement weak + * tensors. + */ + void release_resources() override; + + private: + void destroy_pyobj_if_needed(); + + public: + /** + * Return the DispatchKeySet corresponding to this Tensor, specifying + * all of the DispatchKeys that this Tensor identifies as. This is the + * information used to dispatch operations on this tensor. + */ + DispatchKeySet key_set() const { + return key_set_; + } + + // NOTE: The general recipe for customizable methods is that the fastpath + // function (e.g., sizes()) does an unlikely policy test, and if doesn't + // trigger, it does the fast path implementation with no checks and going + // directly to on-TensorImpl fields. In particular, you never need to + // check ExtraMeta if the policy doesn't trigger, as non-trivial ExtraMeta + // implies the policy will always match. 
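+ //
+ // For illustration, the intended extension pattern looks roughly like this
+ // (a hypothetical subclass and member, not defined anywhere in this file):
+ //
+ //   struct MyCustomImpl : public TensorImpl {
+ //     // consulted only when sizes_strides_policy_ >= CustomSizes
+ //     IntArrayRef sizes_custom() const override {
+ //       return my_lazy_sizes_;
+ //     }
+ //   };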
+ // + // The default implementations of methods are "safe": they do extra tests + // to make sure the internal state is consistent no matter if you are + // doing symbolic shapes or not. If you don't want the tests, directly + // override the custom method (e.g., custom_sizes()) to do your preferred + // behavior. + + public: + /** + * Return a reference to the sizes of this tensor. This reference remains + * valid as long as the tensor is live and not resized. + */ + IntArrayRef sizes() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sizes_custom(); + } + return sizes_and_strides_.sizes_arrayref(); + } + + SymIntArrayRef sym_sizes() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_sizes_custom(); + } + // Sizes guaranteed to be non-negative, so unchecked cast is OK + return c10::fromIntArrayRefKnownNonNegative( + sizes_and_strides_.sizes_arrayref()); + } + + IntArrayRef sizes_default() const { + // TODO: force backtrace to be printed on this error + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "Cannot call sizes() on tensor with symbolic sizes/strides"); + return sizes_and_strides_.sizes_arrayref(); + } + + SymIntArrayRef sym_sizes_default() const { + if (has_symbolic_sizes_strides_) { + return extra_meta_->sizes_; + } else { + // Sizes guaranteed to be non-negative, so unchecked cast is OK + return c10::fromIntArrayRefKnownNonNegative(sizes_default()); + } + } + + /** + * The number of elements in a tensor. + * + * WARNING: Previously, if you were using the Caffe2 API, you could + * test numel() == -1 to see if a tensor was uninitialized. This + * is no longer true; numel always accurately reports the product + * of sizes of a tensor. + */ + int64_t numel() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return numel_custom(); + } + return numel_; + } + + c10::SymInt sym_numel() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_numel_custom(); + } + return c10::SymInt(SymInt::UNCHECKED, numel_); + } + + int64_t numel_default() const { + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "Cannot call numel() on tensor with symbolic sizes/strides"); + return numel_; + } + + c10::SymInt sym_numel_default() const { + if (has_symbolic_sizes_strides_) { + return extra_meta_->numel_; + } else { + return c10::SymInt(SymInt::UNCHECKED, numel_); + } + } + + /** + * Return the number of dimensions of this tensor. Note that 0-dimension + * represents a Tensor that is a Scalar, e.g., one that has a single element. + */ + int64_t dim() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return dim_custom(); + } + return sizes_and_strides_.size(); + } + + int64_t dim_default() const { + if (has_symbolic_sizes_strides_) { + return extra_meta_->sizes_.size(); + } else { + return sizes_and_strides_.size(); + } + } + + /** + * Return the offset in number of elements into the storage that this + * tensor points to. Most tensors have storage_offset() == 0, but, + * for example, an index into a tensor will have a non-zero storage_offset(). + * + * WARNING: This is NOT computed in bytes. 
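+ *
+ * For example, for a contiguous 2-D tensor `t`, the view `t[1]` aliases the
+ * same storage with storage_offset() == t.stride(0), counted in elements
+ * rather than bytes.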
+ */ + int64_t storage_offset() const { + // TODO: maybe this should be toggled by strides + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return storage_offset_custom(); + } + return storage_offset_; + } + + c10::SymInt sym_storage_offset() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_storage_offset_custom(); + } + return c10::SymInt(SymInt::UNCHECKED, storage_offset_); + } + + int64_t storage_offset_default() const { + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "Cannot call storage_offset() on tensor with symbolic sizes/strides"); + return storage_offset_; + } + + c10::SymInt sym_storage_offset_default() const { + if (has_symbolic_sizes_strides_) { + return extra_meta_->storage_offset_; + } else { + return c10::SymInt(SymInt::UNCHECKED, storage_offset_); + } + } + + /** + * Return a reference to the strides of this tensor. This reference remains + * valid as long as the tensor is live and not restrided. + */ + IntArrayRef strides() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return strides_custom(); + } + return sizes_and_strides_.strides_arrayref(); + } + + c10::SymIntArrayRef sym_strides() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return sym_strides_custom(); + } + return c10::fromIntArrayRefKnownNonNegative(strides_default()); + } + + IntArrayRef strides_default() const { + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "Cannot call strides() on tensor with symbolic sizes/strides"); + return sizes_and_strides_.strides_arrayref(); + } + + c10::SymIntArrayRef sym_strides_default() const { + if (has_symbolic_sizes_strides_) { + return extra_meta_->strides_; + } else { + return c10::fromIntArrayRefKnownNonNegative(strides_default()); + } + } + + /** + * Whether or not a tensor is laid out in contiguous memory. + * + * Tensors with non-trivial strides are not contiguous. See + * compute_contiguous() for the exact definition of whether or not + * a tensor is contiguous or not. 
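+ *
+ * For example, a freshly allocated 2x3 tensor has strides {3, 1} and is
+ * contiguous, while its transpose views the same storage with sizes {3, 2}
+ * and strides {1, 3} and is not.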
+ */ + bool is_contiguous( + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return is_contiguous_custom(memory_format); + } + return is_contiguous_default(memory_format); + } + + // These are factored into separate functions in case subclasses + // want to use them + bool is_contiguous_default(at::MemoryFormat memory_format) const { + if (has_symbolic_sizes_strides_) { + if (memory_format == at::MemoryFormat::ChannelsLast) { + return bool(extra_meta_->is_channels_last_contiguous_); + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return bool(extra_meta_->is_channels_last_3d_contiguous_); + } + return bool(extra_meta_->is_contiguous_); + } + + if (memory_format == at::MemoryFormat::ChannelsLast) { + return is_channels_last_contiguous_; + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return is_channels_last_3d_contiguous_; + } + return is_contiguous_; + } + + bool is_strides_like_default(at::MemoryFormat memory_format) const { + if (has_symbolic_sizes_strides_) { + if (memory_format == at::MemoryFormat::ChannelsLast) { + return bool(extra_meta_->is_channels_last_); + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return bool(extra_meta_->is_channels_last_3d_); + } else { + return false; + } + } + + if (memory_format == at::MemoryFormat::ChannelsLast) { + return is_channels_last_; + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return is_channels_last_3d_; + } else { + return false; + } + } + + bool is_non_overlapping_and_dense_default() const { + if (has_symbolic_sizes_strides_) { + return bool(extra_meta_->is_non_overlapping_and_dense_); + } else { + return is_non_overlapping_and_dense_; + } + } + + // NB: these dim accessor functions don't have _default(), as you can use + // sizes_default/strides_default + /** + * Return the size of a tensor at some dimension, wrapping the dimension if + * necessary. + * + * NOTE: if you know wrapping is unnecessary, do sizes()[d] instead; it will + * be faster + */ + int64_t size(int64_t d) const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return size_custom(d); + } + d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + return sizes_and_strides_.size_at_unchecked(d); + } + + c10::SymInt sym_size(int64_t d) const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_size_custom(d); + } + d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + const auto sizes = this->sym_sizes(); + return sizes[d]; + } + + /** + * Return the stride of a tensor at some dimension, wrapping the dimension + * if necessary. + * + * NOTE: if you know wrapping is unnecessary, do sizes()[d] instead; it will + * be faster + */ + int64_t stride(int64_t d) const { + d = maybe_wrap_dim(d, dim(), false); + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + // TODO: provide stride_custom, symmetrically with size_custom. + // There is presently no user for it; only NestedTensor is using + // size_custom overrideability + return strides_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds) + } + // Intentionally don't call default, which also handles symbolic + return sizes_and_strides_.stride_at_unchecked(d); + } + + enum class SizesStridesPolicy : uint8_t { + // Default behavior, e.g., dense tensor. + // + // Can override: nothing + Default = 0, + // Customizable strides behavior, e.g., sparse tensor, + // mkldnn tensor. 
+ // + // Can override: strides(), is_contiguous() + CustomStrides = 1, + // Customizable sizes behavior, e.g., nested tensor + // + // Can override: strides(), is_contiguous(), sizes(), dim(), numel() + CustomSizes = 2 + }; + + protected: + inline bool matches_policy(SizesStridesPolicy policy) const { + return sizes_strides_policy_ >= static_cast(policy); + } + + inline bool matches_custom(SizesStridesPolicy policy) const { + return custom_sizes_strides_ >= static_cast(policy); + } + + inline bool matches_python_custom(SizesStridesPolicy policy) const { + auto r = python_custom_sizes_strides_ >= static_cast(policy); + if (r) { + TORCH_INTERNAL_ASSERT(is_python_dispatch()) + } + return r; + } + + /** + * Customization points for the functions above. sizes_strides_policy_ + * must be set to enable these. + * + * NB: dim is overrideable separately from sizes because it is possible + * for a tensor to have rank, but not well defined sizes. + */ + // sizes_strides_policy_ >= CustomStrides + virtual bool is_contiguous_custom(at::MemoryFormat memory_format) const; + virtual bool is_strides_like_custom(at::MemoryFormat memory_format) const; + virtual bool is_non_overlapping_and_dense_custom() const; + // sizes_strides_policy_ >= CustomSizes + // Currently this method only exists to be overwritten by subclasses such as + // NestedTensorImpl. + virtual int64_t size_custom(int64_t d) const { + // TODO: We could add support to Python dispatch here. + // TODO: We could call into aten::size.int instead of + // sizes_custom()[d] and enable use of the dispatcher. + d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + return sizes_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds) + } + + virtual c10::SymInt sym_size_custom(int64_t d) const { + // TODO: We could add support to Python dispatch here. + // TODO: We could call into aten::size.int instead of + // sym_sizes_custom()[d] and enable use of the dispatcher. + d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + return sym_sizes_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds) + } + + virtual IntArrayRef sizes_custom() const; + virtual IntArrayRef strides_custom() const; + virtual int64_t numel_custom() const; + virtual int64_t storage_offset_custom() const; + virtual int64_t dim_custom() const; + virtual Device device_custom() const; + virtual Layout layout_custom() const; + + virtual c10::SymIntArrayRef sym_sizes_custom() const; + virtual c10::SymIntArrayRef sym_strides_custom() const; + virtual c10::SymInt sym_numel_custom() const; + virtual c10::SymInt sym_storage_offset_custom() const; + + public: + /** + * True if this tensor has storage. See storage() for details. + */ +#ifdef DEBUG + // Allow subclasses to check that their storage_ is never getting set in debug + // builds. + virtual +#else + TENSORIMPL_MAYBE_VIRTUAL +#endif + bool + has_storage() const + // NOTE: we devirtualize this because it arguably shouldn't be an + // error just to ask subclasses if they have storage. + // This used to throw for most subclasses, but OpaqueTensorImpl + // wanted it to successfully return false, so we went ahead and made + // it a non-error. +#ifdef C10_DISABLE_TENSORIMPL_EXTENSIBILITY + { + return storage_; + } +#else + ; +#endif + + /** + * Return the underlying storage of a Tensor. Multiple tensors may share + * a single storage. A Storage is an impoverished, Tensor-like class + * which supports far less operations than Tensor. + * + * Avoid using this method if possible; try to use only Tensor APIs to perform + * operations. 
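+ *
+ * For example, `x` and a reshaped view of `x` are two TensorImpls whose
+ * storage() is the same Storage object, so a write through either tensor is
+ * observable through the other.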
+ */ + TENSORIMPL_MAYBE_VIRTUAL const Storage& storage() const { + if (C10_UNLIKELY(storage_access_should_throw_)) { + throw_storage_access_error(); + } + return storage_; + } + + /** + * Return the underlying storage, unsafely assuming this is a basic strided + * tensor. In cases where `storage` access would throw, this returns a + * default-constructed Storage. + */ + inline const Storage& unsafe_storage() const { + return storage_; + } + + bool unique_version() const { + return version_counter_.unique(); + } + + protected: + virtual Layout layout_impl() const { + TORCH_CHECK( + false, "layout_impl is only implemented for TensorImpl subclasses."); + } + + public: + // Whether a tensor is sparse COO or not. + bool is_sparse() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + return key_set_.has_all(c10::sparse_ks); + } + + // Whether a tensor is sparse CSR or not. + bool is_sparse_csr() const { + return layout() == kSparseCsr; + } + + bool is_quantized() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + constexpr auto quantized_ks = DispatchKeySet(DispatchKey::Quantized); + return key_set_.has_all(quantized_ks); + } + + bool is_meta() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_meta(); + } + return device_opt_.has_value() && device_opt_->type() == kMeta; + } + + bool is_cpu() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_cpu(); + } + // Note: we cannot rely on dispatch keys to determine the device type + // of a tensor, because "wrapper" tensors (like FunctionalTensorWrapper) + // don't include backend dispatch keys. + return device_opt_.has_value() && device_opt_->type() == kCPU; + } + + bool is_cuda() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_cuda(); + } + return device_opt_.has_value() && device_opt_->type() == kCUDA; + } + + bool is_xpu() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_xpu(); + } + return device_opt_.has_value() && device_opt_->type() == kXPU; + } + + bool is_ipu() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_ipu(); + } + return device_opt_.has_value() && device_opt_->type() == kIPU; + } + + bool is_xla() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_xla(); + } + return device_opt_.has_value() && device_opt_->type() == kXLA; + } + + bool is_hpu() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_hpu(); + } + return device_opt_.has_value() && device_opt_->type() == kHPU; + } + + bool is_lazy() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_lazy(); + } + return device_opt_.has_value() && device_opt_->type() == kLazy; + } + + bool is_hip() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_hip(); + } + return device_opt_.has_value() && device_opt_->type() == kHIP; + } + + bool is_ve() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. 
+ if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_ve(); + } + return device_opt_.has_value() && device_opt_->type() == kVE; + } + + bool is_mkldnn() const { + return key_set_.has_all(c10::mkldnn_ks); + } + + bool is_vulkan() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_vulkan(); + } + return device_opt_.has_value() && device_opt_->type() == kVulkan; + } + + bool is_metal() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_metal(); + } + return device_opt_.has_value() && device_opt_->type() == kMetal; + } + + bool is_mps() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_mps(); + } + return device_opt_.has_value() && device_opt_->type() == kMPS; + } + + bool is_ort() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_ort(); + } + return device_opt_.has_value() && device_opt_->type() == kORT; + } + + bool is_nested() const { + return key_set_.has(DispatchKey::NestedTensor); + } + + // TODO: remove this once we don't automatically enabled Autograd dispatch + // keys + // in TensorImpl constructor. + // DON'T USE THIS API!! It's only created for testing purpose in + // file aten/src/ATen/core/boxing/impl/test_helpers.h + void remove_autograd_key() { + key_set_ = key_set_ - autograd_dispatch_keyset; + } + + // Inference tensor doesn't have autograd or ADInplaceOrView key. + // Invariant: + // Inference tensor has version_counter_.enabled() == false + bool is_inference() { + bool no_ADInplaceOrView = !key_set_.has_any(c10::inplace_or_view_ks); + bool no_Autograd = !key_set_.has_any(c10::autograd_dispatch_keyset); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + no_ADInplaceOrView == no_Autograd, + "ADInplaceOrView and Autograd keys must be on/off at the same time."); + return no_ADInplaceOrView && no_Autograd; + } + + int64_t get_device() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().index(); + } + return device_default().index(); + } + + Device device() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom(); + } + return device_default(); + } + + protected: + c10::Device device_default() const { + TORCH_CHECK(device_opt_.has_value(), "tensor does not have a device"); + // See NOTE [c10::optional operator usage in CUDA] + return *device_opt_; + } + + public: + Layout layout() const { + if (C10_UNLIKELY(layout_policy_)) { + return layout_custom(); + } + + // NB: This method is not virtual and avoid dispatches for perf. + // strided is also the most common layout type, so we check for + // strided case first. + // This keyset must also be kept in sync with the logic in + // is_sparse() / is_sparse_csr() / is_mkldnn() + constexpr auto sparse_and_sparsecsr_and_mkldnn_ks = + c10::sparse_ks | c10::sparse_csr_ks | c10::mkldnn_ks; + if (!key_set_.has_any(sparse_and_sparsecsr_and_mkldnn_ks)) { + return kStrided; + } else if (is_sparse()) { + return kSparse; + } else if (key_set_.has_any(c10::sparse_csr_ks)) { + // Typically, the tensor dispatch keys define the tensor layout + // uniquely. This allows using non-virtual layout method for + // better performance. However, when tensor's layout depends, + // say, on tensor attributes, one must use this execution path + // where the corresponding tensor impl class overwrites virtual + // layout_impl() method. + // + // TODO: implement layout() as native function/method so that + // __torch_dispatch__ users will be able to redefine the + // layout() method. 
+ return layout_impl(); + } else { + TORCH_INTERNAL_ASSERT( + is_mkldnn(), "There is an error in the layout calculation logic."); + return kMkldnn; + } + } + + /** + * True if a tensor was auto-wrapped from a C++ or Python number. + * For example, when you write 't + 2', 2 is auto-wrapped into a Tensor + * with `is_wrapped_number_` set to true. + * + * Wrapped numbers do not participate in the result type computation for + * mixed-type operations if there are any Tensors that are not wrapped + * numbers. This is useful, because we want 't + 2' to work with + * any type of tensor, not just LongTensor (which is what integers + * in Python represent). + * + * Otherwise, they behave like their non-wrapped equivalents. + * See [Result type computation] in TensorIterator.h. + * + * Why did we opt for wrapped numbers, as opposed to just having + * an extra function add(Tensor, Scalar)? This helps greatly reduce + * the amount of code we have to write for add, when actually + * a Tensor-Scalar addition is really just a Tensor-Tensor + * addition when the RHS is 0-dim (except for promotion behavior.) + */ + bool is_wrapped_number() const { + return is_wrapped_number_; + } + + /** + * Set whether or not a tensor was auto-wrapped from a C++ or Python + * number. You probably don't want to call this, unless you are + * writing binding code. + */ + void set_wrapped_number(bool value) { + TORCH_INTERNAL_ASSERT(dim() == 0); + is_wrapped_number_ = value; + } + + /** + * Returns true if Tensor supports as_strided and as_strided_backward. + * This is used in autograd to perform inplace update on view Tensors. + * See Note [View + Inplace update for base tensor] and + * [View + Inplace update for view tensor] for details. + * Note this method only returns true for XLA backend, where it + * simulates strided Tensor to support most view ops, but it cannot + * fully support general `as_strided` case. + * It can be expanded as needed in the future, e.g sparse Tensor. + */ + inline bool support_as_strided() const { + return is_nested() ? false : device().supports_as_strided(); + } + + // ~~~~~ Autograd API ~~~~~ + // Some methods below are defined in TensorImpl.cpp because Tensor is an + // incomplete type. + + /** + * Set whether or not a tensor requires gradient. + */ + void set_requires_grad(bool requires_grad); + + /** + * True if a tensor requires gradient. Tensors which require gradient + * have history tracked for any operations performed on them, so that + * we can automatically differentiate back to them. A tensor that + * requires gradient and has no history is a "leaf" tensor, which we + * accumulate gradients into. + */ + bool requires_grad() const; + + /** + * Return a mutable reference to the gradient. This is conventionally + * used as `t.grad() = x` to set a gradient to a completely new tensor. + */ + at::Tensor& mutable_grad(); + + /** + * Return the accumulated gradient of a tensor. This gradient is written + * into when performing backwards, when this tensor is a leaf tensor. + */ + const at::Tensor& grad() const; + + /** + * Whether or not the imaginary part of the tensor should be negated + */ + inline bool is_conj() const { + constexpr auto conjugate_ks = DispatchKeySet(DispatchKey::Conjugate); + return key_set_.has_all(conjugate_ks); + } + + /** + * Set whether or not to take the conjugate of the tensor (flip the imaginary + * bit). 
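+ *
+ * For example, `z.conj()` on a complex tensor returns a view with this bit
+ * set rather than materializing the conjugated data; the conjugation is
+ * resolved lazily (e.g. via `resolve_conj()`) by kernels that cannot handle
+ * the bit.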
+ */ + void _set_conj(bool value) { + if (value) { + key_set_ = key_set_.add(DispatchKey::Conjugate); + TORCH_INTERNAL_ASSERT(isComplexType(typeMetaToScalarType(dtype()))); + } else { + key_set_ = key_set_.remove(DispatchKey::Conjugate); + } + } + + /** + * XXX: do not use, private api! + * Update the backend component related keys to the backend component + * corresponding to this device. + */ + void _change_backend_component_keys(c10::Device device); + + /** + * Whether or not the tensor is a zerotensor + */ + inline bool _is_zerotensor() const { + constexpr auto zerotensor_ks = DispatchKeySet(DispatchKey::ZeroTensor); + return key_set_.has_all(zerotensor_ks); + } + + /** + Set whether or not the tensor is a zero tensor + */ + void _set_zero(bool value) { + if (value) { + TORCH_INTERNAL_ASSERT( + false, + "Please call `torch._efficientzerotensor` if you want to create a tensor with no storage."); + } else { + key_set_ = key_set_.remove(DispatchKey::ZeroTensor); + } + } + + /** + * Whether or not the tensor should be negated + */ + inline bool is_neg() const { + constexpr auto negative_ks = DispatchKeySet(DispatchKey::Negative); + return key_set_.has_all(negative_ks); + } + + /** + * Set whether or not to take the conjugate of the tensor (flip the imaginary + * bit). + */ + void _set_neg(bool value) { + if (value) { + key_set_ = key_set_.add(DispatchKey::Negative); + } else { + key_set_ = key_set_.remove(DispatchKey::Negative); + } + } + + /** + * Return the accumulated gradient of a tensor. This gradient is computed + * using forward mode AD. + * + * This is an internal API that should never be used by end users. + * + * The API is as follows: + * - "level" allows to specify the level of forward AD nesting for which the + * gradient should be returned. Note that since levels are not fully + * supported yet, this argument should be 0. See documentation for + * torch::autograd::enter_dual_level for more details about forward AD + * nesting. + * - "self" should represent the Tensor whose forward grad is accessed. It + * is required when dealing with view. + */ + const at::Tensor& _fw_grad(uint64_t level, const at::TensorBase& self) const; + + /** + * Sets the forward gradient for this Tensor. + * The given Tensor might not be used directly and its content will be copied. + * + * This is an internal API that should never be used by end users. + * + * The API is as follows: + * - "new_grad" is a Tensor containing the new value of the gradient that + * should be set + * - "self" should represent the Tensor whose forward grad is accessed. It + * is required when dealing with view. + * - "level" allows to specify the level of forward AD nesting for which the + * gradient should be set. Note that since levels are not fully supported + * yet, this argument should be 0. See documentation for + * torch::autograd::enter_dual_level for more details about forward AD + * nesting. + * - "is_inplace_op" is a boolean flag that tells if this gradient was + * generated by an inplace operation or an out of place one. This allows + * better error checking. + */ + void _set_fw_grad( + const at::TensorBase& new_grad, + const at::TensorBase& self, + uint64_t level, + bool is_inplace_op); + + /** + * Return a typed data pointer to the actual data which this tensor refers to. + * This checks that the requested type (from the template parameter) matches + * the internal type of the tensor. + * + * It is invalid to call data() on a dtype-uninitialized tensor, even if + * the size is 0. 
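+ *
+ * For example, calling `t->data<float>()` on a tensor whose dtype is double
+ * trips the TypeMeta check below with a type-mismatch error: the template
+ * parameter must agree with the tensor's dtype().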
+ * + * WARNING: If a tensor is not contiguous, you MUST use strides when + * performing index calculations to determine the location of elements in + * the tensor. We recommend using 'TensorAccessor' to handle this computation + * for you; this class is available from 'Tensor'. + */ + template + inline T* data() const { + TORCH_CHECK( + data_type_.Match(), + "Tensor type mismatch, caller expects elements to be ", + caffe2::TypeMeta::TypeName(), + ", while tensor contains ", + data_type_.name(), + ". "); + return data_ptr_impl(); + } + + /** + * More efficient helper for Tensor::data_ptr(). Like data(), but + * does not do a type check. Unlike the untemplated data(), does + * check has_storage() and storage_initialized(). + */ + template + inline T* data_ptr_impl() const { + TORCH_CHECK( + has_storage(), + "Cannot access data pointer of Tensor that doesn't have storage"); + TORCH_CHECK( + storage_initialized(), + "The tensor has a non-zero number of elements, but its data is not allocated yet. " + "Caffe2 uses a lazy allocation, so you will need to call " + "mutable_data() or raw_mutable_data() to actually allocate memory."); + // Caller does the type check. + return storage_.unsafe_data() + storage_offset_; + } + + /** + * Return a void* data pointer to the actual data which this tensor refers to. + * + * It is invalid to call data() on a dtype-uninitialized tensor, even if the + * size is 0. + * + * WARNING: The data pointed to by this tensor may not contiguous; do NOT + * assume that itemsize() * numel() is sufficient to compute the bytes that + * can be validly read from this tensor. + */ + inline void* data() const { + TORCH_CHECK( + has_storage(), + "Cannot access data pointer of Tensor that doesn't have storage"); + TORCH_CHECK( + dtype_initialized(), + "Cannot access data pointer of Tensor that doesn't have initialized dtype " + "(e.g., caffe2::Tensor x(CPU), prior to calling mutable_data() on x)"); + // Computing an offset into an empty tensor would be UB, since an empty + // tensor's storage will be nullptr, and adding a nonzero offset to nullptr + // is UB. So we skip the offset computation in this case. + if (is_empty()) { + return nullptr; + } + return static_cast( + static_cast(storage_.data()) + + data_type_.itemsize() * storage_offset_); + } + + /** + * Like data(), but performs no checks. You are responsible for ensuring + * that all invariants required by data() are upheld here. + */ + template + inline T* unsafe_data() const { + return storage_.unsafe_data() + storage_offset_; + } + + /** + * Returns the TypeMeta of a tensor, which describes what data type + * it is (e.g., int, float, ...) + */ + const caffe2::TypeMeta dtype() const { + return data_type_; + } + + /** + * Return the size of a single element of this tensor in bytes. + */ + size_t itemsize() const { + TORCH_CHECK( + dtype_initialized(), + "Cannot report itemsize of Tensor that doesn't have initialized dtype " + "(e.g., caffe2::Tensor x(CPU), prior to calling mutable_data() on x)"); + return data_type_.itemsize(); + } + + protected: + /** + * Returns the human-readable name of the actual type of this object (e.g., + * TensorImpl, BatchedTensorImpl, etc.). Used for error messages. + */ + virtual const char* tensorimpl_type_name() const { + return "TensorImpl"; + } + + private: + [[noreturn]] void throw_storage_access_error() const; + + public: + /** + * True if a tensor has no elements (e.g., numel() == 0). 
+ */ + inline bool is_empty() const { + return numel() == 0; + } + + // if we are going to use sym sizes, we should be setting sym strides at the + // same time, otherwise it's very easy to misuse this API + void set_sizes_and_strides( + c10::SymIntArrayRef sizes, + c10::SymIntArrayRef strides, + c10::optional storage_offset = c10::nullopt); + + /** + * Change the size at some dimension. This DOES NOT update strides; + * thus, most changes to size will not preserve contiguity. You probably + * also want to call set_stride() when you call this. + * + * TODO: This should be jettisoned in favor of `set_sizes_and_strides`, + * which is harder to misuse. + */ + virtual void set_size(int64_t dim, int64_t new_size) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_size ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !matches_policy(SizesStridesPolicy::CustomSizes), + "set_size() called on tensor with dynamic shapes or customized size behavior") + sizes_and_strides_.size_at(dim) = new_size; + refresh_numel(); + refresh_contiguous(); + } + + /** + * Change the stride at some dimension. + * + * TODO: This should be jettisoned in favor of `set_sizes_and_strides`, + * which is harder to misuse. + */ + virtual void set_stride(int64_t dim, int64_t new_stride) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_stride ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "set_stride() called on tensor with symbolic shape") + sizes_and_strides_.stride_at_unchecked(dim) = new_stride; + refresh_contiguous(); + } + + /** + * Set the offset into the storage of this tensor. + * + * WARNING: This does NOT check if the tensor is in bounds for the new + * location at the storage; the caller is responsible for checking this + * (and resizing if necessary.) + */ + virtual void set_storage_offset(int64_t storage_offset) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_storage_offset ", + err_msg_tensor_metadata_change_not_allowed); + // TODO: this should probably consult policy + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "set_storage_offset() called on tensor with symbolic shape") + storage_offset_ = storage_offset; + } + + /** + * Like set_sizes_and_strides but assumes contiguous strides. + * + * WARNING: This function does not check if the requested + * sizes/strides are in bounds for the storage that is allocated; + * this is the responsibility of the caller + */ + void set_sizes_contiguous(IntArrayRef new_size) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_sizes_contiguous ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !matches_policy(SizesStridesPolicy::CustomStrides), + "tried to directly modify sizes for customized tensor"); + sizes_and_strides_.set_sizes(new_size); + + refresh_numel(); + empty_tensor_restride( + MemoryFormat::Contiguous); // calls refresh_contiguous() + } + + /** + * Set the sizes and strides of a tensor. 
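+ *
+ * For example, a 2x3 row-major tensor can be described as
+ *
+ *   impl->set_sizes_and_strides({2, 3}, {3, 1});
+ *
+ * while strides {1, 2} would describe the column-major layout of the same
+ * sizes.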
+ * + * WARNING: This function does not check if the requested + * sizes/strides are in bounds for the storage that is allocated; + * this is the responsibility of the caller + */ + void set_sizes_and_strides( + IntArrayRef new_size, + IntArrayRef new_stride, + c10::optional storage_offset = c10::nullopt) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_sizes_and_strides ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "set_sizes_and_strides() called on tensor with symbolic shape") + TORCH_CHECK( + new_size.size() == new_stride.size(), + "dimensionality of sizes (", + new_size.size(), + ") must match dimensionality of strides (", + new_stride.size(), + ")"); + const auto new_dim = new_size.size(); + + sizes_and_strides_.set_sizes(new_size); + + if (new_dim > 0) { + for (size_t dim = new_dim - 1;; dim--) { + if (new_stride[dim] >= 0) { + sizes_and_strides_.stride_at_unchecked(dim) = new_stride[dim]; + } else { + // XXX: This behavior is surprising and may need to be removed to + // support negative strides. Some pytorch functions rely on it: + // for example, torch.cat (run TestTorch.test_cat_empty). + if (dim == new_dim - 1) { + sizes_and_strides_.stride_at_unchecked(dim) = 1; + } else { + // Keep stride monotonically increasing to match NumPy. + sizes_and_strides_.stride_at_unchecked(dim) = + std::max( + sizes_and_strides_.size_at_unchecked(dim + 1), 1) * + sizes_and_strides_.stride_at_unchecked(dim + 1); + } + } + if (dim == 0) + break; + } + } + + refresh_numel(); + refresh_contiguous(); + + if (storage_offset.has_value()) { + storage_offset_ = *storage_offset; + } + } + + /** + * Set whether a tensor allows changes to its metadata (e.g. sizes / strides / + * storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor + * ] for details. + */ + void set_allow_tensor_metadata_change(bool value) { + // TODO: at some point, we should kill this field completely. + allow_tensor_metadata_change_ = true; + } + + /** + * True if a tensor allows changes to its metadata (e.g. sizes / strides / + * storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor + * ] for details. + */ + bool allow_tensor_metadata_change() const { + return allow_tensor_metadata_change_; + } + + /** + * Set the pointer to autograd metadata. + */ + void set_autograd_meta( + std::unique_ptr autograd_meta); + + /** + * Return the pointer to autograd metadata. May return nullptr if the + * tensor does not track gradients. + */ + c10::AutogradMetaInterface* autograd_meta() const; + + /** + * Set the pointer to named tensor metadata. + */ + void set_named_tensor_meta( + std::unique_ptr named_tensor_meta) { + TORCH_WARN_ONCE( + "Named tensors and all their associated APIs are an experimental feature ", + "and subject to change. 
Please do not use them for anything important ", + "until they are released as stable."); +#ifdef DEBUG + if (named_tensor_meta) { + TORCH_INTERNAL_ASSERT(named_tensor_meta->slow_dim() == dim()); + } +#endif + if (named_tensor_meta) { + if (!extra_meta_) { + extra_meta_ = std::make_unique(); + } + extra_meta_->named_tensor_meta_ = std::move(named_tensor_meta); + key_set_ = key_set_.add(DispatchKey::Named); + } else { + if (extra_meta_) { + extra_meta_->named_tensor_meta_ = nullptr; + } + key_set_ = key_set_.remove(DispatchKey::Named); + } + } + + void set_python_dispatch(bool k) { + if (k) { + key_set_ = key_set_.add(c10::python_ks); + } else { + key_set_ = key_set_ - c10::python_ks; + } + } + + bool is_python_dispatch() const { + return key_set_.has_all(c10::python_ks); + } + + /** + * Return the pointer to named tensor metadata. + */ + const c10::NamedTensorMetaInterface* named_tensor_meta() const { + if (!extra_meta_) { + return nullptr; + } + return extra_meta_->named_tensor_meta_.get(); + } + + c10::NamedTensorMetaInterface* named_tensor_meta() { + if (!extra_meta_) { + return nullptr; + } + return extra_meta_->named_tensor_meta_.get(); + } + + bool has_named_tensor_meta() const { + if (!extra_meta_) { + return false; + } + return extra_meta_->named_tensor_meta_ != nullptr; + } + + // NOTE [ TensorImpl Shallow-Copying ] + // + // TensorImpl shallow-copying is used when we want to have two Variables share + // the same tensor metadata (e.g. sizes / strides / storage pointer / + // storage_offset), but each with a different autograd history. Example call + // sites: + // + // 1. `var_detached = var.detach()` uses `shallow_copy_and_detach()` to create + // `var_detached` that shares the same tensor metadata with `var`, but with a + // completely new autograd history. + // 2. `var.set_data(tensor)` uses `shallow_copy_from()` to copy tensor + // metadata from `tensor` into `var`, while keeping `var`'s original + // AutogradMeta. + // + // Functions that shallow-copy a TensorImpl (such as + // `shallow_copy_and_detach()` / `shallow_copy_from()` / + // `copy_tensor_metadata()`) copy the tensor metadata fields (e.g. sizes / + // strides / storage pointer / storage_offset) by value. However, the + // following fields are not copied: + // + // 1. the AutogradMeta pointer, because it is unique for each Variable. + // 2. the version counter, because the destination TensorImpl's version + // counter is either set to the passed-in `version_counter` (in + // `shallow_copy_and_detach()` and `copy_tensor_metadata()`), or it is kept + // intact (in `shallow_copy_from()`). See NOTE [ Version Counter Sharing ] for + // details. + // + // In `shallow_copy_and_detach()` and `copy_tensor_metadata()`, the passed-in + // `allow_tensor_metadata_change` determines whether the TensorImpl + // shallow-copy allows changes to its metadata (e.g. sizes / strides / storage + // / storage_offset). See NOTE [ Metadata Change for a Detached Tensor ] for + // details. + // + // In `shallow_copy_from()`, we don't check the destination TensorImpl's + // `allow_tensor_metadata_change_`, because `shallow_copy_from()` is used for + // implementing functions such as `var.set_data(tensor)`, which changes + // `var`'s tensor metadata and expects its `allow_tensor_metadata_change_` to + // be ignored. + + /** + * One TensorImpl can be copied to another TensorImpl if they have the same + * DispatchKeySet. 
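+ *
+ * (For example, `x.set_data(y)` relies on this check; a dense CPU impl and
+ * a dense CUDA impl pass it under the legacy exceptions described next.)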
The only two special cases (for legacy reason) are: + * CPU is compatible with CUDA and SparseCPU is + * compatible with SparseCUDA. + */ + inline bool has_compatible_shallow_copy_type(DispatchKeySet from) { + auto is_dense = [](DispatchKeySet ts) { + constexpr auto dense_backends = DispatchKeySet( + {BackendComponent::CPUBit, + BackendComponent::CUDABit, + BackendComponent::MPSBit, + BackendComponent::HIPBit, + BackendComponent::XPUBit}); + constexpr auto dense_k = DispatchKeySet(DispatchKey::Dense); + return ts.has_any(dense_k) && ts.has_any(dense_backends); + }; + auto is_sparse = [](DispatchKeySet ts) { + constexpr auto sparse_backends = DispatchKeySet( + {BackendComponent::CPUBit, + BackendComponent::CUDABit, + BackendComponent::HIPBit, + BackendComponent::XPUBit}); + constexpr auto sparse_k = DispatchKeySet(DispatchKey::Sparse); + return ts.has_any(sparse_k) && ts.has_any(sparse_backends); + }; + return (key_set_ == from) || (is_dense(key_set_) && is_dense(from)) || + (is_sparse(key_set_) && is_sparse(from)); + } + + private: + template + c10::intrusive_ptr shallow_copy_and_detach_core( + VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + public: + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + virtual c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const; + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + virtual c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + /** + * Shallow-copies data from another TensorImpl into this TensorImpl. + * + * For why this function doesn't check this TensorImpl's + * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ]. + */ + virtual void shallow_copy_from(const c10::intrusive_ptr& impl) { + copy_tensor_metadata( + /*src_impl=*/impl.get(), + /*dest_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + refresh_numel(); + refresh_contiguous(); + } + + // Inference tensor doesn't have version counter, + // set_version_counter is no-op for them. + void set_version_counter(const c10::VariableVersion& version_counter) { + TORCH_CHECK( + !(is_inference() && version_counter.enabled()), + "Cannot set version_counter for inference tensor"); + version_counter_ = version_counter; + } + + void set_version_counter(c10::VariableVersion&& version_counter) { + TORCH_CHECK( + !(is_inference() && version_counter.enabled()), + "Cannot set version_counter for inference tensor"); + version_counter_ = std::move(version_counter); + } + + const c10::VariableVersion& version_counter() const noexcept { + return version_counter_; + } + + void bump_version() { + version_counter_.bump(); + } + + // Associate the TensorImpl with the specified PyObject, and, if necessary, + // also tag the interpreter. + // + // NB: This lives in a header so that we can inline away the switch on status + // + // NB: THIS FUNCTION CAN RAISE AN EXCEPTION. Make sure to clean up after + // PyObject if necessary! 
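+ //
+ // For illustration: a caller that has just allocated this TensorImpl and
+ // still holds the only reference can pass DEFINITELY_UNINITIALIZED and get
+ // a cheap relaxed store, while a caller that merely suspects the tensor is
+ // untagged passes MAYBE_UNINITIALIZED and pays for the compare-exchange
+ // below.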
+ void init_pyobj( + impl::PyInterpreter* self_interpreter, + PyObject* pyobj, + c10::impl::PyInterpreterStatus status) { + impl::PyInterpreter* expected = nullptr; + switch (status) { + case impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED: + // caller guarantees there is no multithreaded access; if there is + // no data race OK to do a relaxed store + pyobj_interpreter_.store(self_interpreter, std::memory_order_relaxed); + break; + case impl::PyInterpreterStatus::TAGGED_BY_US: + // no tagging is necessary, the tag is already correct + break; + case impl::PyInterpreterStatus::MAYBE_UNINITIALIZED: + // attempt to claim this TensorImpl with the specified interpreter + // tag + if (pyobj_interpreter_.compare_exchange_strong( + expected, self_interpreter, std::memory_order_acq_rel)) { + break; + } + // test if, actually, it was already tagged by us! this situation can't + // be caused by a race, but it could be caused by a situation + // where someone conservatively tagged the tensor as MAYBE_UNINITIALIZED + // (because they didn't pre-check the tag) when actually it was + // owned by the interpreter + if (expected == self_interpreter) { + break; + } + // fallthrough, we lost the race. We are guaranteed not to lose the + // race with ourself, as calls to init_pyobj with the same interpreter + // ID must be sequentialized by the GIL + C10_FALLTHROUGH; + case impl::PyInterpreterStatus::TAGGED_BY_OTHER: + TORCH_CHECK( + false, + "cannot allocate PyObject for Tensor on interpreter ", + self_interpreter, + " that has already been used by another torch deploy interpreter ", + pyobj_interpreter_.load()); + } + + // we are the ONLY thread that can have gotten to this point. It is not + // possible to conflict with another zero interpreter as access is protected + // by GIL + // NB: owns_pyobj tag is initially false + pyobj_ = pyobj; + } + + // Query the PyObject interpreter. This may return null if there is no + // interpreter. This is racy! + impl::PyInterpreter* pyobj_interpreter() { + return pyobj_interpreter_.load(std::memory_order_acquire); + } + + PyObject* _unchecked_untagged_pyobj() const { + return reinterpret_cast( + reinterpret_cast(pyobj_) & ~0x1ULL); + } + + // Test the interpreter tag. If tagged for the current interpreter, return + // a non-nullopt (but possibly null) PyObject. If (possibly) untagged, + // returns a nullopt. If it is definitely invalid, raises an error. + // + // NB: this lives in header so that we can avoid actually creating the + // c10::optional + c10::optional check_pyobj( + impl::PyInterpreter* self_interpreter) const { + // Note [Memory ordering on Python interpreter tag] + impl::PyInterpreter* interpreter = + pyobj_interpreter_.load(std::memory_order_acquire); + if (interpreter == nullptr) { + // NB: This never returns DEFINITELY_UNINITIALIZED because there is + // always the possibility that another thread races to initialize + // after we query here. The only time when we can conclude a tensor + // is definitely uninitialized is when we have just allocated it and + // it cannot have escaped to other threads yet + return c10::nullopt; + } else if (interpreter == self_interpreter) { + // NB: pyobj_ could still be null! 
+ return c10::make_optional(_unchecked_untagged_pyobj()); + } else { + TORCH_CHECK( + false, + "cannot access PyObject for Tensor on interpreter ", + (*self_interpreter)->name(), + " that has already been used by another torch deploy interpreter ", + (*pyobj_interpreter_.load())->name()); + } + } + + // Clear the PyObject field for an interpreter, in situations where we + // statically know the tensor is tagged with our interpreter. + void unchecked_clear_pyobj(impl::PyInterpreter* interpreter) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(interpreter == pyobj_interpreter_.load()); + pyobj_ = nullptr; + } + + private: + // See NOTE [c10::optional operator usage in CUDA] + // We probably don't want to expose this publicly until + // the note is addressed. + c10::optional device_opt() const { + return device_opt_; + } + + impl::PyInterpreter& load_pyobj_interpreter() const; + + public: + /** + * The device type of a Tensor, e.g., DeviceType::CPU or DeviceType::CUDA. + */ + DeviceType device_type() const { + // TODO: A useful internal assert would be to show that device_opt_ is null + // only if you are an undefined tensor + TORCH_CHECK( + device_opt_.has_value(), + "device_type cannot be run on undefined Tensor"); + // See NOTE [c10::optional operator usage in CUDA] + return (*device_opt_).type(); + } + + /** + * @brief Extends the outer-most dimension of this tensor by num elements, + * preserving the existing data. + * + * The underlying data may be reallocated in order to accommodate the new + * elements, in which case this tensors' capacity is grown at a factor of + * growthPct. This ensures that Extend runs on an amortized O(1) time + * complexity. + * + * This op is auto-asynchronous if the underlying device (CUDA) supports it. + */ + void Extend(int64_t num, float growthPct); + + /** + * @brief Reserve space for the underlying tensor. + * + * This must be called after Resize(), since we only specify the first + * dimension This does not copy over the old data to the newly allocated space + */ + void ReserveSpace(int64_t outer_dim); + + /** + * @brief Resizes a tensor. + * + * Resize takes in a vector of ints specifying the dimensions of the tensor. + * You can pass in an empty vector to specify that it is a scalar (i.e. + * containing one single item). + * + * The underlying storage may be deleted after calling Resize: if the new + * shape leads to a different number of items in the tensor, the old memory + * is deleted and new memory will be allocated next time you call + * mutable_data(). However, if the shape is different but the total number of + * items is the same, the underlying storage is kept. + * + * This method respects caffe2_keep_on_shrink. Consult the internal logic + * of this method to see exactly under what circumstances this flag matters. + */ + template + void Resize(Ts... dim_source) { + bool size_changed = SetDims(dim_source...); + if (size_changed) { + HandleResize(); + } + } + + template + void Resize(const std::vector& dim_source) { + Resize(ArrayRef(dim_source)); + } + + /** + * Resizes the tensor without touching underlying storage. + * This requires the total size of the tensor to remains constant. + */ + void Reshape(const std::vector& dims); + + /** + * Release whatever memory the tensor was holding but keep size and type + * information. Subsequent call to mutable_data will trigger new memory + * allocation. + */ + void FreeMemory(); + + /** + * @brief Shares the data with another tensor. 
+ * + * To share data between two tensors, the sizes of the two tensors must be + * equal already. The reason we do not implicitly do a Resize to make the two + * tensors have the same shape is that we want to allow tensors of different + * shapes but the same number of items to still be able to share data. This + * allows one to e.g. have a n-dimensional Tensor and a flattened version + * sharing the same underlying storage. + * + * The source tensor should already have its data allocated. + */ + // To be deprecated + void ShareData(const TensorImpl& src); + + void ShareExternalPointer( + DataPtr&& data_ptr, + const caffe2::TypeMeta data_type, + size_t size_bytes); + + /** + * Returns a mutable raw pointer of the underlying storage. Since we will need + * to know the type of the data for allocation, a TypeMeta object is passed in + * to specify the necessary information. This is conceptually equivalent of + * calling mutable_data() where the TypeMeta parameter meta is derived from + * the type T. This function differs from mutable_data() in the sense that + * the type T can be specified during runtime via the TypeMeta object. + * + * If the existing data does not match the desired type, it will be deleted + * and a new storage will be created. + */ + inline void* raw_mutable_data(const caffe2::TypeMeta meta) { + // For 0-size tensors it's fine to return any pointer (including nullptr) + if (data_type_ == meta && storage_initialized()) { + return static_cast( + static_cast(storage_.data()) + + storage_offset_ * meta.itemsize()); + } else { + bool had_special_dtor = data_type_.placementDelete() != nullptr; + storage_offset_ = 0; + data_type_ = meta; + // NB: device is not changed + + // We can reuse the existing buffer if the current data does not have + // a special destructor and the new data doesn't have a special + // constructor. + if (numel_ == 0 || + (meta.placementNew() == nullptr && !had_special_dtor && + (storage_.nbytes() >= (numel_ * data_type_.itemsize())))) { + TORCH_INTERNAL_ASSERT( + storage_offset_ == 0); // because we just reallocated + return storage_.data(); + } + const Allocator* allocator = storage_.allocator(); + // Storage might have nullptr allocator in rare cases, for example, if + // an external memory segment has been wrapped with Tensor and we don't + // know how to reallocate it. However, in order to preserve legacy C2 + // behavior, we allow reallocating the memory using default allocator. + if (allocator == nullptr) { + allocator = GetAllocator(storage_.device_type()); + } + if (meta.placementNew()) { + // For types that need placement new, we will call it, as well as + // making sure that when the data is freed, it calls the right + // destruction procedure. + auto size = numel_; + auto dtor = data_type_.placementDelete(); + auto data_ptr = allocator->allocate(numel_ * data_type_.itemsize()); + storage_.set_data_ptr_noswap(PlacementDeleteContext::makeDataPtr( + std::move(data_ptr), dtor, size, storage_.device())); + data_type_.placementNew()(storage_.data(), numel_); + } else { + // For fundamental type, new and delete is easier. + storage_.set_data_ptr_noswap( + allocator->allocate(numel_ * data_type_.itemsize())); + } + storage_.set_nbytes(numel_ * data_type_.itemsize()); + TORCH_INTERNAL_ASSERT( + storage_offset_ == 0); // because we just reallocated + device_opt_ = storage_.device(); + return storage_.data(); + } + } + + /** + * Returns a typed pointer of the underlying storage. 
+   *
+   * For fundamental types, we reuse possible existing storage if there
+   * is sufficient capacity.
+   */
+  template <typename T>
+  inline T* mutable_data() {
+    if (storage_initialized() && data_type_.Match<T>()) {
+      return static_cast<T*>(storage_.data()) + storage_offset_;
+    }
+    // Check it here statically - otherwise TypeMeta would throw the runtime
+    // error in an attempt to invoke TypeMeta::ctor()
+    static_assert(
+        std::is_default_constructible<T>::value,
+        "Tensor can't hold non-default-constructible types");
+    return static_cast<T*>(raw_mutable_data(caffe2::TypeMeta::Make<T>()));
+  }
+
+  /**
+   * True if a tensor is storage initialized. A tensor may become
+   * storage UNINITIALIZED after a Resize() or FreeMemory()
+   */
+  bool storage_initialized() const {
+    TORCH_CHECK(
+        has_storage(),
+        "cannot call storage_initialized on tensor that does not have storage");
+    return storage_.data() || numel_ == 0;
+  }
+
+  /**
+   * True if a tensor is dtype initialized. A tensor allocated with
+   * Caffe2-style constructors is dtype uninitialized until the
+   * first time mutable_data<T>() is called.
+   */
+  bool dtype_initialized() const noexcept {
+    return data_type_ != caffe2::TypeMeta();
+  }
+
+  void set_storage_keep_dtype(at::Storage storage) {
+    TORCH_CHECK(
+        allow_tensor_metadata_change(),
+        "set_storage ",
+        err_msg_tensor_metadata_change_not_allowed);
+    storage_ = std::move(storage);
+    device_opt_ = storage_.device();
+  }
+
+  void set_storage_and_dtype(
+      at::Storage storage,
+      const caffe2::TypeMeta data_type) {
+    set_storage_keep_dtype(storage);
+    data_type_ = data_type;
+  }
+
+  /**
+   * Set the strides of the tensor to match memory_format
+   *
+   * WARNING: This function doesn't rearrange data and assumes the tensor is
+   * memory-contiguous
+   */
+  void empty_tensor_restride(MemoryFormat memory_format) {
+    TORCH_CHECK(
+        !has_symbolic_sizes_strides_,
+        "empty_tensor_restride() called on tensor with symbolic shape")
+#ifdef DEBUG
+    TORCH_INTERNAL_ASSERT(
+        compute_numel() == numel_,
+        "If you are seeing this error, that means empty_tensor_restride was "
+        "called before setting correct numel");
+#endif
+    switch (memory_format) {
+      case MemoryFormat::Contiguous: {
+        // dim() is a virtual call, don't repeat it
+        const auto dim_ = dim();
+        sizes_and_strides_.resize(dim_);
+        if (dim_ > 0) {
+          const auto last_idx = dim_ - 1;
+          sizes_and_strides_.stride_at_unchecked(last_idx) = 1;
+          for (auto i = last_idx - 1; i >= 0; --i) {
+            sizes_and_strides_.stride_at_unchecked(i) =
+                sizes_and_strides_.stride_at_unchecked(i + 1) *
+                std::max<int64_t>(
+                    sizes_and_strides_.size_at_unchecked(i + 1), 1);
+          }
+        }
+        break;
+      }
+      case MemoryFormat::ChannelsLast: {
+        TORCH_CHECK(
+            dim() == 4, "required rank 4 tensor to use channels_last format");
+        set_sizes_and_strides(sizes(), get_channels_last_strides_2d(sizes()));
+        break;
+      }
+      case MemoryFormat::ChannelsLast3d: {
+        TORCH_CHECK(
+            dim() == 5,
+            "required rank 5 tensor to use channels_last_3d format");
+        set_sizes_and_strides(sizes(), get_channels_last_strides_3d(sizes()));
+        break;
+      }
+      case MemoryFormat::Preserve:
+        TORCH_CHECK(false, "unsupported memory format ", memory_format);
+        // Cleaning warning messages, no need to break as TORCH_CHECK(false)
+        // terminates flow.
+        // break;
+      case MemoryFormat::NumOptions:
+        TORCH_INTERNAL_ASSERT(false, "invalid memory format ", memory_format);
+    }
+    // recompute contiguous flag, as currently NHWC/NCHW flags are not mutually
+    // exclusive see #24090
+    refresh_contiguous();
+  }
+
+  bool is_strides_like(at::MemoryFormat memory_format) const {
+    if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) {
+      return is_strides_like_custom(memory_format);
+    }
+    return is_strides_like_default(memory_format);
+  }
+
+  bool is_strides_like_channels_last() const {
+    return is_strides_like(at::MemoryFormat::ChannelsLast);
+  }
+
+  bool is_strides_like_channels_last_3d() const {
+    return is_strides_like(at::MemoryFormat::ChannelsLast3d);
+  }
+
+  bool is_non_overlapping_and_dense() const {
+    if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) {
+      return is_non_overlapping_and_dense_custom();
+    }
+    return is_non_overlapping_and_dense_default();
+  }
+
+  bool has_symbolic_sizes_strides() const {
+    return has_symbolic_sizes_strides_;
+  }
+
+ private:
+  void HandleResize();
+
+  // The Caffe2 Resize() method supports being called both as Resize({2,2}) as
+  // well as variadic with Resize(2, 2). These overloads provide all of the
+  // supported calling configurations, while being overloads (and not
+  // templates) so that implicit conversions still work.
+  //
+  // SetDims on ArrayRef is internally implemented as a template, so we can
+  // handle ArrayRefs of different types (there are some uses of
+  // Resize in Caffe2 which pass in int, not int64_t.)
+
+  template <
+      typename T,
+      typename = typename std::enable_if<std::is_integral<T>::value>::type>
+  bool SetDimsTemplate(ArrayRef<T> src) {
+    TORCH_CHECK(
+        !has_symbolic_sizes_strides_,
+        "SetDims() called on tensor with symbolic shape")
+
+    auto old_numel = numel_;
+    sizes_and_strides_.resize(src.size());
+    int64_t new_numel = 1;
+    for (const auto i : c10::irange(src.size())) {
+      new_numel *= src[i];
+      sizes_and_strides_.size_at_unchecked(i) = src[i];
+    }
+    numel_ = new_numel;
+    empty_tensor_restride(MemoryFormat::Contiguous);
+    return numel_ != old_numel;
+  }
+
+  bool SetDims(ArrayRef<int64_t> s) {
+    return SetDimsTemplate(s);
+  }
+
+  bool SetDims(ArrayRef<int> s) {
+    return SetDimsTemplate(s);
+  }
+
+  bool SetDims(ArrayRef<size_t> s) {
+    return SetDimsTemplate(s);
+  }
+
+  bool SetDims() {
+    return SetDims(IntArrayRef{});
+  }
+
+  bool SetDims(const int64_t d0) {
+    return SetDims(IntArrayRef{d0});
+  }
+
+  bool SetDims(const int64_t d0, const int64_t d1) {
+    return SetDims(IntArrayRef{d0, d1});
+  }
+
+  bool SetDims(const int64_t d0, const int64_t d1, const int64_t d2) {
+    return SetDims(IntArrayRef{d0, d1, d2});
+  }
+
+  bool SetDims(
+      const int64_t d0,
+      const int64_t d1,
+      const int64_t d2,
+      const int64_t d3) {
+    return SetDims(IntArrayRef{d0, d1, d2, d3});
+  }
+
+  /**
+   * Compute the number of elements based on the sizes of a tensor.
+   */
+  // NB: This is ONLY called when sizes_and_strides_ is used directly; if
+  // we are virtualizing, then numel calls are virtualized as well, and this
+  // should never get called
+  int64_t compute_numel() const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!has_symbolic_sizes_strides_);
+#if C10_HAS_BUILTIN_OVERFLOW() && !defined(C10_MOBILE)
+    // Use overflow checks if supported by the compiler
+    return safe_compute_numel();
+#else
+    return c10::multiply_integers(sizes_and_strides_.sizes_arrayref());
+#endif
+  }
+
+  /**
+   * Compute the number of elements based on the sizes of a tensor.
+   * Catches integer overflow that may occur when a tensor
+   * using a sparse layout has multiple dimensions with large sizes.
+   */
+  int64_t safe_compute_numel() const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!has_symbolic_sizes_strides_);
+    uint64_t n = 1;
+    bool overflows =
+        c10::safe_multiplies_u64(sizes_and_strides_.sizes_arrayref(), &n);
+    constexpr auto numel_max = std::min(
+        static_cast<uint64_t>(std::numeric_limits<int64_t>::max()),
+        static_cast<uint64_t>(std::numeric_limits<size_t>::max()));
+
+    overflows |= (n > numel_max);
+    TORCH_CHECK(!overflows, "numel: integer multiplication overflow");
+    return static_cast<int64_t>(n);
+  }
+
+  SymInt compute_sym_numel() const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(has_symbolic_sizes_strides_);
+    SymInt numel = 1;
+    for (const auto& s : extra_meta_->sizes_) {
+      numel *= s;
+    }
+    return numel;
+  }
+
+  /**
+   * Compute whether or not a tensor is contiguous based on the sizes and
+   * strides of a tensor.
+   */
+  bool_is_contiguous compute_contiguous() const;
+
+  bool_is_channels_last_contiguous compute_channels_last_contiguous_2d() const;
+
+  bool_is_channels_last_3d_contiguous compute_channels_last_contiguous_3d()
+      const;
+
+  bool_is_channels_last compute_strides_like_channels_last_2d() const;
+
+  bool_is_channels_last_3d compute_strides_like_channels_last_3d() const;
+
+  bool_is_non_overlapping_and_dense compute_non_overlapping_and_dense() const;
+
+ protected:
+  /**
+   * Recompute the cached numel of a tensor. Call this if you modify
+   * sizes.
+   *
+   * For tensors with sparse layouts, use safe_refresh_numel() instead
+   * because it will catch integer overflow that may occur for tensors
+   * with sparse layouts and large dimensions.
+   *
+   * NB: We may uselessly recompute cached numel even in situations where
+   * it is completely never used (e.g., if CustomSizes for Python). However,
+   * we still must keep it up to date in case the Python overload
+   * returns None (in which case we will consult the field here). This also
+   * implies that sizes/strides will never be complete garbage; in the
+   * very worst case scenario, it will reflect a 1-dim zero size tensor.
+   */
+  void refresh_numel() {
+    if (has_symbolic_sizes_strides_) {
+      extra_meta_->numel_ = compute_sym_numel();
+    } else {
+      numel_ = compute_numel();
+    }
+  }
+
+  /**
+   * Recompute the cached numel of a tensor. Call this if you modify
+   * sizes. Use only for tensors with sparse layouts, because only
+   * sparse tensors are likely to have sizes that may lead to integer
+   * overflow when computing numel.
+   */
+  void safe_refresh_numel() {
+    if (has_symbolic_sizes_strides_) {
+      // NB: sym numel is done with symbolic integers, which handle overflow
+      // checking
+      extra_meta_->numel_ = compute_sym_numel();
+    } else {
+      numel_ = safe_compute_numel();
+    }
+  }
+
+  /**
+   * Recompute the cached contiguity of a tensor. Call this if you modify sizes
+   * or strides.
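+   *
+   * Illustrative pattern (editor's sketch, not from the original header):
+   * a subclass that edits sizes_and_strides_ directly, e.g. for a
+   * hypothetical in-place shape change, should refresh the caches after:
+   *
+   *   sizes_and_strides_.set_sizes({4, 3}); // hypothetical in-place edit
+   *   refresh_numel();
+   *   refresh_contiguous(); // keeps is_contiguous() and friends accurate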
+ */ + void refresh_contiguous() { + auto set_fields = + [&](bool_is_contiguous is_contiguous, + bool_is_channels_last_contiguous is_channels_last_contiguous, + bool_is_channels_last_3d_contiguous is_channels_last_3d_contiguous, + bool_is_channels_last is_channels_last, + bool_is_channels_last_3d is_channels_last_3d, + bool_is_non_overlapping_and_dense is_non_overlapping_and_dense) { + if (has_symbolic_sizes_strides_) { + extra_meta_->is_contiguous_ = is_contiguous; + extra_meta_->is_channels_last_contiguous_ = + is_channels_last_contiguous; + extra_meta_->is_channels_last_3d_contiguous_ = + is_channels_last_3d_contiguous; + extra_meta_->is_channels_last_ = is_channels_last; + extra_meta_->is_channels_last_3d_ = is_channels_last_3d; + extra_meta_->is_non_overlapping_and_dense_ = + is_non_overlapping_and_dense; + } else { + is_contiguous_ = bool(is_contiguous); + is_channels_last_contiguous_ = bool(is_channels_last_contiguous); + is_channels_last_3d_contiguous_ = + bool(is_channels_last_3d_contiguous); + is_channels_last_ = bool(is_channels_last); + is_channels_last_3d_ = bool(is_channels_last_3d); + is_non_overlapping_and_dense_ = bool(is_non_overlapping_and_dense); + } + }; + + auto is_contiguous = compute_contiguous(); + // Note: + // Dim 0, 1, 2 will never be a channels last 2d/3d format + // Dim 3+ is possibly be a channels last 2d format (Dim 4 only at this + // point) Dim 4+ is possibly be a channels last 3d format (Dim 5 only at + // this point) + switch (dim()) { + case 4: { + auto is_channels_last_contiguous = + compute_channels_last_contiguous_2d(); + set_fields( + is_contiguous, + is_channels_last_contiguous, + bool_is_channels_last_3d_contiguous(false), + compute_strides_like_channels_last_2d(), + bool_is_channels_last_3d(false), + bool_is_non_overlapping_and_dense( + is_contiguous || is_channels_last_contiguous || + compute_non_overlapping_and_dense())); + break; + } + case 5: { + auto is_channels_last_contiguous = + compute_channels_last_contiguous_2d(); + auto is_channels_last_3d_contiguous = + bool_is_channels_last_3d_contiguous( + !is_channels_last_contiguous && + compute_channels_last_contiguous_3d()); + auto is_channels_last = bool_is_channels_last( + !is_channels_last_3d_contiguous && + compute_strides_like_channels_last_2d()); + auto is_channels_last_3d = bool_is_channels_last_3d( + !is_channels_last && compute_strides_like_channels_last_3d()); + auto is_non_overlapping_and_dense = bool_is_non_overlapping_and_dense( + is_contiguous || is_channels_last_contiguous || + is_channels_last_3d_contiguous || + compute_non_overlapping_and_dense()); + set_fields( + is_contiguous, + is_channels_last_contiguous, + is_channels_last_3d_contiguous, + is_channels_last, + is_channels_last_3d, + is_non_overlapping_and_dense); + break; + } + default: + // is_channels_last_ and is_channels_last_3d_ are suggested + // memory_format. Being channels_last_contiguous doesn't necessarily + // mean the tensor is strided like channels_last: for strides on channel + // dimension could suggest desired memory_layout, but it doesn't affect + // memory storage + set_fields( + is_contiguous, + bool_is_channels_last_contiguous(false), + bool_is_channels_last_3d_contiguous(false), + bool_is_channels_last(false), + bool_is_channels_last_3d(false), + bool_is_non_overlapping_and_dense( + is_contiguous || compute_non_overlapping_and_dense())); + } + } + + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / + * storage_offset) from one TensorImpl to another TensorImpl. 
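+   *
+   * Illustrative call (editor's sketch, not from the original header;
+   * `src` and `dest` are hypothetical TensorImpl pointers):
+   *
+   *   copy_tensor_metadata(
+   *       src,
+   *       dest,
+   *       src->version_counter(),
+   *       /*allow_tensor_metadata_change=*/false);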
+ * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE + * [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const TensorImpl* src_impl, + TensorImpl* dest_impl, + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change); + + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / + * storage_offset) from one TensorImpl to another TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE + * [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const TensorImpl* src_impl, + TensorImpl* dest_impl, + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change); + + private: + static void copy_tensor_metadata_except_version_counter( + const TensorImpl* src_impl, + TensorImpl* dest_impl, + bool allow_tensor_metadata_change); + + protected: + // Error message to show when the user tries to change tensor metadata on + // Tensor created from .data or .detach(). + // + // See NOTE [ Metadata Change for a Detached Tensor ] for details. + static const char* const err_msg_tensor_metadata_change_not_allowed; + + static void copy_generic_tensor_metadata( + const TensorImpl* src_impl, + TensorImpl* dest_impl); + + public: + void set_storage_access_should_throw() { + storage_access_should_throw_ = true; + } + + bool owns_pyobj() { + return reinterpret_cast(pyobj_) & 1; + } + + void set_owns_pyobj(bool b) { + pyobj_ = reinterpret_cast( + reinterpret_cast(_unchecked_untagged_pyobj()) | b); + } + + public: + void set_custom_sizes_strides(SizesStridesPolicy policy) { + custom_sizes_strides_ = static_cast(policy); + refresh_sizes_strides_policy(); + } + + void set_python_custom_sizes_strides(SizesStridesPolicy policy) { + python_custom_sizes_strides_ = static_cast(policy); + refresh_sizes_strides_policy(); + } + + void set_custom_device(bool custom_device) { + custom_device_ = custom_device; + refresh_device_policy(); + } + + void set_custom_layout(bool custom_layout) { + custom_layout_ = custom_layout; + refresh_layout_policy(); + } + + void set_python_custom_device(bool custom_device) { + python_custom_device_ = custom_device; + refresh_device_policy(); + } + + void set_python_custom_layout(bool custom_layout) { + python_custom_layout_ = custom_layout; + refresh_layout_policy(); + } + + protected: + void refresh_sizes_strides_policy() { + if (has_symbolic_sizes_strides_) { + sizes_strides_policy_ = + static_cast(SizesStridesPolicy::CustomSizes); + } else { + sizes_strides_policy_ = + std::max(custom_sizes_strides_, python_custom_sizes_strides_); + } + } + + void refresh_device_policy() { + device_policy_ = custom_device_ || python_custom_device_; + } + + void refresh_layout_policy() { + layout_policy_ = custom_layout_ || python_custom_layout_; + } + + protected: + Storage storage_; + + private: + // This pointer points to an AutogradMeta struct that stores autograd-specific + // fields (such as grad_ / grad_fn_ / grad_accumulator_). This pointer always + // has unique ownership (meaning only one TensorImpl can own it at a time). + // + // autograd_meta_ can be nullptr, as an optimization. When this occurs, it is + // equivalent to having an autograd_meta_ pointing to a default constructed + // AutogradMeta; intuitively, tensors which don't require grad will have this + // field set to null. 
+ // + // This means accessors on autograd_meta_ have to be careful to test if they + // got a nullptr, and handle default behavior appropriately in that case. + // + // Note that we don't enforce the invariant that if the AutogradMeta is + // default constructed, it is nullptr (to do this, we'd have to continuously + // check if an AutogradMeta became, by mutation, equal to the default + // constructed form. (This might be useful, but it seems rare enough that + // a requires_grad=True variable will turn back into the requires_grad=False + // version.) So there are three representable states: + // + // 1. autograd_meta_ == nullptr + // 2. autograd_meta_ is default constructed (semantically, same as (1)) + // 3. autograd_meta_ has nontrivial information content + // + std::unique_ptr autograd_meta_ = nullptr; + + protected: + std::unique_ptr extra_meta_ = nullptr; + + c10::VariableVersion version_counter_; + + // This field contains the interpreter tag for this object. See + // Note [Python interpreter tag] for general context + // + // Note [Memory ordering on Python interpreter tag] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // What memory_order do we need when accessing this atomic? We don't + // need a single total modification order (as provided by + // memory_order_seq_cst) as pyobj_interpreter_ is monotonic: it can only + // transition from -1 to some positive integer and never changes afterwards. + // Because there is only one modification, it trivially already has a total + // modification order (e.g., we don't need fences or locked instructions on + // x86) + // + // In fact, one could make a reasonable argument that relaxed reads are OK, + // due to the presence of external locking (GIL) to ensure that interactions + // with other data structures are still correctly synchronized, so that + // we fall in the "Single-Location Data Structures" case as described in + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf + // However, on x86, it doesn't matter if I use acquire or relaxed on the load + // as I get the same assembly in both cases. So I just use the more + // conservative acquire (which will impede compiler optimizations but I don't + // care) + std::atomic pyobj_interpreter_; + + // This field contains a reference to a PyObject representing this Tensor. + // If pyobj is nullptr, when we transfer Tensor to Python, we allocate a new + // PyObject for it and set this field. This field does not have to be + // protected by an atomic as it is only allowed to be accessed when you hold + // the GIL, or during destruction of the tensor. + // + // When a PyObject dies, you are obligated to clear this field + // (otherwise, you will try to use-after-free the pyobj); this currently + // occurs in THPVariable_clear in torch/csrc/autograd/python_variable.cpp + // + // NB: Ordinarily, this should not be a strong reference, as if the + // PyObject owns the Tensor, this would create a reference cycle. + // However, sometimes this ownership flips. To track who owns + // who, this has a single pointer tag indicating whether or not the + // C++ object owns the PyObject (the common case, zero, means PyObject + // owns the C++ object); see _unchecked_untagged_pyobj for raw access + // or check_pyobj for checked access. See references to PyObject + // resurrection in torch/csrc/autograd/python_variable.cpp + PyObject* pyobj_; + + c10::impl::SizesAndStrides sizes_and_strides_; + + int64_t storage_offset_ = 0; + // If sizes and strides are empty, the numel is 1!! 
However, most of the + // time, we will immediately set sizes to {0} and reset numel to 0. + // (Can't do that in the default initializers, because there's no way to + // spell "allocate a one-element array" for strides_). + int64_t numel_ = 1; + + // INVARIANT: When storage is non-null, this type meta must + // agree with the type meta in storage + caffe2::TypeMeta data_type_; + + // NOTE [c10::optional operator usage in CUDA] + // Our optional definition doesn't compile in .cu file if `value()` or + // `operator->` are used. Instead, we always use `operator*`. + // See https://github.com/pytorch/pytorch/issues/18496 for more info. + // If this is too burdensome to maintain, we can just + // manually implement this with an additional bool. + + // INVARIANT: When storage is non-null, this Device must + // agree with the type meta in storage. + // + // INVARIANT: device_opt_ is only nullopt for undefined tensors + // (which do not have a device.) + c10::optional device_opt_; + + // default member initializers for bit-fields only available with -std=c++2a + // or -std=gnu++2a + inline void init_bitfields() { + is_contiguous_ = true; + is_channels_last_ = false; + is_channels_last_contiguous_ = false; + is_channels_last_3d_ = false; + is_channels_last_3d_contiguous_ = false; + is_non_overlapping_and_dense_ = true; + is_wrapped_number_ = false; + allow_tensor_metadata_change_ = true; + reserved_ = false; + sizes_strides_policy_ = static_cast(SizesStridesPolicy::Default); + custom_sizes_strides_ = static_cast(SizesStridesPolicy::Default); + python_custom_sizes_strides_ = + static_cast(SizesStridesPolicy::Default); + python_custom_device_ = false; + python_custom_layout_ = false; + custom_device_ = false; + custom_layout_ = false; + device_policy_ = false; + layout_policy_ = false; + storage_access_should_throw_ = false; + has_symbolic_sizes_strides_ = false; + } + + // Tensor is contiguous + bool is_contiguous_ : 1; + + // Tensor is a subclass that does not permit storage access. + bool storage_access_should_throw_ : 1; + + // Tensor is stored in the channels last 2d memory format, when dimensions + // order is (N)CHW and C-strides < W-strides < H-strides (< N-strides) + // (If size of any dimension is equal to 1, this dimension strides value + // is not taken into account). + bool is_channels_last_ : 1; + + // Channels last contiguous tensor is channel last tensor which occupies + // contiguous memory block. + bool is_channels_last_contiguous_ : 1; + + // Tensor is stored in the channels last 3d memory format, when dimensions + // order is (N)CDHW and C-strides < W-strides < H-strides < D - strides (< + // N-strides) (If size of any dimension is equal to 1, this dimension strides + // value is not taken into account). + bool is_channels_last_3d_ : 1; + + // Channels last 3d contiguous tensor is channel last 3d tensor which occupies + // contiguous memory block. + bool is_channels_last_3d_contiguous_ : 1; + + // Dense tensor is the tensor that store values in a contiguous block of + // memory. Non-overlapping tensor is the tensor in which elements occupy + // individual non-repetitive memory. + bool is_non_overlapping_and_dense_ : 1; + + bool is_wrapped_number_ : 1; + + // NOTE [ Metadata Change for a Detached Tensor ] + // + // Normally, a user is allowed to change the tensor metadata + // (e.g. sizes / strides / storage / storage_offset) of a tensor. 
+ // However, if the tensor is created by `t1_detached = t1.data` in Python + // or `t1_detached = t1.detach()` in Python/C++, those changes to the + // tensor metadata of `t1_detached` will not be propagated back to the + // original tensor `t1`. In order to make such changes explicitly illegal, + // we created the `allow_tensor_metadata_change_` flag, to prevent users + // from changing metadata of the detached tensor and expecting the original + // tensor to also be updated. + // + // NOTE: For a full list of tensor metadata fields, please see + // `copy_tensor_metadata()` in TensorImpl and its subclasses to find + // which fields are copied by value. + bool allow_tensor_metadata_change_ : 1; + + // we decide to keep reserved_ and it will + // live in Tensor after the split + // The logic is that if Extend() or ReserveSpace() were ever called, + // then subsequent Resize()s will not free up Storage. + bool reserved_ : 1; + + // Call _custom() virtual methods for + // strides()/is_contiguous()/sizes()/dim()/numel() + // This is a combination of sizes_strides_custom_dispatch_ + // and has_symbolic_sizes_strides_ + uint8_t sizes_strides_policy_ : 2; + + // Whether or not sizes_and_strides_ contains a symbolic value. + bool has_symbolic_sizes_strides_ : 1; + + // Call _custom() virtual method for + // strides()/is_contiguous()/sizes()/dim()/numel() + uint8_t custom_sizes_strides_ : 2; + + // Combo of custom_ and python_custom_ + bool device_policy_ : 1; + bool layout_policy_ : 1; + + // Call _custom() virtual method for device() + bool custom_device_ : 1; + + // Call _custom() virtual method for layout() + bool custom_layout_ : 1; + + // Call into Python for + // strides()/is_contiguous()/sizes()/dim()/numel() + uint8_t python_custom_sizes_strides_ : 2; + + // Call into Python for device() + bool python_custom_device_ : 1; + + // Call into Python for layout() + bool python_custom_layout_ : 1; + + // The set of DispatchKeys which describe this tensor. NB: this + // does NOT include Autograd (historically, it did, but + // not anymore!) + // + // INVARIANT: extra_meta_->named_tensor_meta_ != nullptr <==> + // key_set_.has(DispatchKey::Named) + DispatchKeySet key_set_; + + private: + // C10_TensorImpl_Size_Check_Dummy_Class needs to be friends with + // TensorImpl so it can inspect the size of private fields + template < + size_t cplusplus, + size_t clang_ver_major, + size_t gcc_ver, + size_t gcc_ver_minor, + size_t nvcc, + size_t cuda_version, + size_t cuda_version_major, + size_t ptr_size> + friend class C10_TensorImpl_Size_Check_Dummy_Class; +}; + +// Note [TensorImpl size constraints] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Changed the size of TensorImpl? If the size went down, good for +// you! Adjust the documentation below and the expected size. +// Did it go up? Read on... +// +// Struct size matters. In some production systems at Facebook, we have +// 400M live tensors during a training run. Do the math: every 64-bit +// word you add to Tensor is an extra 3.2 gigabytes in RAM. +// +// If you are a Facebook employee, you can check if the run in question +// has tipped you over the point using the command here: +// https://fburl.com/q5enpv98 +// +// For reference, we OOMed at 160 bytes (20 words) per TensorImpl. +// This is not counting overhead from strides out-of-line allocation and +// StorageImpl space and this is from before we inlined sizes and strides +// directly into TensorImpl as SmallVectors. 
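+//
+// (Editor's illustration of the arithmetic above: at 400M live tensors,
+// each extra 8-byte word costs 400e6 * 8 bytes = 3.2 GB of RAM, so even a
+// two-word regression adds roughly 6.4 GB to a single training run.)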
+// +// Our memory usage on 32-bit systems is suboptimal, but we're not checking +// for it at the moment (to help avoid rage inducing cycles when the +// 32-bit number is wrong). +// +// Current breakdown: +// +// vtable pointer +// strong refcount TODO: pack these into one word +// weak refcount +// storage pointer +// autograd metadata pointer +// named tensor metadata pointer +// version counter pointer +// Python interpreter pointer +// PyObject pointer +// SizesAndStrides size/pointer +// SizesAndStrides sizes (pre-allocated 0) +// SizesAndStrides sizes (pre-allocated 1) +// SizesAndStrides sizes (pre-allocated 2) +// SizesAndStrides sizes (pre-allocated 3) +// SizesAndStrides sizes (pre-allocated 4) +// SizesAndStrides strides (pre-allocated 0) +// SizesAndStrides strides (pre-allocated 1) +// SizesAndStrides strides (pre-allocated 2) +// SizesAndStrides strides (pre-allocated 3) +// SizesAndStrides strides (pre-allocated 4) +// storage offset +// numel +// data type, device, is_contiguous, storage_access_should_throw_, bitfields +// DispatchKeySet +// + +// Various preprocessor macros we use to check that the +// TensorImpl size hasn't changed unexpectedly. We undef +// these later. +#ifndef __NVCC__ +#define C10_NVCC 0 +#else +#define C10_NVCC __NVCC__ +#endif + +#ifndef __CUDA_VER_MAJOR__ +#define C10_CUDA_VERSION_MAJOR 0 +#else +#define C10_CUDA_VERSION_MAJOR __CUDA_VER_MAJOR__ +#endif + +#ifndef CUDA_VERSION +#define C10_CUDA_VERSION 0 +#else +#define C10_CUDA_VERSION CUDA_VERSION +#endif + +#ifndef __clang_major__ +#define C10_CLANG_MAJOR_VERSION 0 +#else +#define C10_CLANG_MAJOR_VERSION __clang_major__ +#endif + +#ifndef __GNUC__ +#define C10_GCC_VERSION 0 +#else +#define C10_GCC_VERSION __GNUC__ +#endif + +#ifndef __GNUC_MINOR__ +#define C10_GCC_VERSION_MINOR 0 +#else +#define C10_GCC_VERSION_MINOR __GNUC_MINOR__ +#endif + +// We use a templatized class to both contain the logic of checking the sizes +// as well as to provide compile-time information that might be useful in +// figuring out why sizes may have changed. +// All the compile time information is given by the template fields that are +// always printed by the compiler when the static_assert fails. +template < + size_t cplusplus = __cplusplus, + size_t clang_ver_major = C10_CLANG_MAJOR_VERSION, + size_t gcc_ver = C10_GCC_VERSION, + size_t gcc_ver_minor = C10_GCC_VERSION_MINOR, + size_t nvcc = C10_NVCC, + size_t cuda_version = C10_CUDA_VERSION, + size_t cuda_version_major = C10_CUDA_VERSION_MAJOR, + size_t ptr_size = sizeof(void*)> +class C10_TensorImpl_Size_Check_Dummy_Class : private TensorImpl { + // Names of (non-bitfield) fields in TensorImpl; used to provide + // compile-time info about fields whose size changes unexpectedly. 
+ enum class FieldNameEnum { + storage_, + autograd_meta_, + extra_meta_, + version_counter_, + pyobj_interpreter_, + pyobj_, + sizes_and_strides_, + storage_offset_, + numel_, + data_type_, + device_opt_, + key_set_, + TOTAL_SIZE + }; + + // Provides compile-time equality check that reveals what numbers + // were used and on which quantity + template + constexpr static bool are_equal() { + static_assert( + Actual == Expected, + "Actual and Expected sizes of a field did not match!"); + return true; + } + + // Provides compile-time <= check that reveals what numbers + // were used and on which quantity + template + constexpr static bool is_le() { + static_assert( + Actual <= Expected, + "Actual and Expected sizes of a field did not match!"); + return true; + } + + public: + // Compile-time check that TensorImpl field sizes are as expected + // + // Observed total sizes and associated versions + // If you find a flag that predicts when unique_ptr has 16 bytes + // on 64-bit systems or when sizes_and_strides_ is 84 vs 88 bytes + // on 32-bit systems you get a cookie! + // Length | LLVM | GCC | C++ | CUDA + // 192 | ? | 11.2 | 201703 | 11040 + // 208 | ? | 11.2 | 201703 | 11040 + // 208 | ? | 11.2 | 201402 | 11040 + // 192 | ? | 11.2 | 201402 | 11040 + // 160 | 12 | 4.2 | 201703 | 0 + // + // To keep things clean, we split on systems here. + +#if UINTPTR_MAX == 0xFFFFFFFF + // This is a 32-bit system + static constexpr bool check_sizes() { + constexpr size_t tsize = 20 * sizeof(int64_t); + + // clang-format off + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + is_le(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + is_le(); + // clang-format on + + return true; + } +#else + // This is a 64-bit system + static constexpr bool check_sizes() { + constexpr size_t tsize = 26 * sizeof(int64_t); + + // clang-format off + are_equal(); + // On some systems involving NVCC the size of unique_ptr is 16 bytes. We haven't + // figured out how to detect those via macro preprocessors yet, so we use <= + // comparisons for the relevant fields. + is_le(); + is_le(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + is_le(); + // clang-format on + + return true; + } +#endif +}; + +// We use a class to encapsulate size-checking logic with +// templates to capture sizes and flags. We call this within +// a static assert to prove there is no run-time behaviour. +// Since the methods we call return either true or fail their +// own static_asserts, we should never see the error messages +// below. We have to provide it though for c++ <17. 
+static_assert( + C10_TensorImpl_Size_Check_Dummy_Class<>::check_sizes(), + "You should not see this message."); + +// Clean up after ourselves +#undef C10_NVCC +#undef C10_CUDA_VERSION_MAJOR +#undef C10_CUDA_VERSION +#undef C10_CLANG_MAJOR_VERSION +#undef C10_GCC_VERSION +#undef C10_GCC_VERSION_MINOR + +} // namespace c10 + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/voice_bridge/torch/include/c10/core/TensorOptions.h b/voice_bridge/torch/include/c10/core/TensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..432fe4f1e4b6c28654ec57d3a5967eed44be1c5e --- /dev/null +++ b/voice_bridge/torch/include/c10/core/TensorOptions.h @@ -0,0 +1,775 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +namespace c10 { + +DispatchKey computeDispatchKey( + c10::optional dtype, + c10::optional layout, + c10::optional device); + +inline ScalarType dtype_or_default(c10::optional dtype) { + return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); }); +} + +inline caffe2::TypeMeta dtype_or_default( + c10::optional dtype) { + return value_or_else(dtype, [] { return get_default_dtype(); }); +} + +inline Layout layout_or_default(c10::optional layout) { + return layout.value_or(kStrided); +} + +inline Device device_or_default(c10::optional device) { + return value_or_else(device, [] { return Device(kCPU); }); +} + +inline bool pinned_memory_or_default(c10::optional pinned_memory) { + return pinned_memory.value_or(false); +} + +/// A class to encapsulate construction axes of an Tensor. TensorOptions was +/// designed to support the Python style API for specifying construction options +/// on factory functions, e.g., +/// +/// torch.zeros(2, 3, dtype=torch.int32) +/// +/// Because C++ doesn't natively support keyword arguments, there must be +/// another way of specifying keyword-like arguments. TensorOptions is a +/// builder class which can be used to construct this "dictionary" of keyword +/// arguments: functions which support TensorOptions conventionally take this +/// argument optionally as their last argument. +/// +/// WARNING: In PyTorch, there are `torch::` variants of factory functions, +/// e.g., torch::zeros for at::zeros. These return Variables (while the +/// stock ATen functions return plain Tensors). If you mix these functions +/// up, you WILL BE SAD. +/// +/// Rather than use the constructor of this class directly, you should prefer to +/// use the constructor functions, and then chain setter methods on top of them. +/// +/// at::device(at::kCUDA).dtype(kInt) +/// at::dtype(at::kInt) +/// +/// Additionally, anywhere a TensorOptions is expected, you can directly +/// pass at::kCUDA / at::kInt, and it will implicitly convert to a +/// TensorOptions. +/// +/// Here are some recommended ways to create a 2x2 tensor of zeros +/// with certain properties. These all *implicitly* make use of +/// TensorOptions, even if they don't mention the class explicitly: +/// +/// at::zeros({2,2}, at::kCUDA); +/// at::zeros({2,2}, at::kLong); +/// at::zeros({2,2}, at::device(at::kCUDA).dtype(at::kLong())); +/// at::zeros({2,2}, at::device({at::kCUDA, 1})); // place on device 1 +/// at::zeros({2,2}, at::requires_grad()); +/// + +/// NOTE [ TensorOptions Constructors ] +/// +/// TensorOptions is like a dictionary with entries from the set: +/// {requires_grad, device, dtype, layout}, where each entry may be +/// unspecified (i.e., is optional). 
It is used to specify the properties of
+/// tensors in many places, both in the C++ internals and the API, e.g., in
+/// tensor factory methods like `at::empty({10}, options)`, tensor
+/// conversions like `tensor.to(...)`, etc.
+///
+/// To provide a simple API that is consistent with Python, where one can do
+/// `torch.empty(sizes, X)` with `X` being a `torch.device`, `torch.dtype`, or a
+/// `torch.layout`, we want TensorOptions to be implicitly convertible from
+/// `ScalarType dtype`, `Layout layout` and `Device device`. Therefore, we have
+/// three implicit constructors from each of these three types.
+///
+/// This is sufficient for `ScalarType` and `Layout` as they are simple Enum
+/// classes. However, `Device` is an ordinary class with implicit constructors
+/// `Device(DeviceType, DeviceIndex = -1)` and `Device(std::string)` to be
+/// consistent with the Python API, where strings are treated as equivalent with
+/// a `torch.device` object (e.g., "cuda:1" can be passed everywhere a
+/// `torch.device("cuda:1")` is accepted). To support the syntax
+/// `at::empty({10}, {kCUDA, 1})` and `tensor.to(kCUDA)`, we need to make sure
+/// that `TensorOptions` is implicitly constructible with any arguments that a
+/// `Device` can be constructed from. So we have,
+///
+///    /* implicit */ TensorOptions(T&& device) : TensorOptions() {
+///      this->set_device(device);
+///    }
+///
+///    template <typename... Args,
+///             typename = std::enable_if_t<std::is_constructible<Device,
+///             Args&&...>::value>>
+///    /* implicit */ TensorOptions(Args&&... args)
+///     : TensorOptions(Device(std::forward<Args>(args)...)) {}
+///
+///
+/// But this will be problematic. Consider this: `TensorOptions({kCUDA, 1})`.
+/// The compiler will complain about ambiguity between the copy constructor and
+/// the `Device` constructor because `{kCUDA, 1}` can be converted to both a
+/// `TensorOptions` and a `Device`.
+///
+/// To get around this, we templatize the `Device` constructor. Since overload
+/// resolution is done before template resolution, our problem is solved.
+
+DispatchKey computeDispatchKey(
+    optional<ScalarType> dtype,
+    optional<Layout> layout,
+    optional<Device> device);
+
+struct C10_API TensorOptions {
+  TensorOptions()
+      : requires_grad_(false),
+        pinned_memory_(false),
+        has_device_(false),
+        has_dtype_(false),
+        has_layout_(false),
+        has_requires_grad_(false),
+        has_pinned_memory_(false),
+        has_memory_format_(false) {}
+
+  /// Constructs a `TensorOptions` object with the given layout.
+  /* implicit */ TensorOptions(Layout layout) : TensorOptions() {
+    this->set_layout(layout);
+  }
+
+  /// Constructs a `TensorOptions` object with the given device.
+  /// See NOTE [ TensorOptions Constructors ] on why this is templatized.
+  template <
+      typename T,
+      typename =
+          std::enable_if_t<std::is_same<std::decay_t<T>, Device>::value>>
+  /* implicit */ TensorOptions(T&& device) : TensorOptions() {
+    this->set_device(std::forward<T>(device));
+  }
+
+  /// Constructs a `TensorOptions` object from arguments allowed in `Device`
+  /// constructors.
+  ///
+  /// See NOTE [ TensorOptions Constructors ].
+  ///
+  /// NB: Ideally we only allow implicit constructors here. But there is no easy
+  /// way to detect them. So we have this one that allows explicit
+  /// constructors too.
+  template <
+      typename... Args,
+      typename =
+          std::enable_if_t<std::is_constructible<Device, Args&&...>::value>>
+  /* implicit */ TensorOptions(Args&&... args)
+      : TensorOptions(Device(std::forward<Args>(args)...)) {}
+
+  /// Constructs a `TensorOptions` object with the given dtype.
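+  /// (Editor's illustrative use, not from the original header:
+  /// `TensorOptions(caffe2::TypeMeta::Make<float>())` builds the same
+  /// options as `at::dtype(at::kFloat)`.)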
+  /* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() {
+    this->set_dtype(dtype);
+  }
+
+  /// legacy constructor to support ScalarType
+  /* implicit */ TensorOptions(ScalarType dtype) : TensorOptions() {
+    this->set_dtype(dtype);
+  }
+
+  /// Constructs a `TensorOptions` object with the given memory format.
+  /* implicit */ TensorOptions(MemoryFormat memory_format) : TensorOptions() {
+    set_memory_format(memory_format);
+  }
+
+  /// Return a copy of `TensorOptions` with `device` set to the given one, or
+  /// cleared if `device` is `nullopt`.
+  C10_NODISCARD TensorOptions
+  device(c10::optional<Device> device) const noexcept {
+    TensorOptions r = *this;
+    r.set_device(device);
+    return r;
+  }
+
+  /// Return a copy of `TensorOptions` with `device` set to the given one.
+  /// (This overload ensures that the variadic template c10::optional
+  /// constructor for Device works correctly.)
+  template <typename... Args>
+  C10_NODISCARD TensorOptions device(Args&&... args) const noexcept {
+    return device(
+        c10::optional<Device>(c10::in_place, std::forward<Args>(args)...));
+  }
+
+  /// Return a copy of `TensorOptions`, but with device set to CUDA, and the
+  /// device index set to the given one.
+  ///
+  /// TODO: This function encourages bad behavior (assuming CUDA is
+  /// the only device that matters). Get rid of it / rename it.
+  C10_NODISCARD TensorOptions
+  device_index(c10::DeviceIndex device_index) const noexcept {
+    return device(Device::Type::CUDA, device_index);
+  }
+
+  /// Return a copy of `TensorOptions` with `dtype` set to the given one.
+  C10_NODISCARD TensorOptions
+  dtype(c10::optional<caffe2::TypeMeta> dtype) const noexcept {
+    TensorOptions r = *this;
+    r.set_dtype(dtype);
+    return r;
+  }
+
+  // legacy function to support ScalarType
+  C10_NODISCARD TensorOptions
+  dtype(c10::optional<ScalarType> dtype) const noexcept {
+    TensorOptions r = *this;
+    r.set_dtype(dtype);
+    return r;
+  }
+
+  // Since dtype is taken...
+  template <typename T>
+  TensorOptions& dtype() {
+    dtype_ = caffe2::TypeMeta::Make<T>();
+    has_dtype_ = true;
+    return *this;
+  }
+
+  /// Sets the layout of the `TensorOptions`.
+  C10_NODISCARD TensorOptions
+  layout(c10::optional<Layout> layout) const noexcept {
+    TensorOptions r = *this;
+    r.set_layout(layout);
+    return r;
+  }
+
+  /// Sets the `requires_grad` property of the `TensorOptions`.
+  C10_NODISCARD TensorOptions
+  requires_grad(c10::optional<bool> requires_grad) const noexcept {
+    TensorOptions r = *this;
+    r.set_requires_grad(requires_grad);
+    return r;
+  }
+
+  /// Sets the `pinned_memory` property on the `TensorOptions`.
+  C10_NODISCARD TensorOptions
+  pinned_memory(c10::optional<bool> pinned_memory) const noexcept {
+    TensorOptions r = *this;
+    r.set_pinned_memory(pinned_memory);
+    return r;
+  }
+
+  /// Sets the `memory_format` property on `TensorOptions`.
+  C10_NODISCARD TensorOptions
+  memory_format(c10::optional<MemoryFormat> memory_format) const noexcept {
+    TensorOptions r = *this;
+    r.set_memory_format(memory_format);
+    return r;
+  }
+
+  /// Returns the device of the `TensorOptions`.
+  Device device() const noexcept {
+    return device_or_default(device_opt());
+  }
+
+  /// Returns whether the device is specified.
+  bool has_device() const noexcept {
+    return has_device_;
+  }
+
+  /// Returns the device of the `TensorOptions`, or `c10::nullopt` if
+  /// device is not specified.
+  c10::optional<Device> device_opt() const noexcept {
+    return has_device_ ? c10::make_optional(device_) : c10::nullopt;
+  }
+
+  /// Returns the device index of the `TensorOptions`.
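+  /// (Editor's illustration: `device({at::kCUDA, 1}).device_index()` returns
+  /// 1, while a default-constructed `TensorOptions().device_index()` falls
+  /// back to the default device, whose index is -1.)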
+  int32_t device_index() const noexcept {
+    return device().index();
+  }
+
+  /// Returns the dtype of the `TensorOptions`.
+  caffe2::TypeMeta dtype() const noexcept {
+    return dtype_or_default(dtype_opt());
+  }
+
+  /// Returns whether the dtype is specified.
+  bool has_dtype() const noexcept {
+    return has_dtype_;
+  }
+
+  /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if
+  /// dtype is not specified.
+  c10::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
+    return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt;
+  }
+
+  /// Returns the layout of the `TensorOptions`.
+  Layout layout() const noexcept {
+    return layout_or_default(layout_opt());
+  }
+
+  /// Returns whether the layout is specified.
+  bool has_layout() const noexcept {
+    return has_layout_;
+  }
+
+  /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if
+  /// layout is not specified.
+  c10::optional<Layout> layout_opt() const noexcept {
+    return has_layout_ ? c10::make_optional(layout_) : c10::nullopt;
+  }
+
+  /// Returns the `requires_grad` property of the `TensorOptions`.
+  bool requires_grad() const noexcept {
+    return has_requires_grad_ ? requires_grad_ : false;
+  }
+
+  /// Returns whether the `requires_grad` is specified.
+  bool has_requires_grad() const noexcept {
+    return has_requires_grad_;
+  }
+
+  /// Returns the `requires_grad` property of the `TensorOptions`, or
+  /// `c10::nullopt` if `requires_grad` is not specified.
+  c10::optional<bool> requires_grad_opt() const noexcept {
+    return has_requires_grad_ ? c10::make_optional(requires_grad_)
+                              : c10::nullopt;
+  }
+
+  /// Returns the `pinned_memory` property of the `TensorOptions`.
+  bool pinned_memory() const noexcept {
+    return pinned_memory_or_default(pinned_memory_opt());
+  }
+
+  /// Returns whether the `pinned_memory` is specified.
+  bool has_pinned_memory() const noexcept {
+    return has_pinned_memory_;
+  }
+
+  /// Returns if the layout is sparse
+  bool is_sparse() const {
+    return layout_ == c10::Layout::Sparse;
+  }
+
+  bool is_sparse_csr() const {
+    return layout_ == c10::Layout::SparseCsr;
+  }
+
+  // For compatibility with legacy tensor.type() comparisons
+  bool type_equal(const TensorOptions& other) const {
+    return computeDispatchKey() == other.computeDispatchKey() &&
+        typeMetaToScalarType(dtype_) == typeMetaToScalarType(other.dtype());
+  }
+
+  /// Returns the `pinned_memory` property of the `TensorOptions`, or
+  /// `c10::nullopt` if `pinned_memory` is not specified.
+  c10::optional<bool> pinned_memory_opt() const noexcept {
+    return has_pinned_memory_ ? c10::make_optional(pinned_memory_)
+                              : c10::nullopt;
+  }
+
+  /// Returns whether the `memory_format` is specified
+  bool has_memory_format() const noexcept {
+    return has_memory_format_;
+  }
+
+  // NB: memory_format() getter is PURPOSELY not defined, as the default
+  // behavior of memory_format varies from function to function.
+
+  /// Returns the `memory_format` property of `TensorOptions`, or
+  /// `c10::nullopt` if `memory_format` is not specified.
+  c10::optional<MemoryFormat> memory_format_opt() const noexcept {
+    return has_memory_format_ ? c10::make_optional(memory_format_)
+                              : c10::nullopt;
+  }
+
+  // Resolves the ATen backend specified by the current construction axes.
+  // TODO: Deprecate this
+  Backend backend() const {
+    return at::dispatchKeyToBackend(computeDispatchKey());
+  }
+
+  /// Return the right-biased merge of two TensorOptions. This has the
+  /// effect of overwriting the settings of self with the specified fields
+  /// of options.
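+  /// For example (editor's sketch, not from the original header):
+  ///
+  ///   auto base = at::dtype(at::kFloat).device(at::kCPU);
+  ///   auto merged = base.merge_in(at::dtype(at::kLong));
+  ///   // merged's dtype is now Long (overwritten); its CPU device is kept.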
+  ///
+  /// NB: This merging operation does NOT respect device merges.
+  /// For example, if you device({kCUDA, 1}).merge_in(kCUDA)
+  /// you will get kCUDA in the end! Functions like Tensor.new_empty
+  /// ensure the right device is selected anyway by way of a
+  /// device guard.
+  ///
+  TensorOptions merge_in(TensorOptions options) const noexcept {
+    TensorOptions merged = *this;
+    if (options.has_device())
+      merged.set_device(options.device_opt());
+    if (options.has_dtype())
+      merged.set_dtype(options.dtype_opt());
+    if (options.has_layout())
+      merged.set_layout(options.layout_opt());
+    // NB: requires grad is right biased; not a logical AND/OR!
+    if (options.has_requires_grad())
+      merged.set_requires_grad(options.requires_grad_opt());
+    if (options.has_pinned_memory())
+      merged.set_pinned_memory(options.pinned_memory_opt());
+    if (options.has_memory_format())
+      merged.set_memory_format(options.memory_format_opt());
+    return merged;
+  }
+
+  // TODO remove after TensorOptions rationalization
+  TensorOptions merge_memory_format(
+      c10::optional<MemoryFormat> optional_memory_format) const noexcept {
+    TensorOptions merged = *this;
+    if (optional_memory_format.has_value()) {
+      merged.set_memory_format(*optional_memory_format);
+    }
+    return merged;
+  }
+
+  // INVARIANT: computeDispatchKey returns only the subset of dispatch keys for
+  // which dispatchKeyToBackend is injective, if it is defined at all (for
+  // the most part, this just means that this function never returns an
+  // Autograd key)
+  DispatchKey computeDispatchKey() const {
+    return c10::computeDispatchKey(
+        optTypeMetaToScalarType(dtype_opt()), layout_opt(), device_opt());
+  }
+
+ private:
+  // These methods are currently private because I'm not sure if it's wise
+  // to actually publish them. They are methods because I need them in
+  // the constructor and the functional API implementation.
+  //
+  // If you really, really need it, you can make these public, but check if you
+  // couldn't just do what you need with the functional API. Similarly, these
+  // methods are not chainable, because if you wanted chaining, you probably
+  // want to use the functional API instead. (It's probably OK to make
+  // these chainable, because these functions are all explicitly annotated
+  // with a ref-qualifier, the trailing &, that makes them illegal to call
+  // on temporaries.)
+
+  /// Mutably set the device of `TensorOptions`.
+  void set_device(c10::optional<Device> device) & noexcept {
+    if (device) {
+      device_ = *device;
+      has_device_ = true;
+    } else {
+      has_device_ = false;
+    }
+  }
+
+  /// Mutably set the dtype of `TensorOptions`.
+  void set_dtype(c10::optional<caffe2::TypeMeta> dtype) & noexcept {
+    if (dtype) {
+      dtype_ = *dtype;
+      has_dtype_ = true;
+    } else {
+      has_dtype_ = false;
+    }
+  }
+
+  // legacy function to support ScalarType
+  void set_dtype(c10::optional<ScalarType> dtype) & noexcept {
+    if (dtype) {
+      dtype_ = scalarTypeToTypeMeta(*dtype);
+      has_dtype_ = true;
+    } else {
+      has_dtype_ = false;
+    }
+  }
+
+  /// Mutably set the layout of `TensorOptions`.
+  void set_layout(c10::optional<Layout> layout) & noexcept {
+    if (layout) {
+      layout_ = *layout;
+      has_layout_ = true;
+    } else {
+      has_layout_ = false;
+    }
+  }
+
+  /// Mutably set the `requires_grad` property of `TensorOptions`.
+  void set_requires_grad(c10::optional<bool> requires_grad) & noexcept {
+    if (requires_grad) {
+      requires_grad_ = *requires_grad;
+      has_requires_grad_ = true;
+    } else {
+      has_requires_grad_ = false;
+    }
+  }
+
+  /// Mutably set the `pinned_memory` property of `TensorOptions`.
+  void set_pinned_memory(c10::optional<bool> pinned_memory) & noexcept {
+    if (pinned_memory) {
+      pinned_memory_ = *pinned_memory;
+      has_pinned_memory_ = true;
+    } else {
+      has_pinned_memory_ = false;
+    }
+  }
+
+  /// Mutably set the `memory_format` property of `TensorOptions`.
+  void set_memory_format(
+      c10::optional<MemoryFormat> memory_format) & noexcept {
+    if (memory_format) {
+      memory_format_ = *memory_format;
+      has_memory_format_ = true;
+    } else {
+      has_memory_format_ = false;
+    }
+  }
+
+  // WARNING: If you edit TensorOptions to add more options, you
+  // may need to adjust the implementation of Tensor::options.
+  // The criterion for whether or not Tensor::options must be adjusted
+  // is whether or not the new option you added should be preserved
+  // by functions such as empty_like(); if it should be preserved,
+  // you must adjust options().
+  //
+  // TODO: MemoryFormat is not implemented in this way
+
+  // NB: We didn't use c10::optional here, because then we can't pack
+  // the has_***_ boolean fields.
+
+  Device device_ = at::kCPU; // 16-bit
+  caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 16-bit
+  Layout layout_ = at::kStrided; // 8-bit
+  MemoryFormat memory_format_ = MemoryFormat::Contiguous; // 8-bit
+
+  // Bitmask required here to get this to fit inside 32 bits (or even 64 bits,
+  // for that matter)
+
+  bool requires_grad_ : 1;
+  bool pinned_memory_ : 1;
+
+  bool has_device_ : 1;
+  bool has_dtype_ : 1;
+  bool has_layout_ : 1;
+  bool has_requires_grad_ : 1;
+  bool has_pinned_memory_ : 1;
+  bool has_memory_format_ : 1;
+};
+
+// We should aspire to fit in one machine-size word; but a size greater than
+// two words is too much. (We are doing terribly on 32-bit archs, where we
+// require three machine size words to store tensor options. Eek!)
+static_assert(
+    sizeof(TensorOptions) <= sizeof(int64_t) * 2,
+    "TensorOptions must fit in 128-bits");
+
+/// Convenience function that returns a `TensorOptions` object with the `dtype`
+/// set to the given one.
+inline TensorOptions dtype(caffe2::TypeMeta dtype) {
+  return TensorOptions().dtype(dtype);
+}
+
+// legacy function to support ScalarType
+inline TensorOptions dtype(ScalarType dtype) {
+  return TensorOptions().dtype(scalarTypeToTypeMeta(dtype));
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `layout` set to the given one.
+inline TensorOptions layout(Layout layout) {
+  return TensorOptions().layout(layout);
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `device` set to the given one.
+inline TensorOptions device(Device device) {
+  return TensorOptions().device(std::move(device));
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `device` set to CUDA and the `device_index` set to the given one.
+inline TensorOptions device_index(int16_t device_index) {
+  return TensorOptions().device_index(
+      static_cast<c10::DeviceIndex>(device_index));
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `requires_grad` set to the given one.
+inline TensorOptions requires_grad(bool requires_grad = true) {
+  return TensorOptions().requires_grad(requires_grad);
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `memory_format` set to the given one.
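+/// (Editor's illustration: chain it with other axes, e.g.
+/// `device(at::kCUDA).memory_format(MemoryFormat::ChannelsLast)`.)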
+inline TensorOptions memory_format(MemoryFormat memory_format) { + return TensorOptions().memory_format(memory_format); +} + +C10_API std::ostream& operator<<( + std::ostream& stream, + const TensorOptions& options); + +template +inline TensorOptions dtype() { + return dtype(caffe2::TypeMeta::Make()); +} + +inline std::string toString(const TensorOptions options) { + std::ostringstream stream; + stream << options; + return stream.str(); +} + +// This is intended to be a centralized location by which we can determine +// what an appropriate DispatchKey for a tensor is. +inline DispatchKey computeDispatchKey( + c10::optional dtype, + c10::optional layout, + c10::optional device) { + const auto layout_ = layout_or_default(layout); + const auto device_ = device_or_default(device); + switch (layout_) { + case Layout::Strided: { + const auto dtype_ = dtype_or_default(dtype); + switch (device_.type()) { +#define DO_CASE(device, _) \ + case DeviceType::device: { \ + if (isQIntType(dtype_)) { \ + return DispatchKey::Quantized##device; \ + } \ + return DispatchKey::device; \ + } + C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused) +#undef DO_CASE + case DeviceType::FPGA: + return DispatchKey::FPGA; + case DeviceType::ORT: + return DispatchKey::ORT; + case DeviceType::Vulkan: + return DispatchKey::Vulkan; + case DeviceType::Metal: + return DispatchKey::Metal; + case DeviceType::MKLDNN: + case DeviceType::OPENGL: + case DeviceType::OPENCL: + case DeviceType::IDEEP: + TORCH_INTERNAL_ASSERT( + 0, + "This is a grandfathered Caffe2 device type ", + device_.type(), + ", it shouldn't ever convert to a DispatchKey. File a bug describing what you were doing if you think this is in error."); + default: + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "Unsupported device type for dense layout: ", + device_.type()); + } + } + case Layout::Sparse: + switch (device_.type()) { +#define DO_CASE(device, _) \ + case DeviceType::device: { \ + return DispatchKey::Sparse##device; \ + } + C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused) +#undef DO_CASE + default: + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "Unsupported device type for sparse layout: ", + device_.type()); + } + case Layout::Mkldnn: + switch (device_.type()) { + case DeviceType::CPU: + return DispatchKey::MkldnnCPU; + default: + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "Unsupported device type for mkldnn layout: ", + device_.type()); + } + case Layout::SparseCsr: + case Layout::SparseCsc: + case Layout::SparseBsr: + case Layout::SparseBsc: + switch (device_.type()) { + case DeviceType::CPU: + return DispatchKey::SparseCsrCPU; + case DeviceType::CUDA: + return DispatchKey::SparseCsrCUDA; + default: + AT_ERROR( + "Unsupported device type for ", + layout_, + " layout: ", + device_.type()); + } + default: + TORCH_CHECK(false, "Unsupported layout: ", layout_); + } +} + +inline Layout dispatchKeyToLayout(DispatchKey dispatch_key) { + switch (dispatch_key) { +#define DO_CASE(bc, _) case DispatchKey::Sparse##bc: + C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused) +#undef DO_CASE + return Layout::Sparse; + case DispatchKey::SparseCsrCPU: + case DispatchKey::SparseCsrCUDA: + TORCH_CHECK( + false, + "Cannot map DispatchKey ", + dispatch_key, + " to a unique layout."); + case DispatchKey::MkldnnCPU: + return Layout::Mkldnn; + default: + return Layout::Strided; + } +} + +inline DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) { + switch (dispatch_key) { + // stuff that's real +#define DO_CASE(suffix, prefix) \ + case DispatchKey::prefix##suffix: \ + return 
DeviceType::suffix; +#define DO_CASES(_, prefix) C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, prefix) + C10_FORALL_FUNCTIONALITY_KEYS(DO_CASES) +#undef DO_CASES +#undef DO_CASE + + case DispatchKey::MkldnnCPU: + return DeviceType::CPU; + case DispatchKey::Vulkan: + return DeviceType::Vulkan; + + case DispatchKey::ORT: + return DeviceType::ORT; + default: + TORCH_CHECK( + false, + "DispatchKey ", + dispatch_key, + " doesn't correspond to a device"); + } +} + +inline TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key) { + return TensorOptions() + .layout(dispatchKeyToLayout(dispatch_key)) + .device(dispatchKeyToDeviceType(dispatch_key)); +} + +namespace detail { +inline bool backend_supports_empty_operator(const TensorOptions options) { + // Quantized backends don't support at::empty(). + // They have separate operators like at::empty_quantized() that take in + // extra information about how to quantize the tensor. + return !isQIntType(typeMetaToScalarType(options.dtype())); +} + +} // namespace detail + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/UndefinedTensorImpl.h b/voice_bridge/torch/include/c10/core/UndefinedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..b2a73ddf0a91c9af25958c67ac30b037081c78dd --- /dev/null +++ b/voice_bridge/torch/include/c10/core/UndefinedTensorImpl.h @@ -0,0 +1,37 @@ +#pragma once + +#include + +namespace c10 { + +struct C10_API UndefinedTensorImpl final : public TensorImpl { + public: + // Without this, we get: + // error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in + // device code + // (ostensibly because the constexpr tricks MSVC into trying to compile this + // function for device as well). +#ifdef _WIN32 + static inline TensorImpl* singleton() { +#else + static constexpr inline TensorImpl* singleton() { +#endif + return &_singleton; + } +#ifdef DEBUG + bool has_storage() const override; +#endif + void set_storage_offset(int64_t offset) override; + + protected: + bool is_contiguous_custom(MemoryFormat format) const override; + IntArrayRef strides_custom() const override; + SymIntArrayRef sym_strides_custom() const override; + + private: + UndefinedTensorImpl(); + static UndefinedTensorImpl _singleton; + const char* tensorimpl_type_name() const override; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/WrapDimMinimal.h b/voice_bridge/torch/include/c10/core/WrapDimMinimal.h new file mode 100644 index 0000000000000000000000000000000000000000..4a6f375147491a523da860abaffbad9e1ba7783d --- /dev/null +++ b/voice_bridge/torch/include/c10/core/WrapDimMinimal.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +namespace c10 { + +namespace detail { +C10_API int64_t +maybe_wrap_dim_slow(int64_t dim, int64_t dim_post_expr, bool wrap_scalar); +} + +static inline int64_t maybe_wrap_dim( + int64_t dim, + int64_t dim_post_expr, + bool wrap_scalar = true) { + // Inline the fast paths + if (C10_LIKELY(-dim_post_expr <= dim && dim < dim_post_expr)) { + // Branch-less version of dim + (dim < 0 ? 
dim_post_expr : 0) + return dim + dim_post_expr * (dim < 0); + } + // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors) + return c10::detail::maybe_wrap_dim_slow(dim, dim_post_expr, wrap_scalar); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/alignment.h b/voice_bridge/torch/include/c10/core/alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..4a8c732ef42d0ca8c4fce37ec95ecb03e027ce0e --- /dev/null +++ b/voice_bridge/torch/include/c10/core/alignment.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace c10 { + +#ifdef C10_MOBILE +// Use 16-byte alignment on mobile +// - ARM NEON AArch32 and AArch64 +// - x86[-64] < AVX +constexpr size_t gAlignment = 16; +#else +// Use 64-byte alignment should be enough for computation up to AVX512. +constexpr size_t gAlignment = 64; +#endif + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/DeviceGuardImplInterface.h b/voice_bridge/torch/include/c10/core/impl/DeviceGuardImplInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..5a409715a622bce10e0f6baeb78f72af30be0f15 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/DeviceGuardImplInterface.h @@ -0,0 +1,328 @@ +#pragma once + +#include +#include +#include +#include + +// Just for C10_ANONYMOUS_VARIABLE +#include + +#include + +namespace c10 { + +// Forward declaration +class DataPtr; + +/** + * Flags defining the behavior of events. + * + * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The + * BACKEND_DEFAULT is what a particular backend would select if no + * flags were given. PYTORCH_DEFAULT is the PyTorch's framework default + * choice for events on that backend, which may not be the same. For example, + * when PyTorch creates a CUDA event it sets the flag + * CUDA_EVENT_DISABLING_TIMING by default to improve performance. + * + * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each + * backend implementation. Backend-specific flags, like CUDA_EVENT_DEFAULT, + * should map one-to-one with actual event flags for those backends. + */ +enum class EventFlag { + PYTORCH_DEFAULT, + BACKEND_DEFAULT, + // CUDA flags + CUDA_EVENT_DEFAULT, + CUDA_EVENT_DISABLE_TIMING, // PyTorch-default for CUDA + // HIP flags + HIP_EVENT_DEFAULT, + HIP_EVENT_DISABLE_TIMING, // PyTorch-default for HIP + // FOR TESTING ONLY + INVALID +}; + +namespace impl { + +/** + * DeviceGuardImplInterface represents the virtual interface which provides + * functionality to provide an RAII class for device and stream switching, + * via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is + * expected to implement and register an implementation of this interface. + * All classes which inherit from DeviceGuardImplInterface should be declared + * 'final'. + * + * This class exists because we provide a unified interface for performing + * device guards via DeviceGuard, but we cannot assume that we have actually + * compiled against the, e.g., CUDA library, which actually implements + * this guard functionality. In this case, a dynamic dispatch is required + * to cross the library boundary. + * + * If possible, you should directly use implementations of this interface; + * those uses will be devirtualized. + */ +struct C10_API DeviceGuardImplInterface { + /** + * Return the type of device managed by this guard implementation. + */ + virtual DeviceType type() const = 0; + + /** + * Set the current device to Device, and return the previous Device. 
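+   *
+   * A minimal sketch of the intended call pattern (hedged; `impl` is a
+   * hypothetical pointer to a concrete implementation of this interface):
+   *
+   *   Device prev = impl->exchangeDevice(Device(DeviceType::CUDA, 1));
+   *   // ... run work on device 1 ...
+   *   impl->setDevice(prev); // hand the previous device back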
+ */ + virtual Device exchangeDevice(Device) const = 0; + // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might + // consider replacing exchangeDevice with a non-virtual function with a baked + // in implementation; however, note that this will triple the number of + // virtual calls (when you implement exchangeDevice in a final subclass, + // the compiler gets to devirtualize everything; it won't do that if you don't + // define it in the subclass!) A common way to solve this problem is to use + // some sort of CRTP; however, we can template DeviceGuardImplInterface since + // we really *do* need it to be virtual. A little boilerplate seems easiest + // to explain. (Another way around this problem is to provide inline + // functions that provide the default implementations, but this seems a little + // hard to explain. In any case, we're only going to have on order of ten + // implementations of this anyway.) + + /** + * Get the current device. + */ + virtual Device getDevice() const = 0; + + /** + * Set the current device to Device. + */ + virtual void setDevice(Device) const = 0; + + /** + * Set the current device to Device, without checking for errors + * (so, e.g., this can be called from a destructor). + */ + virtual void uncheckedSetDevice(Device) const noexcept = 0; + + /** + * Get the current stream for a given device. + */ + virtual Stream getStream(Device) const noexcept = 0; + + /** + * Get the default stream for a given device. + */ + virtual Stream getDefaultStream(Device) const { + TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.") + } + + /** + * Get a stream from the global pool for a given device. + */ + virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false) + const { + (void)isHighPriority; // Suppress unused varaible warning + TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.") + } + + /** + * Set a stream to be the thread local current stream for its device. + * Return the previous stream for that device. You are NOT required + * to set the current device to match the device of this stream. + */ + virtual Stream exchangeStream(Stream) const noexcept = 0; + + /** + * Destroys the given event. + */ + virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/) + const noexcept {} + + /** + * Increments the event's version and enqueues a job with this version + * in the stream's work queue. When the stream process that job + * it notifies all streams waiting on / blocked by that version of the + * event to continue and marks that version as recorded. + * */ + virtual void record( + void** /*event*/, + const Stream& /*stream*/, + const DeviceIndex /*device_index*/, + const c10::EventFlag /*flag*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Does nothing if the event has not been scheduled to be recorded. + * If the event was previously enqueued to be recorded, a command + * to wait for the version of the event that exists at the time of this call + * is inserted in the stream's work queue. + * When the stream reaches this command it will stop processing + * additional commands until that version of the event is marked as recorded. + */ + virtual void block(void* /*event*/, const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Returns true if (and only if) + * (1) the event has never been scheduled to be recorded + * (2) the current version is marked as recorded. 
+ * Returns false otherwise. + */ + virtual bool queryEvent(void* /*event*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Get the number of devices. WARNING: This is REQUIRED to not raise + * an exception. If there is some sort of problem, e.g., driver error, + * you should report that there are zero available devices. + */ + virtual DeviceIndex deviceCount() const noexcept = 0; + + /** + * Return true if all the work previously enqueued on the stream for + * asynchronous execution has completed running on the device. + */ + virtual bool queryStream(const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support querying streams."); + } + + /** + * Wait (by blocking the calling thread) until all the work previously + * enqueued on the stream has completed running on the device. + */ + virtual void synchronizeStream(const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support synchronizing streams."); + } + + /** + * Ensure the caching allocator (if any) is aware that the given DataPtr is + * being used on the given stream, and that it should thus avoid recycling the + * DataPtr until all work on that stream is done. + */ + virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const { + } + + /** + * Intended use of this class is to leak the DeviceGuardImpl at program end. + * So you better not call the destructor, buster! + */ + virtual ~DeviceGuardImplInterface() = default; +}; + +// A no-op device guard impl that doesn't do anything interesting. Useful +// for devices that don't actually have a concept of device index. Prominent +// examples are CPU and Meta. +template +struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface { + NoOpDeviceGuardImpl() {} + DeviceType type() const override { + return D; + } + Device exchangeDevice(Device) const override { + return Device(D, -1); // no-op + } + Device getDevice() const override { + return Device(D, -1); + } + void setDevice(Device) const override { + // no-op + } + void uncheckedSetDevice(Device) const noexcept override { + // no-op + } + Stream getStream(Device) const noexcept override { + // no-op + return Stream(Stream::DEFAULT, Device(D, -1)); + } + // NB: These do NOT set the current device + Stream exchangeStream(Stream) const noexcept override { + // no-op + return Stream(Stream::DEFAULT, Device(D, -1)); + } + DeviceIndex deviceCount() const noexcept override { + return 1; + } + + // Event-related functions + void record( + void** /*event*/, + const Stream& /*stream*/, + const DeviceIndex /*device_index*/, + const EventFlag /*flag*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events."); + } + void block(void* /*event*/, const Stream& /*stream*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events.") + } + bool queryEvent(void* /*event*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events.") + } + void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/) + const noexcept override {} + + // Stream-related functions + bool queryStream(const Stream& /*stream*/) const override { + return true; + } + void synchronizeStream(const Stream& /*stream*/) const override { + // Don't wait for anything. + } +}; + +// The registry is NON-owning. Each stored pointer is std::atomic so +// that under all interleavings of registry calls the structure is +// race-free. This doesn't cost us anything on reads in X86. 
(An +// unsynchronized implementation probably is OK too, but I didn't want +// to prove that we never read from device_guard_impl_registry at the +// same time some registration is occurring. Shiver.) +// +// I'd like this registry to be valid even at program destruction time +// (in case someone uses a DeviceGuard in a destructor to do some cleanup +// in the CUDA API.) Since there are no direct accesses of the underlying +// owning objects which I can use to enforce initialization order (unlike +// in a Meyer singleton), it implies that you must *leak* objects when +// putting them in the registry. This is done by deleting the destructor +// on DeviceGuardImplInterface. +extern C10_API std::atomic + device_guard_impl_registry[static_cast( + DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)]; + +// I can't conveniently use c10/util/Registry.h for the following reason: +// c10/util/Registry.h gives me a slow way of Create'ing a object of some +// interface from the registry, but no way of quickly accessing an already +// created object. I'll be banging on getDeviceGuardImpl every time we do a +// DeviceGuard, so I really don't want to be doing an unordered_map lookup. +// Better if the registration mechanism directly drops its implementation +// into device_guard_impl_registry. + +class C10_API DeviceGuardImplRegistrar { + public: + DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*); +}; + +#define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \ + static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \ + g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl()); + +inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) { + // Two adjacent int16_t fields DeviceType and DeviceIndex has field access + // miscompiled on NVCC. To workaround this issue, we apply a mask to the + // DeviceType. First check if the DeviceType is 16-bit. + // FB employees can see + // https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/ + // for more details + static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit"); + auto p = device_guard_impl_registry[static_cast(type) & 0xFF].load(); + + // This seems to be the first place where you make use of a device + // when you pass devices to factory functions. Give a nicer error + // message in this case. + TORCH_CHECK(p, "PyTorch is not linked with support for ", type, " devices"); + return p; +} + +inline bool hasDeviceGuardImpl(DeviceType type) { + return device_guard_impl_registry[static_cast(type)].load(); +} + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/FakeGuardImpl.h b/voice_bridge/torch/include/c10/core/impl/FakeGuardImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..c86255220c1c1f6549484408ea2cf304d1b992f2 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/FakeGuardImpl.h @@ -0,0 +1,107 @@ +#pragma once + +#include + +#include + +namespace c10 { +namespace impl { + +// FakeGuardImpl is hardcoded to have eight devices. Not for +// any good reason, just to simplify code. +constexpr DeviceIndex kFakeGuardImplMaxDevices = 8; + +/** + * A fake implementation of DeviceGuardImplInterface suitable for testing. + * The current device is modeled as a mutable field in the guard implementation + * class. See DeviceGuard_test.cpp for an example use. 
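+ *
+ * A minimal sketch of how a test might drive it (the device type is
+ * arbitrary; only the statics declared below are used):
+ *
+ *   FakeGuardImpl<DeviceType::CUDA> impl;
+ *   impl.setDevice(Device(DeviceType::CUDA, 3));
+ *   // FakeGuardImpl<DeviceType::CUDA>::getDeviceIndex() == 3
+ *   FakeGuardImpl<DeviceType::CUDA>::setDeviceIndex(0); // reset between tests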
+ */ +template +struct FakeGuardImpl final : public DeviceGuardImplInterface { + static constexpr DeviceType static_type = T; + // Runtime device type is not used + FakeGuardImpl(DeviceType) {} + FakeGuardImpl() = default; + DeviceType type() const override { + return T; + } + Device exchangeDevice(Device d) const override { + AT_ASSERT(d.type() == type()); + AT_ASSERT(d.index() < kFakeGuardImplMaxDevices); + Device old_device = getDevice(); + if (old_device.index() != d.index()) { + current_device_ = d.index(); + } + return old_device; + } + Device getDevice() const override { + return Device(type(), current_device_); + } + void setDevice(Device d) const override { + AT_ASSERT(d.type() == type()); + AT_ASSERT(d.index() >= 0); + AT_ASSERT(d.index() < kFakeGuardImplMaxDevices); + current_device_ = d.index(); + } + void uncheckedSetDevice(Device d) const noexcept override { + current_device_ = d.index(); + } + Stream getStream(Device d) const noexcept override { + return Stream(Stream::UNSAFE, d, current_streams_[d.index()]); + } + Stream exchangeStream(Stream s) const noexcept override { + auto old_id = current_streams_[s.device_index()]; + current_streams_[s.device_index()] = s.id(); + return Stream(Stream::UNSAFE, s.device(), old_id); + } + DeviceIndex deviceCount() const noexcept override { + return kFakeGuardImplMaxDevices; + } + + // Event-related functions + void record( + void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override {} + void block(void* event, const Stream& stream) const override {} + bool queryEvent(void* event) const override { + return true; + } + void destroyEvent(void* event, const DeviceIndex device_index) + const noexcept override {} + + // Convenience methods for testing + static DeviceIndex getDeviceIndex() { + return current_device_; + } + static void setDeviceIndex(DeviceIndex i) { + AT_ASSERT(i >= 0); + AT_ASSERT(i < kFakeGuardImplMaxDevices); + current_device_ = i; + } + static StreamId getCurrentStreamIdFor(DeviceIndex i) { + return current_streams_.at(i); + } + static void resetStreams() { + current_streams_.fill(0); + } + + private: + thread_local static DeviceIndex current_device_; + thread_local static std::array + current_streams_; +}; + +template +thread_local DeviceIndex FakeGuardImpl::current_device_ = 0; + +template +constexpr DeviceType FakeGuardImpl::static_type; + +template +thread_local std::array + FakeGuardImpl::current_streams_ = {0, 0, 0, 0, 0, 0, 0, 0}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/GPUTrace.h b/voice_bridge/torch/include/c10/core/impl/GPUTrace.h new file mode 100644 index 0000000000000000000000000000000000000000..377af88be034abd07e8dd7c1956a24030479a502 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/GPUTrace.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace c10 { +namespace impl { + +struct C10_API GPUTrace { + // On the x86 architecture the atomic operations are lock-less. + static std::atomic gpuTraceState; + + // When PyTorch migrates to C++20, this should be changed to an atomic flag. + // Currently, the access to this variable is not synchronized, on the basis + // that it will only be flipped once and by the first interpreter that + // accesses it. + static bool haveState; + + // This function will only register the first interpreter that tries to invoke + // it. For all of the next ones it will be a no-op. 
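+  //
+  // Illustrative behavior (a sketch; interp_a and interp_b stand in for
+  // two hypothetical PyInterpreter pointers):
+  //
+  //   GPUTrace::set_trace(interp_a); // first caller wins
+  //   GPUTrace::set_trace(interp_b); // no-op
+  //   GPUTrace::get_trace();         // still returns interp_a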
+ static void set_trace(const PyInterpreter*); + + static const PyInterpreter* get_trace() { + if (!haveState) + return nullptr; + return gpuTraceState.load(std::memory_order_acquire); + } +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/InlineDeviceGuard.h b/voice_bridge/torch/include/c10/core/impl/InlineDeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..007c9e9bcfcaf8803bd3d4825b089ee80912274a --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/InlineDeviceGuard.h @@ -0,0 +1,431 @@ +#pragma once + +// This file provides implementations of InlineDeviceGuard and +// InlineOptionalDeviceGuard. + +#include +#include +#include +#include +#include + +namespace c10 { +namespace impl { + +/** + * A DeviceGuard is an RAII class that sets a device to some value + * on construction, and resets the device to its original value on + * destruction. + * + * InlineDeviceGuard is a helper class for implementing DeviceGuards. + * It is templated over a DeviceGuardImpl (anything that implements + * DeviceGuardImplInterface). There are two primary ways to instantiate + * InlineDeviceGuard: + * + * - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl. + * This is the best way to use InlineDeviceGuard, as all calls are + * devirtualized, giving you code as efficient as straight line + * calls to cudaGetDevice/cudaSetDevice. + * + * - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl + * retrieved from a DeviceType registry. We have explicitly instantiated + * InlineDeviceGuard this way as c10::DeviceGuard. + * + * If you are in a hurry, you can use InlineDeviceGuard directly: + * + * using CUDAGuard = impl::InlineDeviceGuard; + * + * However, you can provide a better user experience if you explicitly write a + * wrapper class that itself contains the template instantiation: + * + * class CUDAGuard { + * public: + * // ... the API ... + * private: + * impl::InlineDeviceGuard guard_; + * } + * + * The wrapper class provides a good place to write documentation, and helps + * avoid weird template instantiation errors when a user incorrectly uses the + * class. + * + * If you need to test this class, consider instantiating it with FakeGuardImpl. + */ +template +class InlineDeviceGuard { + public: + // Note [Omitted default constructor from RAII] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // In principle, we could add a default constructor to + // DeviceGuard which reads the current device and promises to + // restore to that device on exit. However, most cases where you + // would have written this, you probably meant to actually just + // use OptionalDeviceGuard (since you don't actually need the + // restore to happen if you don't ever actually set the device). + // We remove the constructor here to encourage you to think about + // what you actually want to happen. + explicit InlineDeviceGuard() = delete; + + /// Set the current device to the passed Device. + explicit InlineDeviceGuard(Device device) + : impl_(device.type()), + original_device_( + device.index() == -1 ? impl_.getDevice() + : impl_.exchangeDevice(device)), + current_device_(device.index() == -1 ? original_device_ : device) {} + + /// Set the current device index to the passed DeviceIndex. (The + /// device type is inferred from the template parameter T). 
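+  /// A sketch of this constructor in use, assuming a concrete impl type
+  /// with a static_type member (CUDAGuardImpl is the canonical example):
+  ///
+  ///   InlineDeviceGuard<CUDAGuardImpl> g(1); // switch to device index 1
+  ///   // ... the original device is restored when g goes out of scope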
+ template < + typename U = T, + typename = typename std::enable_if< + !std::is_same::value>::type> + explicit InlineDeviceGuard(DeviceIndex device_index) + : InlineDeviceGuard(Device(U::static_type, device_index)) {} + + /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit + /// DeviceGuardImplInterface pointer. + template < + typename U = T, + typename = typename std::enable_if< + std::is_same::value>::type> + explicit InlineDeviceGuard( + Device device, + const DeviceGuardImplInterface* impl) + : impl_( + VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))), + original_device_( + device.index() == -1 ? impl_.getDevice() + : impl_.exchangeDevice(device)), + current_device_(device.index() == -1 ? original_device_ : device) {} + + /// Copy is disallowed + InlineDeviceGuard(const InlineDeviceGuard&) = delete; + InlineDeviceGuard& operator=(const InlineDeviceGuard&) = delete; + + /// Move is disallowed, as DeviceGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + InlineDeviceGuard(InlineDeviceGuard&& other) = delete; + InlineDeviceGuard& operator=(InlineDeviceGuard&& other) = delete; + + ~InlineDeviceGuard() { + impl_.uncheckedSetDevice(original_device_); + } + + /// Sets the device to the given one. + template < + typename U = T, + typename std::enable_if::value, int>:: + type = 0> + void set_device(at::Device device) { + AT_ASSERT( + (U::static_type == DeviceType::HIP && device.is_cuda()) || + device.type() == U::static_type); + auto index = device.index(); + if (index == -1) + return; + impl_.setDevice(device); + current_device_ = device; + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device. This is effectively equivalent to + /// set_device when a guard supports only a single device type. + template + typename std::enable_if::value>::type + reset_device(at::Device device) { + set_device(device); + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device (for a possibly different device + /// type). + /// + /// This method is named reset_device to highlight the fact that previous + /// device settings from this guard are NOT preserved, even if the device + /// has a different device type. For example: + /// + /// // CUDA device is 0 + /// DeviceGuard g(Device(kCUDA, 1)); + /// g.reset_device(Device(kHIP, 2)); + /// // CUDA device is 0 (!!) + /// + /// NOTE: this implementation may skip some device setting if it can prove + /// that it is unnecessary. + /// + /// Optional argument is for testing only. + template + typename std::enable_if::value>::type + reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl = nullptr) { + auto index = device.index(); + if (index == -1) + return; + if (device.type() == original_device_.type()) { + AT_ASSERT(impl == nullptr || impl->type() == device.type()); + impl_.setDevice(device); + current_device_ = device; + } else { + // Destruct and reconstruct the DeviceGuard in place + impl_.setDevice(original_device_); + impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl); + original_device_ = impl_.exchangeDevice(device); + current_device_ = device; + } + } + + /// Sets the device index to the given one. The device type is inferred + /// from the original device type. 
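+  /// For example (a sketch), with `g` an active guard:
+  ///
+  ///   g.set_index(2);
+  ///   // same as g.reset_device(Device(g.original_device().type(), 2))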
+ void set_index(DeviceIndex index) { + reset_device(Device(original_device_.type(), index)); + } + + /// Returns the device that was set at the time the most recent + /// reset_device(), or otherwise the device at construction time. + Device original_device() const { + return original_device_; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return current_device_; + } + + protected: + T impl_; + + private: + Device original_device_; + Device current_device_; +}; + +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * + * InlineOptionalDeviceGuard is a helper class for implementing + * OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to + * use this. See OptionalDeviceGuard for user-oriented usage notes. + */ +template +class InlineOptionalDeviceGuard { + public: + // Note [Explicit initialization of optional fields] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Explicit initialization of optional fields + // required to workaround an nvcc bug; see + // https://github.com/pytorch/pytorch/issues/12117 + + /// Creates an uninitialized OptionalDeviceGuard. + explicit InlineOptionalDeviceGuard() + : guard_() // See Note [Explicit initialization of optional fields] + {} + + /// Set the current device to the passed Device, if it is not nullopt. + explicit InlineOptionalDeviceGuard(optional device_opt) + : guard_() { // See Note [Explicit initialization of optional fields] + if (device_opt.has_value()) { + guard_.emplace(device_opt.value()); + } + } + + /// Set the current device to the passed DeviceIndex, if it is not nullopt. + template < + typename U = T, + typename = typename std::enable_if< + !std::is_same::value>::type> + explicit InlineOptionalDeviceGuard(optional device_index_opt) + : guard_() { // See Note [Explicit initialization of optional fields] + if (device_index_opt.has_value()) { + guard_.emplace(device_index_opt.value()); + } + } + + /// All constructors of DeviceGuard are valid for OptionalDeviceGuard + /// and result in initialized OptionalDeviceGuard. + template + explicit InlineOptionalDeviceGuard(Args&&... args) + : guard_(in_place, std::forward(args)...) {} + + // TODO: Consider readding Tensor and TensorList constructors here, when + // Tensor moves to c10. (These are only valid on OptionalDeviceGuard, + // because a Tensor may be undefined, in which case we need an uninitialized + // tensor guard.) + + // Note [Move construction for RAII guards is tricky] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // In principle, move construction is useful for terminating + // the lifetime of a `OptionalDeviceGuard` early; for example: + // + // // current device is d0 + // OptionalDeviceGuard g1(d1); + // // current device is d1 + // { + // OptionalDeviceGuard g2(std::move(g1)); + // } + // // current device is d0!! + // + // However, it's difficult to implement the move constructor + // in a way that works in all situations. For example, consider + // the following example: + // + // OptionalDeviceGuard g1(d1); + // { + // OptionalDeviceGuard g2(d2); + // { + // OptionalDeviceGuard g3(std::move(g1)); // !!! + // } + // } + // + // What should the current device be while g3 in scope... and what + // should it be after it goes out of scope? What about g2? 
+ // There don't seem to be satisfactory answers for these questions. + // + // It's in principle possible to raise an error when this occurs + // by doing some extra thread-local bookkeeping. But why bother? + // Just don't provide the constructor. + InlineOptionalDeviceGuard(InlineOptionalDeviceGuard&& other) = delete; + + // Note [Move assignment for RAII guards is tricky] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Move assignment is deleted, because you need to know which guard was + // defined "first", as that guard's original_device_ wins--with the current + // representation, we have no way of telling which is the case. (Move + // construction does not have this problem, as one guard is always + // uninitialized.) + // + // We can make this clear by way of a pair of examples: + // + // Example 1: + // + // // initial device is n0 + // { + // CUDAGuard g1(n1); + // { + // CUDAGuard g2(n2); + // // current device should be n2 + // g1 = std::move(g2); + // // current device should still be n2 + // } + // // current device should still be n2 + // } + // // current device should be n0 + // + // Example 2 (flip the order of the two guards): + // + // // initial device is n0 + // { + // CUDAGuard g2(n2); + // { + // CUDAGuard g1(n1); + // // current device should be n1 + // g1 = std::move(g2); + // // current device should be n2 + // } + // // current device should be n0 (since g2 has been vacated) + // } + // + // In both examples, we need g1 to restore to n0 after move assignment. + // However, in example 1, this is determined by the restore value of g1 + // (prior to the move). In example 2, however, it is determined by the the + // restore value of g2(!!). We don't know which one should win, without having + // a way of telling which guard was allocated first. + // + // We could solve this with an extra thread-local variable. But no one is + // actually using move-assignment. So just get rid of it. + InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) = + delete; + + /// Sets the device to the given one. Initializes OptionalDeviceGuard if it + /// is not already initialized. + template < + typename U = T, + typename = typename std::enable_if< + !std::is_same::value>::type> + void set_device(at::Device device) { + if (!guard_.has_value()) { + guard_.emplace(device); + } else { + guard_->set_device(device); + } + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device (for a possibly different device + /// type). Initializes OptionalDeviceGuard if it is not already initialized. + /// + /// See notes on why this is called reset_device on InlineDeviceGuard. + /// + /// Optional argument is for testing only. + template < + typename U = T, + typename = typename std::enable_if< + std::is_same::value>::type> + void reset_device( + at::Device device, + const DeviceGuardImplInterface* impl = nullptr) { + if (!guard_.has_value()) { + guard_.emplace(device, impl); + } else { + guard_->reset_device(device, impl); + } + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device. Initializes the guard if it is + /// not already initialized. This is effectively equivalent to set_device + /// when a guard supports only a single device type. 
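+  /// A usage sketch, assuming a concrete impl type such as CUDAGuardImpl:
+  ///
+  ///   InlineOptionalDeviceGuard<CUDAGuardImpl> g;  // uninitialized
+  ///   g.reset_device(Device(DeviceType::CUDA, 0)); // initializes the guard
+  ///   g.reset(); // restore original device; uninitialized again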
+ template < + typename U = T, + typename = typename std::enable_if< + !std::is_same::value>::type> + void reset_device(at::Device device) { + if (!guard_.has_value()) { + guard_.emplace(device); + } else { + guard_->reset_device(device); + } + } + + /// Sets the device index to the given one. The device type is statically + /// known. + template < + typename U = T, + typename = typename std::enable_if< + !std::is_same::value>::type> + void set_index(DeviceIndex index) { + if (!guard_.has_value()) { + guard_.emplace(index); + } else { + guard_->set_index(index); + } + } + + /// Returns the device that was set immediately prior to initialization of + /// the, guard, or nullopt if the guard is uninitialized. + optional original_device() const { + return guard_.has_value() ? make_optional(guard_->original_device()) + : nullopt; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device, if the guard is initialized, + /// or nullopt if the guard is uninitialized. + optional current_device() const { + return guard_.has_value() ? make_optional(guard_->current_device()) + : nullopt; + } + + /// Restore the original device, resetting this guard to uninitialized state. + void reset() { + guard_.reset(); + } + + private: + optional> guard_; +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/InlineEvent.h b/voice_bridge/torch/include/c10/core/impl/InlineEvent.h new file mode 100644 index 0000000000000000000000000000000000000000..003a0b8407a1057457fc27011b21398cba4cd298 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/InlineEvent.h @@ -0,0 +1,110 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { +namespace impl { + +template +struct InlineEvent final { + InlineEvent() = delete; + InlineEvent( + const DeviceType _device_type, + const EventFlag _flag = EventFlag::PYTORCH_DEFAULT) + : backend_{_device_type}, device_type_{_device_type}, flag_{_flag} {} + + // Copy constructor and copy assignment operator (deleted) + InlineEvent(const InlineEvent&) = delete; + InlineEvent& operator=(const InlineEvent&) = delete; + + // Move constructor and move assignment operator + InlineEvent(InlineEvent&& other) + : InlineEvent(other.device_type_, other.flag_) { + swap(std::move(other)); + } + InlineEvent& operator=(InlineEvent&& other) { + swap(std::move(other)); + return *this; + } + + void swap(InlineEvent&& other) { + std::swap(event_, other.event_); + std::swap(backend_, other.backend_); + std::swap(device_type_, other.device_type_); + std::swap(device_index_, other.device_index_); + std::swap(flag_, other.flag_); + std::swap(was_marked_for_recording_, other.was_marked_for_recording_); + } + + ~InlineEvent() noexcept { + if (event_) + backend_.destroyEvent(event_, device_index_); + } + + DeviceType device_type() const noexcept { + return device_type_; + } + DeviceIndex device_index() const noexcept { + return device_index_; + } + EventFlag flag() const noexcept { + return flag_; + } + bool was_marked_for_recording() const noexcept { + return was_marked_for_recording_; + } + + void recordOnce(const Stream& stream) { + if (!was_marked_for_recording_) + record(stream); + } + + void record(const Stream& stream) { + TORCH_CHECK( + stream.device_type() == device_type_, + "Event device type ", + DeviceTypeName(device_type_), + " does not match recording stream's device type ", + DeviceTypeName(stream.device_type()), + "."); + + backend_.record(&event_, stream, 
device_index_, flag_); + was_marked_for_recording_ = true; + device_index_ = stream.device_index(); + } + + void block(const Stream& stream) const { + if (!was_marked_for_recording_) + return; + + TORCH_CHECK( + stream.device_type() == device_type_, + "Event device type ", + DeviceTypeName(device_type_), + " does not match blocking stream's device type ", + DeviceTypeName(stream.device_type()), + "."); + + backend_.block(event_, stream); + } + + bool query() const { + if (!was_marked_for_recording_) + return true; + return backend_.queryEvent(event_); + } + + private: + void* event_ = nullptr; + T backend_; + DeviceType device_type_; + DeviceIndex device_index_ = -1; + EventFlag flag_ = EventFlag::PYTORCH_DEFAULT; + bool was_marked_for_recording_ = false; +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/InlineStreamGuard.h b/voice_bridge/torch/include/c10/core/impl/InlineStreamGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..7f4691e84a790304a54cbf0532751d062376375c --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/InlineStreamGuard.h @@ -0,0 +1,256 @@ +#pragma once + +#include +#include +#include + +namespace c10 { +namespace impl { + +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * InlineStreamGuard is a helper class for implementing StreamGuards. + * See InlineDeviceGuard for guidance on how to use this class. + */ +template +class InlineStreamGuard : private InlineDeviceGuard { + public: + /// No default constructor, see Note [Omitted default constructor from RAII] + explicit InlineStreamGuard() = delete; + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + explicit InlineStreamGuard(Stream stream) + : InlineDeviceGuard(stream.device()), + original_stream_of_original_device_( + this->impl_.getStream(original_device())), + original_stream_of_current_device_(this->impl_.exchangeStream(stream)), + current_stream_(stream) {} + + /// This constructor exists purely for testing + template < + typename U = T, + typename = typename std::enable_if< + std::is_same::value>::type> + explicit InlineStreamGuard( + Stream stream, + const DeviceGuardImplInterface* impl) + : InlineDeviceGuard( + stream.device(), + impl ? impl : getDeviceGuardImpl(stream.device_type())), + original_stream_of_original_device_( + this->impl_.getStream(original_device())), + original_stream_of_current_device_(this->impl_.exchangeStream(stream)), + current_stream_(stream) {} + + /// Copy is disallowed + InlineStreamGuard(const InlineStreamGuard&) = delete; + InlineStreamGuard& operator=(const InlineStreamGuard&) = delete; + + /// Move is disallowed, as StreamGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + InlineStreamGuard(InlineStreamGuard&& other) = delete; + InlineStreamGuard& operator=(InlineStreamGuard&& other) = delete; + + ~InlineStreamGuard() { + this->impl_.exchangeStream(original_stream_of_current_device_); + } + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. 
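+  ///
+  /// A sketch of the intended pattern (s0 and s1 are hypothetical streams,
+  /// possibly on different devices):
+  ///
+  ///   InlineStreamGuard<VirtualGuardImpl> g(s0); // device/stream follow s0
+  ///   g.reset_stream(s1);                        // device/stream follow s1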
+ /// + /// NOTE: this implementation may skip some stream/device setting if + /// it can prove that it is unnecessary. + /// + /// WARNING: reset_stream does NOT preserve previously set streams on + /// different devices. If you need to set streams on multiple devices + /// use MultiStreamGuard instead. + void reset_stream(Stream stream) { + // TODO: make a version that takes an impl argument. Unfortunately, + // that will require SFINAE because impl is only valid for the + // VirtualGuardImpl specialization. + if (stream.device() == this->current_device()) { + this->impl_.exchangeStream(stream); + current_stream_ = stream; + } else { + // Destruct and reconstruct the StreamGuard in-place + this->impl_.exchangeStream(original_stream_of_current_device_); + this->reset_device(stream.device()); + original_stream_of_current_device_ = this->impl_.exchangeStream(stream); + current_stream_ = stream; + } + } + + // It's not clear if set_device should also reset the current stream + // if the device is unchanged; therefore, we don't provide it. + // The situation is somewhat clearer with reset_device, but it's still + // a pretty weird thing to do, so haven't added this either. + + /// Returns the stream of the original device prior to this guard. Subtly, + /// the stream returned here is the original stream of the *original* + /// device; i.e., it's the stream that your computation *would* have + /// been put on, if it hadn't been for this meddling stream guard. + /// This is usually what you want. + Stream original_stream() const { + return original_stream_of_original_device_; + } + + /// Returns the most recent stream that was set using this device guard, + /// either from construction, or via set_stream. + Stream current_stream() const { + return current_stream_; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return InlineDeviceGuard::current_device(); + } + + /// Returns the device that was set at the most recent reset_stream(), + /// or otherwise the device at construction time. + Device original_device() const { + return InlineDeviceGuard::original_device(); + } + + private: + Stream + original_stream_of_original_device_; // what the user probably cares about + Stream original_stream_of_current_device_; // what we need to restore + Stream current_stream_; +}; + +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See InlineOptionalDeviceGuard for more guidance on how to use this class. + */ +template +class InlineOptionalStreamGuard { + public: + /// Creates an uninitialized stream guard. + explicit InlineOptionalStreamGuard() + : guard_() // See Note [Explicit initialization of optional fields] + {} + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream, + /// if the passed stream is not nullopt. + explicit InlineOptionalStreamGuard(optional stream_opt) : guard_() { + if (stream_opt.has_value()) { + guard_.emplace(stream_opt.value()); + } + } + + /// All constructors of StreamGuard are valid for OptionalStreamGuard + template + explicit InlineOptionalStreamGuard(Args&&... args) + : guard_(in_place, std::forward(args)...) 
{} + + // See Note [Move construction for RAII guards is tricky] + InlineOptionalStreamGuard(InlineOptionalStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + InlineOptionalStreamGuard& operator=(InlineOptionalStreamGuard&& other) = + delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// Initializes the OptionalStreamGuard if it was not previously initialized. + void reset_stream(Stream stream) { + if (guard_.has_value()) { + guard_->reset_stream(stream); + } else { + guard_.emplace(stream); + } + } + + /// Returns the stream that was set at the time the guard was most recently + /// initialized, or nullopt if the guard is uninitialized. + optional original_stream() const { + return guard_.has_value() ? make_optional(guard_->original_stream()) + : nullopt; + } + + /// Returns the most recent stream that was set using this stream guard, + /// either from construction, or via reset_stream, if the guard is + /// initialized, or nullopt if the guard is uninitialized. + optional current_stream() const { + return guard_.has_value() ? make_optional(guard_->current_stream()) + : nullopt; + } + + /// Restore the original device and stream, resetting this guard to + /// uninitialized state. + void reset() { + guard_.reset(); + } + + private: + optional> guard_; +}; + +template +class InlineMultiStreamGuard { + public: + /// Calls `set_stream` on each of the streams in the list. + /// This may be useful if you need to set different streams + /// for different devices. + explicit InlineMultiStreamGuard(ArrayRef streams) { + if (!streams.empty()) { + impl_.emplace(getDeviceTypeOfStreams(streams)); + original_streams_.reserve(streams.size()); + for (const Stream& s : streams) { + original_streams_.push_back(this->impl_->exchangeStream(s)); + } + } + } + + /// Copy is disallowed + InlineMultiStreamGuard(const InlineMultiStreamGuard&) = delete; + InlineMultiStreamGuard& operator=(const InlineMultiStreamGuard&) = delete; + + /// Move is disallowed, as StreamGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + InlineMultiStreamGuard(InlineMultiStreamGuard&& other) = delete; + InlineMultiStreamGuard& operator=(InlineMultiStreamGuard&& other) = delete; + + ~InlineMultiStreamGuard() { + for (const Stream& s : original_streams_) { + this->impl_->exchangeStream(s); + } + } + + protected: + optional impl_; + + private: + /// The original streams that were active on all devices. 
+ std::vector original_streams_; + + static DeviceType getDeviceTypeOfStreams(ArrayRef streams) { + TORCH_INTERNAL_ASSERT(!streams.empty()); + DeviceType type = streams[0].device_type(); + for (const auto idx : c10::irange(1, streams.size())) { + TORCH_CHECK_VALUE( + streams[idx].device_type() == type, + "Streams have a mix of device types: stream 0 is on ", + streams[0].device(), + " while stream ", + idx, + " is on device ", + streams[idx].device()); + } + return type; + } +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/LocalDispatchKeySet.h b/voice_bridge/torch/include/c10/core/impl/LocalDispatchKeySet.h new file mode 100644 index 0000000000000000000000000000000000000000..70af58b957165e68572ee7433a21f9ada75d1802 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/LocalDispatchKeySet.h @@ -0,0 +1,156 @@ +#pragma once + +#include +#include +#include + +// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) +// +// This manages two thread-local DispatchKeySets: +// +// - The included type set, which adds a tensor type for consideration +// in dispatch. (For example, you might add Profiling to +// the included type set to turn on profiling on all tensor operations.) +// +// - The excluded type set, which disqualifies a tensor type from dispatch. +// (For example, after redispatching on variable, we disqualify +// Autograd so we don't attempt to handle variable again.) +// (Exclusion wins over inclusion.) +// +// NB: Originally, I implemented the excluded type set as storing the inverted +// set, but TLS is defined to be zero-initialized, so this doesn't actually work +// (if it's inverted, you want the set to be -1 initialized). + +namespace c10 { +namespace impl { + +// POD version of LocalDispatchKeySet. Declared here just so that +// we can put it in the guards. +// This struct encapsulates special handling for TLS initialization +// in set_included()/included() API so that they reflect the truth. +// If you want to create PODLocalDispatchKeySet with non-zero state, +// use set_included() instead of default constructor. +struct C10_API PODLocalDispatchKeySet { + uint64_t included_; + uint64_t excluded_; + + // See Note [TLS Initialization] + DispatchKeySet included() const { + return DispatchKeySet(DispatchKeySet::RAW, included_) ^ + c10::default_included_set; + } + DispatchKeySet excluded() const { + return DispatchKeySet(DispatchKeySet::RAW, excluded_) ^ + c10::default_excluded_set; + } + + void set_included(DispatchKeySet x) { + included_ = (x ^ c10::default_included_set).raw_repr(); + } + void set_excluded(DispatchKeySet x) { + excluded_ = (x ^ c10::default_excluded_set).raw_repr(); + } +}; +static_assert( + std::is_pod::value, + "PODLocalDispatchKeySet must be a POD type."); + +struct C10_API LocalDispatchKeySet { + /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x) + : included_(x.included()), excluded_(x.excluded()) {} + DispatchKeySet included_; + DispatchKeySet excluded_; +}; + +// thread_local variables cannot be C10_API on Windows. +// Inlining this seems to break AutoDispatchBelowAutograd on Android. 
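+//
+// (Orientation note: thread-local storage is zero-initialized, so a fresh
+// thread sees raw_local_dispatch_key_set with included_ == 0 and
+// excluded_ == 0. The XOR against the default sets inside
+// PODLocalDispatchKeySet's accessors makes that all-zero state read back as
+// the defaults; a sketch:
+//
+//   PODLocalDispatchKeySet pod{};                 // a new thread's TLS state
+//   pod.included() == c10::default_included_set;  // true
+//   pod.excluded() == c10::default_excluded_set;  // true
+// )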
+#if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +C10_API LocalDispatchKeySet tls_local_dispatch_key_set(); +#else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set; + +inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() { + // Don't let people fiddle with the thread_local directly just + // because they include this header. + return raw_local_dispatch_key_set; +} +#endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) + +// Internal, use ThreadLocalStateGuard +C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set); + +// RAII API for manipulating the thread-local dispatch state. + +class C10_API IncludeDispatchKeyGuard { + public: + IncludeDispatchKeyGuard(DispatchKeySet); + IncludeDispatchKeyGuard(DispatchKey k) + : IncludeDispatchKeyGuard(DispatchKeySet(k)) {} + IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete; + IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete; + IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete; + IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete; + ~IncludeDispatchKeyGuard(); + + private: + // A little micro-optimization to save us from tls_get_addr call + // on destruction + PODLocalDispatchKeySet* tls_; + DispatchKeySet include_; +}; + +class C10_API ExcludeDispatchKeyGuard { + public: + ExcludeDispatchKeyGuard(DispatchKeySet); + ExcludeDispatchKeyGuard(DispatchKey k) + : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {} + ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete; + ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete; + ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete; + ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete; + ~ExcludeDispatchKeyGuard(); + + private: + // A little micro-optimization to save us from tls_get_addr call + // on destruction + PODLocalDispatchKeySet* tls_; + DispatchKeySet exclude_; +}; + +struct C10_API ForceDispatchKeyGuard { + public: + ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set) + : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) { + c10::impl::_force_tls_local_dispatch_key_set(key_set); + } + ~ForceDispatchKeyGuard() { + c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_); + } + + private: + c10::impl::LocalDispatchKeySet saved_keyset_; +}; + +// Non-RAII API for manipulating the thread-local dispatch state. +// Please prefer the RAII API. The non-RAII API may be useful when +// the included/excluded state of a given DispatchKey must span +// many calls from the Python to the C++, so you cannot conveniently +// use an RAII guard. +// +// Example use case: a Python context manager that includes a certain +// DispatchKey, to ensure ops running under the context manager dispatch +// through that DispatchKey's registered overrides. +// +// The non-RAII API is less efficient than the RAII guards because both the +// getter and setter will do a tls_getaddr lookup (the RAII struct only needs +// one!) 
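+//
+// For contrast, the RAII form (a sketch; excluding the autograd key is the
+// classic use):
+//
+//   {
+//     c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::Autograd);
+//     // dispatch inside this scope skips the excluded keys
+//   } // previous TLS state restored automatically
+//
+// whereas the non-RAII setters below must be paired by hand:
+//
+//   tls_set_dispatch_key_included(DispatchKey::Python, true);
+//   // ... arbitrary Python <-> C++ transitions ...
+//   tls_set_dispatch_key_included(DispatchKey::Python, false);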
+ +C10_API bool tls_is_dispatch_key_excluded(DispatchKey x); +C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state); +C10_API bool tls_is_dispatch_key_included(DispatchKey x); +C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state); +C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks); +C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks); + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/PyInterpreter.h b/voice_bridge/torch/include/c10/core/impl/PyInterpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..90fbb8dfebf888f9862f1993546447e5813366d1 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/PyInterpreter.h @@ -0,0 +1,203 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Forward declarations + +namespace c10 { +struct IValue; +class OperatorHandle; +struct TensorImpl; +struct SafePyObject; +} // namespace c10 + +namespace torch { +namespace jit { +using Stack = std::vector; +} +} // namespace torch + +// Actual implementation + +namespace c10 { +namespace impl { + +struct C10_API PyInterpreter; + +// Note [Python interpreter tag] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Traditionally, PyTorch is layered such that our Python library +// (libtorch_python) references our pure C++ library (libtorch) as the +// natural order of things. However, sometimes this natural order is +// subverted: C++ objects refer to Python objects (for example, we +// store a PyObject* pointer on TensorImpl so that converting from a +// C++ Tensor to a Python Tensor is just a memory dereference). +// +// These unusual orderings must be treated with care. To start, you need to +// virtualize the destructor so that the PyObject can be decref'ed on +// destruction (because the C++ object itself doesn't know anything about +// Python--remember, layering!). This process itself is fraught, since +// acquiring the GIL could lead to deadlocks if someone is blocking on you +// while holding the GIL. Furthermore, if the C++ objects outlive the +// interpreter (which can happen if you stash them in a static global +// variable defined in libtorch), you may attempt to decref the object when +// the Python interpreter has already been shutdown. +// +// BUT WAIT, IT GETS WORSE. With torchdeploy, there may be multiple Python +// interpreters in a single process. If a C++ object is accessible from +// multiple interpreters, we must take care not to accidentally pass a +// PyObject from one interpreter with another interpreter. +// +// To prevent these mixups, we introduce a PyInterpreter "tag" (object with +// a vtable), which specifies a specific Python interpreter. +// +// - Any given object can be associated with AT MOST one Python interpreter. +// We represent the interpreter tag as a memory address to an instance of +// a virtual class that is allocated once per interpreter (this is so that +// we can request the interpreter to perform operations for us, if +// necessary). +// +// - It can be recorded with a PyObject (PyInterpreterObject) so that +// we know what interpreter the object is associated with, and we can +// raise an error if you try to use the PyObject from the wrong +// interpreter context. +// +// - It contains a vtable that can be used to perform various Python +// operations from ordinary C++ code that ordinarily wouldn't be accessible +// from libtorch. 
+// +// A simple use case is when a C++ object must be associated with a PyObject. +// However, for TensorImpl, we lazily allocate a PyObject the first time the +// object passes into Python. The invariants for this situation are more +// subtle: +// +// - A given TensorImpl's interpreter tag can only go from uninitialized to +// tagged; once tagged, this is a quiescent state (once tagged to an +// interpreter, ALWAYS tagged to that interpreter) +// +// - A thread may mutate the PyObject field of a TensorImpl if and only if it +// holds the GIL for the interpreter tagged on the TensorImpl. (If the +// TensorImpl is not tagged, it must first atomically claim its tag before it +// can validly write) +// +// WARNING: This class has to be written very carefully, because it may be +// possible for a Tensor to have a reference an interpreter corresponding to +// a shared library that has ALREADY BEEN UNLOADED. This makes blindly calling +// virtual methods very dangerous, because the vtable may be garbage at that +// point (on a good day, you might get "pure virtual method called"). +// +// The idea to solve this problem is we always leak PyInterpreters (so they +// always stay live even after dlclose), and make sure we can disarm their +// virtual methods by indirecting through a separate PyInterpreterVTable +// object. This can be replaced with a no-op vtable from libc10.so, which +// is guaranteed to stick around until the bitter end. +// +// NB: The downside with representing PyInterpreter tags as full objects is that +// it takes an extra word on TensorImpl. If tags were instead just integer +// indices, on 64-bit architectures we could pack the tag and PyObject together +// into a single atomic word. On 32-bit architectures we could simply say that +// only one Python interpreter is supported (erroring if a nontrivial +// interpreter tag is attempted to be set). +// +// The difficulty with this scheme is we need to maintain an out-of-line table +// to get at the PyInterpreters so that we can do virtual method calls on them, +// and registration/deregistration to this table must be done in a thread safe +// manner. This can be easily done if the number of possible PyInterpreters is +// small enough (e.g., 8-bit integer) by simply preallocating an array of +// sufficient size to hold all possible interpreters. Surely 128 threads is +// more than enough for anyone! +// +// I didn't decide to do this technique at the moment, because the extra word +// added by the PyInterpreter tag takes us to 24 words, which means that we +// still fit inside three eight word cache lines. If you need to penny pinch +// another word consider doing this! + +struct C10_API PyInterpreterVTable { + virtual ~PyInterpreterVTable() {} + + // Report the name of this interpreter + virtual std::string name() const = 0; + + // Run Py_DECREF on a PyObject. 
We DO NOT assume the GIL is held on call + // See NOTE [PyInterpreter::decref takes an `is_tensor` arg] + virtual void decref(PyObject* pyobj, bool is_tensor) const = 0; + + // Perform a detach by deferring to the __torch_dispatch__ implementation of + // detach, which will also arrange for the PyObject to get copied in this + // situation + virtual c10::intrusive_ptr detach( + const TensorImpl* self) const = 0; + + // Invoke the Python boxed fallback dispatch to go back into Python + virtual void dispatch(const c10::OperatorHandle& op, torch::jit::Stack* stack) + const = 0; + + // Invoke the Python dispatcher to handle this call + virtual void python_dispatcher( + const c10::OperatorHandle& op, + c10::DispatchKeySet, + torch::jit::Stack* stack) const = 0; + + virtual bool is_contiguous(const TensorImpl* self, at::MemoryFormat) + const = 0; + virtual bool is_strides_like(const TensorImpl* self, at::MemoryFormat) + const = 0; + virtual bool is_non_overlapping_and_dense(const TensorImpl* self) const = 0; + virtual c10::Device device(const TensorImpl* self) const = 0; + virtual int64_t dim(const TensorImpl* self) const = 0; + virtual c10::IntArrayRef strides(const TensorImpl* self) const = 0; + virtual c10::IntArrayRef sizes(const TensorImpl* self) const = 0; + virtual c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const = 0; + virtual c10::Layout layout(const TensorImpl* self) const = 0; + virtual c10::SymInt sym_numel(const TensorImpl* self) const = 0; + virtual c10::SymIntArrayRef sym_strides(const TensorImpl* self) const = 0; + virtual c10::SymInt sym_storage_offset(const TensorImpl* self) const = 0; + + virtual void trace_gpu_event_creation(uintptr_t event) const = 0; + virtual void trace_gpu_event_deletion(uintptr_t event) const = 0; + virtual void trace_gpu_event_record(uintptr_t event, uintptr_t stream) + const = 0; + virtual void trace_gpu_event_wait(uintptr_t event, uintptr_t stream) + const = 0; + virtual void trace_gpu_memory_allocation(uintptr_t ptr) const = 0; + virtual void trace_gpu_memory_deallocation(uintptr_t ptr) const = 0; + virtual void trace_gpu_stream_creation(uintptr_t stream) const = 0; + virtual void trace_gpu_device_synchronization() const = 0; + virtual void trace_gpu_stream_synchronization(uintptr_t stream) const = 0; + virtual void trace_gpu_event_synchronization(uintptr_t event) const = 0; +}; + +struct C10_API PyInterpreter { + const PyInterpreterVTable* vtable_; + + PyInterpreter(const PyInterpreterVTable* vtable) : vtable_(vtable){}; + + const PyInterpreterVTable& operator*() const noexcept { + return *vtable_; + } + const PyInterpreterVTable* operator->() const noexcept { + return vtable_; + } + + // Disarm this PyInterpreter, making all of its methods noops. + // The vtable pointer is not an atomic at the moment, which means + // a disarm() invocation that is concurrent with active destructors + // is not thread safe and will trigger TSAN. My hope is that this + // situations doesn't ever actually happen; tensor destruction should + // quiesce when a dlclose happens, and any long lived tensors whose + // destructors would be disarmed here only begin the destruction process + // on process shutdown (long after the dlclose has occurred). 
+ void disarm() noexcept; +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/PythonDispatcherTLS.h b/voice_bridge/torch/include/c10/core/impl/PythonDispatcherTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..1c055a59fb1591dec9fae98dd861b30d6079192b --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/PythonDispatcherTLS.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include +#include + +namespace c10 { +namespace impl { + +struct C10_API PythonDispatcherTLS { + static void set_state(PyInterpreter* state); + static PyInterpreter* get_state(); + static void reset_state(); +}; + +struct C10_API DisablePythonDispatcher { + DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) { + PythonDispatcherTLS::set_state({}); + } + ~DisablePythonDispatcher() { + PythonDispatcherTLS::set_state(old_); + } + PyInterpreter* old_; +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/SizesAndStrides.h b/voice_bridge/torch/include/c10/core/impl/SizesAndStrides.h new file mode 100644 index 0000000000000000000000000000000000000000..9074b252c6db5271116234bf3645953fbf8e5ba2 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/SizesAndStrides.h @@ -0,0 +1,308 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#define C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5 + +namespace c10 { +namespace impl { + +// Packed container for TensorImpl sizes and strides. +// This design improves on the previous approach of using a pair of +// c10::SmallVector by specializing for the operations we +// actually use and enforcing that the number of sizes is the same as +// the number of strides. The memory layout is as follows: +// +// 1 size_t for the size +// 5 eightbytes of inline sizes and 5 eightbytes of inline strides, OR pointer +// to out-of-line array +class C10_API SizesAndStrides { + public: + // TODO: different iterator types for sizes & strides to prevent + // mixing the two accidentally. + using sizes_iterator = int64_t*; + using sizes_const_iterator = const int64_t*; + using strides_iterator = int64_t*; + using strides_const_iterator = const int64_t*; + + SizesAndStrides() : size_(1) { + size_at_unchecked(0) = 0; + stride_at_unchecked(0) = 1; + } + + ~SizesAndStrides() { + if (C10_UNLIKELY(!isInline())) { + free(outOfLineStorage_); + } + } + + SizesAndStrides(const SizesAndStrides& rhs) : size_(rhs.size_) { + if (C10_LIKELY(rhs.isInline())) { + copyDataInline(rhs); + } else { + allocateOutOfLineStorage(size_); + copyDataOutline(rhs); + } + } + + SizesAndStrides& operator=(const SizesAndStrides& rhs) { + if (this == &rhs) { + return *this; + } + if (C10_LIKELY(rhs.isInline())) { + if (C10_UNLIKELY(!isInline())) { + free(outOfLineStorage_); + } + copyDataInline(rhs); + } else { + if (isInline()) { + allocateOutOfLineStorage(rhs.size_); + } else { + resizeOutOfLineStorage(rhs.size_); + } + copyDataOutline(rhs); + } + size_ = rhs.size_; + return *this; + } + + // Move from rhs. rhs.size() == 0 afterwards. + SizesAndStrides(SizesAndStrides&& rhs) noexcept : size_(rhs.size_) { + if (C10_LIKELY(isInline())) { + memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_)); + } else { + outOfLineStorage_ = rhs.outOfLineStorage_; + rhs.outOfLineStorage_ = nullptr; + } + + rhs.size_ = 0; + } + + // Move from rhs. rhs.size() == 0 afterwards. 
+ SizesAndStrides& operator=(SizesAndStrides&& rhs) noexcept { + if (this == &rhs) { + return *this; + } + if (C10_LIKELY(rhs.isInline())) { + if (C10_UNLIKELY(!isInline())) { + free(outOfLineStorage_); + } + copyDataInline(rhs); + } else { + // They're outline. We're going to steal their vector. + if (!isInline()) { + free(outOfLineStorage_); + } + outOfLineStorage_ = rhs.outOfLineStorage_; + rhs.outOfLineStorage_ = nullptr; + } + size_ = rhs.size_; + rhs.size_ = 0; + + return *this; + } + + size_t size() const noexcept { + return size_; + } + + const int64_t* sizes_data() const noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[0]; + } else { + return &outOfLineStorage_[0]; + } + } + + int64_t* sizes_data() noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[0]; + } else { + return &outOfLineStorage_[0]; + } + } + + sizes_const_iterator sizes_begin() const noexcept { + return sizes_data(); + } + + sizes_iterator sizes_begin() noexcept { + return sizes_data(); + } + + sizes_const_iterator sizes_end() const noexcept { + return sizes_begin() + size(); + } + + sizes_iterator sizes_end() noexcept { + return sizes_begin() + size(); + } + + IntArrayRef sizes_arrayref() const noexcept { + return IntArrayRef{sizes_data(), size()}; + } + + void set_sizes(IntArrayRef newSizes) { + resize(newSizes.size()); + std::copy(newSizes.begin(), newSizes.end(), sizes_begin()); + } + + void set_strides(IntArrayRef strides) { + TORCH_INTERNAL_ASSERT(strides.size() == size()); + std::copy(strides.begin(), strides.end(), strides_begin()); + } + + const int64_t* strides_data() const noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + int64_t* strides_data() noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + strides_const_iterator strides_begin() const noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + strides_iterator strides_begin() noexcept { + if (C10_LIKELY(isInline())) { + return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE]; + } else { + return &outOfLineStorage_[size()]; + } + } + + strides_const_iterator strides_end() const noexcept { + return strides_begin() + size(); + } + + strides_iterator strides_end() noexcept { + return strides_begin() + size(); + } + + IntArrayRef strides_arrayref() const noexcept { + return IntArrayRef{strides_data(), size()}; + } + + // Size accessors. + int64_t size_at(size_t idx) const noexcept { + assert(idx < size()); + return sizes_data()[idx]; + } + + int64_t& size_at(size_t idx) noexcept { + assert(idx < size()); + return sizes_data()[idx]; + } + + int64_t size_at_unchecked(size_t idx) const noexcept { + return sizes_data()[idx]; + } + + int64_t& size_at_unchecked(size_t idx) noexcept { + return sizes_data()[idx]; + } + + // Size accessors. 
+ int64_t stride_at(size_t idx) const noexcept { + assert(idx < size()); + return strides_data()[idx]; + } + + int64_t& stride_at(size_t idx) noexcept { + assert(idx < size()); + return strides_data()[idx]; + } + + int64_t stride_at_unchecked(size_t idx) const noexcept { + return strides_data()[idx]; + } + + int64_t& stride_at_unchecked(size_t idx) noexcept { + return strides_data()[idx]; + } + + void resize(size_t newSize) { + const auto oldSize = size(); + if (newSize == oldSize) { + return; + } + if (C10_LIKELY( + newSize <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE && isInline())) { + if (oldSize < newSize) { + const auto bytesToZero = + (newSize - oldSize) * sizeof(inlineStorage_[0]); + memset(&inlineStorage_[oldSize], 0, bytesToZero); + memset( + &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE + oldSize], + 0, + bytesToZero); + } + size_ = newSize; + } else { + resizeSlowPath(newSize, oldSize); + } + } + + void resizeSlowPath(size_t newSize, size_t oldSize); + + private: + bool isInline() const noexcept { + return size_ <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE; + } + + void copyDataInline(const SizesAndStrides& rhs) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.isInline()); + memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_)); + } + + static size_t storageBytes(size_t size) noexcept { + return size * 2 * sizeof(int64_t); + } + + void allocateOutOfLineStorage(size_t size) { + outOfLineStorage_ = static_cast(malloc(storageBytes(size))); + TORCH_CHECK( + outOfLineStorage_, + "Could not allocate memory for Tensor SizesAndStrides!"); + } + + void resizeOutOfLineStorage(size_t newSize) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isInline()); + outOfLineStorage_ = static_cast( + realloc(outOfLineStorage_, storageBytes(newSize))); + TORCH_CHECK( + outOfLineStorage_, + "Could not allocate memory for Tensor SizesAndStrides!"); + } + + void copyDataOutline(const SizesAndStrides& rhs) noexcept { + memcpy(outOfLineStorage_, rhs.outOfLineStorage_, storageBytes(rhs.size_)); + } + + size_t size_; + union { + int64_t* outOfLineStorage_; + int64_t inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE * 2]{}; + }; +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/TorchDispatchModeTLS.h b/voice_bridge/torch/include/c10/core/impl/TorchDispatchModeTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..708c22e014ad41af18156a27ff17751f6f89c850 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/TorchDispatchModeTLS.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { +namespace impl { + +struct C10_API TorchDispatchModeTLS { + static void set_mode(std::shared_ptr mode); + static const std::shared_ptr& get_mode(); + static void reset_mode(); + static void swap_mode(std::shared_ptr& mode); + + static void push_onto_stack(std::shared_ptr mode); + static const std::shared_ptr pop_stack(); + static const std::shared_ptr& get_stack_at(int64_t idx); + static int64_t stack_len(); + + static const TorchDispatchModeTLS& get_state(); + static void set_state(const TorchDispatchModeTLS& state); + + private: + // The mode TLS is split into + // - mode_, which is the C++ mode, that can only be the mode handling mode + // or null + // - stack_, which is a vector of modes representing the stack of user + // defined modes + std::shared_ptr mode_; + std::vector> stack_; +}; + +C10_API bool dispatch_mode_enabled(); + +} // namespace impl +} // namespace c10 diff --git 
a/voice_bridge/torch/include/c10/core/impl/VirtualGuardImpl.h b/voice_bridge/torch/include/c10/core/impl/VirtualGuardImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..1b8da6a1a23e3e638d3072ed6476681c4646a527 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/VirtualGuardImpl.h @@ -0,0 +1,89 @@ +#pragma once + +#include + +namespace c10 { +namespace impl { + +/** + * An implementation of DeviceGuardImplInterface which delegates + * to virtual dispatch on the DeviceGuardImpl registry. + */ +class VirtualGuardImpl final : public DeviceGuardImplInterface { + public: + VirtualGuardImpl(DeviceType device_type) + : impl_(getDeviceGuardImpl(device_type)) {} + // This constructor exists purely for testing + VirtualGuardImpl(const DeviceGuardImplInterface* impl) : impl_(impl) {} + + // Copying and moving is OK! + + DeviceType type() const override { + return impl_->type(); + } + Device exchangeDevice(Device d) const override { + return impl_->exchangeDevice(d); + } + Device getDevice() const override { + return impl_->getDevice(); + } + void setDevice(Device d) const override { + impl_->setDevice(d); + } + void uncheckedSetDevice(Device d) const noexcept override { + impl_->uncheckedSetDevice(d); + } + Stream getStream(Device d) const noexcept override { + return impl_->getStream(d); + } + Stream getDefaultStream(Device d) const override { + return impl_->getDefaultStream(d); + } + Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) + const override { + return impl_->getStreamFromGlobalPool(d, isHighPriority); + } + Stream exchangeStream(Stream s) const noexcept override { + return impl_->exchangeStream(s); + } + DeviceIndex deviceCount() const noexcept override { + return impl_->deviceCount(); + } + + // Event functions + void record( + void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override { + impl_->record(event, stream, device_index, flag); + } + void block(void* event, const Stream& stream) const override { + impl_->block(event, stream); + } + bool queryEvent(void* event) const override { + return impl_->queryEvent(event); + } + void destroyEvent(void* event, const DeviceIndex device_index) + const noexcept override { + impl_->destroyEvent(event, device_index); + } + + bool queryStream(const Stream& stream) const override { + return impl_->queryStream(stream); + } + void synchronizeStream(const Stream& stream) const override { + impl_->synchronizeStream(stream); + } + + void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream) + const override { + impl_->recordDataPtrOnStream(data_ptr, stream); + } + + private: + const DeviceGuardImplInterface* impl_ = nullptr; +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/impl/alloc_cpu.h b/voice_bridge/torch/include/c10/core/impl/alloc_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..dc0f97f0f3c1f12291f4c96d090c58c4fb49301c --- /dev/null +++ b/voice_bridge/torch/include/c10/core/impl/alloc_cpu.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +#include + +namespace c10 { + +C10_API void* alloc_cpu(size_t nbytes); +C10_API void free_cpu(void* data); + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/core/thread_pool.h b/voice_bridge/torch/include/c10/core/thread_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..87cc70303c33876a9d1a29be24d002984d24b293 --- /dev/null +++ b/voice_bridge/torch/include/c10/core/thread_pool.h 
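[Editor's illustration] VirtualGuardImpl above is pure delegation: it looks up the per-backend DeviceGuardImplInterface once (via getDeviceGuardImpl) and forwards every call to it. A toy, self-contained sketch of that registration-plus-forwarding shape follows; DevType, GuardIface, VirtualGuard, and the array registry are all invented stand-ins, not the real c10 registry.

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <iostream>

enum class DevType : std::size_t { CPU = 0, CUDA = 1, COUNT = 2 };

// Per-backend interface, analogous to DeviceGuardImplInterface.
struct GuardIface {
  virtual ~GuardIface() = default;
  virtual DevType type() const = 0;
  virtual int exchangeDevice(int d) const = 0;
};

// Global registry indexed by device type, analogous to the table
// behind getDeviceGuardImpl().
static std::array<const GuardIface*,
                  static_cast<std::size_t>(DevType::COUNT)>
    g_registry{};

struct CpuGuardImpl final : GuardIface {
  DevType type() const override { return DevType::CPU; }
  int exchangeDevice(int) const override { return 0; }  // one CPU "device"
};

// The "virtual" front end: resolve the implementation once in the
// constructor, then forward every member function.
struct VirtualGuard final : GuardIface {
  explicit VirtualGuard(DevType t)
      : impl_(g_registry[static_cast<std::size_t>(t)]) {
    assert(impl_ && "backend not registered");
  }
  DevType type() const override { return impl_->type(); }
  int exchangeDevice(int d) const override {
    return impl_->exchangeDevice(d);
  }

 private:
  const GuardIface* impl_;
};

int main() {
  static CpuGuardImpl cpu_impl;
  g_registry[static_cast<std::size_t>(DevType::CPU)] = &cpu_impl;
  VirtualGuard g(DevType::CPU);  // lookup happens once, here
  std::cout << g.exchangeDevice(3) << "\n";  // forwards to CpuGuardImpl
}
```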
@@ -0,0 +1,129 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32") +#endif + +namespace c10 { + +// TODO: move this to C10 and make it C10_API +class C10_API TaskThreadPoolBase { + public: + virtual void run(std::function func) = 0; + + virtual size_t size() const = 0; + + /** + * The number of available (i.e. idle) threads in this thread pool. + */ + virtual size_t numAvailable() const = 0; + + /** + * Check if the current thread is from the thread pool. + */ + virtual bool inThreadPool() const = 0; + + virtual ~TaskThreadPoolBase() noexcept {} + + static size_t defaultNumThreads() { + auto num_threads = std::thread::hardware_concurrency(); +#if defined(_M_X64) || defined(__x86_64__) + num_threads /= 2; +#endif + return num_threads; + } +}; + +class C10_API ThreadPool : public c10::TaskThreadPoolBase { + protected: + struct task_element_t { + bool run_with_id; + const std::function no_id; + const std::function with_id; + + explicit task_element_t(std::function f) + : run_with_id(false), no_id(std::move(f)), with_id(nullptr) {} + explicit task_element_t(std::function f) + : run_with_id(true), no_id(nullptr), with_id(std::move(f)) {} + }; + + std::queue tasks_; + std::vector threads_; + mutable std::mutex mutex_; + std::condition_variable condition_; + std::condition_variable completed_; + std::atomic_bool running_; + bool complete_; + std::size_t available_; + std::size_t total_; + int numa_node_id_; + + public: + ThreadPool() = delete; + + explicit ThreadPool( + int pool_size, + int numa_node_id = -1, + std::function init_thread = nullptr); + + ~ThreadPool() override; + + size_t size() const override; + + size_t numAvailable() const override; + + bool inThreadPool() const override; + + void run(std::function func) override; + + template + void runTaskWithID(Task task) { + std::unique_lock lock(mutex_); + + // Set task and signal condition variable so that a worker thread will + // wake up and use the task. + tasks_.emplace(static_cast>(task)); + complete_ = false; + condition_.notify_one(); + } + + /// @brief Wait for queue to be empty + void waitWorkComplete(); + + private: + // @brief Entry point for pool threads. 
+ void main_loop(std::size_t index); +}; + +class C10_API TaskThreadPool : public c10::ThreadPool { + public: + explicit TaskThreadPool(std::size_t pool_size, int numa_node_id = -1) + : ThreadPool(pool_size, numa_node_id, [numa_node_id]() { + setThreadName("CaffeTaskThread"); + NUMABind(numa_node_id); + }) {} +}; + +C10_DECLARE_SHARED_REGISTRY( + ThreadPoolRegistry, + TaskThreadPoolBase, + int, + int, + bool); + +} // namespace c10 + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/voice_bridge/torch/include/c10/cuda/CUDAAlgorithm.h b/voice_bridge/torch/include/c10/cuda/CUDAAlgorithm.h new file mode 100644 index 0000000000000000000000000000000000000000..166c5264e5bdf7d53d69af0ee26c55c99a4c0cd7 --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAAlgorithm.h @@ -0,0 +1,33 @@ +#ifdef THRUST_DEVICE_LOWER_BOUND_WORKS +#include +#include +#include +#include +#endif +namespace c10 { +namespace cuda { +#ifdef THRUST_DEVICE_LOWER_BOUND_WORKS +template +__forceinline__ __device__ Iter +lower_bound(Iter start, Iter end, Scalar value) { + return thrust::lower_bound(thrust::device, start, end, value); +} +#else +// thrust::lower_bound is broken on device, see +// https://github.com/NVIDIA/thrust/issues/1734 Implementation inspired by +// https://github.com/pytorch/pytorch/blob/805120ab572efef66425c9f595d9c6c464383336/aten/src/ATen/native/cuda/Bucketization.cu#L28 +template +__device__ Iter lower_bound(Iter start, Iter end, Scalar value) { + while (start < end) { + auto mid = start + ((end - start) >> 1); + if (*mid < value) { + start = mid + 1; + } else { + end = mid; + } + } + return end; +} +#endif // THRUST_DEVICE_LOWER_BOUND_WORKS +} // namespace cuda +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/cuda/CUDACachingAllocator.h b/voice_bridge/torch/include/c10/cuda/CUDACachingAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..a8f165907d5258eac2d89bff5948276ea8361660 --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDACachingAllocator.h @@ -0,0 +1,172 @@ +#ifndef THC_DEVICE_ALLOCATOR_INC +#define THC_DEVICE_ALLOCATOR_INC +#include +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +// Caching allocator will execute every registered callback if it unable to find +// block inside of already allocated area. +class C10_CUDA_API FreeMemoryCallback { + public: + virtual ~FreeMemoryCallback() = default; + virtual bool Execute() = 0; +}; + +C10_DECLARE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback); +#define REGISTER_FREE_MEMORY_CALLBACK(name, ...) \ + C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__); + +namespace cuda { + +// TODO: Turn this into an honest to goodness class. I briefly attempted to do +// this, but it was a bit irritating to figure out how to also correctly +// apply pimpl pattern so I didn't have to leak any internal implementation +// details in the header (CUDACachingAllocator could be made a pimpl, but +// you also need to appropriately define a class which is a subclass +// of Allocator. Not impossible, but required a bit more surgery than +// I wanted to do at the time.) +// +// Why is this using a namespace rather than old-style THCCachingAllocator_ +// prefix? Mostly because it made the HIPify rules easier to write; _ is +// not counted as a word boundary, so you would otherwise have to list each +// of these functions. 
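[Editor's illustration] The hand-rolled lower_bound in CUDAAlgorithm.h above sidesteps the broken thrust::lower_bound by doing a plain binary search. Here is the same loop stripped of the CUDA qualifiers so it runs on the host, with a small check against std::lower_bound; my_lower_bound is a renamed sketch, not the library function.

```cpp
#include <algorithm>
#include <cassert>
#include <initializer_list>
#include <vector>

// First element not less than value, in a sorted [start, end) range.
template <typename Iter, typename Scalar>
Iter my_lower_bound(Iter start, Iter end, Scalar value) {
  while (start < end) {
    // Midpoint computed from the length, so it cannot overflow the
    // way (start + end) / 2 can.
    auto mid = start + ((end - start) >> 1);
    if (*mid < value) {
      start = mid + 1;  // answer is strictly to the right of mid
    } else {
      end = mid;        // mid itself may still be the answer
    }
  }
  return end;
}

int main() {
  std::vector<int> v{1, 3, 3, 5, 9};
  for (int x : {0, 3, 4, 9, 10}) {
    assert(my_lower_bound(v.begin(), v.end(), x) ==
           std::lower_bound(v.begin(), v.end(), x));
  }
}
```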
+ +namespace CUDACachingAllocator { + +struct Stat { + int64_t current = 0; + int64_t peak = 0; + int64_t allocated = 0; + int64_t freed = 0; +}; + +enum struct StatType : uint64_t { + AGGREGATE = 0, + SMALL_POOL = 1, + LARGE_POOL = 2, + NUM_TYPES = 3 // remember to update this whenever a new stat type is added +}; + +typedef std::array(StatType::NUM_TYPES)> StatArray; + +// Struct containing memory allocator summary statistics for a device. +struct DeviceStats { + // COUNT: allocations requested by client code + StatArray allocation; + // COUNT: number of allocated segments from cudaMalloc(). + StatArray segment; + // COUNT: number of active memory blocks (allocated or used by stream) + StatArray active; + // COUNT: number of inactive, split memory blocks (unallocated but can't be + // released via cudaFree) + StatArray inactive_split; + + // SUM: bytes requested by client code + StatArray allocated_bytes; + // SUM: bytes reserved by this memory allocator (both free and used) + StatArray reserved_bytes; + // SUM: bytes within active memory blocks + StatArray active_bytes; + // SUM: bytes within inactive, split memory blocks + StatArray inactive_split_bytes; + + // COUNT: total number of failed calls to CUDA malloc necessitating cache + // flushes. + int64_t num_alloc_retries = 0; + + // COUNT: total number of OOMs (i.e. failed calls to CUDA after cache flush) + int64_t num_ooms = 0; + + // COUNT: total number of oversize blocks allocated from pool + Stat oversize_allocations; + + // COUNT: total number of oversize blocks requiring malloc + Stat oversize_segments; + + // SIZE: maximum block size that is allowed to be split. + int64_t max_split_size = 0; +}; + +struct Context { + virtual ~Context() {} +}; + +typedef std::unique_ptr (*CreateContextFn)(void); + +struct History { + void* addr; + size_t real_size; // unrounded, actually requested size + std::unique_ptr context; // per-watcher context + std::unique_ptr next; // when blocks are merged we keep records of + // what used to be in the block +}; + +// Struct containing info of an allocation block (i.e. a fractional part of a +// cudaMalloc).. +struct BlockInfo { + int64_t size = 0; + int32_t gc_counter = 0; + bool allocated = false; + bool active = false; + History* history = + nullptr; // borrowed reference because it is owned by the allocator +}; + +// Struct containing info of a memory segment (i.e. one contiguous cudaMalloc). 
+struct SegmentInfo { + int64_t device = 0; + int64_t address = 0; + int64_t total_size = 0; + int64_t allocated_size = 0; + int64_t active_size = 0; + cudaStream_t stream = 0; + bool is_large = false; + std::vector blocks; +}; + +C10_CUDA_API void* raw_alloc(size_t nbytes); +C10_CUDA_API void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream); +C10_CUDA_API void raw_delete(void* ptr); + +C10_CUDA_API Allocator* get(); +C10_CUDA_API void init(int device_count); +C10_CUDA_API void setMemoryFraction(double fraction, int device); +C10_CUDA_API void setAllocatorSettings(const std::string& env); +C10_CUDA_API void emptyCache(); +C10_CUDA_API void cacheInfo( + int dev_id, + size_t* cachedAndFree, + size_t* largestBlock); +C10_CUDA_API void* getBaseAllocation(void* ptr, size_t* size); +C10_CUDA_API void recordStream(const DataPtr&, CUDAStream stream); +C10_CUDA_API DeviceStats getDeviceStats(int device); +C10_CUDA_API void resetAccumulatedStats(int device); +C10_CUDA_API void resetPeakStats(int device); +C10_CUDA_API std::vector snapshot(); + +// CUDAGraph interactions +C10_CUDA_API void notifyCaptureBegin( + int device, + CaptureId_t graph_id, + MempoolId_t mempool_id); +C10_CUDA_API void notifyCaptureEnd(int device, CaptureId_t graph_id); +C10_CUDA_API void notifyCaptureDestroy(int device, MempoolId_t mempool_id); + +C10_CUDA_API std::mutex* getFreeMutex(); + +C10_CUDA_API void setContextRecorder(CreateContextFn recorder); + +C10_CUDA_API std::shared_ptr getIpcDevPtr(std::string handle); +} // namespace CUDACachingAllocator + +} // namespace cuda +} // namespace c10 + +#endif diff --git a/voice_bridge/torch/include/c10/cuda/CUDAException.h b/voice_bridge/torch/include/c10/cuda/CUDAException.h new file mode 100644 index 0000000000000000000000000000000000000000..cfc7424503a96ba177aee9e3a468b38f7cf4f8da --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAException.h @@ -0,0 +1,85 @@ +#pragma once + +#include +#include +#include +#include +#include + +// Note [CHECK macro] +// ~~~~~~~~~~~~~~~~~~ +// This is a macro so that AT_ERROR can get accurate __LINE__ +// and __FILE__ information. We could split this into a short +// macro and a function implementation if we pass along __LINE__ +// and __FILE__, but no one has found this worth doing. + +// Used to denote errors from CUDA framework. +// This needs to be declared here instead util/Exception.h for proper conversion +// during hipify. 
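[Editor's illustration] Note [CHECK macro] above explains why C10_CUDA_CHECK has to be a macro: __FILE__ and __LINE__ must expand at the call site, not inside a helper function. A minimal stand-alone sketch of that pattern follows; status_t, kSuccess, pretend_cuda_call, and MY_CHECK are stand-ins, not the real cudaError_t machinery.

```cpp
#include <cstdio>
#include <cstdlib>

using status_t = int;  // stand-in for cudaError_t
constexpr status_t kSuccess = 0;

// do { } while (0) makes the expansion behave as a single statement,
// so the macro is safe inside an unbraced if/else. __FILE__ and
// __LINE__ expand where MY_CHECK is written, which is the entire
// reason this cannot be a plain function.
#define MY_CHECK(EXPR)                                            \
  do {                                                            \
    const status_t err_ = (EXPR);                                 \
    if (err_ != kSuccess) {                                       \
      std::fprintf(stderr, "check failed at %s:%d (code %d)\n",   \
                   __FILE__, __LINE__, err_);                     \
      std::exit(EXIT_FAILURE);                                    \
    }                                                             \
  } while (0)

status_t pretend_cuda_call(bool ok) { return ok ? kSuccess : 35; }

int main() {
  MY_CHECK(pretend_cuda_call(true));   // passes silently
  MY_CHECK(pretend_cuda_call(false));  // reports this exact file and line
}
```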
+namespace c10 { +class C10_CUDA_API CUDAError : public c10::Error { + using Error::Error; +}; +} // namespace c10 + +#define C10_CUDA_CHECK(EXPR) \ + do { \ + const cudaError_t __err = EXPR; \ + if (C10_UNLIKELY(__err != cudaSuccess)) { \ + c10::cuda::c10_cuda_check_implementation( \ + __FILE__, \ + __func__, /* Line number's data type is not well-defined between \ + compilers, so we perform an explicit cast */ \ + static_cast(__LINE__), \ + true); \ + } \ + } while (0) + +#define C10_CUDA_CHECK_WARN(EXPR) \ + do { \ + const cudaError_t __err = EXPR; \ + if (C10_UNLIKELY(__err != cudaSuccess)) { \ + auto error_unused C10_UNUSED = cudaGetLastError(); \ + (void)error_unused; \ + TORCH_WARN("CUDA warning: ", cudaGetErrorString(__err)); \ + } \ + } while (0) + +// Indicates that a CUDA error is handled in a non-standard way +#define C10_CUDA_ERROR_HANDLED(EXPR) EXPR + +// Intentionally ignore a CUDA error +#define C10_CUDA_IGNORE_ERROR(EXPR) \ + do { \ + const cudaError_t __err = EXPR; \ + if (C10_UNLIKELY(__err != cudaSuccess)) { \ + cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \ + (void)error_unused; \ + } \ + } while (0) + +// Clear the last CUDA error +#define C10_CUDA_CLEAR_ERROR() \ + do { \ + cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \ + (void)error_unused; \ + } while (0) + +// This should be used directly after every kernel launch to ensure +// the launch happened correctly and provide an early, close-to-source +// diagnostic if it didn't. +#define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError()) + +namespace c10 { +namespace cuda { + +/// In the event of a CUDA failure, formats a nice error message about that +/// failure and also checks for device-side assertion failures +C10_CUDA_API void c10_cuda_check_implementation( + const std::string& filename, + const std::string& function_name, + const int line_number, + const bool include_device_assertions); + +} // namespace cuda +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/cuda/CUDAFunctions.h b/voice_bridge/torch/include/c10/cuda/CUDAFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..32b0ae62506de41c1cd57e9cc4d53a874f0ed8d0 --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAFunctions.h @@ -0,0 +1,100 @@ +#pragma once + +// This header provides C++ wrappers around commonly used CUDA API functions. +// The benefit of using C++ here is that we can raise an exception in the +// event of an error, rather than explicitly pass around error codes. This +// leads to more natural APIs. +// +// The naming convention used here matches the naming convention of torch.cuda + +#include +#include +#include +#include +#include +namespace c10 { +namespace cuda { + +// NB: In the past, we were inconsistent about whether or not this reported +// an error if there were driver problems are not. Based on experience +// interacting with users, it seems that people basically ~never want this +// function to fail; it should just return zero if things are not working. +// Oblige them. 
+// It still might log a warning for the user the first time it is invoked +C10_CUDA_API DeviceIndex device_count() noexcept; + +// Version of device_count that throws if no devices are detected +C10_CUDA_API DeviceIndex device_count_ensure_non_zero(); + +C10_CUDA_API DeviceIndex current_device(); + +C10_CUDA_API void set_device(DeviceIndex device); + +C10_CUDA_API void device_synchronize(); + +C10_CUDA_API void warn_or_error_on_sync(); + +enum class SyncDebugMode { L_DISABLED = 0, L_WARN, L_ERROR }; + +// this is a holder for c10 global state (similar to at GlobalContext) +// currently it's used to store cuda synchronization warning state, +// but can be expanded to hold other related global state, e.g. to +// record stream usage +class WarningState { + public: + void set_sync_debug_mode(SyncDebugMode l) { + sync_debug_mode = l; + } + + SyncDebugMode get_sync_debug_mode() { + return sync_debug_mode; + } + + private: + SyncDebugMode sync_debug_mode = SyncDebugMode::L_DISABLED; +}; + +C10_CUDA_API __inline__ WarningState& warning_state() { + static WarningState warning_state_; + return warning_state_; +} +// the subsequent functions are defined in the header because for performance +// reasons we want them to be inline +C10_CUDA_API void __inline__ memcpy_and_sync( + void* dst, + void* src, + int64_t nbytes, + cudaMemcpyKind kind, + cudaStream_t stream) { + if (C10_UNLIKELY( + warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) { + warn_or_error_on_sync(); + } + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_stream_synchronization( + reinterpret_cast<uintptr_t>(stream)); + } +#if defined(TORCH_HIP_VERSION) && (TORCH_HIP_VERSION >= 301) + C10_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream)); +#else + C10_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream)); + C10_CUDA_CHECK(cudaStreamSynchronize(stream)); +#endif +} + +C10_CUDA_API void __inline__ stream_synchronize(cudaStream_t stream) { + if (C10_UNLIKELY( + warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) { + warn_or_error_on_sync(); + } + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_stream_synchronization( + reinterpret_cast<uintptr_t>(stream)); + } + C10_CUDA_CHECK(cudaStreamSynchronize(stream)); +} + +} // namespace cuda +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/cuda/CUDAGraphsC10Utils.h b/voice_bridge/torch/include/c10/cuda/CUDAGraphsC10Utils.h new file mode 100644 index 0000000000000000000000000000000000000000..ba8031d3e611026d5cd59f35d59b841a8e593c4c --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAGraphsC10Utils.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include + +// CUDA Graphs utils used by c10 and aten. +// aten/cuda/CUDAGraphsUtils.cuh adds utils used by aten only. +namespace c10 { +namespace cuda { + +using CaptureId_t = unsigned long long; + +// first is set if the instance is created by CUDAGraph::capture_begin. +// second is set if the instance is created by at::cuda::graph_pool_handle. +using MempoolId_t = std::pair<CaptureId_t, CaptureId_t>; + +// RAII guard for "cudaStreamCaptureMode", a thread-local value +// that controls the error-checking strictness of a capture.
+#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +struct C10_CUDA_API CUDAStreamCaptureModeGuard { + CUDAStreamCaptureModeGuard(cudaStreamCaptureMode desired) { + strictness_ = desired; + C10_CUDA_CHECK(cudaThreadExchangeStreamCaptureMode(&strictness_)); + } + ~CUDAStreamCaptureModeGuard() { + C10_CUDA_CHECK_WARN(cudaThreadExchangeStreamCaptureMode(&strictness_)); + } + + private: + cudaStreamCaptureMode strictness_; +}; +#endif + +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +// Protects against enum cudaStreamCaptureStatus implementation changes. +// Some compilers seem not to like static_assert without the messages. +static_assert( + int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone) == 0, + "unexpected int(cudaStreamCaptureStatusNone) value"); +static_assert( + int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive) == 1, + "unexpected int(cudaStreamCaptureStatusActive) value"); +static_assert( + int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated) == 2, + "unexpected int(cudaStreamCaptureStatusInvalidated) value"); +#endif + +enum class CaptureStatus : int { +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + None = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone), + Active = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive), + Invalidated = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated) +#else + None = 0 +#endif +}; + +inline std::ostream& operator<<(std::ostream& os, CaptureStatus status) { + switch (status) { + case CaptureStatus::None: + os << "cudaStreamCaptureStatusNone"; + break; +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + case CaptureStatus::Active: + os << "cudaStreamCaptureStatusActive"; + break; + case CaptureStatus::Invalidated: + os << "cudaStreamCaptureStatusInvalidated"; + break; +#endif + default: + TORCH_INTERNAL_ASSERT( + false, "Unknown CUDA graph CaptureStatus", int(status)); + } + return os; +} + +// Use this version where you're sure a CUDA context exists already. +inline CaptureStatus currentStreamCaptureStatusMayInitCtx() { +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + cudaStreamCaptureStatus is_capturing; + C10_CUDA_CHECK( + cudaStreamIsCapturing(c10::cuda::getCurrentCUDAStream(), &is_capturing)); + return CaptureStatus(is_capturing); +#else + return CaptureStatus::None; +#endif +} + +} // namespace cuda +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/cuda/CUDAGuard.h b/voice_bridge/torch/include/c10/cuda/CUDAGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..905dcf9c6ff21e050ef39bf2e651ed18de420e00 --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAGuard.h @@ -0,0 +1,305 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace c10 { +namespace cuda { + +// This code is kind of boilerplatey. See Note [Whither the DeviceGuard +// boilerplate] + +/// A variant of DeviceGuard that is specialized for CUDA. It accepts +/// integer indices (interpreting them as CUDA devices) and is a little +/// more efficient than DeviceGuard (it compiles to straight line +/// cudaSetDevice/cudaGetDevice calls); however, it can only be used +/// from code that links against CUDA directly. +struct CUDAGuard { + /// No default constructor; see Note [Omitted default constructor from RAII] + explicit CUDAGuard() = delete; + + /// Set the current CUDA device to the passed device index. 
+ explicit CUDAGuard(DeviceIndex device_index) : guard_(device_index) {} + + /// Sets the current CUDA device to the passed device. Errors if the passed + /// device is not a CUDA device. + explicit CUDAGuard(Device device) : guard_(device) {} + + // Copy is not allowed + CUDAGuard(const CUDAGuard&) = delete; + CUDAGuard& operator=(const CUDAGuard&) = delete; + + // Move is not allowed (there is no uninitialized state) + CUDAGuard(CUDAGuard&& other) = delete; + CUDAGuard& operator=(CUDAGuard&& other) = delete; + + /// Sets the CUDA device to the given device. Errors if the given device + /// is not a CUDA device. + void set_device(Device device) { + guard_.set_device(device); + } + + /// Sets the CUDA device to the given device. Errors if the given device + /// is not a CUDA device. (This method is provided for uniformity with + /// DeviceGuard). + void reset_device(Device device) { + guard_.reset_device(device); + } + + /// Sets the CUDA device to the given device index. + void set_index(DeviceIndex device_index) { + guard_.set_index(device_index); + } + + /// Returns the device that was set upon construction of the guard + Device original_device() const { + return guard_.original_device(); + } + + /// Returns the last device that was set via `set_device`, if any, otherwise + /// the device passed during construction. + Device current_device() const { + return guard_.current_device(); + } + + private: + /// The guard for the current device. + c10::impl::InlineDeviceGuard guard_; +}; + +/// A variant of OptionalDeviceGuard that is specialized for CUDA. See +/// CUDAGuard for when you can use this. +struct OptionalCUDAGuard { + /// Create an uninitialized OptionalCUDAGuard. + explicit OptionalCUDAGuard() : guard_() {} + + /// Set the current CUDA device to the passed Device, if it is not nullopt. + explicit OptionalCUDAGuard(optional device_opt) + : guard_(device_opt) {} + + /// Set the current CUDA device to the passed device index, if it is not + /// nullopt + explicit OptionalCUDAGuard(optional device_index_opt) + : guard_(device_index_opt) {} + + // Copy is not allowed + OptionalCUDAGuard(const OptionalCUDAGuard&) = delete; + OptionalCUDAGuard& operator=(const OptionalCUDAGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + OptionalCUDAGuard(OptionalCUDAGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + OptionalCUDAGuard& operator=(OptionalCUDAGuard&& other) = delete; + + /// Sets the CUDA device to the given device, initializing the guard if it + /// is not already initialized. Errors if the given device is not a CUDA + /// device. + void set_device(Device device) { + guard_.set_device(device); + } + + /// Sets the CUDA device to the given device, initializing the guard if it is + /// not already initialized. Errors if the given device is not a CUDA device. + /// (This method is provided for uniformity with OptionalDeviceGuard). + void reset_device(Device device) { + guard_.reset_device(device); + } + + /// Sets the CUDA device to the given device index, initializing the guard if + /// it is not already initialized. + void set_index(DeviceIndex device_index) { + guard_.set_index(device_index); + } + + /// Returns the device that was set immediately prior to initialization of the + /// guard, or nullopt if the guard is uninitialized. 
+ optional original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device, if the guard is initialized, + /// or nullopt if the guard is uninitialized. + optional current_device() const { + return guard_.current_device(); + } + + /// Restore the original CUDA device, resetting this guard to uninitialized + /// state. + void reset() { + guard_.reset(); + } + + private: + c10::impl::InlineOptionalDeviceGuard guard_; +}; + +/// A variant of StreamGuard that is specialized for CUDA. See CUDAGuard +/// for when you can use this. +struct CUDAStreamGuard { + /// No default constructor, see Note [Omitted default constructor from RAII] + explicit CUDAStreamGuard() = delete; + + /// Set the current CUDA device to the device associated with the passed + /// stream, and set the current CUDA stream on that device to the passed + /// stream. Errors if the Stream is not a CUDA stream. + explicit CUDAStreamGuard(Stream stream) : guard_(stream) {} + + /// Copy is disallowed + CUDAStreamGuard(const CUDAStreamGuard&) = delete; + CUDAStreamGuard& operator=(const CUDAStreamGuard&) = delete; + + /// Move is disallowed, as CUDAStreamGuard does not have an uninitialized + /// state, which is required for moves on types with nontrivial destructors. + CUDAStreamGuard(CUDAStreamGuard&& other) = delete; + CUDAStreamGuard& operator=(CUDAStreamGuard&& other) = delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// Errors if the stream passed is not a CUDA stream. + /// + /// NOTE: this implementation may skip some stream/device setting if + /// it can prove that it is unnecessary. + /// + /// WARNING: reset_stream does NOT preserve previously set streams on + /// different devices. If you need to set streams on multiple devices + /// on CUDA, use CUDAMultiStreamGuard instead. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the CUDA stream that was set at the time the guard was + /// constructed. + CUDAStream original_stream() const { + return CUDAStream(CUDAStream::UNCHECKED, guard_.original_stream()); + } + + /// Returns the most recent CUDA stream that was set using this device guard, + /// either from construction, or via set_stream. + CUDAStream current_stream() const { + return CUDAStream(CUDAStream::UNCHECKED, guard_.current_stream()); + } + + /// Returns the most recent CUDA device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return guard_.current_device(); + } + + /// Returns the CUDA device that was set at the most recent reset_stream(), + /// or otherwise the device at construction time. + Device original_device() const { + return guard_.original_device(); + } + + private: + c10::impl::InlineStreamGuard guard_; +}; + +/// A variant of OptionalStreamGuard that is specialized for CUDA. See +/// CUDAGuard for when you can use this. +struct OptionalCUDAStreamGuard { + /// Create an uninitialized guard. + explicit OptionalCUDAStreamGuard() : guard_() {} + + /// Set the current CUDA device to the device associated with the passed + /// stream, and set the current CUDA stream on that device to the passed + /// stream. 
Errors if the Stream is not a CUDA stream. + explicit OptionalCUDAStreamGuard(Stream stream) : guard_(stream) {} + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream, + /// if the passed stream is not nullopt. + explicit OptionalCUDAStreamGuard(optional stream_opt) + : guard_(stream_opt) {} + + /// Copy is disallowed + OptionalCUDAStreamGuard(const OptionalCUDAStreamGuard&) = delete; + OptionalCUDAStreamGuard& operator=(const OptionalCUDAStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + OptionalCUDAStreamGuard(OptionalCUDAStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + OptionalCUDAStreamGuard& operator=(OptionalCUDAStreamGuard&& other) = delete; + + /// Resets the currently set CUDA stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// Initializes the guard if it was not previously initialized. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the CUDA stream that was set at the time the guard was most + /// recently initialized, or nullopt if the guard is uninitialized. + optional original_stream() const { + auto r = guard_.original_stream(); + if (r.has_value()) { + return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value())); + } else { + return nullopt; + } + } + + /// Returns the most recent CUDA stream that was set using this stream guard, + /// either from construction, or via reset_stream, if the guard is + /// initialized, or nullopt if the guard is uninitialized. + optional current_stream() const { + auto r = guard_.current_stream(); + if (r.has_value()) { + return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value())); + } else { + return nullopt; + } + } + + /// Restore the original CUDA device and stream, resetting this guard to + /// uninitialized state. + void reset() { + guard_.reset(); + } + + private: + c10::impl::InlineOptionalStreamGuard guard_; +}; + +/// A variant of MultiStreamGuard that is specialized for CUDA. 
+struct CUDAMultiStreamGuard { + explicit CUDAMultiStreamGuard(ArrayRef streams) + : guard_(unwrapStreams(streams)) {} + + /// Copy is disallowed + CUDAMultiStreamGuard(const CUDAMultiStreamGuard&) = delete; + CUDAMultiStreamGuard& operator=(const CUDAMultiStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + CUDAMultiStreamGuard(CUDAMultiStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + CUDAMultiStreamGuard& operator=(CUDAMultiStreamGuard&& other) = delete; + + private: + c10::impl::InlineMultiStreamGuard guard_; + + static std::vector unwrapStreams(ArrayRef cudaStreams) { + std::vector streams; + streams.reserve(cudaStreams.size()); + for (const CUDAStream& cudaStream : cudaStreams) { + streams.push_back(cudaStream); + } + return streams; + } +}; + +} // namespace cuda +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/cuda/CUDAMacros.h b/voice_bridge/torch/include/c10/cuda/CUDAMacros.h new file mode 100644 index 0000000000000000000000000000000000000000..35499e36db3a0da6b376545ad8635a0c7425781e --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAMacros.h @@ -0,0 +1,44 @@ +#pragma once + +#ifndef C10_USING_CUSTOM_GENERATED_MACROS + +// We have not yet modified the AMD HIP build to generate this file so +// we add an extra option to specifically ignore it. +#ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE +#include +#endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE + +#endif + +// See c10/macros/Export.h for a detailed explanation of what the function +// of these macros are. We need one set of macros for every separate library +// we build. + +#ifdef _WIN32 +#if defined(C10_CUDA_BUILD_SHARED_LIBS) +#define C10_CUDA_EXPORT __declspec(dllexport) +#define C10_CUDA_IMPORT __declspec(dllimport) +#else +#define C10_CUDA_EXPORT +#define C10_CUDA_IMPORT +#endif +#else // _WIN32 +#if defined(__GNUC__) +#define C10_CUDA_EXPORT __attribute__((__visibility__("default"))) +#else // defined(__GNUC__) +#define C10_CUDA_EXPORT +#endif // defined(__GNUC__) +#define C10_CUDA_IMPORT C10_CUDA_EXPORT +#endif // _WIN32 + +// This one is being used by libc10_cuda.so +#ifdef C10_CUDA_BUILD_MAIN_LIB +#define C10_CUDA_API C10_CUDA_EXPORT +#else +#define C10_CUDA_API C10_CUDA_IMPORT +#endif + +/** + * The maximum number of GPUs that we recognizes. + */ +#define C10_COMPILE_TIME_MAX_GPUS 16 diff --git a/voice_bridge/torch/include/c10/cuda/CUDAMathCompat.h b/voice_bridge/torch/include/c10/cuda/CUDAMathCompat.h new file mode 100644 index 0000000000000000000000000000000000000000..ebd02ac2cf19dd10968af36bed12c1645a0448eb --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAMathCompat.h @@ -0,0 +1,156 @@ +#pragma once + +/* This file defines math functions compatible across different gpu + * platforms (currently CUDA and HIP). 
+ */ +#if defined(__CUDACC__) || defined(__HIPCC__) + +#include +#include + +#ifdef __HIPCC__ +#define __MATH_FUNCTIONS_DECL__ inline C10_DEVICE +#else /* __HIPCC__ */ +#ifdef __CUDACC_RTC__ +#define __MATH_FUNCTIONS_DECL__ C10_HOST_DEVICE +#else /* __CUDACC_RTC__ */ +#define __MATH_FUNCTIONS_DECL__ static inline C10_HOST_DEVICE +#endif /* __CUDACC_RTC__ */ +#endif /* __HIPCC__ */ + +namespace c10 { +namespace cuda { +namespace compat { + +__MATH_FUNCTIONS_DECL__ float abs(float x) { + return ::fabsf(x); +} +__MATH_FUNCTIONS_DECL__ double abs(double x) { + return ::fabs(x); +} + +__MATH_FUNCTIONS_DECL__ float exp(float x) { + return ::expf(x); +} +__MATH_FUNCTIONS_DECL__ double exp(double x) { + return ::exp(x); +} + +__MATH_FUNCTIONS_DECL__ float ceil(float x) { + return ::ceilf(x); +} +__MATH_FUNCTIONS_DECL__ double ceil(double x) { + return ::ceil(x); +} + +__MATH_FUNCTIONS_DECL__ float copysign(float x, float y) { +#if defined(__CUDA_ARCH__) || defined(__HIPCC__) + return ::copysignf(x, y); +#else + // std::copysign gets ICE/Segfaults with gcc 7.5/8 on arm64 + // (e.g. Jetson), see PyTorch PR #51834 + // This host function needs to be here for the compiler but is never used + TORCH_INTERNAL_ASSERT( + false, "CUDAMathCompat copysign should not run on the CPU"); +#endif +} +__MATH_FUNCTIONS_DECL__ double copysign(double x, double y) { +#if defined(__CUDA_ARCH__) || defined(__HIPCC__) + return ::copysign(x, y); +#else + // see above + TORCH_INTERNAL_ASSERT( + false, "CUDAMathCompat copysign should not run on the CPU"); +#endif +} + +__MATH_FUNCTIONS_DECL__ float floor(float x) { + return ::floorf(x); +} +__MATH_FUNCTIONS_DECL__ double floor(double x) { + return ::floor(x); +} + +__MATH_FUNCTIONS_DECL__ float log(float x) { + return ::logf(x); +} +__MATH_FUNCTIONS_DECL__ double log(double x) { + return ::log(x); +} + +__MATH_FUNCTIONS_DECL__ float log1p(float x) { + return ::log1pf(x); +} + +__MATH_FUNCTIONS_DECL__ double log1p(double x) { + return ::log1p(x); +} + +__MATH_FUNCTIONS_DECL__ float max(float x, float y) { + return ::fmaxf(x, y); +} +__MATH_FUNCTIONS_DECL__ double max(double x, double y) { + return ::fmax(x, y); +} + +__MATH_FUNCTIONS_DECL__ float min(float x, float y) { + return ::fminf(x, y); +} +__MATH_FUNCTIONS_DECL__ double min(double x, double y) { + return ::fmin(x, y); +} + +__MATH_FUNCTIONS_DECL__ float pow(float x, float y) { + return ::powf(x, y); +} +__MATH_FUNCTIONS_DECL__ double pow(double x, double y) { + return ::pow(x, y); +} + +__MATH_FUNCTIONS_DECL__ void sincos(float x, float* sptr, float* cptr) { + return ::sincosf(x, sptr, cptr); +} +__MATH_FUNCTIONS_DECL__ void sincos(double x, double* sptr, double* cptr) { + return ::sincos(x, sptr, cptr); +} + +__MATH_FUNCTIONS_DECL__ float sqrt(float x) { + return ::sqrtf(x); +} +__MATH_FUNCTIONS_DECL__ double sqrt(double x) { + return ::sqrt(x); +} + +__MATH_FUNCTIONS_DECL__ float rsqrt(float x) { + return ::rsqrtf(x); +} +__MATH_FUNCTIONS_DECL__ double rsqrt(double x) { + return ::rsqrt(x); +} + +__MATH_FUNCTIONS_DECL__ float tan(float x) { + return ::tanf(x); +} +__MATH_FUNCTIONS_DECL__ double tan(double x) { + return ::tan(x); +} + +__MATH_FUNCTIONS_DECL__ float tanh(float x) { + return ::tanhf(x); +} +__MATH_FUNCTIONS_DECL__ double tanh(double x) { + return ::tanh(x); +} + +__MATH_FUNCTIONS_DECL__ float normcdf(float x) { + return ::normcdff(x); +} +__MATH_FUNCTIONS_DECL__ double normcdf(double x) { + return ::normcdf(x); +} + +} // namespace compat +} // namespace cuda +} // namespace c10 + +#endif diff --git 
a/voice_bridge/torch/include/c10/cuda/CUDAMiscFunctions.h b/voice_bridge/torch/include/c10/cuda/CUDAMiscFunctions.h new file mode 100644 index 0000000000000000000000000000000000000000..eca8fd042f61560c21994680c12d5aadcbd09094 --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAMiscFunctions.h @@ -0,0 +1,11 @@ +#pragma once +// this file is to avoid circular dependency between CUDAFunctions.h and +// CUDAExceptions.h + +#include + +namespace c10 { +namespace cuda { +C10_CUDA_API const char* get_cuda_check_suffix() noexcept; +} +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/cuda/CUDAStream.h b/voice_bridge/torch/include/c10/cuda/CUDAStream.h new file mode 100644 index 0000000000000000000000000000000000000000..6d17136341c6ec953532c6d1118dd737373419ad --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/CUDAStream.h @@ -0,0 +1,251 @@ +#pragma once + +#include +#include + +#include + +#include +#include +#include +#include + +/* + * Stream pool note. + * + * A CUDAStream is an abstraction of an actual cuStream on the GPU. CUDAStreams + * are backed by cuStreams, but they use several pools to minimize the costs + * associated with creating, retaining, and destroying cuStreams. + * + * There are three pools per device, and a device's pools are lazily created. + * + * The first pool contains only the default stream. When the default stream + * is requested it's returned. + * + * The second pool is the "low priority" or "default priority" streams. In + * HIP builds there is no distinction between streams in this pool and streams + * in the third pool (below). There are 32 of these streams per device, and + * when a stream is requested one of these streams is returned round-robin. + * That is, the first stream requested is at index 0, the second at index 1... + * to index 31, then index 0 again. + * + * This means that if 33 low priority streams are requested, the first and + * last streams requested are actually the same stream (under the covers) + * and kernels enqueued on them cannot run concurrently. + * + * The third pool is the "high priority" streams. The third pool acts like + * the second pool except the streams are created with a higher priority. + * + * These pools suggest that stream users should prefer many short-lived streams, + * as the cost of acquiring and releasing streams is effectively zero. If + * many longer-lived streams are required in performance critical scenarios + * then the functionality here may need to be extended to allow, for example, + * "reserving" a subset of the pool so that other streams do not accidentally + * overlap the performance critical streams. + * + * Note: although the notion of "current stream for device" is thread local + * (every OS thread has a separate current stream, as one might expect), + * the stream pool is global across all threads; stream 0 is always stream 0 + * no matter which thread you use it on. Multiple threads can synchronize + * on the same stream. Although the CUDA documentation is not very clear + * on the matter, streams are thread safe; e.g., it is safe to enqueue + * a kernel on the same stream from two different threads. + */ + +namespace c10 { +namespace cuda { + +// Value object representing a CUDA stream. This is just a wrapper +// around c10::Stream, but it comes with a little extra CUDA-specific +// functionality (conversion to cudaStream_t), and a guarantee that +// the wrapped c10::Stream really is a CUDA stream. 
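[Editor's illustration] The stream pool note above implies that acquiring a stream is essentially free (streams are preallocated and handed out round-robin) and that guards are the intended way to make one current. Here is a hedged usage sketch of the public API declared in this header; it assumes a CUDA-enabled build of c10/libtorch with at least one visible GPU, and is an illustration rather than part of the header.

```cpp
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAStream.h>

#include <iostream>

int main() {
  // No cuStream is created here: both calls hand back preallocated
  // streams from the per-device pools, round-robin.
  c10::cuda::CUDAStream a = c10::cuda::getStreamFromPool();
  c10::cuda::CUDAStream b =
      c10::cuda::getStreamFromPool(/*isHighPriority=*/true);

  {
    // Sets the current device to b's device AND makes b the current
    // stream on that device, restoring both on scope exit.
    c10::cuda::CUDAStreamGuard guard(b);
    std::cout << "current stream id: "
              << c10::cuda::getCurrentCUDAStream().id() << "\n";
  }  // previous device and stream restored here

  a.synchronize();  // block the host until work queued on `a` finishes
}
```

Note the caveat from the pool description: if more than 32 low-priority streams are requested per device, later handles alias earlier cuStreams, so work queued on them will not run concurrently.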
+class C10_CUDA_API CUDAStream { + public: + enum Unchecked { UNCHECKED }; + + /// Construct a CUDAStream from a Stream. This construction is checked, + /// and will raise an error if the Stream is not, in fact, a CUDA stream. + explicit CUDAStream(Stream stream) : stream_(stream) { + TORCH_CHECK(stream_.device_type() == DeviceType::CUDA); + } + + /// Construct a CUDAStream from a Stream with no error checking. + /// This constructor uses the "named" constructor idiom, and can + /// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream) + explicit CUDAStream(Unchecked, Stream stream) : stream_(stream) {} + + bool operator==(const CUDAStream& other) const noexcept { + return unwrap() == other.unwrap(); + } + + bool operator!=(const CUDAStream& other) const noexcept { + return unwrap() != other.unwrap(); + } + + /// Implicit conversion to cudaStream_t. + operator cudaStream_t() const { + return stream(); + } + + /// Implicit conversion to Stream (a.k.a., forget that the stream is a + /// CUDA stream). + operator Stream() const { + return unwrap(); + } + + /// Get the CUDA device index that this stream is associated with. + DeviceIndex device_index() const { + return stream_.device_index(); + } + + /// Get the full Device that this stream is associated with. The Device + /// is guaranteed to be a CUDA device. + Device device() const { + return Device(DeviceType::CUDA, device_index()); + } + + /// Return the stream ID corresponding to this particular stream. + StreamId id() const { + return stream_.id(); + } + + bool query() const { + DeviceGuard guard{stream_.device()}; + cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaStreamQuery(stream())); + + if (err == cudaSuccess) { + return true; + } else if (err != cudaErrorNotReady) { + C10_CUDA_CHECK(err); + } else { + // ignore and clear the error if not ready + (void)cudaGetLastError(); + } + + return false; + } + + void synchronize() const { + DeviceGuard guard{stream_.device()}; + c10::cuda::stream_synchronize(stream()); + } + + int priority() const { + DeviceGuard guard{stream_.device()}; + int priority = 0; + C10_CUDA_CHECK(cudaStreamGetPriority(stream(), &priority)); + return priority; + } + + /// Explicit conversion to cudaStream_t. + cudaStream_t stream() const; + + /// Explicit conversion to Stream. + Stream unwrap() const { + return stream_; + } + + /// Reversibly pack a CUDAStream into a uint64_t representation. This may + /// be helpful when storing a CUDAStream in a C struct, where you cannot + /// conveniently place the CUDAStream object itself (which is morally + /// equivalent, but unfortunately is not POD due to the fact that it + /// has constructors.) + /// + /// The CUDAStream can be unpacked using unpack(). The format of + /// the uint64_t is unspecified and may be changed. + uint64_t pack() const noexcept { + return stream_.pack(); + } + + // Unpack a CUDAStream from the uint64_t representation generated by pack(). + static CUDAStream unpack(uint64_t bits) { + return CUDAStream(Stream::unpack(bits)); + } + + static std::tuple priority_range() { + // Note: this returns the range of priority **supported by PyTorch**, not + // the range of priority **supported by CUDA**. The former is a subset of + // the latter. Currently PyTorch only supports 0 and -1, which are "low" and + // "high" priority. 
+ int least_priority, greatest_priority; + C10_CUDA_CHECK( + cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority)); + TORCH_INTERNAL_ASSERT( + least_priority >= 0, "Unexpected CUDA stream priority range"); + TORCH_INTERNAL_ASSERT( + greatest_priority <= -1, "Unexpected CUDA stream priority range"); + return std::make_tuple(0, -1); + } + + // Deleted for now; use CUDAEvent::block instead + // void synchronize_with(const CUDAEvent& event) const; + + private: + Stream stream_; +}; + +/** + * Get a new stream from the CUDA stream pool. You can think of this + * as "creating" a new stream, but no such creation actually happens; + * instead, streams are preallocated from the pool and returned in a + * round-robin fashion. + * + * You can request a stream from the high priority pool by setting + * isHighPriority to true, or a stream for a specific device by setting device + * (defaulting to the current CUDA stream.) + */ +TORCH_API CUDAStream +getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1); + +/** + * Get a CUDAStream from a externally allocated one. + * + * This is mainly for interoperability with different libraries where we + * want to operate on a non-torch allocated stream for data exchange or similar + * purposes + */ +TORCH_API CUDAStream +getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index); + +/** + * Get the default CUDA stream, for the passed CUDA device, or for the + * current device if no device index is passed. The default stream is + * where most computation occurs when you aren't explicitly using + * streams. + */ +TORCH_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1); + +/** + * Get the current CUDA stream, for the passed CUDA device, or for the + * current device if no device index is passed. The current CUDA stream + * will usually be the default CUDA stream for the device, but it may + * be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard' + * or 'CUDAStreamGuard'. + */ +TORCH_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1); + +/** + * Set the current stream on the device of the passed in stream to be + * the passed in stream. Yes, you read that right: this function + * has *nothing* to do with the current device: it toggles the current + * stream of the device of the passed stream. + * + * Confused? Avoid using this function; prefer using 'CUDAStreamGuard' instead + * (which will switch both your current device and current stream in the way you + * expect, and reset it back to its original state afterwards). 
+ */ +TORCH_API void setCurrentCUDAStream(CUDAStream stream); + +C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s); + +} // namespace cuda +} // namespace c10 + +namespace std { +template <> +struct hash { + size_t operator()(c10::cuda::CUDAStream s) const noexcept { + return std::hash{}(s.unwrap()); + } +}; +} // namespace std diff --git a/voice_bridge/torch/include/c10/cuda/impl/CUDAGuardImpl.h b/voice_bridge/torch/include/c10/cuda/impl/CUDAGuardImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..c2365e449a406eca099d966941e5531bf6cc7894 --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/impl/CUDAGuardImpl.h @@ -0,0 +1,220 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace c10 { +namespace cuda { +namespace impl { + +struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface { + static constexpr DeviceType static_type = DeviceType::CUDA; + + CUDAGuardImpl() {} + explicit CUDAGuardImpl(DeviceType t) { + TORCH_INTERNAL_ASSERT(t == DeviceType::CUDA); + } + DeviceType type() const override { + return DeviceType::CUDA; + } + Device exchangeDevice(Device d) const override { + TORCH_INTERNAL_ASSERT(d.is_cuda()); + Device old_device = getDevice(); + if (old_device.index() != d.index()) { + C10_CUDA_CHECK(cudaSetDevice(d.index())); + } + return old_device; + } + Device getDevice() const override { + int device; + C10_CUDA_CHECK(cudaGetDevice(&device)); + return Device(DeviceType::CUDA, device); + } + c10::optional uncheckedGetDevice() const noexcept { + int device; + const auto err = C10_CUDA_ERROR_HANDLED(cudaGetDevice(&device)); + C10_CUDA_CHECK_WARN(err); + if (err != cudaSuccess) { + return c10::nullopt; + } + return Device(DeviceType::CUDA, device); + } + void setDevice(Device d) const override { + TORCH_INTERNAL_ASSERT(d.is_cuda()); + Device current_device = getDevice(); + if (current_device != d) { + C10_CUDA_CHECK(cudaSetDevice(d.index())); + } + } + void uncheckedSetDevice(Device d) const noexcept override { + auto current_device = uncheckedGetDevice(); + if (!current_device.has_value() || current_device.value() != d) { + C10_CUDA_CHECK_WARN(cudaSetDevice(d.index())); + } + } + Stream getStream(Device d) const noexcept override { + return getCurrentCUDAStream(d.index()).unwrap(); + } + Stream getDefaultStream(Device d) const override { + return getDefaultCUDAStream(d.index()); + } + Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) + const override { + return getStreamFromPool(isHighPriority, d.index()); + } + // NB: These do NOT set the current device + Stream exchangeStream(Stream s) const noexcept override { + CUDAStream cs(s); + auto old_stream = getCurrentCUDAStream(s.device().index()); + setCurrentCUDAStream(cs); + return old_stream.unwrap(); + } + DeviceIndex deviceCount() const noexcept override { + return device_count(); + } + + // Event-related functions + void createEvent(cudaEvent_t* cuda_event, const EventFlag flag) const { + // Maps PyTorch's Event::Flag to CUDA flag + auto cuda_flag = cudaEventDefault; + switch (flag) { + case EventFlag::PYTORCH_DEFAULT: + case EventFlag::CUDA_EVENT_DISABLE_TIMING: + cuda_flag = cudaEventDisableTiming; + break; + case EventFlag::BACKEND_DEFAULT: + case EventFlag::CUDA_EVENT_DEFAULT: + cuda_flag = cudaEventDefault; + break; + default: + TORCH_CHECK(false, "CUDA event received unknown flag"); + } + + C10_CUDA_CHECK(cudaEventCreateWithFlags(cuda_event, cuda_flag)); + const 
c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_creation( + reinterpret_cast(cuda_event)); + } + } + + void destroyEvent(void* event, const DeviceIndex device_index) + const noexcept override { + if (!event) + return; + auto cuda_event = static_cast(event); + int orig_device; + C10_CUDA_CHECK_WARN(cudaGetDevice(&orig_device)); + C10_CUDA_CHECK_WARN(cudaSetDevice(device_index)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_deletion( + reinterpret_cast(cuda_event)); + } + C10_CUDA_CHECK_WARN(cudaEventDestroy(cuda_event)); + C10_CUDA_CHECK_WARN(cudaSetDevice(orig_device)); + } + + void record( + void** event, + const Stream& stream, + const DeviceIndex device_index, + const EventFlag flag) const override { + TORCH_CHECK( + device_index == -1 || device_index == stream.device_index(), + "Event device index ", + device_index, + " does not match recording stream's device index ", + stream.device_index(), + "."); + + cudaEvent_t cuda_event = static_cast(*event); + CUDAStream cuda_stream{stream}; + + // Moves to stream's device to record + const auto orig_device = getDevice(); + setDevice(stream.device()); + + // Creates the event (lazily) + if (!cuda_event) + createEvent(&cuda_event, flag); + C10_CUDA_CHECK(cudaEventRecord(cuda_event, cuda_stream)); + // Makes the void* point to the (possibly just allocated) CUDA event + *event = cuda_event; + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_record( + reinterpret_cast(cuda_event), + reinterpret_cast(cuda_stream.stream())); + } + + // Resets device + setDevice(orig_device); + } + + void block(void* event, const Stream& stream) const override { + if (!event) + return; + cudaEvent_t cuda_event = static_cast(event); + CUDAStream cuda_stream{stream}; + const auto orig_device = getDevice(); + setDevice(stream.device()); + C10_CUDA_CHECK(cudaStreamWaitEvent( + cuda_stream, + cuda_event, + /*flags (must be zero)=*/0)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_wait( + reinterpret_cast(cuda_event), + reinterpret_cast(cuda_stream.stream())); + } + setDevice(orig_device); + } + + // May be called from any device + bool queryEvent(void* event) const override { + if (!event) + return true; + cudaEvent_t cuda_event = static_cast(event); + const cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaEventQuery(cuda_event)); + if (err != cudaErrorNotReady) { + C10_CUDA_CHECK(err); + } else { + // ignore and clear the error if not ready + (void)cudaGetLastError(); + } + return (err == cudaSuccess); + } + + // Stream-related functions + bool queryStream(const Stream& stream) const override { + CUDAStream cuda_stream{stream}; + return cuda_stream.query(); + } + + void synchronizeStream(const Stream& stream) const override { + CUDAStream cuda_stream{stream}; + cuda_stream.synchronize(); + } + + void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream) + const override { + CUDAStream cuda_stream{stream}; + CUDACachingAllocator::recordStream(data_ptr, cuda_stream); + } +}; + +} // namespace impl +} // namespace cuda +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/cuda/impl/CUDATest.h b/voice_bridge/torch/include/c10/cuda/impl/CUDATest.h new file mode 100644 index 
0000000000000000000000000000000000000000..593905d1567218f817f030e4b699ce7457edfc45 --- /dev/null +++ b/voice_bridge/torch/include/c10/cuda/impl/CUDATest.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace c10 { +namespace cuda { +namespace impl { + +C10_CUDA_API int c10_cuda_test(); + +} +} // namespace cuda +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/macros/Export.h b/voice_bridge/torch/include/c10/macros/Export.h new file mode 100644 index 0000000000000000000000000000000000000000..b439e74b37e0569c0817f5a3f85b6f9512bba315 --- /dev/null +++ b/voice_bridge/torch/include/c10/macros/Export.h @@ -0,0 +1,154 @@ +#ifndef C10_MACROS_EXPORT_H_ +#define C10_MACROS_EXPORT_H_ + +/* Header file to define the common scaffolding for exported symbols. + * + * Export is by itself a quite tricky situation to deal with, and if you are + * hitting this file, make sure you start with the background here: + * - Linux: https://gcc.gnu.org/wiki/Visibility + * - Windows: + * https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017 + * + * Do NOT include this file directly. Instead, use c10/macros/Macros.h + */ + +// You do not need to edit this part of file unless you are changing the core +// pytorch export abstractions. +// +// This part defines the C10 core export and import macros. This is controlled +// by whether we are building shared libraries or not, which is determined +// during build time and codified in c10/core/cmake_macros.h. +// When the library is built as a shared lib, EXPORT and IMPORT will contain +// visibility attributes. If it is being built as a static lib, then EXPORT +// and IMPORT basically have no effect. + +// As a rule of thumb, you should almost NEVER mix static and shared builds for +// libraries that depend on c10. AKA, if c10 is built as a static library, we +// recommend everything dependent on c10 to be built statically. If c10 is built +// as a shared library, everything dependent on it should be built as shared. In +// the PyTorch project, all native libraries shall use the macro +// C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static +// libraries. + +// For build systems that do not directly depend on CMake and directly build +// from the source directory (such as Buck), one may not have a cmake_macros.h +// file at all. In this case, the build system is responsible for providing +// correct macro definitions corresponding to the cmake_macros.h.in file. +// +// In such scenarios, one should define the macro +// C10_USING_CUSTOM_GENERATED_MACROS +// to inform this header that it does not need to include the cmake_macros.h +// file. + +#ifndef C10_USING_CUSTOM_GENERATED_MACROS +#include +#endif // C10_USING_CUSTOM_GENERATED_MACROS + +#ifdef _WIN32 +#define C10_HIDDEN +#if defined(C10_BUILD_SHARED_LIBS) +#define C10_EXPORT __declspec(dllexport) +#define C10_IMPORT __declspec(dllimport) +#else +#define C10_EXPORT +#define C10_IMPORT +#endif +#else // _WIN32 +#if defined(__GNUC__) +#define C10_EXPORT __attribute__((__visibility__("default"))) +#define C10_HIDDEN __attribute__((__visibility__("hidden"))) +#else // defined(__GNUC__) +#define C10_EXPORT +#define C10_HIDDEN +#endif // defined(__GNUC__) +#define C10_IMPORT C10_EXPORT +#endif // _WIN32 + +#ifdef NO_EXPORT +#undef C10_EXPORT +#define C10_EXPORT +#endif + +// Definition of an adaptive XX_API macro, that depends on whether you are +// building the library itself or not, routes to XX_EXPORT and XX_IMPORT. 
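// A minimal sketch of the pattern this section describes, shown for the
// hypothetical libawesome used in the instructions just below;
// do_awesome_thing is an invented placeholder symbol.

#if defined(AWESOME_BUILD_MAIN_LIB) // defined only while compiling libawesome
#define AWESOME_API C10_EXPORT      // building the library: export the symbol
#else
#define AWESOME_API C10_IMPORT      // consuming the library: import the symbol
#endif

AWESOME_API void do_awesome_thing(); // annotate public symbols in headers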
+// Basically, you will need to do this for each shared library that you are +// building, and the instruction is as follows: assuming that you are building +// a library called libawesome.so. You should: +// (1) for your cmake target (usually done by "add_library(awesome, ...)"), +// define a macro called AWESOME_BUILD_MAIN_LIB using +// target_compile_options. +// (2) define the AWESOME_API macro similar to the one below. +// And in the source file of your awesome library, use AWESOME_API to +// annotate public symbols. + +// Here, for the C10 library, we will define the macro C10_API for both import +// and export. + +// This one is being used by libc10.so +#ifdef C10_BUILD_MAIN_LIB +#define C10_API C10_EXPORT +#else +#define C10_API C10_IMPORT +#endif + +// This one is being used by libtorch.so +#ifdef CAFFE2_BUILD_MAIN_LIB +#define TORCH_API C10_EXPORT +#else +#define TORCH_API C10_IMPORT +#endif + +// You may be wondering: Whose brilliant idea was it to split torch_cuda into +// two pieces with confusing names? +// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we +// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker +// issues when linking big binaries. +// (https://github.com/pytorch/pytorch/issues/39968) We had two choices: +// (1) Stop supporting so many GPU architectures +// (2) Do something else +// We chose #2 and decided to split the behemoth that was torch_cuda into two +// smaller libraries, one with most of the core kernel functions (torch_cuda_cu) +// and the other that had..well..everything else (torch_cuda_cpp). The idea was +// this: instead of linking our static libraries (like the hefty +// libcudnn_static.a) with another huge library, torch_cuda, and run into pesky +// relocation marker issues, we could link our static libraries to a smaller +// part of torch_cuda (torch_cuda_cpp) and avoid the issues. + +// libtorch_cuda_cu.so +#ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB +#define TORCH_CUDA_CU_API C10_EXPORT +#elif defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CU_API C10_IMPORT +#endif + +// libtorch_cuda_cpp.so +#ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB +#define TORCH_CUDA_CPP_API C10_EXPORT +#elif defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CPP_API C10_IMPORT +#endif + +// libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the +// same api) +#ifdef TORCH_CUDA_BUILD_MAIN_LIB +#define TORCH_CUDA_CPP_API C10_EXPORT +#define TORCH_CUDA_CU_API C10_EXPORT +#elif !defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CPP_API C10_IMPORT +#define TORCH_CUDA_CU_API C10_IMPORT +#endif + +#if defined(TORCH_HIP_BUILD_MAIN_LIB) +#define TORCH_HIP_API C10_EXPORT +#else +#define TORCH_HIP_API C10_IMPORT +#endif + +// Enums only need to be exported on windows for non-CUDA files +#if defined(_WIN32) && defined(__CUDACC__) +#define C10_API_ENUM C10_API +#else +#define C10_API_ENUM +#endif + +#endif // C10_MACROS_MACROS_H_ diff --git a/voice_bridge/torch/include/c10/macros/Macros.h b/voice_bridge/torch/include/c10/macros/Macros.h new file mode 100644 index 0000000000000000000000000000000000000000..beefca1d63c6075938671c51c7d49106aaf0f7bd --- /dev/null +++ b/voice_bridge/torch/include/c10/macros/Macros.h @@ -0,0 +1,538 @@ +#ifndef C10_MACROS_MACROS_H_ +#define C10_MACROS_MACROS_H_ +#include + +/* Main entry for c10/macros. + * + * In your code, include c10/macros/Macros.h directly, instead of individual + * files in this folder. 
+ */ + +// For build systems that do not directly depend on CMake and directly build +// from the source directory (such as Buck), one may not have a cmake_macros.h +// file at all. In this case, the build system is responsible for providing +// correct macro definitions corresponding to the cmake_macros.h.in file. +// +// In such scenarios, one should define the macro +// C10_USING_CUSTOM_GENERATED_MACROS +// to inform this header that it does not need to include the cmake_macros.h +// file. + +#ifndef C10_USING_CUSTOM_GENERATED_MACROS +#include +#endif // C10_USING_CUSTOM_GENERATED_MACROS + +#include + +#if defined(__clang__) +#define __ubsan_ignore_float_divide_by_zero__ \ + __attribute__((no_sanitize("float-divide-by-zero"))) +#define __ubsan_ignore_undefined__ __attribute__((no_sanitize("undefined"))) +#define __ubsan_ignore_signed_int_overflow__ \ + __attribute__((no_sanitize("signed-integer-overflow"))) +#define __ubsan_ignore_function__ __attribute__((no_sanitize("function"))) +#else +#define __ubsan_ignore_float_divide_by_zero__ +#define __ubsan_ignore_undefined__ +#define __ubsan_ignore_signed_int_overflow__ +#define __ubsan_ignore_function__ +#endif + +// Detect address sanitizer as some stuff doesn't work with it +#undef C10_ASAN_ENABLED + +// for clang +#if defined(__has_feature) +#if ((__has_feature(address_sanitizer))) +#define C10_ASAN_ENABLED 1 +#endif +#endif + +// for gcc +#if defined(__SANITIZE_ADDRESS__) +#if __SANITIZE_ADDRESS__ +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 1 +#endif +#endif +#endif + +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 0 +#endif + +// Disable the copy and assignment operator for a class. Note that this will +// disable the usage of the class in std containers. +#define C10_DISABLE_COPY_AND_ASSIGN(classname) \ + classname(const classname&) = delete; \ + classname& operator=(const classname&) = delete + +#define C10_CONCATENATE_IMPL(s1, s2) s1##s2 +#define C10_CONCATENATE(s1, s2) C10_CONCATENATE_IMPL(s1, s2) + +#define C10_MACRO_EXPAND(args) args + +#define C10_STRINGIZE_IMPL(x) #x +#define C10_STRINGIZE(x) C10_STRINGIZE_IMPL(x) + +/** + * C10_ANONYMOUS_VARIABLE(str) introduces an identifier starting with + * str and ending with a number that varies with the line. + */ +#ifdef __COUNTER__ +#define C10_UID __COUNTER__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __COUNTER__) +#else +#define C10_UID __LINE__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __LINE__) +#endif + +#ifdef __has_cpp_attribute +#define C10_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define C10_HAS_CPP_ATTRIBUTE(x) (0) +#endif + +/// C10_NODISCARD - Warn if a type or return value is discarded. + +// Technically, we should check if __cplusplus > 201402L here, because +// [[nodiscard]] is only defined in C++17. However, some compilers +// we care about don't advertise being C++17 (e.g., clang), but +// support the attribute anyway. In fact, this is not just a good idea, +// it's the law: clang::warn_unused_result doesn't work on nvcc + clang +// and the best workaround for this case is to use [[nodiscard]] +// instead; see https://github.com/pytorch/pytorch/issues/13118 +// +// Note to future editors: if you have noticed that a compiler is +// misbehaving (e.g., it advertises support, but the support doesn't +// actually work, or it is emitting warnings). 
Some compilers which +// are strict about the matter include MSVC, which will complain: +// +// error C2429: attribute 'nodiscard' requires compiler flag '/std:c++latest' +// +// Exhibits: +// - MSVC 19.14: https://godbolt.org/z/Dzd7gn (requires /std:c++latest) +// - Clang 8.0.0: https://godbolt.org/z/3PYL4Z (always advertises support) +// - gcc 8.3: https://godbolt.org/z/4tLMQS (always advertises support) +#if C10_HAS_CPP_ATTRIBUTE(nodiscard) +#define C10_NODISCARD [[nodiscard]] +// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious +// error when __has_cpp_attribute is given a scoped attribute in C mode. +#elif __cplusplus && C10_HAS_CPP_ATTRIBUTE(clang::warn_unused_result) +// TODO: It's possible this is still triggering +// https://github.com/pytorch/pytorch/issues/13118 on Windows; if it is, better +// fix it. +#define C10_NODISCARD [[clang::warn_unused_result]] +#else +#define C10_NODISCARD +#endif + +// suppress an unused variable. +#if defined(_MSC_VER) && !defined(__clang__) +#define C10_UNUSED __pragma(warning(suppress : 4100 4101)) +#else +#define C10_UNUSED __attribute__((__unused__)) +#endif //_MSC_VER + +// Direct port of LLVM_ATTRIBUTE_USED. +#if __has_attribute(used) +#define C10_USED __attribute__((__used__)) +#else +#define C10_USED +#endif + +#define C10_RESTRICT __restrict + +// Simply define the namespace, in case a dependent library want to refer to +// the c10 namespace but not any nontrivial files. +namespace c10 {} // namespace c10 +namespace c10 { +namespace cuda {} +} // namespace c10 +namespace c10 { +namespace hip {} +} // namespace c10 + +// Since C10 is the core library for caffe2 (and aten), we will simply reroute +// all abstractions defined in c10 to be available in caffe2 as well. +// This is only for backwards compatibility. Please use the symbols from the +// c10 namespace where possible. +namespace caffe2 { +using namespace c10; +} +namespace at { +using namespace c10; +} +namespace at { +namespace cuda { +using namespace c10::cuda; +} +} // namespace at + +// WARNING!!! THIS IS A GIANT HACK!!! +// This line means you cannot simultaneously include c10/hip +// and c10/cuda and then use them from the at::cuda namespace. +// This is true in practice, because HIPIFY works inplace on +// files in ATen/cuda, so it assumes that c10::hip is available +// from at::cuda. This namespace makes that happen. When +// HIPIFY is no longer out-of-place, we can switch the cuda +// here to hip and everyone is happy. +namespace at { +namespace cuda { +using namespace c10::hip; +} +} // namespace at + +// C10_LIKELY/C10_UNLIKELY +// +// These macros provide parentheses, so you can use these macros as: +// +// if C10_LIKELY(some_expr) { +// ... +// } +// +// NB: static_cast to boolean is mandatory in C++, because __builtin_expect +// takes a long argument, which means you may trigger the wrong conversion +// without it. +// +#if defined(__GNUC__) || defined(__ICL) || defined(__clang__) +#define C10_LIKELY(expr) (__builtin_expect(static_cast(expr), 1)) +#define C10_UNLIKELY(expr) (__builtin_expect(static_cast(expr), 0)) +#else +#define C10_LIKELY(expr) (expr) +#define C10_UNLIKELY(expr) (expr) +#endif + +/// C10_NOINLINE - Functions whose declaration is annotated with this will not +/// be inlined. 
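// A minimal standalone sketch of the branch-hint macros above together with
// C10_NOINLINE (defined just below); checked_div and report_error are
// invented names for the example.

#include <c10/macros/Macros.h>

C10_NOINLINE void report_error(const char* msg); // keep the cold path out of line

int checked_div(int num, int den) {
  if (C10_UNLIKELY(den == 0)) { // hint: this branch is rarely taken
    report_error("division by zero");
    return 0;
  }
  return num / den; // the hot path stays straight-line
}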
+#ifdef __GNUC__ +#define C10_NOINLINE __attribute__((noinline)) +#elif _MSC_VER +#define C10_NOINLINE __declspec(noinline) +#else +#define C10_NOINLINE +#endif + +#if defined(_MSC_VER) +#define C10_ALWAYS_INLINE __forceinline +#elif __has_attribute(always_inline) || defined(__GNUC__) +#define C10_ALWAYS_INLINE __attribute__((__always_inline__)) inline +#else +#define C10_ALWAYS_INLINE inline +#endif + +#if defined(_MSC_VER) +#define C10_ATTR_VISIBILITY_HIDDEN +#elif defined(__GNUC__) +#define C10_ATTR_VISIBILITY_HIDDEN __attribute__((__visibility__("hidden"))) +#else +#define C10_ATTR_VISIBILITY_HIDDEN +#endif + +#define C10_ERASE C10_ALWAYS_INLINE C10_ATTR_VISIBILITY_HIDDEN + +// C10_FALLTHROUGH - Annotate fallthrough to the next case in a switch. +#if C10_HAS_CPP_ATTRIBUTE(fallthrough) +#define C10_FALLTHROUGH [[fallthrough]] +#else +#define C10_FALLTHROUGH +#endif + +#include + +#ifdef __HIPCC__ +// Unlike CUDA, HIP requires a HIP header to be included for __host__ to work. +// We do this #include here so that C10_HOST_DEVICE and friends will Just Work. +// See https://github.com/ROCm-Developer-Tools/HIP/issues/441 +#include +#endif + +#if defined(__CUDACC__) || defined(__HIPCC__) +// Designates functions callable from the host (CPU) and the device (GPU) +#define C10_HOST_DEVICE __host__ __device__ +#define C10_DEVICE __device__ +#define C10_HOST __host__ +// constants from +// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications) +// The maximum number of threads per multiprocessor is 1024 for Turing +// architecture (7.5), 1536 for Geforce Ampere (8.6), and 2048 for all other +// architectures. You'll get warnings if you exceed these constants. Hence, the +// following macros adjust the input values from the user to resolve potential +// warnings. +#if __CUDA_ARCH__ == 750 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024; +#elif __CUDA_ARCH__ == 860 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1536; +#else +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048; +#endif +// CUDA_MAX_THREADS_PER_BLOCK is same for all architectures currently +constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024; +// CUDA_THREADS_PER_BLOCK_FALLBACK is the "canonical fallback" choice of block +// size. 256 is a good number for this fallback and should give good occupancy +// and versatility across all architectures. +constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256; +// NOTE: if you are thinking of constexpr-ify the inputs to launch bounds, it +// turns out that although __launch_bounds__ can take constexpr, it +// can't take a constexpr that has anything to do with templates. +// Currently we use launch_bounds that depend on template arguments in +// Loops.cuh, Reduce.cuh and LossCTC.cuh. Hence, C10_MAX_THREADS_PER_BLOCK +// and C10_MIN_BLOCKS_PER_SM are kept as macros. +// Suppose you were planning to write __launch_bounds__(a, b), based on your +// performance tuning on a modern GPU. Instead, you should write +// __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(a), C10_MIN_BLOCKS_PER_SM(a, b)), +// which will also properly respect limits on old architectures. +#define C10_MAX_THREADS_PER_BLOCK(val) \ + (((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \ + : CUDA_THREADS_PER_BLOCK_FALLBACK) +#define C10_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \ + ((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \ + ? 
(blocks_per_sm) \ + : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \ + (threads_per_block)))) +// C10_LAUNCH_BOUNDS is analogous to __launch_bounds__ +#define C10_LAUNCH_BOUNDS_0 \ + __launch_bounds__( \ + 256, 4) // default launch bounds that should give good occupancy and + // versatility across all architectures. +#define C10_LAUNCH_BOUNDS_1(max_threads_per_block) \ + __launch_bounds__((C10_MAX_THREADS_PER_BLOCK((max_threads_per_block)))) +#define C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \ + __launch_bounds__( \ + (C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \ + (C10_MIN_BLOCKS_PER_SM((max_threads_per_block), (min_blocks_per_sm)))) +#else +#define C10_HOST_DEVICE +#define C10_HOST +#define C10_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_HIP_HOST_DEVICE __host__ __device__ +#else +#define C10_HIP_HOST_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_WARP_SIZE warpSize // = 64 or 32 (Defined in hip_runtime.h) +#else +#define C10_WARP_SIZE 32 +#endif + +#if defined(_MSC_VER) && _MSC_VER <= 1900 +#define __func__ __FUNCTION__ +#endif + +// CUDA_KERNEL_ASSERT checks the assertion +// even when NDEBUG is defined. This is useful for important assertions in CUDA +// code that would otherwise be suppressed when building Release. +#if defined(__ANDROID__) || defined(__APPLE__) || \ + (defined(USE_ROCM) && ROCM_VERSION < 40100) || \ + (defined(USE_ROCM) && defined(ROCM_DISABLE_GPU_ASSERTS)) +// Those platforms do not support assert() +#define CUDA_KERNEL_ASSERT(cond) +#define SYCL_KERNEL_ASSERT(cond) +#elif defined(_MSC_VER) +#if defined(NDEBUG) +extern "C" { +C10_IMPORT +#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void _wassert( + const wchar_t* wexpr, + const wchar_t* wfile, + unsigned line); +#else +#if defined(__CUDA_ARCH__) +__host__ __device__ +#endif // __CUDA_ARCH__ + void + _wassert(wchar_t const* _Message, wchar_t const* _File, unsigned _Line); +} +#endif // __SYCL_DEVICE_ONLY__ +#endif // NDEBUG +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert(_CRT_WIDE(#cond), _CRT_WIDE(__FILE__), static_cast(__LINE__)), 0); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert(_CRT_WIDE(#cond), _CRT_WIDE(__FILE__), static_cast(__LINE__)), 0); \ + } +#else // __APPLE__, _MSC_VER +#if defined(NDEBUG) +extern "C" { +#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void __assert_fail( + const char* expr, + const char* file, + unsigned int line, + const char* func); +#else // __SYCL_DEVICE_ONLY__ +#if (defined(__CUDA_ARCH__) && !(defined(__clang__) && defined(__CUDA__))) +// CUDA supports __assert_fail function which are common for both device +// and host side code. +__host__ __device__ +#endif + + // This forward declaration matching the declaration of __assert_fail + // exactly how it is in glibc in case parts of the program are compiled with + // different NDEBUG settings. Otherwise we might get 'ambiguous declaration' + // error. Note: On ROCm - this declaration serves for host side compilation. + void + __assert_fail( + const char* assertion, + const char* file, + unsigned int line, + const char* function) throw() __attribute__((__noreturn__)); + +#if (defined(__HIP_ARCH__) || defined(__HIP__)) && \ + !defined(ROCM_DISABLE_GPU_ASSERTS) +// ROCm supports __assert_fail only as a device side function. 
+__device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail( + const char* assertion, + const char* file, + unsigned int line, + const char* function); +#endif // defined(__HIP_ARCH__) || defined(__HIP__) +#endif // __SYCL_DEVICE_ONLY__ +} +#endif // NDEBUG +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#endif // __APPLE__ + +#ifdef __APPLE__ +#include +#endif + +#if defined(__ANDROID__) +#define C10_ANDROID 1 +#define C10_MOBILE 1 +#elif ( \ + defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE)) +#define C10_IOS 1 +#define C10_MOBILE 1 +#endif // ANDROID / IOS + +#if defined(C10_MOBILE) && C10_MOBILE +#define C10_ALWAYS_INLINE_UNLESS_MOBILE inline +#else +#define C10_ALWAYS_INLINE_UNLESS_MOBILE C10_ALWAYS_INLINE +#endif + +// Portable determination of whether type T is trivially copyable. +// Warning: __has_trivial_copy for GCC may not always detect the non-POD +// correctly. For example, T = std::unique_ptr may evaluate to true and be +// treated as POD. This can cause unexpected behavior. +#if defined(__GNUG__) && __GNUC__ < 5 +#define C10_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T) +#else +#define C10_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable::value +#endif + +#if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \ + __GNUC__ < 6 +#define CONSTEXPR_EXCEPT_GCC5 +#define IS_NOT_GCC5_CONSTEXPR 0 +#else +#define CONSTEXPR_EXCEPT_GCC5 constexpr +#define IS_NOT_GCC5_CONSTEXPR 1 +#endif + +#if defined(__CUDA_ARCH__) +#if defined(_MSC_VER) && defined(__CUDACC__) +#define CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +// Note [static constexpr char* members for windows NVCC] +// The Windows NVCC compiler doesn't handle static constexpr class members, +// although it's fixed in a later version. +// (see +// https://developercommunity.visualstudio.com/t/intellisense-error-c11-static-constexpr-member-ini/245425) +// +// If we want to ensure that our field is static under all builds, then we need +// to work around it specifically for windows NVCC by making it (a) const, (b) +// defined outside of the class definition We need to define it outside of the +// class definition because of the C++ standard; char* is not an integral type +// (see +// https://stackoverflow.com/questions/24278473/intellisense-a-member-of-type-const-char-const-cannot-have-an-in-class-in) +// +// So instead of this: +// struct Foo { +// static constexpr const char* name = "foo"; +// } +// In Windows NVCC, we end up with this: +// struct Foo { +// static const char* name; +// } +// const char* Foo::name = "foo"; +// +// This gives us a small perf hit for any code that wants to access these field +// members, but right now it isn't used in any perf-critical code paths. 
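// A minimal sketch of how the macro pair defined just below is used,
// mirroring the Foo/name example from the note above (sketch only).

#include <c10/macros/Macros.h>

struct Foo {
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "foo")
};
// A no-op everywhere except Windows NVCC, where it supplies the required
// out-of-line definition of Foo::name:
STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(Foo, name, "foo")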
+#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#else +#if defined(_MSC_VER) && defined(__CUDACC__) +#define CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA constexpr + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#endif + +#ifndef HAS_DEMANGLE +#if defined(__ANDROID__) || defined(_WIN32) || defined(__EMSCRIPTEN__) +#define HAS_DEMANGLE 0 +#elif defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE) +#define HAS_DEMANGLE 0 +#else +#define HAS_DEMANGLE 1 +#endif +#endif // HAS_DEMANGLE + +#ifdef __clang__ +#define _C10_PRAGMA__(string) _Pragma(#string) +#define _C10_PRAGMA_(string) _C10_PRAGMA__(string) +#define C10_CLANG_DIAGNOSTIC_PUSH() _Pragma("clang diagnostic push") +#define C10_CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop") +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) \ + _C10_PRAGMA_(clang diagnostic ignored flag) +#define C10_CLANG_HAS_WARNING(flag) __has_warning(flag) +#else +#define C10_CLANG_DIAGNOSTIC_PUSH() +#define C10_CLANG_DIAGNOSTIC_POP() +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) +#define C10_CLANG_HAS_WARNING(flag) 0 +#endif + +#endif // C10_MACROS_MACROS_H_ diff --git a/voice_bridge/torch/include/c10/macros/cmake_macros.h b/voice_bridge/torch/include/c10/macros/cmake_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..6c8b60b9eaecf5a0ce1c55a82cda6fbafd47c727 --- /dev/null +++ b/voice_bridge/torch/include/c10/macros/cmake_macros.h @@ -0,0 +1,13 @@ +#ifndef C10_MACROS_CMAKE_MACROS_H_ +#define C10_MACROS_CMAKE_MACROS_H_ + +// Automatically generated header file for the C10 library. +// Do not include this file directly. Instead, include c10/macros/Macros.h. + +#define C10_BUILD_SHARED_LIBS +/* #undef C10_USE_GLOG */ +/* #undef C10_USE_GFLAGS */ +/* #undef C10_USE_NUMA */ +/* #undef C10_USE_MSVC_STATIC_RUNTIME */ + +#endif // C10_MACROS_CMAKE_MACROS_H_ diff --git a/voice_bridge/torch/include/c10/util/AlignOf.h b/voice_bridge/torch/include/c10/util/AlignOf.h new file mode 100644 index 0000000000000000000000000000000000000000..7f99a0718b1f87c5a519cc3814d28abc40e94efb --- /dev/null +++ b/voice_bridge/torch/include/c10/util/AlignOf.h @@ -0,0 +1,173 @@ +//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the AlignedCharArray and AlignedCharArrayUnion classes. 
+// +//===----------------------------------------------------------------------===// + +// ATen: modified from llvm::AlignOf +// replaced LLVM_ALIGNAS with alignas + +#pragma once + +#include + +namespace c10 { + +/// \struct AlignedCharArray +/// \brief Helper for building an aligned character array type. +/// +/// This template is used to explicitly build up a collection of aligned +/// character array types. We have to build these up using a macro and explicit +/// specialization to cope with MSVC (at least till 2015) where only an +/// integer literal can be used to specify an alignment constraint. Once built +/// up here, we can then begin to indirect between these using normal C++ +/// template parameters. + +// MSVC requires special handling here. +#ifndef _MSC_VER + +template +struct AlignedCharArray { + alignas(Alignment) char buffer[Size]; +}; + +#else // _MSC_VER + +/// \brief Create a type with an aligned char buffer. +template +struct AlignedCharArray; + +// We provide special variations of this template for the most common +// alignments because __declspec(align(...)) doesn't actually work when it is +// a member of a by-value function argument in MSVC, even if the alignment +// request is something reasonably like 8-byte or 16-byte. Note that we can't +// even include the declspec with the union that forces the alignment because +// MSVC warns on the existence of the declspec despite the union member forcing +// proper alignment. + +template +struct AlignedCharArray<1, Size> { + union { + char aligned; + char buffer[Size]; + }; +}; + +template +struct AlignedCharArray<2, Size> { + union { + short aligned; + char buffer[Size]; + }; +}; + +template +struct AlignedCharArray<4, Size> { + union { + int aligned; + char buffer[Size]; + }; +}; + +template +struct AlignedCharArray<8, Size> { + union { + double aligned; + char buffer[Size]; + }; +}; + +// The rest of these are provided with a __declspec(align(...)) and we simply +// can't pass them by-value as function arguments on MSVC. + +#define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ + template \ + struct AlignedCharArray { \ + __declspec(align(x)) char buffer[Size]; \ + }; + +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16) +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32) +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64) +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128) + +#undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT + +#endif // _MSC_VER + +namespace detail { +template < + typename T1, + typename T2 = char, + typename T3 = char, + typename T4 = char, + typename T5 = char, + typename T6 = char, + typename T7 = char, + typename T8 = char, + typename T9 = char, + typename T10 = char> +class AlignerImpl { + T1 t1; + T2 t2; + T3 t3; + T4 t4; + T5 t5; + T6 t6; + T7 t7; + T8 t8; + T9 t9; + T10 t10; + + AlignerImpl() = delete; +}; + +template < + typename T1, + typename T2 = char, + typename T3 = char, + typename T4 = char, + typename T5 = char, + typename T6 = char, + typename T7 = char, + typename T8 = char, + typename T9 = char, + typename T10 = char> +union SizerImpl { + char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)], + arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)], + arr9[sizeof(T9)], arr10[sizeof(T10)]; +}; +} // end namespace detail + +/// \brief This union template exposes a suitably aligned and sized character +/// array member which can hold elements of any of up to ten types. +/// +/// These types may be arrays, structs, or any other types. 
The goal is to +/// expose a char array buffer member which can be used as suitable storage for +/// a placement new of any of these types. Support for more than ten types can +/// be added at the cost of more boilerplate. +template < + typename T1, + typename T2 = char, + typename T3 = char, + typename T4 = char, + typename T5 = char, + typename T6 = char, + typename T7 = char, + typename T8 = char, + typename T9 = char, + typename T10 = char> +struct AlignedCharArrayUnion + : AlignedCharArray< + alignof(detail::AlignerImpl), + sizeof(::c10::detail:: + SizerImpl)> {}; +} // end namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Array.h b/voice_bridge/torch/include/c10/util/Array.h new file mode 100644 index 0000000000000000000000000000000000000000..f33a36e8e84b8e1e6fc7aa7d4ece17521563f62b --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Array.h @@ -0,0 +1,391 @@ +/** + * This file is based on the std::array implementation of libstdc++ at + * https://gcc.gnu.org/onlinedocs/gcc-7.1.0/libstdc++/api/a01056_source.html + * + * Changes: + * - isolate, i.e. remove dependencies on internal libstdc++ stuff + * - use c++17 behavior even in c++11 or c++14 + * - remove std::swappable special case because that doesn't work with MSVC + * - constexpr more things + * - add some features like prepend/tail + * + * If using std::array at runtime, feel free to either keep using std::array or + * use this one - it doesn't really matter. For compile time computations, this + * one here is preferred because std::array in C++11 misses some constexpr + * specifiers, forcing these methods to be called at runtime instead of compile + * time. + */ + +// Copyright (C) 2007-2017 Free Software Foundation, Inc. +// +// This file is part of the GNU ISO C++ Library. This library is free +// software; you can redistribute it and/or modify it under the +// terms of the GNU General Public License as published by the +// Free Software Foundation; either version 3, or (at your option) +// any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. + +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// . 
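// A minimal standalone sketch of AlignedCharArrayUnion from AlignOf.h above;
// Small and Big are invented example types.

#include <c10/util/AlignOf.h>
#include <new>

struct Small { int x; };
struct Big { double d[4]; };

void aligned_storage_demo() {
  // The buffer is aligned for, and at least as large as, every listed type,
  // so a placement-new of either type into it is well-defined.
  c10::AlignedCharArrayUnion<Small, Big> storage;
  Big* b = new (storage.buffer) Big();
  b->~Big();
}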
+ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { +namespace guts { + +namespace detail { +template +struct __array_traits final { + using _Type = _Tp[_Nm]; + + static constexpr _Tp& _S_ref(const _Type& __t, std::size_t __n) noexcept { + return const_cast<_Tp&>(__t[__n]); + } + + static constexpr _Tp* _S_ptr(const _Type& __t) noexcept { + return const_cast<_Tp*>(__t); + } +}; + +template +struct __array_traits<_Tp, 0> final { + struct _Type final {}; + + static constexpr _Tp& _S_ref(const _Type& __t, std::size_t) noexcept { + return *_S_ptr(__t); + } + + static constexpr _Tp* _S_ptr(const _Type&) noexcept { + return nullptr; + } +}; + +[[noreturn]] inline void __throw_out_of_range(std::string msg) { + throw std::out_of_range(std::move(msg)); +} +} // namespace detail + +template +class array final { + public: + using value_type = _Tp; + using pointer = value_type*; + using const_pointer = const value_type*; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = value_type*; + using const_iterator = const value_type*; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + private: + using _AT_Type = detail::__array_traits<_Tp, _Nm>; + + public: // needs to be public member for aggregate initialization + typename _AT_Type::_Type _M_elems; + + public: + // No explicit construct/copy/destroy for aggregate type. + + // DR 776. + constexpr void fill(const value_type& __u) { + std::fill_n(begin(), size(), __u); + } + + constexpr void swap(array& __other) { + std::swap_ranges(begin(), end(), __other.begin()); + } + + // Iterators. + constexpr iterator begin() noexcept { + return iterator(data()); + } + + constexpr const_iterator begin() const noexcept { + return const_iterator(data()); + } + + constexpr iterator end() noexcept { + return iterator(data() + _Nm); + } + + constexpr const_iterator end() const noexcept { + return const_iterator(data() + _Nm); + } + + constexpr reverse_iterator rbegin() noexcept { + return reverse_iterator(end()); + } + + constexpr const_reverse_iterator rbegin() const noexcept { + return const_reverse_iterator(end()); + } + + constexpr reverse_iterator rend() noexcept { + return reverse_iterator(begin()); + } + + constexpr const_reverse_iterator rend() const noexcept { + return const_reverse_iterator(begin()); + } + + constexpr const_iterator cbegin() const noexcept { + return const_iterator(data()); + } + + constexpr const_iterator cend() const noexcept { + return const_iterator(data() + _Nm); + } + + constexpr const_reverse_iterator crbegin() const noexcept { + return const_reverse_iterator(end()); + } + + constexpr const_reverse_iterator crend() const noexcept { + return const_reverse_iterator(begin()); + } + + // Capacity. + constexpr size_type size() const noexcept { + return _Nm; + } + + constexpr size_type max_size() const noexcept { + return _Nm; + } + + constexpr bool empty() const noexcept { + return size() == 0; + } + + // Element access. 
+ constexpr reference operator[](size_type __n) noexcept { + return _AT_Type::_S_ref(_M_elems, __n); + } + + constexpr const_reference operator[](size_type __n) const noexcept { + return _AT_Type::_S_ref(_M_elems, __n); + } + + constexpr reference at(size_type __n) { + if (__n >= _Nm) { + detail::__throw_out_of_range( + std::string() + "array::at: __n (which is " + to_string(__n) + ") " + + ">= _Nm (which is " + to_string(_Nm) + ")"); + } + return _AT_Type::_S_ref(_M_elems, __n); + } + + constexpr const_reference at(size_type __n) const { + // Result of conditional expression must be an lvalue so use + // boolean ? lvalue : (throw-expr, lvalue) + return __n < _Nm + ? _AT_Type::_S_ref(_M_elems, __n) + : (detail::__throw_out_of_range( + std::string() + "array::at: __n (which is " + to_string(__n) + + ") " + ">= _Nm (which is " + to_string(_Nm) + ")"), + _AT_Type::_S_ref(_M_elems, 0)); + } + + constexpr reference front() noexcept { + return *begin(); + } + + constexpr const_reference front() const noexcept { + return _AT_Type::_S_ref(_M_elems, 0); + } + + constexpr reference back() noexcept { + return _Nm ? *(end() - 1) : *end(); + } + + constexpr const_reference back() const noexcept { + return _Nm ? _AT_Type::_S_ref(_M_elems, _Nm - 1) + : _AT_Type::_S_ref(_M_elems, 0); + } + + constexpr pointer data() noexcept { + return _AT_Type::_S_ptr(_M_elems); + } + + constexpr const_pointer data() const noexcept { + return _AT_Type::_S_ptr(_M_elems); + } +}; + +#if defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201606 +template +array(_Tp, _Up...) -> array< + std::enable_if_t<(std::is_same<_Tp, _Up>::value && ...), _Tp>, + 1 + sizeof...(_Up)>; +#endif + +// Array comparisons. +namespace detail { +template +constexpr inline bool array_equals_( + const array& lhs, + const array& rhs, + size_t current_index) { + return (current_index == N) + ? true + : (lhs.at(current_index) == rhs.at(current_index) && + array_equals_(lhs, rhs, current_index + 1)); +} +template +constexpr inline bool array_less_( + const array& lhs, + const array& rhs, + size_t current_index) { + return (current_index == N) + ? false + : (lhs.at(current_index) < rhs.at(current_index) || + array_less_(lhs, rhs, current_index + 1)); +} +} // namespace detail +template +constexpr inline bool operator==( + const array<_Tp, _Nm>& __one, + const array<_Tp, _Nm>& __two) { + return detail::array_equals_(__one, __two, 0); +} + +template +constexpr inline bool operator!=( + const array<_Tp, _Nm>& __one, + const array<_Tp, _Nm>& __two) { + return !(__one == __two); +} + +template +constexpr inline bool operator<( + const array<_Tp, _Nm>& __a, + const array<_Tp, _Nm>& __b) { + return detail::array_less_(__a, __b, 0); +} + +template +constexpr inline bool operator>( + const array<_Tp, _Nm>& __one, + const array<_Tp, _Nm>& __two) { + return __two < __one; +} + +template +constexpr inline bool operator<=( + const array<_Tp, _Nm>& __one, + const array<_Tp, _Nm>& __two) { + return !(__one > __two); +} + +template +constexpr inline bool operator>=( + const array<_Tp, _Nm>& __one, + const array<_Tp, _Nm>& __two) { + return !(__one < __two); +} + +// Specialized algorithms. 
+template +inline void swap(array<_Tp, _Nm>& __one, array<_Tp, _Nm>& __two) noexcept( + noexcept(__one.swap(__two))) { + __one.swap(__two); +} + +template +constexpr _Tp& get(array<_Tp, _Nm>& __arr) noexcept { + static_assert(_Int < _Nm, "array index is within bounds"); + return detail::__array_traits<_Tp, _Nm>::_S_ref(__arr._M_elems, _Int); +} + +template +constexpr _Tp&& get(array<_Tp, _Nm>&& __arr) noexcept { + static_assert(_Int < _Nm, "array index is within bounds"); + return std::move(get<_Int>(__arr)); +} + +template +constexpr const _Tp& get(const array<_Tp, _Nm>& __arr) noexcept { + static_assert(_Int < _Nm, "array index is within bounds"); + return detail::__array_traits<_Tp, _Nm>::_S_ref(__arr._M_elems, _Int); +} + +/** + * Some added features not available in std::array. + * Only call these at compile time, they're slow if called at runtime. + * Examples: + * tail({2, 3, 4}) == {3, 4} + * prepend(2, {3, 4}) == {2, 3, 4} + */ +namespace detail { +template +constexpr inline array tail_( + const array& arg, + std::index_sequence) { + static_assert(sizeof...(INDEX) == N - 1, "invariant"); + return {{get(arg)...}}; +} +} // namespace detail +template +constexpr inline array tail(const array& arg) { + static_assert( + N > 0, "Can only call tail() on an array with at least one element"); + return detail::tail_(arg, std::make_index_sequence()); +} + +namespace detail { +template +constexpr inline array prepend_( + T&& head, + const array& tail, + std::index_sequence) { + return {{std::forward(head), get(tail)...}}; +} +} // namespace detail +template +constexpr inline array prepend(T&& head, const array& tail) { + return detail::prepend_( + std::forward(head), tail, std::make_index_sequence()); +} + +/** + * Convert a C array into a std::array. + * Example: + * int source[3] = {2, 3, 4}; + * std::array target = to_std_array(source); + */ + +namespace detail { +template +constexpr array to_array_( + const T (&arr)[N], + std::index_sequence) { + return {{arr[INDEX]...}}; +} +} // namespace detail + +template +constexpr array to_array(const T (&arr)[N]) { + return detail::to_array_(arr, std::make_index_sequence()); +} + +} // namespace guts +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/ArrayRef.h b/voice_bridge/torch/include/c10/util/ArrayRef.h new file mode 100644 index 0000000000000000000000000000000000000000..4d45c5e6c4133b8def3e2b1f520d6ddce0847757 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/ArrayRef.h @@ -0,0 +1,370 @@ +//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +// ATen: modified from llvm::ArrayRef. +// removed llvm-specific functionality +// removed some implicit const -> non-const conversions that rely on +// complicated std::enable_if meta-programming +// removed a bunch of slice variants for simplicity... + +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace c10 { +/// ArrayRef - Represent a constant reference to an array (0 or more elements +/// consecutively in memory), i.e. a start pointer and a length. It allows +/// various APIs to take consecutive elements easily and conveniently. 
+/// +/// This class does not own the underlying data, it is expected to be used in +/// situations where the data resides in some other buffer, whose lifetime +/// extends past that of the ArrayRef. For this reason, it is not in general +/// safe to store an ArrayRef. +/// +/// This is intended to be trivially copyable, so it should be passed by +/// value. +template +class ArrayRef final { + public: + using iterator = const T*; + using const_iterator = const T*; + using size_type = size_t; + using value_type = T; + + using reverse_iterator = std::reverse_iterator; + + private: + /// The start of the array, in an external buffer. + const T* Data; + + /// The number of elements. + size_type Length; + + void debugCheckNullptrInvariant() { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + Data != nullptr || Length == 0, + "created ArrayRef with nullptr and non-zero length! c10::optional relies on this being illegal"); + } + + public: + /// @name Constructors + /// @{ + + /// Construct an empty ArrayRef. + /* implicit */ constexpr ArrayRef() : Data(nullptr), Length(0) {} + + /// Construct an ArrayRef from a single element. + // TODO Make this explicit + constexpr ArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {} + + /// Construct an ArrayRef from a pointer and length. + C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* data, size_t length) + : Data(data), Length(length) { + debugCheckNullptrInvariant(); + } + + /// Construct an ArrayRef from a range. + C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* begin, const T* end) + : Data(begin), Length(end - begin) { + debugCheckNullptrInvariant(); + } + + /// Construct an ArrayRef from a SmallVector. This is templated in order to + /// avoid instantiating SmallVectorTemplateCommon whenever we + /// copy-construct an ArrayRef. + template + /* implicit */ ArrayRef(const SmallVectorTemplateCommon& Vec) + : Data(Vec.data()), Length(Vec.size()) { + debugCheckNullptrInvariant(); + } + + template < + typename Container, + typename = std::enable_if_t().data())>, + T*>::value>> + /* implicit */ ArrayRef(const Container& container) + : Data(container.data()), Length(container.size()) { + debugCheckNullptrInvariant(); + } + + /// Construct an ArrayRef from a std::vector. + // The enable_if stuff here makes sure that this isn't used for + // std::vector, because ArrayRef can't work on a std::vector + // bitfield. + template + /* implicit */ ArrayRef(const std::vector& Vec) + : Data(Vec.data()), Length(Vec.size()) { + static_assert( + !std::is_same::value, + "ArrayRef cannot be constructed from a std::vector bitfield."); + } + + /// Construct an ArrayRef from a std::array + template + /* implicit */ constexpr ArrayRef(const std::array& Arr) + : Data(Arr.data()), Length(N) {} + + /// Construct an ArrayRef from a C array. + template + /* implicit */ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {} + + /// Construct an ArrayRef from a std::initializer_list. + /* implicit */ constexpr ArrayRef(const std::initializer_list& Vec) + : Data( + std::begin(Vec) == std::end(Vec) ? static_cast(nullptr) + : std::begin(Vec)), + Length(Vec.size()) {} + + /// @} + /// @name Simple Operations + /// @{ + + constexpr iterator begin() const { + return Data; + } + constexpr iterator end() const { + return Data + Length; + } + + // These are actually the same as iterator, since ArrayRef only + // gives you const iterators. 
+ constexpr const_iterator cbegin() const { + return Data; + } + constexpr const_iterator cend() const { + return Data + Length; + } + + constexpr reverse_iterator rbegin() const { + return reverse_iterator(end()); + } + constexpr reverse_iterator rend() const { + return reverse_iterator(begin()); + } + + /// empty - Check if the array is empty. + constexpr bool empty() const { + return Length == 0; + } + + constexpr const T* data() const { + return Data; + } + + /// size - Get the array size. + constexpr size_t size() const { + return Length; + } + + /// front - Get the first element. + C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& front() const { + TORCH_CHECK( + !empty(), "ArrayRef: attempted to access front() of empty list"); + return Data[0]; + } + + /// back - Get the last element. + C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& back() const { + TORCH_CHECK(!empty(), "ArrayRef: attempted to access back() of empty list"); + return Data[Length - 1]; + } + + /// equals - Check for element-wise equality. + constexpr bool equals(ArrayRef RHS) const { + return Length == RHS.Length && std::equal(begin(), end(), RHS.begin()); + } + + /// slice(n, m) - Take M elements of the array starting at element N + C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef slice(size_t N, size_t M) + const { + TORCH_CHECK( + N + M <= size(), + "ArrayRef: invalid slice, N = ", + N, + "; M = ", + M, + "; size = ", + size()); + return ArrayRef(data() + N, M); + } + + /// slice(n) - Chop off the first N elements of the array. + constexpr ArrayRef slice(size_t N) const { + return slice(N, size() - N); + } + + /// @} + /// @name Operator Overloads + /// @{ + constexpr const T& operator[](size_t Index) const { + return Data[Index]; + } + + /// Vector compatibility + C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& at(size_t Index) const { + TORCH_CHECK( + Index < Length, + "ArrayRef: invalid index Index = ", + Index, + "; Length = ", + Length); + return Data[Index]; + } + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + typename std::enable_if::value, ArrayRef>::type& + operator=(U&& Temporary) = delete; + + /// Disallow accidental assignment from a temporary. + /// + /// The declaration here is extra complicated so that "arrayRef = {}" + /// continues to select the move assignment operator. + template + typename std::enable_if::value, ArrayRef>::type& + operator=(std::initializer_list) = delete; + + /// @} + /// @name Expensive Operations + /// @{ + std::vector vec() const { + return std::vector(Data, Data + Length); + } + + /// @} +}; + +template +std::ostream& operator<<(std::ostream& out, ArrayRef list) { + int i = 0; + out << "["; + for (auto e : list) { + if (i++ > 0) + out << ", "; + out << e; + } + out << "]"; + return out; +} + +/// @name ArrayRef Convenience constructors +/// @{ + +/// Construct an ArrayRef from a single element. +template +ArrayRef makeArrayRef(const T& OneElt) { + return OneElt; +} + +/// Construct an ArrayRef from a pointer and length. +template +ArrayRef makeArrayRef(const T* data, size_t length) { + return ArrayRef(data, length); +} + +/// Construct an ArrayRef from a range. +template +ArrayRef makeArrayRef(const T* begin, const T* end) { + return ArrayRef(begin, end); +} + +/// Construct an ArrayRef from a SmallVector. +template +ArrayRef makeArrayRef(const SmallVectorImpl& Vec) { + return Vec; +} + +/// Construct an ArrayRef from a SmallVector. 
+template +ArrayRef makeArrayRef(const SmallVector& Vec) { + return Vec; +} + +/// Construct an ArrayRef from a std::vector. +template +ArrayRef makeArrayRef(const std::vector& Vec) { + return Vec; +} + +/// Construct an ArrayRef from a std::array. +template +ArrayRef makeArrayRef(const std::array& Arr) { + return Arr; +} + +/// Construct an ArrayRef from an ArrayRef (no-op) (const) +template +ArrayRef makeArrayRef(const ArrayRef& Vec) { + return Vec; +} + +/// Construct an ArrayRef from an ArrayRef (no-op) +template +ArrayRef& makeArrayRef(ArrayRef& Vec) { + return Vec; +} + +/// Construct an ArrayRef from a C array. +template +ArrayRef makeArrayRef(const T (&Arr)[N]) { + return ArrayRef(Arr); +} + +// WARNING: Template instantiation will NOT be willing to do an implicit +// conversions to get you to an c10::ArrayRef, which is why we need so +// many overloads. + +template +bool operator==(c10::ArrayRef a1, c10::ArrayRef a2) { + return a1.equals(a2); +} + +template +bool operator!=(c10::ArrayRef a1, c10::ArrayRef a2) { + return !a1.equals(a2); +} + +template +bool operator==(const std::vector& a1, c10::ArrayRef a2) { + return c10::ArrayRef(a1).equals(a2); +} + +template +bool operator!=(const std::vector& a1, c10::ArrayRef a2) { + return !c10::ArrayRef(a1).equals(a2); +} + +template +bool operator==(c10::ArrayRef a1, const std::vector& a2) { + return a1.equals(c10::ArrayRef(a2)); +} + +template +bool operator!=(c10::ArrayRef a1, const std::vector& a2) { + return !a1.equals(c10::ArrayRef(a2)); +} + +using IntArrayRef = ArrayRef; + +// This alias is deprecated because it doesn't make ownership +// semantics obvious. Use IntArrayRef instead! +C10_DEFINE_DEPRECATED_USING(IntList, ArrayRef) + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/BFloat16-inl.h b/voice_bridge/torch/include/c10/util/BFloat16-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..29fda9c3880001e905d2d9edbca2dbbcbc8efa4d --- /dev/null +++ b/voice_bridge/torch/include/c10/util/BFloat16-inl.h @@ -0,0 +1,315 @@ +#pragma once + +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +/// Constructors +inline C10_HOST_DEVICE BFloat16::BFloat16(float value) { +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 && \ + defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + x = __bfloat16_as_ushort(__float2bfloat16(value)); +#else + // RNE by default + x = detail::round_to_nearest_even(value); +#endif +} + +/// Implicit conversions +inline C10_HOST_DEVICE BFloat16::operator float() const { +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + return __bfloat162float(*reinterpret_cast(&x)); +#else + return detail::f32_from_bits(x); +#endif +} + +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +inline C10_HOST_DEVICE BFloat16::BFloat16(const __nv_bfloat16& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE BFloat16::operator __nv_bfloat16() const { + return *reinterpret_cast(&x); +} +#endif + +// CUDA intrinsics + +#if defined(__CUDACC__) || defined(__HIPCC__) +inline C10_DEVICE BFloat16 __ldg(const BFloat16* ptr) { +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 && \ + defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + return __ldg(reinterpret_cast(ptr)); +#else + return *ptr; +#endif +} +#endif + +/// Arithmetic + +inline C10_HOST_DEVICE BFloat16 +operator+(const BFloat16& a, const BFloat16& b) { + return static_cast(a) + 
static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 +operator-(const BFloat16& a, const BFloat16& b) { + return static_cast(a) - static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 +operator*(const BFloat16& a, const BFloat16& b) { + return static_cast(a) * static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator/(const BFloat16& a, const BFloat16& b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator-(const BFloat16& a) { + return -static_cast(a); +} + +inline C10_HOST_DEVICE BFloat16& operator+=(BFloat16& a, const BFloat16& b) { + a = a + b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator-=(BFloat16& a, const BFloat16& b) { + a = a - b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator*=(BFloat16& a, const BFloat16& b) { + a = a * b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator/=(BFloat16& a, const BFloat16& b) { + a = a / b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator|(BFloat16& a, const BFloat16& b) { + a.x = a.x | b.x; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator^(BFloat16& a, const BFloat16& b) { + a.x = a.x ^ b.x; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator&(BFloat16& a, const BFloat16& b) { + a.x = a.x & b.x; + return a; +} + +/// Arithmetic with floats + +inline C10_HOST_DEVICE float operator+(BFloat16 a, float b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE float operator-(BFloat16 a, float b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE float operator*(BFloat16 a, float b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE float operator/(BFloat16 a, float b) { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE float operator+(float a, BFloat16 b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE float operator-(float a, BFloat16 b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE float operator*(float a, BFloat16 b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE float operator/(float a, BFloat16 b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE float& operator+=(float& a, const BFloat16& b) { + return a += static_cast(b); +} +inline C10_HOST_DEVICE float& operator-=(float& a, const BFloat16& b) { + return a -= static_cast(b); +} +inline C10_HOST_DEVICE float& operator*=(float& a, const BFloat16& b) { + return a *= static_cast(b); +} +inline C10_HOST_DEVICE float& operator/=(float& a, const BFloat16& b) { + return a /= static_cast(b); +} + +/// Arithmetic with doubles + +inline C10_HOST_DEVICE double operator+(BFloat16 a, double b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE double operator-(BFloat16 a, double b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE double operator*(BFloat16 a, double b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE double operator/(BFloat16 a, double b) { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE double operator+(double a, BFloat16 b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE double operator-(double a, BFloat16 b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE double operator*(double a, BFloat16 b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE double operator/(double a, BFloat16 b) { + return a / static_cast(b); +} + +/// Arithmetic with ints + +inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int b) { + return 
a - static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator+(int a, BFloat16 b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE BFloat16 operator-(int a, BFloat16 b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE BFloat16 operator*(int a, BFloat16 b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE BFloat16 operator/(int a, BFloat16 b) { + return static_cast(a) / b; +} + +//// Arithmetic with int64_t + +inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int64_t b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int64_t b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int64_t b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int64_t b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator+(int64_t a, BFloat16 b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE BFloat16 operator-(int64_t a, BFloat16 b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE BFloat16 operator*(int64_t a, BFloat16 b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE BFloat16 operator/(int64_t a, BFloat16 b) { + return static_cast(a) / b; +} + +// Overloading < and > operators, because std::max and std::min use them. + +inline C10_HOST_DEVICE bool operator>(BFloat16& lhs, BFloat16& rhs) { + return float(lhs) > float(rhs); +} + +inline C10_HOST_DEVICE bool operator<(BFloat16& lhs, BFloat16& rhs) { + return float(lhs) < float(rhs); +} + +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_signed = true; + static constexpr bool is_specialized = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr auto has_denorm = numeric_limits::has_denorm; + static constexpr auto has_denorm_loss = + numeric_limits::has_denorm_loss; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 8; + static constexpr int digits10 = 2; + static constexpr int max_digits10 = 4; + static constexpr int radix = 2; + static constexpr int min_exponent = -125; + static constexpr int min_exponent10 = -37; + static constexpr int max_exponent = 128; + static constexpr int max_exponent10 = 38; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = + numeric_limits::tinyness_before; + + static constexpr c10::BFloat16 min() { + return c10::BFloat16(0x0080, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 lowest() { + return c10::BFloat16(0xFF7F, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 max() { + return c10::BFloat16(0x7F7F, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 epsilon() { + return c10::BFloat16(0x3C00, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 round_error() { + return c10::BFloat16(0x3F00, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 infinity() { + return c10::BFloat16(0x7F80, 
c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 quiet_NaN() { + return c10::BFloat16(0x7FC0, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 signaling_NaN() { + return c10::BFloat16(0x7F80, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 denorm_min() { + return c10::BFloat16(0x0001, c10::BFloat16::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/voice_bridge/torch/include/c10/util/BFloat16-math.h b/voice_bridge/torch/include/c10/util/BFloat16-math.h new file mode 100644 index 0000000000000000000000000000000000000000..e990d553469cca0c35aac6ef5d40da1ff15ac8b2 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/BFloat16-math.h @@ -0,0 +1,184 @@ +#pragma once + +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion") +#endif + +namespace std { + +/// Used by vec256::map +inline c10::BFloat16 acos(c10::BFloat16 a) { + return std::acos(float(a)); +} +inline c10::BFloat16 asin(c10::BFloat16 a) { + return std::asin(float(a)); +} +inline c10::BFloat16 atan(c10::BFloat16 a) { + return std::atan(float(a)); +} +inline c10::BFloat16 erf(c10::BFloat16 a) { + return std::erf(float(a)); +} +inline c10::BFloat16 erfc(c10::BFloat16 a) { + return std::erfc(float(a)); +} +inline c10::BFloat16 exp(c10::BFloat16 a) { + return std::exp(float(a)); +} +inline c10::BFloat16 expm1(c10::BFloat16 a) { + return std::expm1(float(a)); +} +inline c10::BFloat16 log(c10::BFloat16 a) { + return std::log(float(a)); +} +inline c10::BFloat16 log10(c10::BFloat16 a) { + return std::log10(float(a)); +} +inline c10::BFloat16 log1p(c10::BFloat16 a) { + return std::log1p(float(a)); +} +inline c10::BFloat16 log2(c10::BFloat16 a) { + return std::log2(float(a)); +} +inline c10::BFloat16 ceil(c10::BFloat16 a) { + return std::ceil(float(a)); +} +inline c10::BFloat16 cos(c10::BFloat16 a) { + return std::cos(float(a)); +} +inline c10::BFloat16 floor(c10::BFloat16 a) { + return std::floor(float(a)); +} +inline c10::BFloat16 nearbyint(c10::BFloat16 a) { + return std::nearbyint(float(a)); +} +inline c10::BFloat16 sin(c10::BFloat16 a) { + return std::sin(float(a)); +} +inline c10::BFloat16 tan(c10::BFloat16 a) { + return std::tan(float(a)); +} +inline c10::BFloat16 sinh(c10::BFloat16 a) { + return std::sinh(float(a)); +} +inline c10::BFloat16 cosh(c10::BFloat16 a) { + return std::cosh(float(a)); +} +inline c10::BFloat16 tanh(c10::BFloat16 a) { + return std::tanh(float(a)); +} +inline c10::BFloat16 trunc(c10::BFloat16 a) { + return std::trunc(float(a)); +} +inline c10::BFloat16 lgamma(c10::BFloat16 a) { + return std::lgamma(float(a)); +} +inline c10::BFloat16 sqrt(c10::BFloat16 a) { + return std::sqrt(float(a)); +} +inline c10::BFloat16 rsqrt(c10::BFloat16 a) { + return 1.0 / std::sqrt(float(a)); +} +inline c10::BFloat16 abs(c10::BFloat16 a) { + return std::abs(float(a)); +} +#if defined(_MSC_VER) && defined(__CUDACC__) +inline c10::BFloat16 pow(c10::BFloat16 a, double b) { + return std::pow(float(a), float(b)); +} +#else +inline c10::BFloat16 pow(c10::BFloat16 a, double b) { + return std::pow(float(a), b); +} +#endif +inline c10::BFloat16 pow(c10::BFloat16 a, c10::BFloat16 b) { + return std::pow(float(a), float(b)); +} +inline c10::BFloat16 fmod(c10::BFloat16 a, c10::BFloat16 b) { + return std::fmod(float(a), float(b)); +} + +/* + The following function is inspired from the implementation in `musl` + Link to License: 
https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT + ---------------------------------------------------------------------- + Copyright Β© 2005-2020 Rich Felker, et al. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + ---------------------------------------------------------------------- + */ +C10_HOST_DEVICE inline c10::BFloat16 nextafter( + c10::BFloat16 from, + c10::BFloat16 to) { + // Reference: + // https://git.musl-libc.org/cgit/musl/tree/src/math/nextafter.c + using int_repr_t = uint16_t; + using float_t = c10::BFloat16; + constexpr uint8_t bits = 16; + union { + float_t f; + int_repr_t i; + } ufrom = {from}, uto = {to}; + + // get a mask to get the sign bit i.e. MSB + int_repr_t sign_mask = int_repr_t{1} << (bits - 1); + + // short-circuit: if either is NaN, return NaN + if (from != from || to != to) { + return from + to; + } + + // short-circuit: if they are exactly the same. + if (ufrom.i == uto.i) { + return from; + } + + // mask the sign-bit to zero i.e. positive + // equivalent to abs(x) + int_repr_t abs_from = ufrom.i & ~sign_mask; + int_repr_t abs_to = uto.i & ~sign_mask; + if (abs_from == 0) { + // if both are zero but with different sign, + // preserve the sign of `to`. + if (abs_to == 0) { + return to; + } + // smallest subnormal with sign of `to`. + ufrom.i = (uto.i & sign_mask) | int_repr_t{1}; + return ufrom.f; + } + + // if abs(from) > abs(to) or sign(from) != sign(to) + if (abs_from > abs_to || ((ufrom.i ^ uto.i) & sign_mask)) { + ufrom.i--; + } else { + ufrom.i++; + } + + return ufrom.f; +} + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/voice_bridge/torch/include/c10/util/BFloat16.h b/voice_bridge/torch/include/c10/util/BFloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..1ada02bba1ce79f3d19b1a70b1cf68f8bef5a1b3 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/BFloat16.h @@ -0,0 +1,101 @@ +#pragma once + +// Defines the bloat16 type (brain floating-point). This representation uses +// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa. + +#include +#include +#include + +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 +#include +#endif + +namespace c10 { + +namespace detail { +inline C10_HOST_DEVICE float f32_from_bits(uint16_t src) { + float res = 0; + uint32_t tmp = src; + tmp <<= 16; + +#if defined(USE_ROCM) + float* tempRes; + + // We should be using memcpy in order to respect the strict aliasing rule + // but it fails in the HIP environment. 
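+  // (Worked example of the shift above: bfloat16 is exactly the upper half
+  // of an IEEE-754 binary32, so 1.0f = 0x3F800000 widens from the bfloat16
+  // bits 0x3F80, and -2.5f = 0xC0200000 widens from 0xC020.)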
+ tempRes = reinterpret_cast(&tmp); + res = *tempRes; +#else + std::memcpy(&res, &tmp, sizeof(tmp)); +#endif + + return res; +} + +inline C10_HOST_DEVICE uint16_t bits_from_f32(float src) { + uint32_t res = 0; + +#if defined(USE_ROCM) + // We should be using memcpy in order to respect the strict aliasing rule + // but it fails in the HIP environment. + uint32_t* tempRes = reinterpret_cast(&src); + res = *tempRes; +#else + std::memcpy(&res, &src, sizeof(res)); +#endif + + return res >> 16; +} + +inline C10_HOST_DEVICE uint16_t round_to_nearest_even(float src) { +#if defined(USE_ROCM) + if (src != src) { +#elif defined(_MSC_VER) + if (isnan(src)) { +#else + if (std::isnan(src)) { +#endif + return UINT16_C(0x7FC0); + } else { + union { + uint32_t U32; + float F32; + }; + + F32 = src; + uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF); + return static_cast((U32 + rounding_bias) >> 16); + } +} +} // namespace detail + +struct alignas(2) BFloat16 { + uint16_t x; + + // HIP wants __host__ __device__ tag, CUDA does not +#if defined(USE_ROCM) + C10_HOST_DEVICE BFloat16() = default; +#else + BFloat16() = default; +#endif + + struct from_bits_t {}; + static constexpr C10_HOST_DEVICE from_bits_t from_bits() { + return from_bits_t(); + } + + constexpr C10_HOST_DEVICE BFloat16(unsigned short bits, from_bits_t) + : x(bits){}; + inline C10_HOST_DEVICE BFloat16(float value); + inline C10_HOST_DEVICE operator float() const; + +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 + inline C10_HOST_DEVICE BFloat16(const __nv_bfloat16& value); + explicit inline C10_HOST_DEVICE operator __nv_bfloat16() const; +#endif +}; + +} // namespace c10 + +#include // IWYU pragma: keep diff --git a/voice_bridge/torch/include/c10/util/Backtrace.h b/voice_bridge/torch/include/c10/util/Backtrace.h new file mode 100644 index 0000000000000000000000000000000000000000..75691286d9019a778b0b6752d3fbde492a2ebdfb --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Backtrace.h @@ -0,0 +1,17 @@ +#ifndef C10_UTIL_BACKTRACE_H_ +#define C10_UTIL_BACKTRACE_H_ + +#include +#include +#include + +#include + +namespace c10 { +C10_API std::string get_backtrace( + size_t frames_to_skip = 0, + size_t maximum_number_of_frames = 64, + bool skip_python_frames = true); +} // namespace c10 + +#endif // C10_UTIL_BACKTRACE_H_ diff --git a/voice_bridge/torch/include/c10/util/Bitset.h b/voice_bridge/torch/include/c10/util/Bitset.h new file mode 100644 index 0000000000000000000000000000000000000000..4143ae595e317df593e58d2271e625c057026845 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Bitset.h @@ -0,0 +1,120 @@ +#pragma once + +#include +#include +#include +#if defined(_MSC_VER) +#include +#endif + +namespace c10 { +namespace utils { + +/** + * This is a simple bitset class with sizeof(long long int) bits. + * You can set bits, unset bits, query bits by index, + * and query for the first set bit. + * Before using this class, please also take a look at std::bitset, + * which has more functionality and is more generic. It is probably + * a better fit for your use case. The sole reason for c10::utils::bitset + * to exist is that std::bitset misses a find_first_set() method. 
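+ *
+ * A minimal usage sketch (illustrative only):
+ *
+ *   c10::utils::bitset b;
+ *   b.set(3);
+ *   b.set(7);
+ *   b.for_each_set_bit([](size_t i) {
+ *     // called with i == 3, then i == 7
+ *   });
+ *   b.unset(3);
+ *   bool has7 = b.get(7); // true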
+ */ +struct bitset final { + private: +#if defined(_MSC_VER) + // MSVCs _BitScanForward64 expects int64_t + using bitset_type = int64_t; +#else + // POSIX ffsll expects long long int + using bitset_type = long long int; +#endif + public: + static constexpr size_t NUM_BITS() { + return 8 * sizeof(bitset_type); + } + + constexpr bitset() noexcept : bitset_(0) {} + constexpr bitset(const bitset&) noexcept = default; + constexpr bitset(bitset&&) noexcept = default; + // there is an issure for gcc 5.3.0 when define default function as constexpr + // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68754. + bitset& operator=(const bitset&) noexcept = default; + bitset& operator=(bitset&&) noexcept = default; + + constexpr void set(size_t index) noexcept { + bitset_ |= (static_cast(1) << index); + } + + constexpr void unset(size_t index) noexcept { + bitset_ &= ~(static_cast(1) << index); + } + + constexpr bool get(size_t index) const noexcept { + return bitset_ & (static_cast(1) << index); + } + + constexpr bool is_entirely_unset() const noexcept { + return 0 == bitset_; + } + + // Call the given functor with the index of each bit that is set + template + void for_each_set_bit(Func&& func) const { + bitset cur = *this; + size_t index = cur.find_first_set(); + while (0 != index) { + // -1 because find_first_set() is not one-indexed. + index -= 1; + func(index); + cur.unset(index); + index = cur.find_first_set(); + } + } + + private: + // Return the index of the first set bit. The returned index is one-indexed + // (i.e. if the very first bit is set, this function returns '1'), and a + // return of '0' means that there was no bit set. + size_t find_first_set() const { +#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) + unsigned long result; + bool has_bits_set = (0 != _BitScanForward64(&result, bitset_)); + if (!has_bits_set) { + return 0; + } + return result + 1; +#elif defined(_MSC_VER) && defined(_M_IX86) + unsigned long result; + if (static_cast(bitset_) != 0) { + bool has_bits_set = + (0 != _BitScanForward(&result, static_cast(bitset_))); + if (!has_bits_set) { + return 0; + } + return result + 1; + } else { + bool has_bits_set = + (0 != _BitScanForward(&result, static_cast(bitset_ >> 32))); + if (!has_bits_set) { + return 32; + } + return result + 33; + } +#else + return __builtin_ffsll(bitset_); +#endif + } + + friend bool operator==(bitset lhs, bitset rhs) noexcept { + return lhs.bitset_ == rhs.bitset_; + } + + bitset_type bitset_; +}; + +inline bool operator!=(bitset lhs, bitset rhs) noexcept { + return !(lhs == rhs); +} + +} // namespace utils +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/C++17.h b/voice_bridge/torch/include/c10/util/C++17.h new file mode 100644 index 0000000000000000000000000000000000000000..c51275721e5849adfa6ac9d424f2f3d31d8f45c4 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/C++17.h @@ -0,0 +1,455 @@ +#pragma once +#ifndef C10_UTIL_CPP17_H_ +#define C10_UTIL_CPP17_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \ + __GNUC__ < 5 +#error \ + "You're trying to build PyTorch with a too old version of GCC. We need GCC 5 or later." +#endif + +#if defined(__clang__) && __clang_major__ < 4 +#error \ + "You're trying to build PyTorch with a too old version of Clang. We need Clang 4 or later." 
+#endif + +#if (defined(_MSC_VER) && (!defined(_MSVC_LANG) || _MSVC_LANG < 201402L)) || \ + (!defined(_MSC_VER) && __cplusplus < 201402L) +#error You need C++14 to compile PyTorch +#endif + +#if defined(_WIN32) && (defined(min) || defined(max)) +#error Macro clash with min and max -- define NOMINMAX when compiling your program on Windows +#endif + +/* + * This header adds some polyfills with C++17 functionality + */ + +namespace c10 { + +// in c++17 std::result_of has been superceded by std::invoke_result. Since +// c++20, std::result_of is removed. +template +#if defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L +using invoke_result = typename std::invoke_result; +#else +using invoke_result = typename std::result_of; +#endif + +template +using invoke_result_t = typename invoke_result::type; + +namespace guts { + +template +typename std::enable_if< + !std::is_array::value && !std::is_array::value && + std::is_base_of::value, + std::unique_ptr>::type +make_unique_base(Args&&... args) { + return std::unique_ptr(new Child(std::forward(args)...)); +} + +#if defined(__cpp_lib_logical_traits) && !(defined(_MSC_VER) && _MSC_VER < 1920) + +template +using conjunction = std::conjunction; +template +using disjunction = std::disjunction; +template +using bool_constant = std::bool_constant; +template +using negation = std::negation; + +#else + +// Implementation taken from http://en.cppreference.com/w/cpp/types/conjunction +template +struct conjunction : std::true_type {}; +template +struct conjunction : B1 {}; +template +struct conjunction + : std::conditional_t, B1> {}; + +// Implementation taken from http://en.cppreference.com/w/cpp/types/disjunction +template +struct disjunction : std::false_type {}; +template +struct disjunction : B1 {}; +template +struct disjunction + : std::conditional_t> {}; + +// Implementation taken from +// http://en.cppreference.com/w/cpp/types/integral_constant +template +using bool_constant = std::integral_constant; + +// Implementation taken from http://en.cppreference.com/w/cpp/types/negation +template +struct negation : bool_constant {}; + +#endif + +#ifdef __cpp_lib_void_t + +template +using void_t = std::void_t; + +#else + +// Implementation taken from http://en.cppreference.com/w/cpp/types/void_t +// (it takes CWG1558 into account and also works for older compilers) +template +struct make_void { + typedef void type; +}; +template +using void_t = typename make_void::type; + +#endif + +#if defined(USE_ROCM) +// rocm doesn't like the C10_HOST_DEVICE +#define CUDA_HOST_DEVICE +#else +#define CUDA_HOST_DEVICE C10_HOST_DEVICE +#endif + +#ifdef __cpp_lib_apply + +template +CUDA_HOST_DEVICE inline constexpr decltype(auto) apply(F&& f, Tuple&& t) { + return std::apply(std::forward(f), std::forward(t)); +} + +#else + +// Implementation from http://en.cppreference.com/w/cpp/utility/apply (but +// modified) +// TODO This is an incomplete implementation of std::apply, not working for +// member functions. 
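+// The call shape matches std::apply. A minimal sketch (illustrative only):
+//
+//   auto args = std::make_tuple(2, 3);
+//   int product =
+//       c10::guts::apply([](int a, int b) { return a * b; }, args); // 6
+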
+namespace detail { +template +#if defined(_MSC_VER) +// MSVC has a problem with the decltype() return type, but it also doesn't need +// it +C10_HOST_DEVICE constexpr auto apply_impl( + F&& f, + Tuple&& t, + std::index_sequence) +#else +// GCC/Clang need the decltype() return type +CUDA_HOST_DEVICE constexpr decltype(auto) apply_impl( + F&& f, + Tuple&& t, + std::index_sequence) +#endif +{ + return std::forward(f)(std::get(std::forward(t))...); +} +} // namespace detail + +template +CUDA_HOST_DEVICE constexpr decltype(auto) apply(F&& f, Tuple&& t) { + return detail::apply_impl( + std::forward(f), + std::forward(t), + std::make_index_sequence< + std::tuple_size>::value>{}); +} + +#endif + +#undef CUDA_HOST_DEVICE + +template +typename std::enable_if< + std::is_member_pointer::type>::value, + typename c10::invoke_result_t>::type +invoke(Functor&& f, Args&&... args) { + return std::mem_fn(std::forward(f))(std::forward(args)...); +} + +template +typename std::enable_if< + !std::is_member_pointer::type>::value, + typename c10::invoke_result_t>::type +invoke(Functor&& f, Args&&... args) { + return std::forward(f)(std::forward(args)...); +} + +namespace detail { +struct _identity final { + template + using type_identity = T; + + template + decltype(auto) operator()(T&& arg) { + return std::forward(arg); + } +}; + +template +struct function_takes_identity_argument : std::false_type {}; +#if defined(_MSC_VER) +// For some weird reason, MSVC shows a compiler error when using guts::void_t +// instead of std::void_t. But we're only building on MSVC versions that have +// std::void_t, so let's just use that one. +template +struct function_takes_identity_argument< + Func, + std::void_t()(_identity()))>> : std::true_type { +}; +#else +template +struct function_takes_identity_argument< + Func, + void_t()(_identity()))>> : std::true_type {}; +#endif + +template +struct _if_constexpr; + +template <> +struct _if_constexpr final { + template < + class ThenCallback, + class ElseCallback, + std::enable_if_t< + function_takes_identity_argument::value, + void*> = nullptr> + static decltype(auto) call( + ThenCallback&& thenCallback, + ElseCallback&& /* elseCallback */) { + // The _identity instance passed in can be used to delay evaluation of an + // expression, because the compiler can't know that it's just the identity + // we're passing in. + return thenCallback(_identity()); + } + + template < + class ThenCallback, + class ElseCallback, + std::enable_if_t< + !function_takes_identity_argument::value, + void*> = nullptr> + static decltype(auto) call( + ThenCallback&& thenCallback, + ElseCallback&& /* elseCallback */) { + return thenCallback(); + } +}; + +template <> +struct _if_constexpr final { + template < + class ThenCallback, + class ElseCallback, + std::enable_if_t< + function_takes_identity_argument::value, + void*> = nullptr> + static decltype(auto) call( + ThenCallback&& /* thenCallback */, + ElseCallback&& elseCallback) { + // The _identity instance passed in can be used to delay evaluation of an + // expression, because the compiler can't know that it's just the identity + // we're passing in. + return elseCallback(_identity()); + } + + template < + class ThenCallback, + class ElseCallback, + std::enable_if_t< + !function_takes_identity_argument::value, + void*> = nullptr> + static decltype(auto) call( + ThenCallback&& /* thenCallback */, + ElseCallback&& elseCallback) { + return elseCallback(); + } +}; +} // namespace detail + +/* + * Get something like C++17 if constexpr in C++14. 
+ * + * Example 1: simple constexpr if/then/else + * template int increment_absolute_value() { + * int result = arg; + * if_constexpr<(arg > 0)>( + * [&] { ++result; } // then-case + * [&] { --result; } // else-case + * ); + * return result; + * } + * + * Example 2: without else case (i.e. conditionally prune code from assembly) + * template int decrement_if_positive() { + * int result = arg; + * if_constexpr<(arg > 0)>( + * // This decrement operation is only present in the assembly for + * // template instances with arg > 0. + * [&] { --result; } + * ); + * return result; + * } + * + * Example 3: branch based on type (i.e. replacement for SFINAE) + * struct MyClass1 {int value;}; + * struct MyClass2 {int val}; + * template + * int func(T t) { + * return if_constexpr::value>( + * [&](auto _) { return _(t).value; }, // this code is invalid for T == + * MyClass2, so a regular non-constexpr if statement wouldn't compile + * [&](auto _) { return _(t).val; } // this code is invalid for T == + * MyClass1 + * ); + * } + * + * Note: The _ argument passed in Example 3 is the identity function, i.e. it + * does nothing. It is used to force the compiler to delay type checking, + * because the compiler doesn't know what kind of _ is passed in. Without it, + * the compiler would fail when you try to access t.value but the member doesn't + * exist. + * + * Note: In Example 3, both branches return int, so func() returns int. This is + * not necessary. If func() had a return type of "auto", then both branches + * could return different types, say func() could return int and + * func() could return string. + * + * Note: if_constexpr is *eager* w.r.t. template expansion - meaning + * this polyfill does not behave like a true "if statement at compilation time". + * The `_` trick above only defers typechecking, which happens after + * templates have been expanded. (Of course this is all that's necessary for + * many use cases). + */ +template +decltype(auto) if_constexpr( + ThenCallback&& thenCallback, + ElseCallback&& elseCallback) { +#if defined(__cpp_if_constexpr) + // If we have C++17, just use it's "if constexpr" feature instead of wrapping + // it. This will give us better error messages. + if constexpr (Condition) { + if constexpr (detail::function_takes_identity_argument< + ThenCallback>::value) { + // Note that we use static_cast(t) instead of std::forward (or + // ::std::forward) because using the latter produces some compilation + // errors about ambiguous `std` on MSVC when using C++17. This static_cast + // is just what std::forward is doing under the hood, and is equivalent. + return static_cast(thenCallback)(detail::_identity()); + } else { + return static_cast(thenCallback)(); + } + } else { + if constexpr (detail::function_takes_identity_argument< + ElseCallback>::value) { + return static_cast(elseCallback)(detail::_identity()); + } else { + return static_cast(elseCallback)(); + } + } +#else + // C++14 implementation of if constexpr + return detail::_if_constexpr::call( + static_cast(thenCallback), + static_cast(elseCallback)); +#endif +} + +template +decltype(auto) if_constexpr(ThenCallback&& thenCallback) { +#if defined(__cpp_if_constexpr) + // If we have C++17, just use it's "if constexpr" feature instead of wrapping + // it. This will give us better error messages. 
+ if constexpr (Condition) { + if constexpr (detail::function_takes_identity_argument< + ThenCallback>::value) { + // Note that we use static_cast(t) instead of std::forward (or + // ::std::forward) because using the latter produces some compilation + // errors about ambiguous `std` on MSVC when using C++17. This static_cast + // is just what std::forward is doing under the hood, and is equivalent. + return static_cast(thenCallback)(detail::_identity()); + } else { + return static_cast(thenCallback)(); + } + } +#else + // C++14 implementation of if constexpr + return if_constexpr( + static_cast(thenCallback), [](auto) {}); +#endif +} + +// GCC 4.8 doesn't define std::to_string, even though that's in C++11. Let's +// define it. +namespace detail { +class DummyClassForToString final {}; +} // namespace detail +} // namespace guts +} // namespace c10 +namespace std { +// We use SFINAE to detect if std::to_string exists for a type, but that only +// works if the function name is defined. So let's define a std::to_string for a +// dummy type. If you're getting an error here saying that this overload doesn't +// match your std::to_string() call, then you're calling std::to_string() but +// should be calling c10::guts::to_string(). +inline std::string to_string(c10::guts::detail::DummyClassForToString) { + return ""; +} + +} // namespace std +namespace c10 { +namespace guts { +namespace detail { + +template +struct to_string_ final { + static std::string call(T value) { + std::ostringstream str; + str << value; + return str.str(); + } +}; +// If a std::to_string exists, use that instead +template +struct to_string_()))>> + final { + static std::string call(T value) { + return std::to_string(value); + } +}; +} // namespace detail +template +inline std::string to_string(T value) { + return detail::to_string_::call(value); +} + +template +constexpr const T& min(const T& a, const T& b) { + return (b < a) ? b : a; +} + +template +constexpr const T& max(const T& a, const T& b) { + return (a < b) ? b : a; +} + +} // namespace guts +} // namespace c10 + +#endif // C10_UTIL_CPP17_H_ diff --git a/voice_bridge/torch/include/c10/util/CallOnce.h b/voice_bridge/torch/include/c10/util/CallOnce.h new file mode 100644 index 0000000000000000000000000000000000000000..a31600ef2e7c032526aeef8f35fa9785fe5d4d3a --- /dev/null +++ b/voice_bridge/torch/include/c10/util/CallOnce.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +// custom c10 call_once implementation to avoid the deadlock in std::call_once. +// The implementation here is a simplified version from folly and likely much +// much higher memory footprint. +template +inline void call_once(Flag& flag, F&& f, Args&&... args) { + if (C10_LIKELY(flag.test_once())) { + return; + } + flag.call_once_slow(std::forward(f), std::forward(args)...); +} + +class once_flag { + public: +#ifndef _WIN32 + // running into build error on MSVC. Can't seem to get a repro locally so I'm + // just avoiding constexpr + // + // C:/actions-runner/_work/pytorch/pytorch\c10/util/CallOnce.h(26): error: + // defaulted default constructor cannot be constexpr because the + // corresponding implicitly declared default constructor would not be + // constexpr 1 error detected in the compilation of + // "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/cuda/cub.cu". 
+ constexpr +#endif + once_flag() noexcept = default; + once_flag(const once_flag&) = delete; + once_flag& operator=(const once_flag&) = delete; + + private: + template + friend void call_once(Flag& flag, F&& f, Args&&... args); + + template + void call_once_slow(F&& f, Args&&... args) { + std::lock_guard guard(mutex_); + if (init_.load(std::memory_order_relaxed)) { + return; + } + c10::guts::invoke(f, std::forward(args)...); + init_.store(true, std::memory_order_release); + } + + bool test_once() { + return init_.load(std::memory_order_acquire); + } + + void reset_once() { + init_.store(false, std::memory_order_release); + } + + private: + std::mutex mutex_; + std::atomic init_{false}; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/ConstexprCrc.h b/voice_bridge/torch/include/c10/util/ConstexprCrc.h new file mode 100644 index 0000000000000000000000000000000000000000..5e36e464a43e355a4989cc7d40fe422c7bcd2860 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/ConstexprCrc.h @@ -0,0 +1,131 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { +namespace util { + +namespace detail { +constexpr uint64_t crc64_table[] = { + 0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, + 0x8f689158505e9b8b, 0xc038e5739841b68f, 0xbae095bba8743ff6, + 0x358804e3f82aa47d, 0x4f50742bc81f2d04, 0xab28ecb46814fe75, + 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, + 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, + 0xe478989fa00bd371, 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, + 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, 0xbd301a4810ffd90e, + 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, + 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, + 0x594882d7b0f40a7f, 0x1618f6fc78eb277b, 0x6cc0863448deae02, + 0xe3a8176c18803589, 0x997067a428b5bcf0, 0xfa11fe77117cdf02, + 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, + 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, + 0xb5418a5cd963f206, 0x513912c379682177, 0x2be1620b495da80e, + 0xa489f35319033385, 0xde51839b2936bafc, 0x9101f7b0e12997f8, + 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, + 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, + 0x08719014c99c2b08, 0x4721e43f0183060c, 0x3df994f731b68f75, + 0xb29105af61e814fe, 0xc849756751dd9d87, 0x2c31edf8f1d64ef6, + 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, + 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, + 0x636199d339c963f2, 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, + 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, 0x1f423fcee22f9be0, + 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, + 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, + 0xfb3aa75142244891, 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, + 0x41da32eaea507767, 0x3b024222da65fe1e, 0xa2722586f2d042ee, + 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, + 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, + 0xed2251ad3acf6fea, 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, + 0xfcea28a2faafae69, 0x8632586aca9a2710, 0xc9622c4102850a14, + 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, + 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, + 0xaa03b5923b4c69e6, 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, + 0x10e3202993385610, 0x6a3b50e1a30ddf69, 0x8e43c87e03060c18, + 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, + 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, + 0xc113bc55cb19211c, 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, + 
0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, 0x985b3e827bed2b63, + 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, + 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, + 0x7c23a61ddbe6f812, 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, + 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, 0x95ac9329ac4bc9b5, + 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, + 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, + 0xdafce7026454e4b1, 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, + 0xcb349e0da4342532, 0xb1eceec59401ac4b, 0xfebc9aee5c1e814f, + 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, + 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, + 0x67ccfd4a74ab3dbf, 0x289c8961bcb410bb, 0x5244f9a98c8199c2, + 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, 0x438c80a64ce15841, + 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, + 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, + 0x0cdcf48d84fe7545, 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, + 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, 0xaf85882d2576a038, + 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, + 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, + 0x4bfd10b2857d7349, 0x04ad64994d625e4d, 0x7e7514517d57d734, + 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, 0x12b5926535897936, + 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, + 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, + 0x5de5e64efd965432, 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, + 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, 0x79a59ba2c5dc31cc, + 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, + 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, + 0xc5bed8cc867b7f51, 0x8aeeace74e645255, 0xf036dc2f7e51db2c, + 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, 0xe1fea520be311aaf, + 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, + 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, + 0xaeaed10b762e37ab, 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, + 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, 0xf7e653dcc6da3dd4, + 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, + 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, + 0x139ecb4366d1eea5, 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, + 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, 0xb0c7b7e3c7593bd8, + 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, + 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, + 0xff97c3c80f4616dc, 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, + 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, 0xdbd7be24370c7322, + 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, + 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, + 0x42a7d9801fb9cfd2, 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, + 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, 0x66e7a46c27f3aa2c, + 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, + 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, + 0x29b7d047efec8728, +}; + +inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA uint64_t +crc64impl(uint64_t accumulator, const char* data, size_t size) { + for (size_t i = 0; i < size; ++i) { + accumulator = + crc64_table[(accumulator ^ data[i]) & 0xFF] ^ (accumulator >> 8); + } + return accumulator; +} +} // namespace detail + +struct crc64_t final : IdWrapper { + constexpr crc64_t(uint64_t checksum) : IdWrapper(checksum) {} + constexpr uint64_t checksum() const { + return this->underlyingId(); + } +}; + +// CRC64 with Jones coefficients and an init value of 0. 
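+//
+// Because the computation is constexpr (outside of Windows CUDA builds, per
+// C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA), a checksum can typically be taken at
+// compile time. A sketch (illustrative only):
+//
+//   constexpr c10::util::crc64_t id = c10::util::crc64("some_key", 8);
+//   uint64_t raw = id.checksum();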
+inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t +crc64(const char* str, size_t size) { + return crc64_t{detail::crc64impl(0, str, size)}; +} + +inline C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA crc64_t crc64(c10::string_view str) { + return crc64(str.data(), str.size()); +} +} // namespace util +} // namespace c10 + +// Allow usage of crc64_t in std::unordered_set +C10_DEFINE_HASH_FOR_IDWRAPPER(c10::util::crc64_t); diff --git a/voice_bridge/torch/include/c10/util/DeadlockDetection.h b/voice_bridge/torch/include/c10/util/DeadlockDetection.h new file mode 100644 index 0000000000000000000000000000000000000000..da177995ad74e9dd5841e0deb1fcbf05f8408358 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/DeadlockDetection.h @@ -0,0 +1,50 @@ +#pragma once + +#include + +/// This file provides some simple utilities for detecting common deadlocks in +/// PyTorch. For now, we focus exclusively on detecting Python GIL deadlocks, +/// as the GIL is a wide ranging lock that is taken out in many situations. +/// The basic strategy is before performing an operation that may block, you +/// can use TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() to assert that the GIL is +/// not held. This macro is to be used in contexts where no static dependency +/// on Python is available (we will handle indirecting a virtual call for you). +/// +/// If the GIL is held by a torchdeploy interpreter, we always report false. +/// If you are in a context where Python bindings are available, it's better +/// to directly assert on PyGILState_Check (as it avoids a vcall and also +/// works correctly with torchdeploy.) + +namespace c10 { + +#define TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() \ + TORCH_INTERNAL_ASSERT( \ + !c10::impl::check_python_gil(), \ + "Holding GIL before a blocking operation! Please release the GIL before blocking, or see https://github.com/pytorch/pytorch/issues/56297 for how to release the GIL for destructors of objects") + +namespace impl { + +C10_API bool check_python_gil(); + +struct C10_API PythonGILHooks { + virtual ~PythonGILHooks() = default; + // Returns true if we hold the GIL. If not linked against Python we + // always return false. + virtual bool check_python_gil() const = 0; +}; + +C10_API void SetPythonGILHooks(PythonGILHooks* factory); + +// DO NOT call this registerer from a torch deploy instance! You will clobber +// other registrations +struct C10_API PythonGILHooksRegisterer { + explicit PythonGILHooksRegisterer(PythonGILHooks* factory) { + SetPythonGILHooks(factory); + } + ~PythonGILHooksRegisterer() { + SetPythonGILHooks(nullptr); + } +}; + +} // namespace impl +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Deprecated.h b/voice_bridge/torch/include/c10/util/Deprecated.h new file mode 100644 index 0000000000000000000000000000000000000000..88440a0242eb4e9e87433278006863fd38c5450d --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Deprecated.h @@ -0,0 +1,102 @@ +#pragma once + +/** + * This file provides portable macros for marking declarations + * as deprecated. You should generally use C10_DEPRECATED, + * except when marking 'using' declarations as deprecated, + * in which case you should use C10_DEFINE_DEPRECATED_USING + * (due to portability concerns). + */ + +// Sample usage: +// +// C10_DEPRECATED void bad_func(); +// struct C10_DEPRECATED BadStruct { +// ... +// }; + +// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses +// the "__declspec(deprecated)" implementation and not the C++14 +// "[[deprecated]]" attribute. 
We tried enabling "[[deprecated]]" for C++14 on +// MSVC, but ran into issues with some older MSVC versions. +#if (defined(__cplusplus) && __cplusplus >= 201402L) +#define C10_DEPRECATED [[deprecated]] +#define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] +#elif defined(__GNUC__) +#define C10_DEPRECATED __attribute__((deprecated)) +// TODO Is there some way to implement this? +#define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) + +#elif defined(_MSC_VER) +#define C10_DEPRECATED __declspec(deprecated) +#define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message)) +#else +#warning "You need to implement C10_DEPRECATED for this compiler" +#define C10_DEPRECATED +#endif + +// Sample usage: +// +// C10_DEFINE_DEPRECATED_USING(BadType, int) +// +// which is the portable version of +// +// using BadType [[deprecated]] = int; + +// technically [[deprecated]] syntax is from c++14 standard, but it works in +// many compilers. +#if defined(__has_cpp_attribute) +#if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName [[deprecated]] = TypeThingy; +#endif +#endif + +#if defined(_MSC_VER) +#if defined(__CUDACC__) +// neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows; +// you get the error: +// +// error: attribute does not apply to any entity +// +// So we just turn the macro off in this case. +#if defined(C10_DEFINE_DEPRECATED_USING) +#undef C10_DEFINE_DEPRECATED_USING +#endif +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = TypeThingy; +#else +// [[deprecated]] does work in windows without nvcc, though msc doesn't support +// `__has_cpp_attribute` when c++14 is supported, otherwise +// __declspec(deprecated) is used as the alternative. +#ifndef C10_DEFINE_DEPRECATED_USING +#if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName [[deprecated]] = TypeThingy; +#else +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = __declspec(deprecated) TypeThingy; +#endif +#endif +#endif +#endif + +#if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) +// nvcc has a bug where it doesn't understand __attribute__((deprecated)) +// declarations even when the host compiler supports it. We'll only use this gcc +// attribute when not cuda, and when using a GCC compiler that doesn't support +// the c++14 syntax we checked for above (available in __GNUC__ >= 5) +#if !defined(__CUDACC__) +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName __attribute__((deprecated)) = TypeThingy; +#else +// using cuda + gcc < 5, neither deprecated syntax is available so turning off. 
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = TypeThingy; +#endif +#endif + +#if !defined(C10_DEFINE_DEPRECATED_USING) +#warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" +#define C10_DEFINE_DEPRECATED_USING +#endif diff --git a/voice_bridge/torch/include/c10/util/DimVector.h b/voice_bridge/torch/include/c10/util/DimVector.h new file mode 100644 index 0000000000000000000000000000000000000000..7d1ff1962c447ea47198aa84d738620d02497a53 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/DimVector.h @@ -0,0 +1,16 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +constexpr size_t kDimVectorStaticSize = C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE; + +/// A container for sizes or strides +using DimVector = SmallVector; +using SymDimVector = SmallVector; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Exception.h b/voice_bridge/torch/include/c10/util/Exception.h new file mode 100644 index 0000000000000000000000000000000000000000..a869038ea444f43a493c463f32b7b2acc931f199 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Exception.h @@ -0,0 +1,644 @@ +#ifndef C10_UTIL_EXCEPTION_H_ +#define C10_UTIL_EXCEPTION_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if defined(_MSC_VER) && _MSC_VER <= 1900 +#define __func__ __FUNCTION__ +#endif + +namespace c10 { + +/// The primary ATen error class. +/// Provides a complete error message with source location information via +/// `what()`, and a more concise message via `what_without_backtrace()`. +/// Don't throw this directly; use TORCH_CHECK/TORCH_INTERNAL_ASSERT instead. +/// +/// NB: c10::Error is handled specially by the default torch to suppress the +/// backtrace, see torch/csrc/Exceptions.h +class C10_API Error : public std::exception { + // The actual error message. + std::string msg_; + + // Context for the message (in order of decreasing specificity). Context will + // be automatically formatted appropriately, so it is not necessary to add + // extra leading/trailing newlines to strings inside this vector + std::vector context_; + + // The C++ backtrace at the point when this exception was raised. This + // may be empty if there is no valid backtrace. (We don't use optional + // here to reduce the dependencies this file has.) + std::string backtrace_; + + // These two are derived fields from msg_stack_ and backtrace_, but we need + // fields for the strings so that we can return a const char* (as the + // signature of std::exception requires). Currently, the invariant + // is that these fields are ALWAYS populated consistently with respect + // to msg_stack_ and backtrace_. + std::string what_; + std::string what_without_backtrace_; + + // This is a little debugging trick: you can stash a relevant pointer + // in caller, and then when you catch the exception, you can compare + // against pointers you have on hand to get more information about + // where the exception came from. In Caffe2, this is used to figure + // out which operator raised an exception. + const void* caller_; + + public: + // PyTorch-style Error constructor. 
NB: the implementation of this + // is actually in Logging.cpp + Error(SourceLocation source_location, std::string msg); + + // Caffe2-style error message + Error( + const char* file, + const uint32_t line, + const char* condition, + const std::string& msg, + const std::string& backtrace, + const void* caller = nullptr); + + // Base constructor + Error(std::string msg, std::string backtrace, const void* caller = nullptr); + + // Add some new context to the message stack. The last added context + // will be formatted at the end of the context list upon printing. + // WARNING: This method is O(n) in the size of the stack, so don't go + // wild adding a ridiculous amount of context to error messages. + void add_context(std::string msg); + + const std::string& msg() const { + return msg_; + } + + const std::vector& context() const { + return context_; + } + + const std::string& backtrace() const { + return backtrace_; + } + + /// Returns the complete error message, including the source location. + /// The returned pointer is invalidated if you call add_context() on + /// this object. + const char* what() const noexcept override { + return what_.c_str(); + } + + const void* caller() const noexcept { + return caller_; + } + + /// Returns only the error message string, without source location. + /// The returned pointer is invalidated if you call add_context() on + /// this object. + const char* what_without_backtrace() const noexcept { + return what_without_backtrace_.c_str(); + } + + private: + void refresh_what(); + std::string compute_what(bool include_backtrace) const; +}; + +class C10_API WarningHandler { + public: + virtual ~WarningHandler() = default; + /// The default warning handler. Prints the message to stderr. + virtual void process( + const SourceLocation& source_location, + const std::string& msg, + const bool verbatim); +}; + +namespace Warning { + +// Note: [Verbatim Warnings] +// Warnings originating in C++ code can appear out-of-place to Python users: +// a user runs a line in Python, but the warning references a line in C++. +// Some parts of PyTorch, like the JIT, are cognizant of this mismatch +// and take care to map warnings back to the user's program, but most +// of PyTorch simply throws a context-free warning. To allow warning +// handlers to add context where appropriate, warn takes the +// "verbatim" flag. When this is false a warning handler might append +// the C++ warning to a Python warning message that relates the warning +// back to the user's program. Callers who have already accounted for +// context in their warnings should set verbatim to true so their warnings +// appear without modification. + +/// Issue a warning with a given message. Dispatched to the current +/// warning handler. +C10_API void warn( + const SourceLocation& source_location, + const std::string& msg, + bool verbatim); +C10_API void warn( + SourceLocation source_location, + const char* msg, + bool verbatim); +C10_API void warn( + SourceLocation source_location, + ::c10::detail::CompileTimeEmptyString msg, + bool verbatim); +/// Sets the global warning handler. This is not thread-safe, so it should +/// generally be called once during initialization or while holding the GIL +/// for programs that use python. +/// User is responsible for keeping the WarningHandler alive until +/// it is not needed. +C10_API void set_warning_handler(WarningHandler* handler) noexcept(true); +/// Gets the global warning handler. 
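+/// (Handlers are typically installed RAII-style via WarningHandlerGuard,
+/// declared below. A sketch, where MyHandler is an illustrative subclass of
+/// c10::WarningHandler overriding process():
+///   MyHandler handler;
+///   c10::Warning::WarningHandlerGuard guard(&handler);
+///   // warnings raised in this scope are routed to handler.process(...)
+/// )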
+C10_API WarningHandler* get_warning_handler() noexcept(true); + +class C10_API WarningHandlerGuard { + WarningHandler* prev_handler_; + + public: + WarningHandlerGuard(WarningHandler* new_handler) + : prev_handler_(c10::Warning::get_warning_handler()) { + c10::Warning::set_warning_handler(new_handler); + } + ~WarningHandlerGuard() { + c10::Warning::set_warning_handler(prev_handler_); + } +}; + +/// The TORCH_WARN_ONCE macro is difficult to test for. Use +/// setWarnAlways(true) to turn it into TORCH_WARN, which can be +/// tested for more easily. +C10_API void set_warnAlways(bool) noexcept(true); +C10_API bool get_warnAlways(void) noexcept(true); + +// A RAII guard that sets warn_always (not thread-local) on +// construction, and sets it back to the original value upon destruction. +struct C10_API WarnAlways { + public: + explicit WarnAlways(bool setting = true); + ~WarnAlways(); + + private: + bool prev_setting; +}; + +} // namespace Warning + +// Used in ATen for out-of-bound indices that can reasonably only be detected +// lazily inside a kernel (See: advanced indexing). These turn into +// IndexError when they cross to Python. +class C10_API IndexError : public Error { + using Error::Error; +}; + +// Used in ATen for invalid values. These turn into +// ValueError when they cross to Python. +class C10_API ValueError : public Error { + using Error::Error; +}; + +// Used in ATen for invalid types. These turn into +// TypeError when they cross to Python. +class C10_API TypeError : public Error { + using Error::Error; +}; + +// Used in ATen for functionality that is not implemented. These turn into +// NotImplementedError when they cross to Python. +class C10_API NotImplementedError : public Error { + using Error::Error; +}; + +// Used in ATen for non finite indices. These turn into +// ExitException when they cross to Python. +class C10_API EnforceFiniteError : public Error { + using Error::Error; +}; + +// Used in Onnxifi backend lowering. These turn into +// ExitException when they cross to Python. +class C10_API OnnxfiBackendSystemError : public Error { + using Error::Error; +}; + +// Used for numerical errors from the linalg module. These +// turn into LinAlgError when they cross into Python. +class C10_API LinAlgError : public Error { + using Error::Error; +}; + +class C10_API OutOfMemoryError : public Error { + using Error::Error; +}; + +// A utility function to return an exception std::string by prepending its +// exception type before its what() content +C10_API std::string GetExceptionString(const std::exception& e); + +} // namespace c10 + +// Private helper macro for implementing TORCH_INTERNAL_ASSERT and TORCH_CHECK +// +// Note: In the debug build With MSVC, __LINE__ might be of long type (a.k.a +// int32_t), which is different from the definition of `SourceLocation` that +// requires unsigned int (a.k.a uint32_t) and may cause a compile error with the +// message: error C2397: conversion from 'long' to 'uint32_t' requires a +// narrowing conversion Here the static cast is used to pass the build. if this +// is used inside a lambda the __func__ macro expands to operator(), which isn't +// very useful, but hard to fix in a macro so suppressing the warning. +#define C10_THROW_ERROR(err_type, msg) \ + throw ::c10::err_type( \ + {__func__, __FILE__, static_cast(__LINE__)}, msg) + +// Private helper macro for workaround MSVC misexpansion of nested macro +// invocations involving __VA_ARGS__. 
See +// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly +#define C10_EXPAND_MSVC_WORKAROUND(x) x + +// On nvcc, C10_UNLIKELY thwarts missing return statement analysis. In cases +// where the unlikely expression may be a constant, use this macro to ensure +// return statement analysis keeps working (at the cost of not getting the +// likely/unlikely annotation on nvcc). +// https://github.com/pytorch/pytorch/issues/21418 +// +// Currently, this is only used in the error reporting macros below. If you +// want to use it more generally, move me to Macros.h +// +// TODO: Brian Vaughan observed that we might be able to get this to work on +// nvcc by writing some sort of C++ overload that distinguishes constexpr inputs +// from non-constexpr. Since there isn't any evidence that losing C10_UNLIKELY +// in nvcc is causing us perf problems, this is not yet implemented, but this +// might be an interesting piece of C++ code for an intrepid bootcamper to +// write. +#if defined(__CUDACC__) +#define C10_UNLIKELY_OR_CONST(e) e +#else +#define C10_UNLIKELY_OR_CONST(e) C10_UNLIKELY(e) +#endif + +// ---------------------------------------------------------------------------- +// Error reporting macros +// ---------------------------------------------------------------------------- + +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_RETHROW(e, ...) throw +#else +#define TORCH_RETHROW(e, ...) \ + do { \ + e.add_context(::c10::str(__VA_ARGS__)); \ + throw; \ + } while (false) +#endif + +// A utility macro to provide assert()-like functionality; that is, enforcement +// of internal invariants in code. It supports an arbitrary number of extra +// arguments (evaluated only on failure), which will be printed in the assert +// failure message using operator<< (this is useful to print some variables +// which may be useful for debugging.) +// +// Usage: +// TORCH_INTERNAL_ASSERT(should_be_true); +// TORCH_INTERNAL_ASSERT(x == 0, "x = ", x); +// +// Assuming no bugs in PyTorch, the conditions tested by this macro should +// always be true; e.g., it should be possible to disable all of these +// conditions without changing observable user behavior. If you would like to +// do error reporting for user input, please use TORCH_CHECK instead. +// +// NOTE: It is SAFE to use this macro in production code; on failure, this +// simply raises an exception, it does NOT unceremoniously quit the process +// (unlike assert()). +// +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_INTERNAL_ASSERT(cond, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchCheckFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + #cond " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__)); \ + } +#else +// It would be nice if we could build a combined string literal out of +// the TORCH_INTERNAL_ASSERT prefix and a user-provided string literal +// as the first argument, but there doesn't seem to be any good way to +// do that while still supporting having a first argument that isn't a +// string literal. +#define TORCH_INTERNAL_ASSERT(cond, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchInternalAssertFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + #cond \ + " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__) ":" C10_STRINGIZE( \ + __LINE__) ", please report a bug to PyTorch. ", \ + c10::str(__VA_ARGS__)); \ + } +#endif + +// A utility macro to make it easier to test for error conditions from user +// input. 
Like TORCH_INTERNAL_ASSERT, it supports an arbitrary number of extra +// arguments (evaluated only on failure), which will be printed in the error +// message using operator<< (e.g., you can pass any object which has +// operator<< defined. Most objects in PyTorch have these definitions!) +// +// Usage: +// TORCH_CHECK(should_be_true); // A default error message will be provided +// // in this case; but we recommend writing an +// // explicit error message, as it is more +// // user friendly. +// TORCH_CHECK(x == 0, "Expected x to be 0, but got ", x); +// +// On failure, this macro will raise an exception. If this exception propagates +// to Python, it will convert into a Python RuntimeError. +// +// NOTE: It is SAFE to use this macro in production code; on failure, this +// simply raises an exception, it does NOT unceremoniously quit the process +// (unlike CHECK() from glog.) +// +#define TORCH_CHECK_WITH(error_t, cond, ...) \ + TORCH_CHECK_WITH_MSG(error_t, cond, "", __VA_ARGS__) + +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_CHECK_MSG(cond, type, ...) \ + (#cond #type " CHECK FAILED at " C10_STRINGIZE(__FILE__)) +#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \ + } +#else +namespace c10 { +namespace detail { +template +decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) { + return ::c10::str(args...); +} +inline C10_API const char* torchCheckMsgImpl(const char* msg) { + return msg; +} +// If there is just 1 user-provided C-string argument, use it. +inline C10_API const char* torchCheckMsgImpl( + const char* /*msg*/, + const char* args) { + return args; +} +} // namespace detail +} // namespace c10 + +#define TORCH_CHECK_MSG(cond, type, ...) \ + (::c10::detail::torchCheckMsgImpl( \ + "Expected " #cond \ + " to be true, but got false. " \ + "(Could this error message be improved? If so, " \ + "please report an enhancement request to PyTorch.)", \ + ##__VA_ARGS__)) +#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + C10_THROW_ERROR(error_t, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \ + } +#endif + +namespace c10 { +namespace detail { + +[[noreturn]] C10_API void torchCheckFail( + const char* func, + const char* file, + uint32_t line, + const std::string& msg); +[[noreturn]] C10_API void torchCheckFail( + const char* func, + const char* file, + uint32_t line, + const char* msg); + +// The c10::str() call that creates userMsg can have 1 of 3 return +// types depending on the number and types of arguments passed to +// TORCH_INTERNAL_ASSERT. 0 arguments will get a +// CompileTimeEmptyString, 1 const char * will be passed straight +// through, and anything else will get converted to std::string. +[[noreturn]] C10_API void torchInternalAssertFail( + const char* func, + const char* file, + uint32_t line, + const char* condMsg, + const char* userMsg); +[[noreturn]] inline C10_API void torchInternalAssertFail( + const char* func, + const char* file, + uint32_t line, + const char* condMsg, + ::c10::detail::CompileTimeEmptyString /*userMsg*/) { + torchCheckFail(func, file, line, condMsg); +} +[[noreturn]] C10_API void torchInternalAssertFail( + const char* func, + const char* file, + uint32_t line, + const char* condMsg, + const std::string& userMsg); + +} // namespace detail +} // namespace c10 + +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_CHECK(cond, ...) 
\ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchCheckFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \ + } +#else +#define TORCH_CHECK(cond, ...) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + ::c10::detail::torchCheckFail( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \ + } +#endif + +// A utility macro that does what `TORCH_CHECK` does if compiled in the host +// code, otherwise does nothing. It is meant to be used in code shared between +// host and device code as an alternative to `TORCH_CHECK`. +#if defined(__CUDACC__) || defined(__HIPCC__) +#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) +#else +#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) TORCH_CHECK(cond, ##__VA_ARGS__) +#endif + +// Debug-only version of TORCH_INTERNAL_ASSERT. This macro only checks in debug +// builds, and does nothing in release builds. It is appropriate to use +// in situations where you want to add an assert to a hot path, but it is +// too expensive to run this assert in production builds. +#ifdef NDEBUG +// Optimized version - generates no code. +#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \ + while (false) \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)) +#else +#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)) +#endif + +// TODO: We're going to get a lot of similar looking string literals +// this way; check if this actually affects binary size. + +// Like TORCH_CHECK, but raises LinAlgError instead of Error. +#define TORCH_CHECK_LINALG(cond, ...) \ + TORCH_CHECK_WITH_MSG(LinAlgError, cond, "LINALG", __VA_ARGS__) + +// Like TORCH_CHECK, but raises IndexErrors instead of Errors. +#define TORCH_CHECK_INDEX(cond, ...) \ + TORCH_CHECK_WITH_MSG(IndexError, cond, "INDEX", __VA_ARGS__) + +// Like TORCH_CHECK, but raises ValueErrors instead of Errors. +#define TORCH_CHECK_VALUE(cond, ...) \ + TORCH_CHECK_WITH_MSG(ValueError, cond, "VALUE", __VA_ARGS__) + +// Like TORCH_CHECK, but raises TypeErrors instead of Errors. +#define TORCH_CHECK_TYPE(cond, ...) \ + TORCH_CHECK_WITH_MSG(TypeError, cond, "TYPE", __VA_ARGS__) + +// Like TORCH_CHECK, but raises NotImplementedErrors instead of Errors. +#define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) \ + TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__) + +// Report a warning to the user. Accepts an arbitrary number of extra +// arguments which are concatenated into the warning message using operator<< +// +#ifdef STRIP_ERROR_MESSAGES +#define TORCH_WARN(...) \ + ::c10::Warning::warn( \ + {__func__, __FILE__, static_cast(__LINE__)}, \ + ::c10::detail::CompileTimeEmptyString{}, \ + false) +#else +#define TORCH_WARN(...) \ + ::c10::Warning::warn( \ + {__func__, __FILE__, static_cast(__LINE__)}, \ + ::c10::str(__VA_ARGS__), \ + false) +#endif + +// Report a warning to the user only once. Accepts an arbitrary number of extra +// arguments which are concatenated into the warning message using operator<< +// +#ifdef STRIP_ERROR_MESSAGES +#define _TORCH_WARN_ONCE(...) \ + C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \ + [&] { \ + ::c10::Warning::warn( \ + {__func__, __FILE__, static_cast(__LINE__)}, \ + ::c10::detail::CompileTimeEmptyString{}, \ + false); \ + return true; \ + }() +#else +#define _TORCH_WARN_ONCE(...)
\ + C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \ + [&] { \ + ::c10::Warning::warn( \ + {__func__, __FILE__, static_cast(__LINE__)}, \ + ::c10::str(__VA_ARGS__), \ + false); \ + return true; \ + }() +#endif + +#define TORCH_WARN_ONCE(...) \ + if (::c10::Warning::get_warnAlways()) { \ + TORCH_WARN(__VA_ARGS__); \ + } else { \ + _TORCH_WARN_ONCE(__VA_ARGS__); \ + } + +// Report an error with a specific argument +// NOTE: using the argument name in TORCH_CHECK's message is preferred +#define TORCH_CHECK_ARG(cond, argN, ...) \ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) + +// ---------------------------------------------------------------------------- +// Deprecated macros +// ---------------------------------------------------------------------------- + +namespace c10 { +namespace detail { + +/* +// Deprecation disabled until we fix sites in our codebase +C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg) +instead.") +*/ +inline void deprecated_AT_ERROR() {} + +/* +// Deprecation disabled until we fix sites in our codebase +C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an +internal invariant failure, use " \ + "TORCH_INTERNAL_ASSERT instead; if you mean to do user +error checking, use " \ "TORCH_CHECK. See +https://github.com/pytorch/pytorch/issues/20287 for more details.") +*/ +inline void deprecated_AT_ASSERT() {} + +/* +// Deprecation disabled until we fix sites in our codebase +C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an +internal invariant failure, use " \ + "TORCH_INTERNAL_ASSERT instead; if you mean to do user +error checking, use " \ "TORCH_CHECK. See +https://github.com/pytorch/pytorch/issues/20287 for more details.") +*/ +inline void deprecated_AT_ASSERTM() {} + +} // namespace detail +} // namespace c10 + +// Deprecated alias; this alias was deprecated because people kept mistakenly +// using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK +// instead. See https://github.com/pytorch/pytorch/issues/20287 for more +// details. +#define AT_ASSERT(...) \ + do { \ + ::c10::detail::deprecated_AT_ASSERT(); \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)); \ + } while (false) + +// Deprecated alias, like AT_ASSERT. The new TORCH_INTERNAL_ASSERT macro +// supports both 0-ary and variadic calls, so having a separate +// message-accepting macro is not necessary. +// +// NB: we MUST include cond explicitly here, as MSVC will miscompile the macro +// expansion, shunting all of __VA_ARGS__ to cond. An alternate workaround +// can be seen at +// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly +#define AT_ASSERTM(cond, ...) \ + do { \ + ::c10::detail::deprecated_AT_ASSERTM(); \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__)); \ + } while (false) + +// Deprecated alias; this alias was deprecated because it represents extra API +// surface that makes it hard for people to understand what macro to use. +// Use TORCH_CHECK(false, ...) or TORCH_INTERNAL_ASSERT(false, ...) to +// unconditionally fail at a line of code. +#define AT_ERROR(...) 
\ + do { \ + ::c10::detail::deprecated_AT_ERROR(); \ + C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \ + } while (false) + +#endif // C10_UTIL_EXCEPTION_H_ diff --git a/voice_bridge/torch/include/c10/util/ExclusivelyOwned.h b/voice_bridge/torch/include/c10/util/ExclusivelyOwned.h new file mode 100644 index 0000000000000000000000000000000000000000..4f74917d1072f350eee326b826e91ea5d70f7159 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/ExclusivelyOwned.h @@ -0,0 +1,143 @@ +#pragma once + +#include + +namespace c10 { + +// See example implementation in TensorBase.h and TensorBody.h. +// Synopsis: +// +// repr_type -- type to use to store an owned T in ExclusivelyOwned. +// +// pointer_type -- pointer-esque type to return from +// ExclusivelyOwned's get() and operator*() methods. +// +// const_pointer_type -- similar to pointer_type, used for the const methods. +// +// static repr_type nullRepr() -- return a null instance of repr_type. +// +// template +// static repr_type createInPlace(Args&&... args) -- used by the in-place +// ExclusivelyOwned constructor. +// +// static repr_type moveToRepr(T&& x) -- move the given x into an +// instance of repr_type. used by the ExclusivelyOwned(T&&) +// constructor. +// +// static void destroyOwned(repr_type x) -- free memory for a +// known-exclusively-owned instance of x. Replaces calling repr_type's +// destructor. Being able to implement this more efficiently than +// repr_type's destructor is the main reason to use ExclusivelyOwned +// for a type. +// +// static T take(repr_type&) -- move out of the given repr_type into an owned T. +// +// static pointer_type getImpl(const repr_type&) -- return a pointer +// to the given repr_type. May take repr_type by value if that is more +// efficient. +template +struct ExclusivelyOwnedTraits; + +/// ExclusivelyOwned is a smart-pointer-like wrapper around an +/// exclusively-owned instance of some type T that normally has +/// mandatory reference counting (currently just Tensor). If you have +/// an isolated piece of code that knows that it has sole ownership of +/// an object of one of these types (i.e., because you created it +/// directly or using a factory function) and that object will not +/// escape from that isolated piece of code, then moving the object +/// into an ExclusivelyOwned will avoid an atomic reference count +/// decrement at destruction time. +/// +/// If you directly create the Tensor in the first +/// place, you can use the in_place constructor of ExclusivelyOwned to +/// additionally avoid doing any stores to initialize the refcount & +/// weakcount. +template +class ExclusivelyOwned { + using EOT = ExclusivelyOwnedTraits; + union { + char dummy_; + typename ExclusivelyOwnedTraits::repr_type repr_; + }; + + public: + ExclusivelyOwned() : repr_(EOT::nullRepr()) {} + + explicit ExclusivelyOwned(T&& t) : repr_(EOT::moveToRepr(std::move(t))) {} + + template + explicit ExclusivelyOwned(in_place_t, Args&&... 
args) + : repr_(EOT::createInPlace(std::forward(args)...)) {} + + ExclusivelyOwned(const ExclusivelyOwned&) = delete; + + ExclusivelyOwned(ExclusivelyOwned&& rhs) noexcept + : repr_(std::move(rhs.repr_)) { + rhs.repr_ = EOT::nullRepr(); + } + + ExclusivelyOwned& operator=(const ExclusivelyOwned&) = delete; + + ExclusivelyOwned& operator=(ExclusivelyOwned&& rhs) noexcept { + EOT::destroyOwned(repr_); + repr_ = std::move(rhs.repr_); + rhs.repr_ = EOT::nullRepr(); + return *this; + } + + ExclusivelyOwned& operator=(T&& rhs) noexcept { + EOT::destroyOwned(repr_); + repr_ = EOT::moveToRepr(std::move(rhs)); + return *this; + } + + ~ExclusivelyOwned() { + EOT::destroyOwned(repr_); + // Don't bother to call the destructor of repr_, since we already + // did specialized destruction for the exclusively-owned case in + // destroyOwned! + } + + // We don't provide this because it would require us to be able to + // differentiate an owned-but-empty T from a lack of T. This is + // particularly problematic for Tensor, which wants to use an + // undefined Tensor as its null state. + explicit operator bool() const noexcept = delete; + + operator T() && { + return take(); + } + + // NOTE: the equivalent operation on MaybeOwned is a moving + // operator*. For ExclusivelyOwned, take() and operator*() may well + // have different return types, so they are different functions. + T take() && { + return EOT::take(repr_); + } + + typename EOT::const_pointer_type operator->() const { + return get(); + } + + typename EOT::const_pointer_type get() const { + return EOT::getImpl(repr_); + } + + typename EOT::pointer_type operator->() { + return get(); + } + + typename EOT::pointer_type get() { + return EOT::getImpl(repr_); + } + + std::remove_pointer_t& operator*() const { + return *get(); + } + + std::remove_pointer_t& operator*() { + return *get(); + } +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h b/voice_bridge/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h new file mode 100644 index 0000000000000000000000000000000000000000..143b4df0a4e5f4623a0f9109e74c002064c49292 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h @@ -0,0 +1,74 @@ +#pragma once + +#include + +#include + +namespace c10 { +// Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and +// at::TensorBase. +template +struct ExclusivelyOwnedTensorTraits { + using repr_type = TensorType; + using pointer_type = TensorType*; + using const_pointer_type = const TensorType*; + + static repr_type nullRepr() { + return TensorType(); + } + + template + static repr_type createInPlace(Args&&... args) { + return TensorType(std::forward(args)...); + } + + static repr_type moveToRepr(TensorType&& x) { + return std::move(x); + } + + static void destroyOwned(TensorType& x) { + TensorImpl* const toDestroy = x.unsafeReleaseTensorImpl(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + toDestroy != nullptr, "Tensor somehow got null TensorImpl?"); + // May be 0 because UndefinedTensorImpl doesn't get its refcount + // incremented. 
+ const bool isUndefined = toDestroy == UndefinedTensorImpl::singleton(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + toDestroy->refcount_ == 1 || (toDestroy->refcount_ == 0 && isUndefined), + "ExclusivelyOwned destroyed with isUndefined ", + isUndefined, + " and refcount ", + toDestroy->refcount_, + ", expected 1 or, if isUndefined, 0!"); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + toDestroy->weakcount_ == 1 || + (toDestroy->weakcount_ == 0 && + toDestroy == UndefinedTensorImpl::singleton()), + "ExclusivelyOwned destroyed with isUndefined ", + isUndefined, + " and weakcount ", + toDestroy->weakcount_, + ", expected 1 or, if isUndefined, 0!"); + if (!isUndefined) { +#ifndef NDEBUG + // Needed to pass the debug assertions in ~intrusive_ptr_target. + toDestroy->refcount_ = 0; + toDestroy->weakcount_ = 0; +#endif + delete toDestroy; + } + } + + static TensorType take(TensorType& x) { + return std::move(x); + } + + static pointer_type getImpl(repr_type& x) { + return &x; + } + + static const_pointer_type getImpl(const repr_type& x) { + return &x; + } +}; +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Flags.h b/voice_bridge/torch/include/c10/util/Flags.h new file mode 100644 index 0000000000000000000000000000000000000000..1f9698dc990d000e401b6f2854b94e5599b981a8 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Flags.h @@ -0,0 +1,226 @@ +#ifndef C10_UTIL_FLAGS_H_ +#define C10_UTIL_FLAGS_H_ + +/* Commandline flags support for C10. + * + * This is a portable commandline flags tool for c10, so we can optionally + * choose to use gflags or a lightweight custom implementation if gflags is + * not possible on a certain platform. If you have gflags installed, setting the + * macro C10_USE_GFLAGS will seamlessly route everything to gflags. + * + * To define a flag foo of type bool that defaults to true, do the following in + * the *global* namespace: + * C10_DEFINE_bool(foo, true, "An example."); + * + * To use it in another .cc file, you can use C10_DECLARE_* as follows: + * C10_DECLARE_bool(foo); + * + * In both cases, you can then access the flag via FLAGS_foo. + * + * It is recommended that you build with gflags. To learn more about the flags + * usage, refer to the gflags page here: + * + * https://gflags.github.io/gflags/ + * + * Note about Python users / devs: gflags is initialized from a C++ function, + * ParseCommandLineFlags, and this is usually done in native binaries in the main + * function. As Python does not have a modifiable main function, it is usually + * difficult to change the flags after Python starts. Hence, it is recommended + * that one sets the default values of the flags to values that are acceptable in + * general - that will allow Python to run without incorrect flags. + */ + +#include + +#include +#include + +namespace c10 { +/** + * Sets the usage message when a commandline tool is called with "--help". + */ +C10_API void SetUsageMessage(const std::string& str); + +/** + * Returns the usage message for the commandline tool set by SetUsageMessage. + */ +C10_API const char* UsageMessage(); + +/** + * Parses the commandline flags. + * + * This function parses all the commandline arguments passed in via pargc + * and argv. Once it is finished, pargc and argv will contain the remaining + * commandline args that c10 does not deal with. Note that, following + * convention, argv[0] contains the binary name and is not parsed. + */ +C10_API bool ParseCommandLineFlags(int* pargc, char*** pargv); + +/** + * Checks whether the commandline flags have already been parsed.
+ */ +C10_API bool CommandLineFlagsHasBeenParsed(); + +} // namespace c10 + +//////////////////////////////////////////////////////////////////////////////// +// Below are gflags and non-gflags specific implementations. +// In general, they define the following macros for one to declare (use +// C10_DECLARE) or define (use C10_DEFINE) flags: +// C10_{DECLARE,DEFINE}_{int,int64,double,bool,string} +//////////////////////////////////////////////////////////////////////////////// + +#ifdef C10_USE_GFLAGS + +//////////////////////////////////////////////////////////////////////////////// +// Begin gflags section: most functions are basically rerouted to gflags. +//////////////////////////////////////////////////////////////////////////////// +#include <gflags/gflags.h> + +// C10 uses hidden visibility by default. However, gflags only uses +// export on the Windows platform (with dllexport) and not on Linux/Mac (with +// default visibility). As a result, to ensure that we are always exporting +// global variables, we will redefine the GFLAGS_DLL_DEFINE_FLAG macro if we +// are building C10 as a shared library. +// This has to be done after the inclusion of gflags, because some early +// versions of gflags.h (e.g. 2.0 on ubuntu 14.04) directly define the +// macros, so we need to do our definitions after gflags is included. +#ifdef GFLAGS_DLL_DEFINE_FLAG +#undef GFLAGS_DLL_DEFINE_FLAG +#endif // GFLAGS_DLL_DEFINE_FLAG +#ifdef GFLAGS_DLL_DECLARE_FLAG +#undef GFLAGS_DLL_DECLARE_FLAG +#endif // GFLAGS_DLL_DECLARE_FLAG +#define GFLAGS_DLL_DEFINE_FLAG C10_EXPORT +#define GFLAGS_DLL_DECLARE_FLAG C10_IMPORT + +// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags. +// Using GFLAGS_GFLAGS_H_ to capture this change. +#ifndef GFLAGS_GFLAGS_H_ +namespace gflags = google; +#endif // GFLAGS_GFLAGS_H_ + +// Motivation about the gflags wrapper: +// (1) We would need to make sure that the gflags version and the non-gflags +// version of C10 are going to expose the same flags abstraction. One should +// explicitly use FLAGS_flag_name to access the flags. +// (2) For flag names, it is recommended to start with c10_ to distinguish them +// from regular gflags flags. For example, do +// C10_DEFINE_bool(c10_my_flag, true, "An example"); +// to allow one to use FLAGS_c10_my_flag. +// (3) Gflags has a design issue: it does not properly expose the global flags +// if one builds the library with -fvisibility=hidden. The current gflags (as of +// Aug 2018) only deals with the Windows case using dllexport, and not the Linux +// counterparts. As a result, we will explicitly use C10_EXPORT to export the +// flags defined in C10. This is done via a global reference, so the flag +// itself is not duplicated - under the hood it is the same global gflags flag.
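+//
+// A minimal end-to-end sketch (illustrative only; the tool and flag names are
+// made up, and the same code works with or without C10_USE_GFLAGS):
+//
+//   C10_DEFINE_bool(c10_example_verbose, false, "Print extra diagnostics.");
+//
+//   int main(int argc, char** argv) {
+//     c10::SetUsageMessage("usage: example_tool [options]");
+//     if (!c10::ParseCommandLineFlags(&argc, &argv)) {
+//       return 1; // flag parsing failed
+//     }
+//     if (FLAGS_c10_example_verbose) {
+//       // argc/argv now hold only the arguments c10 did not consume.
+//     }
+//     return 0;
+//   }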
+#define C10_GFLAGS_DEF_WRAPPER(type, real_type, name, default_value, help_str) \ + DEFINE_##type(name, default_value, help_str); + +#define C10_DEFINE_int(name, default_value, help_str) \ + C10_GFLAGS_DEF_WRAPPER(int32, gflags::int32, name, default_value, help_str) +#define C10_DEFINE_int32(name, default_value, help_str) \ + C10_DEFINE_int(name, default_value, help_str) +#define C10_DEFINE_int64(name, default_value, help_str) \ + C10_GFLAGS_DEF_WRAPPER(int64, gflags::int64, name, default_value, help_str) +#define C10_DEFINE_double(name, default_value, help_str) \ + C10_GFLAGS_DEF_WRAPPER(double, double, name, default_value, help_str) +#define C10_DEFINE_bool(name, default_value, help_str) \ + C10_GFLAGS_DEF_WRAPPER(bool, bool, name, default_value, help_str) +#define C10_DEFINE_string(name, default_value, help_str) \ + C10_GFLAGS_DEF_WRAPPER(string, ::fLS::clstring, name, default_value, help_str) + +// DECLARE_typed_var should be used in header files and in the global namespace. +#define C10_GFLAGS_DECLARE_WRAPPER(type, real_type, name) DECLARE_##type(name); + +#define C10_DECLARE_int(name) \ + C10_GFLAGS_DECLARE_WRAPPER(int32, gflags::int32, name) +#define C10_DECLARE_int32(name) C10_DECLARE_int(name) +#define C10_DECLARE_int64(name) \ + C10_GFLAGS_DECLARE_WRAPPER(int64, gflags::int64, name) +#define C10_DECLARE_double(name) \ + C10_GFLAGS_DECLARE_WRAPPER(double, double, name) +#define C10_DECLARE_bool(name) C10_GFLAGS_DECLARE_WRAPPER(bool, bool, name) +#define C10_DECLARE_string(name) \ + C10_GFLAGS_DECLARE_WRAPPER(string, ::fLS::clstring, name) + +//////////////////////////////////////////////////////////////////////////////// +// End gflags section. +//////////////////////////////////////////////////////////////////////////////// + +#else // C10_USE_GFLAGS + +//////////////////////////////////////////////////////////////////////////////// +// Begin non-gflags section: providing equivalent functionality. +//////////////////////////////////////////////////////////////////////////////// + +namespace c10 { + +class C10_API C10FlagParser { + public: + bool success() { + return success_; + } + + protected: + template + bool Parse(const std::string& content, T* value); + bool success_{false}; +}; + +C10_DECLARE_REGISTRY(C10FlagsRegistry, C10FlagParser, const std::string&); + +} // namespace c10 + +// The macros are defined outside the c10 namespace. In your code, you should +// write the C10_DEFINE_* and C10_DECLARE_* macros outside any namespace +// as well. 
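+//
+// For instance (an illustrative sketch; the flag name is made up):
+//
+//   // In exactly one .cc file, at global scope:
+//   C10_DEFINE_int(c10_example_num_threads, 4, "Worker threads to spawn.");
+//
+//   // In any other file that reads the flag, also at global scope:
+//   C10_DECLARE_int(c10_example_num_threads);
+//
+//   int worker_count() {
+//     return FLAGS_c10_example_num_threads;
+//   }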
+ +#define C10_DEFINE_typed_var(type, name, default_value, help_str) \ + C10_EXPORT type FLAGS_##name = default_value; \ + namespace c10 { \ + namespace { \ + class C10FlagParser_##name : public C10FlagParser { \ + public: \ + explicit C10FlagParser_##name(const std::string& content) { \ + success_ = C10FlagParser::Parse(content, &FLAGS_##name); \ + } \ + }; \ + } \ + RegistererC10FlagsRegistry g_C10FlagsRegistry_##name( \ + #name, \ + C10FlagsRegistry(), \ + RegistererC10FlagsRegistry::DefaultCreator, \ + "(" #type ", default " #default_value ") " help_str); \ + } + +#define C10_DEFINE_int(name, default_value, help_str) \ + C10_DEFINE_typed_var(int, name, default_value, help_str) +#define C10_DEFINE_int32(name, default_value, help_str) \ + C10_DEFINE_int(name, default_value, help_str) +#define C10_DEFINE_int64(name, default_value, help_str) \ + C10_DEFINE_typed_var(int64_t, name, default_value, help_str) +#define C10_DEFINE_double(name, default_value, help_str) \ + C10_DEFINE_typed_var(double, name, default_value, help_str) +#define C10_DEFINE_bool(name, default_value, help_str) \ + C10_DEFINE_typed_var(bool, name, default_value, help_str) +#define C10_DEFINE_string(name, default_value, help_str) \ + C10_DEFINE_typed_var(std::string, name, default_value, help_str) + +// DECLARE_typed_var should be used in header files and in the global namespace. +#define C10_DECLARE_typed_var(type, name) C10_IMPORT extern type FLAGS_##name + +#define C10_DECLARE_int(name) C10_DECLARE_typed_var(int, name) +#define C10_DECLARE_int32(name) C10_DECLARE_int(name) +#define C10_DECLARE_int64(name) C10_DECLARE_typed_var(int64_t, name) +#define C10_DECLARE_double(name) C10_DECLARE_typed_var(double, name) +#define C10_DECLARE_bool(name) C10_DECLARE_typed_var(bool, name) +#define C10_DECLARE_string(name) C10_DECLARE_typed_var(std::string, name) + +//////////////////////////////////////////////////////////////////////////////// +// End non-gflags section. +//////////////////////////////////////////////////////////////////////////////// + +#endif // C10_USE_GFLAGS + +#endif // C10_UTIL_FLAGS_H_ diff --git a/voice_bridge/torch/include/c10/util/FunctionRef.h b/voice_bridge/torch/include/c10/util/FunctionRef.h new file mode 100644 index 0000000000000000000000000000000000000000..33a54d525ace417d37fdbbfdc463decf4c95c535 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/FunctionRef.h @@ -0,0 +1,72 @@ +//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains some templates that are useful if you are working with the +// STL at all. +// +// No library is required when using these functions. +// +//===----------------------------------------------------------------------===// + +// c10: modified from llvm::function_ref +// c10: added more SFINAE to enable use in overloaded functions + +#pragma once + +#include +#include +#include + +namespace c10 { + +/// An efficient, type-erasing, non-owning reference to a callable. This is +/// intended for use as the type of a function parameter that is not used +/// after the function in question returns. +/// +/// This class does not own the callable, so it is not in general safe to store +/// a function_ref. 
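+///
+/// A sketch of typical use (illustrative; the names are made up):
+///
+///   int apply_twice(c10::function_ref<int(int)> f, int x) {
+///     return f(f(x)); // refers to the caller's callable; nothing is copied
+///   }
+///
+///   // Fine: the lambda outlives the call that uses the reference.
+///   int y = apply_twice([](int v) { return v + 1; }, 3); // y == 5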
+template +class function_ref; + +template +class function_ref { + Ret (*callback)(intptr_t callable, Params... params) = nullptr; + intptr_t callable; + + template + static Ret callback_fn(intptr_t callable, Params... params) { + return (*reinterpret_cast(callable))(std::forward( + params)...); + } + + public: + function_ref() = default; + function_ref(std::nullptr_t) {} + + template + function_ref( + Callable&& callable, + typename std::enable_if::type, + function_ref>::value>::type* = nullptr, + typename std::enable_if, + Ret>::value>::type* = nullptr) + : callback(callback_fn::type>), + callable(reinterpret_cast(&callable)) {} + + Ret operator()(Params... params) const { + return callback(callable, std::forward(params)...); + } + + operator bool() const { + return callback; + } +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Half-inl.h b/voice_bridge/torch/include/c10/util/Half-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..3ed8cf80d1169de20df6e30a910b5d8de6637377 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Half-inl.h @@ -0,0 +1,324 @@ +#pragma once + +#include +#include +#include + +#ifdef __CUDACC__ +#include +#endif + +#ifdef __HIPCC__ +#include +#endif + +#if defined(SYCL_LANGUAGE_VERSION) +#include // for SYCL 2020 +#elif defined(CL_SYCL_LANGUAGE_VERSION) +#include // for SYCL 1.2.1 +#endif + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +/// Constructors + +inline C10_HOST_DEVICE Half::Half(float value) { +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + x = __half_as_short(__float2half(value)); +#elif defined(__SYCL_DEVICE_ONLY__) + x = sycl::bit_cast(sycl::half(value)); +#else + x = detail::fp16_ieee_from_fp32_value(value); +#endif +} + +/// Implicit conversions + +inline C10_HOST_DEVICE Half::operator float() const { +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + return __half2float(*reinterpret_cast(&x)); +#elif defined(__SYCL_DEVICE_ONLY__) + return float(sycl::bit_cast(x)); +#else + return detail::fp16_ieee_to_fp32_value(x); +#endif +} + +#if defined(__CUDACC__) || defined(__HIPCC__) +inline C10_HOST_DEVICE Half::Half(const __half& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE Half::operator __half() const { + return *reinterpret_cast(&x); +} +#endif + +#ifdef SYCL_LANGUAGE_VERSION +inline C10_HOST_DEVICE Half::Half(const sycl::half& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE Half::operator sycl::half() const { + return *reinterpret_cast(&x); +} +#endif + +// CUDA intrinsics + +#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || \ + (defined(__clang__) && defined(__CUDA__)) +inline __device__ Half __ldg(const Half* ptr) { + return __ldg(reinterpret_cast(ptr)); +} +#endif + +/// Arithmetic + +inline C10_HOST_DEVICE Half operator+(const Half& a, const Half& b) { + return static_cast(a) + static_cast(b); +} + +inline C10_HOST_DEVICE Half operator-(const Half& a, const Half& b) { + return static_cast(a) - static_cast(b); +} + +inline C10_HOST_DEVICE Half operator*(const Half& a, const Half& b) { + return static_cast(a) * static_cast(b); +} + +inline C10_HOST_DEVICE Half operator/(const Half& a, const Half& b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator-(const Half& a) { +#if (defined(__CUDA_ARCH__) && 
__CUDA_ARCH__ >= 530) || \ + defined(__HIP_DEVICE_COMPILE__) + return __hneg(a); +#elif defined(__SYCL_DEVICE_ONLY__) + return -sycl::bit_cast(a); +#else + return -static_cast(a); +#endif +} + +inline C10_HOST_DEVICE Half& operator+=(Half& a, const Half& b) { + a = a + b; + return a; +} + +inline C10_HOST_DEVICE Half& operator-=(Half& a, const Half& b) { + a = a - b; + return a; +} + +inline C10_HOST_DEVICE Half& operator*=(Half& a, const Half& b) { + a = a * b; + return a; +} + +inline C10_HOST_DEVICE Half& operator/=(Half& a, const Half& b) { + a = a / b; + return a; +} + +/// Arithmetic with floats + +inline C10_HOST_DEVICE float operator+(Half a, float b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE float operator-(Half a, float b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE float operator*(Half a, float b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE float operator/(Half a, float b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE float operator+(float a, Half b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE float operator-(float a, Half b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE float operator*(float a, Half b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE float operator/(float a, Half b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE float& operator+=(float& a, const Half& b) { + return a += static_cast(b); +} +inline C10_HOST_DEVICE float& operator-=(float& a, const Half& b) { + return a -= static_cast(b); +} +inline C10_HOST_DEVICE float& operator*=(float& a, const Half& b) { + return a *= static_cast(b); +} +inline C10_HOST_DEVICE float& operator/=(float& a, const Half& b) { + return a /= static_cast(b); +} + +/// Arithmetic with doubles + +inline C10_HOST_DEVICE double operator+(Half a, double b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE double operator-(Half a, double b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE double operator*(Half a, double b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE double operator/(Half a, double b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE double operator+(double a, Half b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE double operator-(double a, Half b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE double operator*(double a, Half b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE double operator/(double a, Half b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +/// Arithmetic with ints + +inline C10_HOST_DEVICE Half operator+(Half a, int b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Half operator-(Half a, int b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Half operator*(Half a, int b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Half operator/(Half a, int b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator+(int a, Half b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Half operator-(int a, Half b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Half operator*(int a, Half b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Half operator/(int a, Half b) { + return static_cast(a) / b; +} + +//// Arithmetic with int64_t + +inline C10_HOST_DEVICE Half operator+(Half a, int64_t b) { + return a + static_cast(b); +} 
+inline C10_HOST_DEVICE Half operator-(Half a, int64_t b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Half operator*(Half a, int64_t b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Half operator/(Half a, int64_t b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator+(int64_t a, Half b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Half operator-(int64_t a, Half b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Half operator*(int64_t a, Half b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Half operator/(int64_t a, Half b) { + return static_cast(a) / b; +} + +/// NOTE: we do not define comparisons directly and instead rely on the implicit +/// conversion from c10::Half to float. + +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr auto has_denorm = numeric_limits::has_denorm; + static constexpr auto has_denorm_loss = + numeric_limits::has_denorm_loss; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = true; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 11; + static constexpr int digits10 = 3; + static constexpr int max_digits10 = 5; + static constexpr int radix = 2; + static constexpr int min_exponent = -13; + static constexpr int min_exponent10 = -4; + static constexpr int max_exponent = 16; + static constexpr int max_exponent10 = 4; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = + numeric_limits::tinyness_before; + static constexpr c10::Half min() { + return c10::Half(0x0400, c10::Half::from_bits()); + } + static constexpr c10::Half lowest() { + return c10::Half(0xFBFF, c10::Half::from_bits()); + } + static constexpr c10::Half max() { + return c10::Half(0x7BFF, c10::Half::from_bits()); + } + static constexpr c10::Half epsilon() { + return c10::Half(0x1400, c10::Half::from_bits()); + } + static constexpr c10::Half round_error() { + return c10::Half(0x3800, c10::Half::from_bits()); + } + static constexpr c10::Half infinity() { + return c10::Half(0x7C00, c10::Half::from_bits()); + } + static constexpr c10::Half quiet_NaN() { + return c10::Half(0x7E00, c10::Half::from_bits()); + } + static constexpr c10::Half signaling_NaN() { + return c10::Half(0x7D00, c10::Half::from_bits()); + } + static constexpr c10::Half denorm_min() { + return c10::Half(0x0001, c10::Half::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/voice_bridge/torch/include/c10/util/Half.h b/voice_bridge/torch/include/c10/util/Half.h new file mode 100644 index 0000000000000000000000000000000000000000..a786db956cd64e2bbd3764ae5dee753bc95231d2 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Half.h @@ -0,0 +1,547 @@ +#pragma once + +/// Defines the Half type (half-precision floating-point) including conversions +/// to standard C types and basic arithmetic operations. Note that arithmetic +/// operations are implemented by converting to floating point and +/// performing the operation in float32, instead of using CUDA half intrinsics. 
+/// Most uses of this type within ATen are memory bound, including the +/// element-wise kernels, and the half intrinsics aren't efficient on all GPUs. +/// If you are writing a compute bound kernel, you can use the CUDA half +/// intrinsics directly on the Half type from device code. + +#include +#include +#include +#include +#include + +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#include +#include +#elif !defined(__OPENCL_VERSION__) +#include +#include +#endif + +#ifdef _MSC_VER +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __CUDACC__ +#include +#endif + +#ifdef __HIPCC__ +#include +#endif + +#if defined(SYCL_LANGUAGE_VERSION) +#include // for SYCL 2020 +#elif defined(CL_SYCL_LANGUAGE_VERSION) +#include // for SYCL 1.2.1 +#endif + +// Standard check for compiling CUDA with clang +#if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__) +#define C10_DEVICE_HOST_FUNCTION __device__ __host__ +#else +#define C10_DEVICE_HOST_FUNCTION +#endif + +#include // operator typeid + +namespace c10 { + +namespace detail { + +C10_DEVICE_HOST_FUNCTION inline float fp32_from_bits(uint32_t w) { +#if defined(__OPENCL_VERSION__) + return as_float(w); +#elif defined(__CUDA_ARCH__) + return __uint_as_float((unsigned int)w); +#elif defined(__INTEL_COMPILER) + return _castu32_f32(w); +#else + union { + uint32_t as_bits; + float as_value; + } fp32 = {w}; + return fp32.as_value; +#endif +} + +C10_DEVICE_HOST_FUNCTION inline uint32_t fp32_to_bits(float f) { +#if defined(__OPENCL_VERSION__) + return as_uint(f); +#elif defined(__CUDA_ARCH__) + return (uint32_t)__float_as_uint(f); +#elif defined(__INTEL_COMPILER) + return _castf32_u32(f); +#else + union { + float as_value; + uint32_t as_bits; + } fp32 = {f}; + return fp32.as_bits; +#endif +} + +/* + * Convert a 16-bit floating-point number in IEEE half-precision format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. + */ +inline uint32_t fp16_ieee_to_fp32_bits(uint16_t h) { + /* + * Extend the half-precision floating-point number to 32 bits and shift to the + * upper part of the 32-bit word: + * +---+-----+------------+-------------------+ + * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 31 26-30 16-25 0-15 + * + * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 + * - zero bits. + */ + const uint32_t w = (uint32_t)h << 16; + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = w & UINT32_C(0x80000000); + /* + * Extract mantissa and biased exponent of the input number into the bits 0-30 + * of the 32-bit word: + * + * +---+-----+------------+-------------------+ + * | 0 |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 30 27-31 17-26 0-16 + */ + const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF); + /* + * Renorm shift is the number of bits to shift mantissa left to make the + * half-precision number normalized. If the initial number is normalized, some + * of its high 6 bits (sign == 0 and 5-bit exponent) equals one. In this case + * renorm_shift == 0. 
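+ * (For instance, for h = 0x3C00 - the encoding of 1.0h - nonsign ==
+ * 0x3C000000, an exponent bit among the high 6 bits is set, and
+ * renorm_shift comes out as 0.)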
If the number is denormalized, renorm_shift > 0. Note + * that if we shift denormalized nonsign by renorm_shift, the unit bit of + * mantissa will shift into exponent, turning the biased exponent into 1, and + * making mantissa normalized (i.e. without leading 1). + */ +#ifdef _MSC_VER + unsigned long nonsign_bsr; + _BitScanReverse(&nonsign_bsr, (unsigned long)nonsign); + uint32_t renorm_shift = (uint32_t)nonsign_bsr ^ 31; +#else + uint32_t renorm_shift = __builtin_clz(nonsign); +#endif + renorm_shift = renorm_shift > 5 ? renorm_shift - 5 : 0; + /* + * Iff the half-precision number has an exponent of 15, the addition overflows + * it into bit 31, and the subsequent shift turns the high 9 bits + * into 1. Thus inf_nan_mask == 0x7F800000 if the half-precision number + * had an exponent of 15 (i.e. was NaN or infinity), and 0x00000000 otherwise + */ + const int32_t inf_nan_mask = + ((int32_t)(nonsign + 0x04000000) >> 8) & INT32_C(0x7F800000); + /* + * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31 + * into 1. Otherwise, bit 31 remains 0. The signed shift right by 31 + * broadcasts bit 31 into all bits of the zero_mask. Thus zero_mask == + * 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h), and + * 0x00000000 otherwise + */ + const int32_t zero_mask = (int32_t)(nonsign - 1) >> 31; + /* + * 1. Shift nonsign left by renorm_shift to normalize it (if the input + * was denormal) + * 2. Shift nonsign right by 3 so the exponent (5 bits originally) + * becomes an 8-bit field and 10-bit mantissa shifts into the 10 high + * bits of the 23-bit mantissa of IEEE single-precision number. + * 3. Add 0x70 to the exponent (starting at bit 23) to compensate for the + * difference in exponent bias (0x7F for single-precision number less 0xF + * for half-precision number). + * 4. Subtract renorm_shift from the exponent (starting at bit 23) to + * account for renormalization. As renorm_shift is less than 0x70, this + * can be combined with step 3. + * 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the + * input was NaN or infinity. + * 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent + * into zero if the input was zero. + * 7. Combine with the sign of the input number. + */ + return sign | + ((((nonsign << renorm_shift >> 3) + ((0x70 - renorm_shift) << 23)) | + inf_nan_mask) & + ~zero_mask); +} + +/* + * Convert a 16-bit floating-point number in IEEE half-precision format, in bit + * representation, to a 32-bit floating-point number in IEEE single-precision + * format. + * + * @note The implementation relies on IEEE-like (no assumption about rounding + * mode and no operations on denormals) floating-point operations and bitcasts + * between integer and floating-point variables. + */ +inline float fp16_ieee_to_fp32_value(uint16_t h) { + /* + * Extend the half-precision floating-point number to 32 bits and shift to the + * upper part of the 32-bit word: + * +---+-----+------------+-------------------+ + * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 31 26-30 16-25 0-15 + * + * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 + * - zero bits.
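+ * For instance (an illustrative worked value): h = 0xC000, the encoding of
+ * -2.0h, becomes w = 0xC0000000 after this shift.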
+ */ + const uint32_t w = (uint32_t)h << 16; + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = w & UINT32_C(0x80000000); + /* + * Extract mantissa and biased exponent of the input number into the high bits + * of the 32-bit word: + * + * +-----+------------+---------------------+ + * |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000| + * +-----+------------+---------------------+ + * Bits 27-31 17-26 0-16 + */ + const uint32_t two_w = w + w; + + /* + * Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become + * mantissa and exponent of a single-precision floating-point number: + * + * S|Exponent | Mantissa + * +-+---+-----+------------+----------------+ + * |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000| + * +-+---+-----+------------+----------------+ + * Bits | 23-31 | 0-22 + * + * Next, there are some adjustments to the exponent: + * - The exponent needs to be corrected by the difference in exponent bias + * between single-precision and half-precision formats (0x7F - 0xF = 0x70) + * - Inf and NaN values in the inputs should become Inf and NaN values after + * conversion to the single-precision number. Therefore, if the biased + * exponent of the half-precision input was 0x1F (max possible value), the + * biased exponent of the single-precision output must be 0xFF (max possible + * value). We do this correction in two steps: + * - First, we adjust the exponent by (0xFF - 0x1F) = 0xE0 (see exp_offset + * below) rather than by 0x70 suggested by the difference in the exponent bias + * (see above). + * - Then we multiply the single-precision result of exponent adjustment by + * 2**(-112) to reverse the effect of exponent adjustment by 0xE0 less the + * necessary exponent adjustment by 0x70 due to difference in exponent bias. + * The floating-point multiplication hardware would ensure that Inf and + * NaN would retain their value on at least partially IEEE754-compliant + * implementations. + * + * Note that the above operations do not handle denormal inputs (where biased + * exponent == 0). However, they also do not operate on denormal inputs, and + * do not produce denormal results. + */ + constexpr uint32_t exp_offset = UINT32_C(0xE0) << 23; + // const float exp_scale = 0x1.0p-112f; + constexpr uint32_t scale_bits = (uint32_t)15 << 23; + float exp_scale_val; + std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val)); + const float exp_scale = exp_scale_val; + const float normalized_value = + fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; + + /* + * Convert denormalized half-precision inputs into single-precision results + * (always normalized). Zero inputs are also handled here. + * + * In a denormalized number the biased exponent is zero, and mantissa has + * non-zero bits. First, we shift mantissa into bits 0-9 of the 32-bit word. + * + * zeros | mantissa + * +---------------------------+------------+ + * |0000 0000 0000 0000 0000 00|MM MMMM MMMM| + * +---------------------------+------------+ + * Bits 10-31 0-9 + * + * Now, remember that denormalized half-precision numbers are represented as: + * FP16 = mantissa * 2**(-24). + * The trick is to construct a normalized single-precision number with the + * same mantissa as the half-precision input and with an exponent which would + * scale the corresponding mantissa bits to 2**(-24).
A normalized + * single-precision floating-point number is represented as: FP32 = (1 + + * mantissa * 2**(-23)) * 2**(exponent - 127). Therefore, when the biased + * exponent is 126, a unit change in the mantissa of the input denormalized + * half-precision number causes a change of the constructed single-precision + * number by 2**(-24), i.e. the same amount. + * + * The last step is to adjust the bias of the constructed single-precision + * number. When the input half-precision number is zero, the constructed + * single-precision number has the value of FP32 = 1 * 2**(126 - 127) = + * 2**(-1) = 0.5. Therefore, we need to subtract 0.5 from the constructed + * single-precision number to get the numerical equivalent of the input + * half-precision number. + */ + constexpr uint32_t magic_mask = UINT32_C(126) << 23; + constexpr float magic_bias = 0.5f; + const float denormalized_value = + fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; + + /* + * - Choose either the result of converting the input as a normalized number or + * as a denormalized number, depending on the input exponent. The variable + * two_w contains the input exponent in bits 27-31, therefore if it is smaller + * than 2**27, the input is either a denormal number, or zero. + * - Combine the result of conversion of exponent and mantissa with the sign + * of the input number. + */ + constexpr uint32_t denormalized_cutoff = UINT32_C(1) << 27; + const uint32_t result = sign | + (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) + : fp32_to_bits(normalized_value)); + return fp32_from_bits(result); +} + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a + * 16-bit floating-point number in IEEE half-precision format, in bit + * representation. + * + * @note The implementation relies on IEEE-like (no assumption about rounding + * mode and no operations on denormals) floating-point operations and bitcasts + * between integer and floating-point variables. + */ +inline uint16_t fp16_ieee_from_fp32_value(float f) { + // const float scale_to_inf = 0x1.0p+112f; + // const float scale_to_zero = 0x1.0p-110f; + constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23; + constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23; + float scale_to_inf_val, scale_to_zero_val; + std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val)); + std::memcpy( + &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val)); + const float scale_to_inf = scale_to_inf_val; + const float scale_to_zero = scale_to_zero_val; + +#if defined(_MSC_VER) && _MSC_VER == 1916 + float base = ((signbit(f) != 0 ? -f : f) * scale_to_inf) * scale_to_zero; +#else + float base = (fabsf(f) * scale_to_inf) * scale_to_zero; +#endif + + const uint32_t w = fp32_to_bits(f); + const uint32_t shl1_w = w + w; + const uint32_t sign = w & UINT32_C(0x80000000); + uint32_t bias = shl1_w & UINT32_C(0xFF000000); + if (bias < UINT32_C(0x71000000)) { + bias = UINT32_C(0x71000000); + } + + base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; + const uint32_t bits = fp32_to_bits(base); + const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); + const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); + const uint32_t nonsign = exp_bits + mantissa_bits; + return static_cast( + (sign >> 16) | + (shl1_w > UINT32_C(0xFF000000) ?
UINT16_C(0x7E00) : nonsign)); +} + +} // namespace detail + +struct alignas(2) Half { + unsigned short x; + + struct from_bits_t {}; + C10_HOST_DEVICE static constexpr from_bits_t from_bits() { + return from_bits_t(); + } + + // HIP wants __host__ __device__ tag, CUDA does not +#if defined(USE_ROCM) + C10_HOST_DEVICE Half() = default; +#else + Half() = default; +#endif + + constexpr C10_HOST_DEVICE Half(unsigned short bits, from_bits_t) : x(bits){}; + inline C10_HOST_DEVICE Half(float value); + inline C10_HOST_DEVICE operator float() const; + +#if defined(__CUDACC__) || defined(__HIPCC__) + inline C10_HOST_DEVICE Half(const __half& value); + inline C10_HOST_DEVICE operator __half() const; +#endif +#ifdef SYCL_LANGUAGE_VERSION + inline C10_HOST_DEVICE Half(const sycl::half& value); + inline C10_HOST_DEVICE operator sycl::half() const; +#endif +}; + +// TODO : move to complex.h +template <> +struct alignas(4) complex<Half> { + Half real_; + Half imag_; + + // Constructors + complex() = default; + // Half constructor is not constexpr so the following constructor can't + // be constexpr + C10_HOST_DEVICE explicit inline complex(const Half& real, const Half& imag) + : real_(real), imag_(imag) {} + C10_HOST_DEVICE inline complex(const c10::complex<float>& value) + : real_(value.real()), imag_(value.imag()) {} + + // Conversion operator + inline C10_HOST_DEVICE operator c10::complex<float>() const { + return {real_, imag_}; + } + + constexpr C10_HOST_DEVICE Half real() const { + return real_; + } + constexpr C10_HOST_DEVICE Half imag() const { + return imag_; + } + + C10_HOST_DEVICE complex<Half>& operator+=(const complex<Half>& other) { + real_ = static_cast<float>(real_) + static_cast<float>(other.real_); + imag_ = static_cast<float>(imag_) + static_cast<float>(other.imag_); + return *this; + } + + C10_HOST_DEVICE complex<Half>& operator-=(const complex<Half>& other) { + real_ = static_cast<float>(real_) - static_cast<float>(other.real_); + imag_ = static_cast<float>(imag_) - static_cast<float>(other.imag_); + return *this; + } + + C10_HOST_DEVICE complex<Half>& operator*=(const complex<Half>& other) { + auto a = static_cast<float>(real_); + auto b = static_cast<float>(imag_); + auto c = static_cast<float>(other.real()); + auto d = static_cast<float>(other.imag()); + real_ = a * c - b * d; + imag_ = a * d + b * c; + return *this; + } +}; + +// In some versions of MSVC, there will be a compiler error when building. +// C4146: unary minus operator applied to unsigned type, result still unsigned +// C4804: unsafe use of type 'bool' in operation +// It can be addressed by disabling the following warning. +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4146) +#pragma warning(disable : 4804) +#pragma warning(disable : 4018) +#endif + +// The overflow checks may involve float to int conversion which may +// trigger precision loss warning. Re-enable the warning once the code +// is fixed. See T58053069. +#ifdef __clang__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunknown-warning-option" +#pragma GCC diagnostic ignored "-Wimplicit-int-float-conversion" +#endif +
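+// A round-trip sanity sketch for the conversion helpers above (illustrative +// only, not part of the original header). It assumes the half-to-float +// routine defined earlier in this file is detail::fp16_ieee_to_fp32_value, +// as in upstream c10, and IEEE754 binary16/binary32 semantics: +// +// const uint16_t h = UINT16_C(0x3C00); // 1.0 in half precision +// const float f = detail::fp16_ieee_to_fp32_value(h); // == 1.0f +// const uint16_t h2 = detail::fp16_ieee_from_fp32_value(f); // == 0x3C00 +// const Half one(1.0f); // one.x == 0x3C00 + +// bool can be converted to any type.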
+// Without specializing on bool, in pytorch_linux_trusty_py2_7_9_build: +// `error: comparison of constant '255' with boolean expression is always false` +// for `f > limit::max()` below +template <typename To, typename From> +typename std::enable_if<std::is_same<From, bool>::value, bool>::type overflows( + From /*f*/) { + return false; +} + +// skip isnan and isinf check for integral types +template <typename To, typename From> +typename std::enable_if< + std::is_integral<From>::value && !std::is_same<From, bool>::value, + bool>::type +overflows(From f) { + using limit = std::numeric_limits<typename scalar_value_type<To>::type>; + if (!limit::is_signed && std::numeric_limits<From>::is_signed) { + // allow for negative numbers to wrap using two's complement arithmetic. + // For example, with uint8, this allows for `a - b` to be treated as + // `a + 255 * b`. + return greater_than_max<To>(f) || + (c10::is_negative(f) && -static_cast<uint64_t>(f) > limit::max()); + } else { + return c10::less_than_lowest<To>(f) || greater_than_max<To>(f); + } +} + +template <typename To, typename From> +typename std::enable_if<std::is_floating_point<From>::value, bool>::type +overflows(From f) { + using limit = std::numeric_limits<typename scalar_value_type<To>::type>; + if (limit::has_infinity && std::isinf(static_cast<double>(f))) { + return false; + } + if (!limit::has_quiet_NaN && (f != f)) { + return true; + } + return f < limit::lowest() || f > limit::max(); +} + +#ifdef __clang__ +#pragma GCC diagnostic pop +#endif + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +template <typename To, typename From> +typename std::enable_if<is_complex<From>::value, bool>::type overflows(From f) { + // casts from complex to real are considered to overflow if the + // imaginary component is non-zero + if (!is_complex<To>::value && f.imag() != 0) { + return true; + } + // Check for overflow componentwise + // (Technically, the imag overflow check is guaranteed to be false + // when !is_complex<To>, but any optimizer worth its salt will be + // able to figure it out.) + return overflows< + typename scalar_value_type<To>::type, + typename From::value_type>(f.real()) || + overflows< + typename scalar_value_type<To>::type, + typename From::value_type>(f.imag()); +} + +C10_API std::ostream& operator<<(std::ostream& out, const Half& value); + +} // namespace c10 + +#include <c10/util/Half-inl.h> // IWYU pragma: keep diff --git a/voice_bridge/torch/include/c10/util/IdWrapper.h b/voice_bridge/torch/include/c10/util/IdWrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..59b5088c270f82f3e215e95eb9a830a4c8fee505 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/IdWrapper.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +/** + * This template simplifies generation of simple classes that wrap an id + * in a typesafe way. Namely, you can use it to create a very lightweight + * type that only offers equality comparators and hashing. Example: + * + * struct MyIdType final : IdWrapper<MyIdType, uint32_t> { + * constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {} + * }; + * + * Then in the global top level namespace: + * + * C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType); + * + * That's it - equality operators and hash functions are automatically defined + * for you, given that the underlying type supports them.
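+ * + * A short usage sketch (illustrative only; MyIdType as above, and assuming + * C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType) has been invoked as described): + * + * std::unordered_set<MyIdType> seen; + * seen.insert(MyIdType(42)); + * const bool found = seen.count(MyIdType(42)) == 1; // true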
+ */ +template +class IdWrapper { + public: + using underlying_type = UnderlyingType; + using concrete_type = ConcreteType; + + protected: + constexpr explicit IdWrapper(underlying_type id) noexcept( + noexcept(underlying_type(std::declval()))) + : id_(id) {} + + constexpr underlying_type underlyingId() const + noexcept(noexcept(underlying_type(std::declval()))) { + return id_; + } + + private: + friend size_t hash_value(const concrete_type& v) { + return std::hash()(v.id_); + } + + // TODO Making operator== noexcept if underlying type is noexcept equality + // comparable doesn't work with GCC 4.8. + // Fix this once we don't need GCC 4.8 anymore. + friend constexpr bool operator==( + const concrete_type& lhs, + const concrete_type& rhs) noexcept { + return lhs.id_ == rhs.id_; + } + + // TODO Making operator!= noexcept if operator== is noexcept doesn't work with + // GCC 4.8. + // Fix this once we don't need GCC 4.8 anymore. + friend constexpr bool operator!=( + const concrete_type& lhs, + const concrete_type& rhs) noexcept { + return !(lhs == rhs); + } + + underlying_type id_; +}; + +} // namespace c10 + +#define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) \ + namespace std { \ + template <> \ + struct hash { \ + size_t operator()(ClassName x) const { \ + return hash_value(x); \ + } \ + }; \ + } diff --git a/voice_bridge/torch/include/c10/util/LeftRight.h b/voice_bridge/torch/include/c10/util/LeftRight.h new file mode 100644 index 0000000000000000000000000000000000000000..a399c61bef8c736e1fdd3c982dd6580abeb6d34d --- /dev/null +++ b/voice_bridge/torch/include/c10/util/LeftRight.h @@ -0,0 +1,223 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +namespace detail { + +struct IncrementRAII final { + public: + explicit IncrementRAII(std::atomic* counter) : _counter(counter) { + _counter->fetch_add(1); + } + + ~IncrementRAII() { + _counter->fetch_sub(1); + } + + private: + std::atomic* _counter; + + C10_DISABLE_COPY_AND_ASSIGN(IncrementRAII); +}; + +} // namespace detail + +// LeftRight wait-free readers synchronization primitive +// https://hal.archives-ouvertes.fr/hal-01207881/document +// +// LeftRight is quite easy to use (it can make an arbitrary +// data structure permit wait-free reads), but it has some +// particular performance characteristics you should be aware +// of if you're deciding to use it: +// +// - Reads still incur an atomic write (this is how LeftRight +// keeps track of how long it needs to keep around the old +// data structure) +// +// - Writes get executed twice, to keep both the left and right +// versions up to date. So if your write is expensive or +// nondeterministic, this is also an inappropriate structure +// +// LeftRight is used fairly rarely in PyTorch's codebase. If you +// are still not sure if you need it or not, consult your local +// C++ expert. +// +template +class LeftRight final { + public: + template + explicit LeftRight(const Args&... args) + : _counters{{{0}, {0}}}, + _foregroundCounterIndex(0), + _foregroundDataIndex(0), + _data{{T{args...}, T{args...}}}, + _writeMutex() {} + + // Copying and moving would not be threadsafe. + // Needs more thought and careful design to make that work. 
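+ // A minimal usage sketch (illustrative only). Reads are wait-free; each + // write runs writeFunc twice, once per internal copy, under a mutex: + // + // c10::LeftRight<std::vector<int>> lr; + // lr.write([](std::vector<int>& v) { v.push_back(1); }); + // size_t n = lr.read([](const std::vector<int>& v) { return v.size(); }); + // + // Hence the deleted copy and move operations below: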
+ LeftRight(const LeftRight&) = delete; + LeftRight(LeftRight&&) noexcept = delete; + LeftRight& operator=(const LeftRight&) = delete; + LeftRight& operator=(LeftRight&&) noexcept = delete; + + ~LeftRight() { + // wait until any potentially running writers are finished + { std::unique_lock lock(_writeMutex); } + + // wait until any potentially running readers are finished + while (_counters[0].load() != 0 || _counters[1].load() != 0) { + std::this_thread::yield(); + } + } + + template + auto read(F&& readFunc) const -> typename c10::invoke_result_t { + detail::IncrementRAII _increment_counter( + &_counters[_foregroundCounterIndex.load()]); + + return readFunc(_data[_foregroundDataIndex.load()]); + } + + // Throwing an exception in writeFunc is ok but causes the state to be either + // the old or the new state, depending on if the first or the second call to + // writeFunc threw. + template + auto write(F&& writeFunc) -> typename c10::invoke_result_t { + std::unique_lock lock(_writeMutex); + + return _write(writeFunc); + } + + private: + template + auto _write(const F& writeFunc) -> typename c10::invoke_result_t { + /* + * Assume, A is in background and B in foreground. In simplified terms, we + * want to do the following: + * 1. Write to A (old background) + * 2. Switch A/B + * 3. Write to B (new background) + * + * More detailed algorithm (explanations on why this is important are below + * in code): + * 1. Write to A + * 2. Switch A/B data pointers + * 3. Wait until A counter is zero + * 4. Switch A/B counters + * 5. Wait until B counter is zero + * 6. Write to B + */ + + auto localDataIndex = _foregroundDataIndex.load(); + + // 1. Write to A + _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex); + + // 2. Switch A/B data pointers + localDataIndex = localDataIndex ^ 1; + _foregroundDataIndex = localDataIndex; + + /* + * 3. Wait until A counter is zero + * + * In the previous write run, A was foreground and B was background. + * There was a time after switching _foregroundDataIndex (B to foreground) + * and before switching _foregroundCounterIndex, in which new readers could + * have read B but incremented A's counter. + * + * In this current run, we just switched _foregroundDataIndex (A back to + * foreground), but before writing to the new background B, we have to make + * sure A's counter was zero briefly, so all these old readers are gone. + */ + auto localCounterIndex = _foregroundCounterIndex.load(); + _waitForBackgroundCounterToBeZero(localCounterIndex); + + /* + * 4. Switch A/B counters + * + * Now that we know all readers on B are really gone, we can switch the + * counters and have new readers increment A's counter again, which is the + * correct counter since they're reading A. + */ + localCounterIndex = localCounterIndex ^ 1; + _foregroundCounterIndex = localCounterIndex; + + /* + * 5. Wait until B counter is zero + * + * This waits for all the readers on B that came in while both data and + * counter for B was in foreground, i.e. normal readers that happened + * outside of that brief gap between switching data and counter. + */ + _waitForBackgroundCounterToBeZero(localCounterIndex); + + // 6. Write to B + return _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex); + } + + template + auto _callWriteFuncOnBackgroundInstance( + const F& writeFunc, + uint8_t localDataIndex) -> typename c10::invoke_result_t { + try { + return writeFunc(_data[localDataIndex ^ 1]); + } catch (...) 
{ + // recover invariant by copying from the foreground instance + _data[localDataIndex ^ 1] = _data[localDataIndex]; + // rethrow + throw; + } + } + + void _waitForBackgroundCounterToBeZero(uint8_t counterIndex) { + while (_counters[counterIndex ^ 1].load() != 0) { + std::this_thread::yield(); + } + } + + mutable std::array, 2> _counters; + std::atomic _foregroundCounterIndex; + std::atomic _foregroundDataIndex; + std::array _data; + std::mutex _writeMutex; +}; + +// RWSafeLeftRightWrapper is API compatible with LeftRight and uses a +// read-write lock to protect T (data). +template +class RWSafeLeftRightWrapper final { + public: + template + explicit RWSafeLeftRightWrapper(const Args&... args) : data_{args...} {} + + // RWSafeLeftRightWrapper is not copyable or moveable since LeftRight + // is not copyable or moveable. + RWSafeLeftRightWrapper(const RWSafeLeftRightWrapper&) = delete; + RWSafeLeftRightWrapper(RWSafeLeftRightWrapper&&) noexcept = delete; + RWSafeLeftRightWrapper& operator=(const RWSafeLeftRightWrapper&) = delete; + RWSafeLeftRightWrapper& operator=(RWSafeLeftRightWrapper&&) noexcept = delete; + + template + auto read(F&& readFunc) const -> typename c10::invoke_result_t { + return data_.withLock( + [&readFunc](T const& data) { return readFunc(data); }); + } + + template + auto write(F&& writeFunc) -> typename c10::invoke_result_t { + return data_.withLock([&writeFunc](T& data) { return writeFunc(data); }); + } + + private: + c10::Synchronized data_; +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Load.h b/voice_bridge/torch/include/c10/util/Load.h new file mode 100644 index 0000000000000000000000000000000000000000..b1b07ed76d5682cb4a3ea6ce31aa6a491407c41e --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Load.h @@ -0,0 +1,38 @@ +#pragma once +#include +#include + +namespace c10 { +namespace detail { + +template +struct LoadImpl { + C10_HOST_DEVICE static T apply(const void* src) { + return *reinterpret_cast(src); + } +}; + +template <> +struct LoadImpl { + C10_HOST_DEVICE static bool apply(const void* src) { + static_assert(sizeof(bool) == sizeof(char), ""); + // NOTE: [Loading boolean values] + // Protect against invalid boolean values by loading as a byte + // first, then converting to bool (see gh-54789). + return *reinterpret_cast(src); + } +}; + +} // namespace detail + +template +C10_HOST_DEVICE T load(const void* src) { + return c10::detail::LoadImpl::apply(src); +} + +template +C10_HOST_DEVICE scalar_t load(const scalar_t* src) { + return c10::detail::LoadImpl::apply(src); +} + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Logging.h b/voice_bridge/torch/include/c10/util/Logging.h new file mode 100644 index 0000000000000000000000000000000000000000..b25d7841e3f402ab4d10c503efafa84d171df9c6 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Logging.h @@ -0,0 +1,313 @@ +#ifndef C10_UTIL_LOGGING_H_ +#define C10_UTIL_LOGGING_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off +// logging at compile time so no logging message below that level is produced +// at all. The value should be between INT_MIN and CAFFE_FATAL. +#ifndef CAFFE2_LOG_THRESHOLD +// If we have not defined the compile time log threshold, we keep all the +// log cases. +#define CAFFE2_LOG_THRESHOLD INT_MIN +#endif // CAFFE2_LOG_THRESHOLD + +// Below are different implementations for glog and non-glog cases. 
+#ifdef C10_USE_GLOG +#include <c10/util/logging_is_google_glog.h> +#else // !C10_USE_GLOG +#include <c10/util/logging_is_not_google_glog.h> +#endif // C10_USE_GLOG + +C10_DECLARE_int(caffe2_log_level); +C10_DECLARE_bool(caffe2_use_fatal_for_enforce); + +// Some versions of GLOG support a less-spammy version of LOG_EVERY_MS. If it's +// not available, just short-circuit to the always-working one. +// We define the C10_ name to avoid confusing other files. +#ifdef LOG_EVERY_MS +#define C10_LOG_EVERY_MS(severity, ms) LOG_EVERY_MS(severity, ms) +#else +#define C10_LOG_EVERY_MS(severity, ms) LOG(severity) +#endif + +// Same for LOG_FIRST_N +#ifdef LOG_FIRST_N +#define C10_LOG_FIRST_N(severity, n) LOG_FIRST_N(severity, n) +#else +#define C10_LOG_FIRST_N(severity, n) LOG(severity) +#endif + +// Same for LOG_EVERY_N +#ifdef LOG_EVERY_N +#define C10_LOG_EVERY_N(severity, n) LOG_EVERY_N(severity, n) +#else +#define C10_LOG_EVERY_N(severity, n) LOG(severity) +#endif + +namespace c10 { + +using std::string; + +// Functions that we use for initialization. +C10_API bool InitCaffeLogging(int* argc, char** argv); +C10_API void UpdateLoggingLevelsFromFlags(); + +[[noreturn]] C10_API void ThrowEnforceNotMet( + const char* file, + const int line, + const char* condition, + const std::string& msg, + const void* caller = nullptr); + +[[noreturn]] C10_API void ThrowEnforceNotMet( + const char* file, + const int line, + const char* condition, + const char* msg, + const void* caller = nullptr); + +[[noreturn]] C10_API inline void ThrowEnforceNotMet( + const char* file, + const int line, + const char* condition, + detail::CompileTimeEmptyString /*msg*/, + const void* caller = nullptr) { + ThrowEnforceNotMet(file, line, condition, "", caller); +} + +[[noreturn]] C10_API void ThrowEnforceFiniteNotMet( + const char* file, + const int line, + const char* condition, + const std::string& msg, + const void* caller = nullptr); + +[[noreturn]] C10_API void ThrowEnforceFiniteNotMet( + const char* file, + const int line, + const char* condition, + const char* msg, + const void* caller = nullptr); + +[[noreturn]] C10_API inline void ThrowEnforceFiniteNotMet( + const char* file, + const int line, + const char* condition, + detail::CompileTimeEmptyString /*msg*/, + const void* caller = nullptr) { + ThrowEnforceFiniteNotMet(file, line, condition, "", caller); +} + +constexpr bool IsUsingGoogleLogging() { +#ifdef C10_USE_GLOG + return true; +#else + return false; +#endif +} + +/** + * A utility to allow one to show log info to stderr after the program starts. + * + * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level + * to smaller than INFO. It is recommended to use this only in a few sparse + * cases, such as when writing a tutorial. Normally, use the command-line flags + * to set the log level. + */ +C10_API void ShowLogInfoToStderr(); + +C10_API void SetStackTraceFetcher(std::function<string(void)> fetcher); + +using EnforceNotMet = ::c10::Error; + +#define CAFFE_ENFORCE(condition, ...) \ + do { \ + if (C10_UNLIKELY(!(condition))) { \ + ::c10::ThrowEnforceNotMet( \ + __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \ + } \ + } while (false) + +#define CAFFE_ENFORCE_FINITE(condition, ...) \ + do { \ + if (C10_UNLIKELY(!(condition))) { \ + ::c10::ThrowEnforceFiniteNotMet( \ + __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__)); \ + } \ + } while (false) + +#define CAFFE_ENFORCE_WITH_CALLER(condition, ...)
\ + do { \ + if (C10_UNLIKELY(!(condition))) { \ + ::c10::ThrowEnforceNotMet( \ + __FILE__, __LINE__, #condition, ::c10::str(__VA_ARGS__), this); \ + } \ + } while (false) + +#define CAFFE_THROW(...) \ + ::c10::ThrowEnforceNotMet(__FILE__, __LINE__, "", ::c10::str(__VA_ARGS__)) + +/** + * Rich logging messages + * + * CAFFE_ENFORCE_THAT can be used with one of the "checker functions" that + * capture input argument values and add them to the exception message. E.g. + * `CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), "Optional additional message")` + * would evaluate both foo and bar only once and, if the results are not equal, + * include them in the exception message. + * + * Some of the basic checker functions like Equals or Greater are already + * defined below. Other headers might define customized checkers by adding + * functions to the caffe2::enforce_detail namespace. For example: + * + * namespace caffe2 { namespace enforce_detail { + * inline EnforceFailMessage IsVector(const vector<int64_t>& shape) { + * if (shape.size() == 1) { return EnforceOK(); } + * return c10::str("Shape ", shape, " is not a vector"); + * } + * }} + * + * With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))` + * + * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided + * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in + * user-provided input. + */ + +namespace enforce_detail { + +template <typename T1, typename T2> +std::string enforceFailMsgImpl(const T1& x, const T2& y) { + return c10::str(x, " vs ", y); +} + +template <typename T1, typename T2, typename... Args> +std::string enforceFailMsgImpl(const T1& x, const T2& y, const Args&... args) { + return c10::str(x, " vs ", y, ". ", args...); +} + +template <typename Pred, typename T1, typename T2, typename... Args> +void enforceThatImpl( + Pred p, + const T1& lhs, + const T2& rhs, + const char* file, + int line, + const char* expr, + const void* caller, + const Args&... args) { + if (C10_UNLIKELY(!(p(lhs, rhs)))) { + ::c10::ThrowEnforceNotMet( + file, + line, + expr, + ::c10::enforce_detail::enforceFailMsgImpl(lhs, rhs, args...), + caller); + } +} +#define CAFFE_ENFORCE_THAT_IMPL(op, lhs, rhs, expr, ...) \ + ::c10::enforce_detail::enforceThatImpl( \ + op, lhs, rhs, __FILE__, __LINE__, expr, nullptr, ##__VA_ARGS__) + +#define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(op, lhs, rhs, expr, ...) \ + ::c10::enforce_detail::enforceThatImpl( \ + op, (lhs), (rhs), __FILE__, __LINE__, expr, this, ##__VA_ARGS__) + +} // namespace enforce_detail + +#define CAFFE_ENFORCE_THAT(cmp, op, lhs, rhs, ...) \ + CAFFE_ENFORCE_THAT_IMPL(cmp, lhs, rhs, #lhs " " #op " " #rhs, ##__VA_ARGS__) + +#define CAFFE_ENFORCE_BINARY_OP(cmp, op, x, y, ...) \ + CAFFE_ENFORCE_THAT_IMPL(cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_EQ(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP(std::equal_to<void>(), ==, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_NE(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP(std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_LE(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP(std::less_equal<void>(), <=, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_LT(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP(std::less<void>(), <, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_GE(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP(std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_GT(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP(std::greater<void>(), >, x, y, ##__VA_ARGS__) + +#define CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(cmp, op, x, y, ...) \ + CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER( \ + cmp, x, y, #x " " #op " " #y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...)
\ + CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \ + std::equal_to<void>(), ==, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \ + std::not_equal_to<void>(), !=, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \ + std::less_equal<void>(), <=, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP_WITH_CALLER(std::less<void>(), <, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \ + std::greater_equal<void>(), >=, x, y, ##__VA_ARGS__) +#define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) \ + CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( \ + std::greater<void>(), >, x, y, ##__VA_ARGS__) + +/** + * Very lightweight logging for the first-time API usage. It's beneficial for + * tracking individual functionality usage in larger applications. + * + * To keep the logging lightweight, we use the static-variable trick: + * LogAPIUsage will be invoked only once, and further invocations will just do + * an atomic check. + * + * Example: + * // Logs caller info with an arbitrary text event, if there is a usage. + * C10_LOG_API_USAGE_ONCE("my_api"); + */ +#define C10_LOG_API_USAGE_ONCE(...) \ + C10_UNUSED static bool C10_ANONYMOUS_VARIABLE(logFlag) = \ + ::c10::detail::LogAPIUsageFakeReturn(__VA_ARGS__); + +// API usage logging capabilities +C10_API void SetAPIUsageLogger(std::function<void(const std::string&)> logger); +C10_API void LogAPIUsage(const std::string& context); + +// PyTorch ddp usage logging capabilities +// DDPLoggingData holds data that can be logged in applications +// for analysis and debugging. Data structure is defined in +// c10 directory so that it can be easily imported by both c10 +// and torch files. +struct DDPLoggingData { + // logging fields that are string types. + std::map<std::string, std::string> strs_map; + // logging fields that are int64_t types. + std::map<std::string, int64_t> ints_map; +}; + +C10_API void SetPyTorchDDPUsageLogger( + std::function<void(const DDPLoggingData&)> logger); +C10_API void LogPyTorchDDPUsage(const DDPLoggingData& ddpData); + +namespace detail { +// Return value is needed to do the static variable initialization trick +C10_API bool LogAPIUsageFakeReturn(const std::string& context); +} // namespace detail +
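+// Illustrative only (hypothetical function and values): typical use of the +// enforce macros above. On failure they throw c10::Error (EnforceNotMet) +// carrying the stringified condition and the formatted message. +// +// void check_index(int64_t i, int64_t size) { +// CAFFE_ENFORCE_GE(i, 0, "index must be non-negative, got ", i); +// CAFFE_ENFORCE_LT(i, size, "index ", i, " out of range for size ", size); +// } + +// Initializes the c10 logger.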
+C10_API void initLogging(); + +} // namespace c10 + +#endif // C10_UTIL_LOGGING_H_ diff --git a/voice_bridge/torch/include/c10/util/MathConstants.h b/voice_bridge/torch/include/c10/util/MathConstants.h new file mode 100644 index 0000000000000000000000000000000000000000..b0b0b4ab24755bcf6e9c04f12f508abe416f115e --- /dev/null +++ b/voice_bridge/torch/include/c10/util/MathConstants.h @@ -0,0 +1,134 @@ +#pragma once + +#include <c10/macros/Macros.h> +#include <c10/util/BFloat16.h> +#include <c10/util/Half.h> + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion") +#endif + +namespace c10 { +// TODO: Replace me with inline constexpr variable when C++17 becomes available +namespace detail { +template <typename T> +C10_HOST_DEVICE inline constexpr T e() { + return static_cast<T>(2.718281828459045235360287471352662); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T euler() { + return static_cast<T>(0.577215664901532860606512090082402); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T frac_1_pi() { + return static_cast<T>(0.318309886183790671537767526745028); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T frac_1_sqrt_pi() { + return static_cast<T>(0.564189583547756286948079451560772); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T frac_sqrt_3() { + return static_cast<T>(0.577350269189625764509148780501957); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T golden_ratio() { + return static_cast<T>(1.618033988749894848204586834365638); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T ln_10() { + return static_cast<T>(2.302585092994045684017991454684364); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T ln_2() { + return static_cast<T>(0.693147180559945309417232121458176); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T log_10_e() { + return static_cast<T>(0.434294481903251827651128918916605); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T log_2_e() { + return static_cast<T>(1.442695040888963407359924681001892); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T pi() { + return static_cast<T>(3.141592653589793238462643383279502); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T sqrt_2() { + return static_cast<T>(1.414213562373095048801688724209698); +} + +template <typename T> +C10_HOST_DEVICE inline constexpr T sqrt_3() { + return static_cast<T>(1.732050807568877293527446341505872); +} + +template <> +C10_HOST_DEVICE inline constexpr BFloat16 pi<BFloat16>() { + // According to + // https://en.wikipedia.org/wiki/Bfloat16_floating-point_format#Special_values + // pi is encoded as 4049 + return BFloat16(0x4049, BFloat16::from_bits()); +} + +template <> +C10_HOST_DEVICE inline constexpr Half pi<Half>() { + return Half(0x4248, Half::from_bits()); +} +} // namespace detail + +template <typename T> +constexpr T e = c10::detail::e<T>(); + +template <typename T> +constexpr T euler = c10::detail::euler<T>(); + +template <typename T> +constexpr T frac_1_pi = c10::detail::frac_1_pi<T>(); + +template <typename T> +constexpr T frac_1_sqrt_pi = c10::detail::frac_1_sqrt_pi<T>(); + +template <typename T> +constexpr T frac_sqrt_3 = c10::detail::frac_sqrt_3<T>(); + +template <typename T> +constexpr T golden_ratio = c10::detail::golden_ratio<T>(); + +template <typename T> +constexpr T ln_10 = c10::detail::ln_10<T>(); + +template <typename T> +constexpr T ln_2 = c10::detail::ln_2<T>(); + +template <typename T> +constexpr T log_10_e = c10::detail::log_10_e<T>(); + +template <typename T> +constexpr T log_2_e = c10::detail::log_2_e<T>(); + +template <typename T> +constexpr T pi = c10::detail::pi<T>(); + +template <typename T> +constexpr T sqrt_2 = c10::detail::sqrt_2<T>(); + +template <typename T> +constexpr T sqrt_3 = c10::detail::sqrt_3<T>(); +} // namespace c10 + +C10_CLANG_DIAGNOSTIC_POP()
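+// Illustrative only: the variable templates above are meant to be used as, +// for example, +// +// float tau = 2.0f * c10::pi<float>; +// double nats_per_bit = c10::ln_2<double>; + diff --git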
a/voice_bridge/torch/include/c10/util/MaybeOwned.h b/voice_bridge/torch/include/c10/util/MaybeOwned.h new file mode 100644 index 0000000000000000000000000000000000000000..a698e275c11934a56c663d2379984249161769b7 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/MaybeOwned.h @@ -0,0 +1,228 @@ +#pragma once + +#include <c10/macros/Macros.h> +#include <c10/util/Exception.h> +#include <c10/util/in_place.h> + +#include <type_traits> + +namespace c10 { + +/// MaybeOwnedTraits<T> describes how to borrow from T. Here is how we +/// can implement borrowing from an arbitrary type T using a raw +/// pointer to const: +template <typename T> +struct MaybeOwnedTraitsGenericImpl { + using owned_type = T; + using borrow_type = const T*; + + static borrow_type createBorrow(const owned_type& from) { + return &from; + } + + static void assignBorrow(borrow_type& lhs, borrow_type rhs) { + lhs = rhs; + } + + static void destroyBorrow(borrow_type& /*toDestroy*/) {} + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return *borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static bool debugBorrowIsValid(const borrow_type& borrow) { + return borrow != nullptr; + } +}; + +/// It is possible to eliminate the extra layer of indirection for +/// borrows for some types that we control. For examples, see +/// intrusive_ptr.h and TensorBody.h. + +template <typename T> +struct MaybeOwnedTraits; + +// Explicitly enable MaybeOwned<shared_ptr<T>>, rather than allowing +// MaybeOwned to be used for any type right away. +template <typename T> +struct MaybeOwnedTraits<std::shared_ptr<T>> + : public MaybeOwnedTraitsGenericImpl<std::shared_ptr<T>> {}; + +/// A smart pointer around either a borrowed or owned T. When +/// constructed with borrowed(), the caller MUST ensure that the +/// borrowed-from argument outlives this MaybeOwned. Compare to +/// Rust's std::borrow::Cow +/// (https://doc.rust-lang.org/std/borrow/enum.Cow.html), but note +/// that it is probably not suitable for general use because C++ has +/// no borrow checking. Included here to support +/// Tensor::expect_contiguous. +template <typename T> +class MaybeOwned final { + using borrow_type = typename MaybeOwnedTraits<T>::borrow_type; + using owned_type = typename MaybeOwnedTraits<T>::owned_type; + + bool isBorrowed_; + union { + borrow_type borrow_; + owned_type own_; + }; + + /// Don't use this; use borrowed() instead. + explicit MaybeOwned(const owned_type& t) + : isBorrowed_(true), borrow_(MaybeOwnedTraits<T>::createBorrow(t)) {} + + /// Don't use this; use owned() instead. + explicit MaybeOwned(T&& t) noexcept( + std::is_nothrow_move_constructible<T>::value) + : isBorrowed_(false), own_(std::move(t)) {} + + /// Don't use this; use owned() instead. + template <class... Args> + explicit MaybeOwned(in_place_t, Args&&... args) + : isBorrowed_(false), own_(std::forward<Args>(args)...) {} + + public: + explicit MaybeOwned() : isBorrowed_(true), borrow_() {} + + // Copying a borrow yields another borrow of the original, as with a + // T*. Copying an owned T yields another owned T for safety: no + // chains of borrowing by default! (Note you could get that behavior + // with MaybeOwned::borrowed(*rhs) if you wanted it.)
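+ // + // A minimal usage sketch (illustrative only; std::shared_ptr is one of the + // few types explicitly enabled above, and `owner` must outlive the borrow): + // + // auto owner = std::make_shared<int>(42); + // auto b = c10::MaybeOwned<std::shared_ptr<int>>::borrowed(owner); + // auto o = c10::MaybeOwned<std::shared_ptr<int>>::owned(std::make_shared<int>(7)); + // int sum = **b + **o; // 49 + // + // The copy constructor below implements exactly the behavior described above: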
+ MaybeOwned(const MaybeOwned& rhs) : isBorrowed_(rhs.isBorrowed_) { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + new (&own_) T(rhs.own_); + } + } + + MaybeOwned& operator=(const MaybeOwned& rhs) { + if (this == &rhs) { + return *this; + } + if (C10_UNLIKELY(!isBorrowed_)) { + if (rhs.isBorrowed_) { + own_.~T(); + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + isBorrowed_ = true; + } else { + own_ = rhs.own_; + } + } else { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + MaybeOwnedTraits::destroyBorrow(borrow_); + new (&own_) T(rhs.own_); + isBorrowed_ = false; + } + } + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_); + return *this; + } + + MaybeOwned(MaybeOwned&& rhs) noexcept( + std::is_nothrow_move_constructible::value) + : isBorrowed_(rhs.isBorrowed_) { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + new (&own_) T(std::move(rhs.own_)); + } + } + + MaybeOwned& operator=(MaybeOwned&& rhs) noexcept( + std::is_nothrow_move_assignable::value) { + if (this == &rhs) { + return *this; + } + if (C10_UNLIKELY(!isBorrowed_)) { + if (rhs.isBorrowed_) { + own_.~T(); + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + isBorrowed_ = true; + } else { + own_ = std::move(rhs.own_); + } + } else { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + MaybeOwnedTraits::destroyBorrow(borrow_); + new (&own_) T(std::move(rhs.own_)); + isBorrowed_ = false; + } + } + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_); + return *this; + } + + static MaybeOwned borrowed(const T& t) { + return MaybeOwned(t); + } + + static MaybeOwned owned(T&& t) noexcept( + std::is_nothrow_move_constructible::value) { + return MaybeOwned(std::move(t)); + } + + template + static MaybeOwned owned(in_place_t, Args&&... args) { + return MaybeOwned(in_place, std::forward(args)...); + } + + ~MaybeOwned() { + if (C10_UNLIKELY(!isBorrowed_)) { + own_.~T(); + } else { + MaybeOwnedTraits::destroyBorrow(borrow_); + } + } + + // This is an implementation detail! You should know what you're doing + // if you are testing this. If you just want to guarantee ownership move + // this into a T + bool unsafeIsBorrowed() const { + return isBorrowed_; + } + + const T& operator*() const& { + if (isBorrowed_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + MaybeOwnedTraits::debugBorrowIsValid(borrow_)); + } + return C10_LIKELY(isBorrowed_) + ? MaybeOwnedTraits::referenceFromBorrow(borrow_) + : own_; + } + + const T* operator->() const { + if (isBorrowed_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + MaybeOwnedTraits::debugBorrowIsValid(borrow_)); + } + return C10_LIKELY(isBorrowed_) + ? MaybeOwnedTraits::pointerFromBorrow(borrow_) + : &own_; + } + + // If borrowed, copy the underlying T. If owned, move from + // it. borrowed/owned state remains the same, and either we + // reference the same borrow as before or we are an owned moved-from + // T. 
+ T operator*() && { + if (isBorrowed_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + MaybeOwnedTraits::debugBorrowIsValid(borrow_)); + return MaybeOwnedTraits::referenceFromBorrow(borrow_); + } else { + return std::move(own_); + } + } +}; + +} // namespace c10 diff --git a/voice_bridge/torch/include/c10/util/Metaprogramming.h b/voice_bridge/torch/include/c10/util/Metaprogramming.h new file mode 100644 index 0000000000000000000000000000000000000000..1f7fcf363f396f41f03ed738e8fc5591ddd67bd9 --- /dev/null +++ b/voice_bridge/torch/include/c10/util/Metaprogramming.h @@ -0,0 +1,485 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { +namespace guts { + +/** + * Access information about result type or arguments from a function type. + * Example: + * using A = function_traits::return_type // A == int + * using A = function_traits::parameter_types::tuple_type + * // A == tuple + */ +template +struct function_traits { + static_assert( + !std::is_same::value, + "In function_traits, Func must be a plain function type."); +}; +template +struct function_traits { + using func_type = Result(Args...); + using return_type = Result; + using parameter_types = typelist::typelist; + static constexpr auto number_of_parameters = sizeof...(Args); +}; + +/** + * infer_function_traits: creates a `function_traits` type for a simple + * function (pointer) or functor (lambda/struct). Currently does not support + * class methods. + */ + +template +struct infer_function_traits { + using type = function_traits< + c10::guts::detail::strip_class_t>; +}; + +template +struct infer_function_traits { + using type = function_traits; +}; + +template +struct infer_function_traits { + using type = function_traits; +}; + +template +using infer_function_traits_t = typename infer_function_traits::type; + +/** + * make_function_traits: creates a `function_traits` type given a Return type + * and a typelist of Argument types + * + * Example: + * bool f(int, int); + * + * infer_function_traits_t == make_function_traits_t> + */ +template +struct make_function_traits { + static_assert( + false_t::value, + "In guts::make_function_traits, the ArgList argument must be typelist<...>."); +}; + +template +struct make_function_traits> { + using type = function_traits; +}; + +template +using make_function_traits_t = + typename make_function_traits::type; + +/** + * Use extract_arg_by_filtered_index to return the i-th argument whose + * type fulfills a given type trait. The argument itself is perfectly forwarded. + * + * Example: + * std::string arg1 = "Hello"; + * std::string arg2 = "World"; + * std::string&& result = extract_arg_by_filtered_index(0, + * arg1, 2.0, std::move(arg2)); + * + * Warning: Taking the result by rvalue reference can cause segfaults because + * ownership will not be passed on from the original reference. The original + * reference dies after the expression and the resulting + */ +namespace detail { +template < + template + class Condition, + size_t index, + class Enable, + class... Args> +struct extract_arg_by_filtered_index_; +template < + template + class Condition, + size_t index, + class Head, + class... Tail> +struct extract_arg_by_filtered_index_< + Condition, + index, + std::enable_if_t::value>, + Head, + Tail...> { + static decltype(auto) call(Head&& /*head*/, Tail&&... tail) { + return extract_arg_by_filtered_index_:: + call(std::forward(tail)...); + } +}; +template < + template + class Condition, + size_t index, + class Head, + class... 
Tail> +struct extract_arg_by_filtered_index_< + Condition, + index, + std::enable_if_t::value && index != 0>, + Head, + Tail...> { + static decltype(auto) call(Head&& /*head*/, Tail&&... tail) { + return extract_arg_by_filtered_index_:: + call(std::forward(tail)...); + } +}; +template